Merge branch 'beta/v1.6.5.3'

# Conflicts:
#	src/client/src/tscServer.c
#	src/util/src/version.c
slguan 2020-01-31 12:00:27 +08:00
commit 04a759d797
229 changed files with 22099 additions and 3748 deletions


@@ -26,6 +26,13 @@ SET(CMAKE_VERBOSE_MAKEFILE ON)
# open the file named TDengine.sln
#
SET(TD_GODLL FALSE)
IF (${DLLTYPE} MATCHES "go")
ADD_DEFINITIONS(-D_TD_GO_DLL_)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
SET(TD_GODLL TRUE)
ENDIF ()
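# Example invocation for the Go DLL build (an assumption, mirroring the
# CPUTYPE examples below): cmake -DDLLTYPE=go ..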
IF (NOT DEFINED TD_CLUSTER)
MESSAGE(STATUS "Build the Lite Version")
SET(TD_CLUSTER FALSE)
@@ -41,34 +48,43 @@ IF (NOT DEFINED TD_CLUSTER)
SET(TD_ARM FALSE)
SET(TD_ARM_64 FALSE)
SET(TD_ARM_32 FALSE)
SET(TD_MIPS FALSE)
SET(TD_MIPS_64 FALSE)
SET(TD_MIPS_32 FALSE)
SET(TD_DARWIN_64 FALSE)
SET(TD_WINDOWS_64 FALSE)
# if generate ARM version:
# cmake -DARMVER=arm32 .. or cmake -DARMVER=arm64
IF (${ARMVER} MATCHES "arm32")
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32")
SET(TD_ARM TRUE)
SET(TD_ARM_32 TRUE)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ELSEIF (${ARMVER} MATCHES "arm64")
ELSEIF (${CPUTYPE} MATCHES "aarch64")
SET(TD_ARM TRUE)
SET(TD_ARM_64 TRUE)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-D_TD_ARM_64_)
ELSEIF (${CPUTYPE} MATCHES "mips64")
SET(TD_MIPS TRUE)
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS(-D_TD_MIPS_)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ELSEIF (${CPUTYPE} MATCHES "x64")
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
ELSEIF (${CPUTYPE} MATCHES "x86")
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
ELSE ()
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
ENDIF ()
IF (TD_ARM)
ADD_DEFINITIONS(-D_TD_ARM_)
IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ELSEIF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_64_)
ELSE ()
EXIT ()
ENDIF ()
ENDIF ()
#
# Get OS information and store in variable TD_OS_INFO.
#
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
MESSAGE(STATUS "The current os is " ${TD_OS_INFO})
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8)
@@ -141,37 +157,51 @@ IF (NOT DEFINED TD_CLUSTER)
SET(RELEASE_FLAGS "-O0")
IF (NOT TD_ARM)
IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ELSE ()
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
ELSE ()
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
IF (${TD_OS_INFO} MATCHES "Alpine")
MESSAGE(STATUS "The current OS is Alpine, append extra flags")
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
ENDIF ()
ELSEIF (TD_LINUX_32)
IF (NOT TD_ARM)
EXIT ()
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (${TD_OS_INFO} MATCHES "Alpine")
MESSAGE(STATUS "The current OS is Alpine, add extra flags")
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
ENDIF ()
ELSEIF (TD_WINDOWS_64)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /GL")
ENDIF ()
ADD_DEFINITIONS(-DWINDOWS)
ADD_DEFINITIONS(-D__CLEANUP_C)
ADD_DEFINITIONS(-DPTW32_STATIC_LIB)
ADD_DEFINITIONS(-DPTW32_BUILD)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
ELSEIF (TD_DARWIN_64)
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-unused-variable -Wno-bitfield-constant-conversion")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
ADD_DEFINITIONS(-DDARWIN)
@@ -230,6 +260,7 @@ IF (NOT DEFINED TD_CLUSTER)
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})")
ELSEIF (TD_WINDOWS_64)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
IF (NOT TD_GODLL)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
@@ -245,6 +276,10 @@ IF (NOT DEFINED TD_CLUSTER)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSE ()
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
ENDIF ()
ENDIF ()
ENDIF ()


@@ -39,12 +39,18 @@ sudo apt-get install maven
```
Build TDengine:
```cmd
```
mkdir build && cd build
cmake .. && cmake --build .
```
If compiling on an aarch64 processor, you need to add one parameter:
```cmd
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
# Quick Run
To quickly start a TDengine server after building, run the command below in terminal:
```cmd

deps/iconv/iconv.c (vendored): 5 lines changed

@@ -175,7 +175,10 @@ static const struct alias sysdep_aliases[] = {
#ifdef __GNUC__
__inline
#endif
const struct alias *
// gcc -O0 bug fix
// see http://git.savannah.gnu.org/gitweb/?p=libiconv.git;a=blobdiff;f=lib/iconv.c;h=31853a7f1c47871221189dbf597473a16d8a8da7;hp=5a1a32597fa3efc5f69624d37a2eb96f308cd241;hb=b29089d8b43abc8fba073da7e6dccaeba56b2b70;hpb=0a04404c90d6a725b8b6bbcd65e10c5fcf5993e9
static const struct alias *
aliases2_lookup (register const char *str)
{
const struct alias * ptr;


@@ -114,23 +114,84 @@ public Connection getConn() throws Exception{
</ul>
<p>For errors raised by TDengine operations, users can use the enumeration class TSDBError.java provided in the JDBCDriver package to get the list of error messages and error codes. For more code covering specific operations, please refer to the demo project <code>JDBCDemo</code> provided with TDengine.</p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Python客户端安装'></a><h3>Installing the Python client</h3>
<p>Users can find the python2 and python3 installation packages in the src/connector/python directory of the source code, and install them with the pip command:</p>
<p> <code>pip install src/connector/python/python2/</code></p>
<p>or</p>
<p> <code>pip install src/connector/python/python3/</code></p>
<p>If the pip command is not available on the machine, users can copy the taos directory under src/connector/python/python3 or src/connector/python/python2 into the application directory and use it there.</p>
<a class='anchor' id='Python客户端接口'></a><h3>Python client interfaces</h3>
<p>To use TDengine's python interfaces, import the TDengine client module first:</p>
<pre><code>import taos </code></pre>
<p>Users can view usage information directly through python's help, or refer to the sample programs in code/examples/python. Some commonly used classes and methods are listed below:</p>
<a class='anchor' id='安装准备'></a><h3>Prerequisites</h3>
<li>TDengine installed; if the client runs on Windows, the Windows version of the TDengine client must also be installed</li>
<li>python 2.7 or >= 3.4 installed</li>
<li>pip installed</li>
<a class='anchor' id='安装'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>Users can find the python2 and python3 installation packages in the src/connector/python directory of the source code, then install them with the pip command:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>With the Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open a Windows <em>cmd</em> command-line window</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If the <em>pip</em> command is not available on the machine, users can copy the taos directory under src/connector/python/windows/python3 or src/connector/python/windows/python2 into the application directory and use it there. </p>
<a class='anchor' id='使用'></a><h3>Usage</h3>
<a class='anchor' id='代码示例'></a><h4>Code examples</h4>
<li>Import the TDengine client module</li>
<pre><code class="python language-python">import taos </code></pre>
<li>Get the connection</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host is the IP of the TDengine server, and config is the directory where the client configuration file is located</em></p>
<li>Insert data</li>
<pre><code>
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>Query the data</li>
<pre><code>
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# the result is a list; each row is one element of the list
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# or iterate over the cursor directly to fetch the query results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
</code></pre>
<li>Close the connection</li>
<pre><code>
c1.close()
conn.close()
</code></pre>
<a class='anchor' id='帮助信息'></a><h4>Help information</h4>
<p>Users can view usage information directly through python's help, or refer to the sample programs in code/examples/python. Some commonly used classes and methods are listed below:</p>
<ul>
<li><p><em>TaosConnection</em></p>
<p>See help(taos.TaosConnection) in python.</p></li>
<li><p><em>TaosCursor</em></p>
<p>See help(taos.TaosCursor) in python.</p></li>
<li><p><em>connect</em> method</p>
<p>Used to create an instance of taos.TaosConnection.</p></li>
<li><p><em>TaosConnection</em> </p>
<p>See <code>help(taos.TDengineConnection)</code> in python</p></li>
<li><p><em>TaosCursor</em> </p>
<p>See <code>help(taos.TDengineCursor)</code> in python</p></li>
<li><p>connect method</p>
<p>Used to create an instance of taos.TDengineConnection.</p></li>
</ul>
<a class='anchor' id='RESTful-Connector'></a><h2>RESTful Connector</h2>
<p>To support development on all kinds of platforms, TDengine provides an API conforming to REST design standards, i.e. a RESTful API. To minimize the learning cost, and unlike the RESTful API designs of other databases, TDengine operates the database directly through the SQL statement contained in the BODY of an HTTP POST request; only a single URL is needed. </p>
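<p>As a minimal illustration (a sketch: it assumes the REST endpoint path is /rest/sql on the httpPort 6020 mentioned elsewhere in these docs, with HTTP Basic authentication), a Java client can post a SQL statement like this:</p>
<pre><code class="java language-java">import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Base64;

public class RestSqlDemo {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://127.0.0.1:6020/rest/sql"); // assumed endpoint path
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        // HTTP Basic auth with the default user/password
        String auth = Base64.getEncoder().encodeToString("root:taosdata".getBytes("UTF-8"));
        conn.setRequestProperty("Authorization", "Basic " + auth);
        conn.setDoOutput(true);
        try (OutputStream os = conn.getOutputStream()) {
            os.write("select server_status()".getBytes("UTF-8")); // the SQL goes in the request BODY
        }
        System.out.println("HTTP status: " + conn.getResponseCode());
    }
}</code></pre>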


@@ -28,7 +28,7 @@
<p>In the TDengine shell, users can create and drop databases and tables, and insert and query data, with SQL commands. A SQL statement run in the shell must end with a semicolon. For example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, cdata int);
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;


@@ -122,15 +122,76 @@ public Connection getConn() throws Exception{
</ul>
<p>All the error codes and error messages can be found in <code>TSDBError.java</code> . For a more detailed coding example, please refer to the demo project <code>JDBCDemo</code> in TDengine's code examples. </p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Install-TDengine-Python-client'></a><h3>Install TDengine Python client</h3>
<p>Users can find python client packages in our source code directory <em>src/connector/python</em>. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use <em>pip</em> command to install:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/python2/</code></pre>
<a class='anchor' id='Pre-requirement'></a><h3>Pre-requirement</h3>
<li>TDengine installed, TDengine-client installed if on Windows</li>
<li>python 2.7 or >= 3.4</li>
<li>pip installed </li>
<a class='anchor' id='Installation'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>Users can find python client packages in our source code directory <em>src/connector/python</em>. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use <em>pip</em> command to install:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/python3/</code></pre>
<p>If <em>pip</em> command is not installed on the system, users can choose to install pip or just copy the <em>taos</em> directory in the python client directory to the application directory to use.</p>
<a class='anchor' id='Python-client-interfaces'></a><h3>Python client interfaces</h3>
<p>To use the TDengine Python client, import the TDengine module first:</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then enter the <em>cmd</em> Windows command interface</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If <em>pip</em> command is not installed on the system, users can choose to install pip or just copy the <em>taos</em> directory in the python client directory to the application directory to use.</p>
<a class='anchor' id='Usage'></a><h3>Usage</h3>
<a class='anchor' id='Examples'></a><h4>Examples</h4>
<li>import the TDengine module first:</li>
<pre><code class="python language-python">import taos </code></pre>
<li>get the connection</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host is the IP of the TDengine server, and config is the directory where the TDengine client configuration file is located</em></p>
<li>insert records into the database</li>
<pre><code>
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>query the database</li>
<pre><code>
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
</code></pre>
<li>close the connection</li>
<pre><code>
c1.close()
conn.close()
</code></pre>
<a class='anchor' id='Help information'></a><h4>Help information</h4>
<p>Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:</p>
<ul>
<li><p><em>TaosConnection</em> class</p>


@@ -28,7 +28,7 @@
<p>In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, cdata int);
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 10:00:00', 10);
insert into t values ('2019-07-15 10:01:05', 20);
select * from t;

(13 binary image files under the doc assets directory changed: 8 screenshots added, 5 removed; not shown)

@@ -34,7 +34,7 @@ TDengine can be integrated with [Grafana](https://www.grafana.com/), an open-source data visualization system
### Installing Grafana
TDengine currently supports Grafana 5.2.4 and above. Users can download an installation package for their operating system from the Grafana website and install it. Download link: https://grafana.com/grafana/download
### Configuring Grafana
@@ -42,43 +42,60 @@ TDengine's Grafana plugin is in the /usr/local/taos/connector/grafana directory of the installation package
Taking CentOS 7.2 as an example, copy the tdengine directory into /var/lib/grafana/plugins and restart grafana.
### Using Grafana
Users can log in to the Grafana server directly at localhost:3000 (username/password: admin/admin) and configure a TDengine data source as shown below; the TDengine data source then appears in the drop-down list.
#### Configuring the data source
![img](../assets/clip_image001.png)
Users can log in to the Grafana server at localhost:3000 (username/password: admin/admin) and add a data source via `Configuration -> Data Sources` on the left, as shown below:
In the HTTP settings of the TDengine data source, set Host to the IP address of any server in the TDengine cluster plus the port number of the TDengine RESTful interface (6020). If TDengine and Grafana are deployed on the same machine, enter http://localhost:6020.
![img](../assets/add_datasource1.jpg)
In addition, configure the user name and password for logging in to TDengine, then click the Save&Test button shown below to save.
Click `Add data source` to open the new data source page, type TDengine in the search box and select it, as shown below:
![img](../assets/clip_image001-2474914.png)
![img](../assets/add_datasource2.jpg)
Open the data source configuration page and adjust the settings according to the default prompts:
![img](../assets/add_datasource3.jpg)
The newly created TDengine data source then appears in Grafana's data source list:
* Host: the IP address of any server in the TDengine cluster plus the port number of the TDengine RESTful interface (6020); default http://localhost:6020.
* User: TDengine user name.
* Password: TDengine user password.
![img](../assets/clip_image001-2474939.png)
Click `Save & Test` to run a test; on success the following prompt appears:
![img](../assets/add_datasource4.jpg)
#### Creating a Dashboard
With the steps above, the TDengine data source can be used when creating a Dashboard, as shown below:
Back on the main page, create a Dashboard and click Add Query to open the panel query page:
![img](../assets/clip_image001-2474961.png)
![img](../assets/create_dashboard1.jpg)
As shown above, select the `TDengine` data source under Query, then enter the SQL to run in the query box below. Details:
* INPUT SQL: the statement to query (the result set of this SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to, and interval are built-in variables of the TDengine plugin, representing the query range and time interval taken from the Grafana panel. Besides the built-in variables, `custom template variables are also supported`.
* ALIAS BY: sets an alias for the current query.
* GENERATE SQL: clicking this button automatically substitutes the variables and generates the statement that will actually be executed.
Then click the Add Query button to add a new query.
Following the default prompts, a query for the average system memory usage, at the specified interval, of the server hosting the current TDengine deployment looks like this:
In the INPUT SQL box, enter the query SQL statement; the result set of this SQL statement should be a curve of two rows and multiple columns, for example: SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<to interval(interval), where from, to and interval are built-in variables of the TDengine plugin, representing the query range and time interval taken from the Grafana plugin panel.
![img](../assets/create_dashboard2.jpg)
The ALIAS BY box takes an alias for the query; clicking the GENERATE SQL button shows the SQL statement sent to TDengine, as shown below:
> For how to create monitoring dashboards with Grafana and more information on using Grafana, please refer to the official Grafana [documentation](https://grafana.com/docs/).
![img](../assets/clip_image001-2474987.png)
#### Importing a Dashboard
An importable dashboard, `tdengine-grafana.json`, is provided under the Grafana plugin directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left and upload the `tdengine-grafana.json` file:
![img](../assets/import_dashboard1.jpg)
After the import completes, the result looks like this:
![img](../assets/import_dashboard2.jpg)
For how to create monitoring dashboards with Grafana and more information on using Grafana, please refer to the official Grafana [documentation](https://grafana.com/docs/).
## Matlab


@@ -198,56 +198,104 @@ For the time being, TDengine supports subscription on one table. It is implement
## Java Connector
### JDBC Interface
To Java developers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1].
TDengine provides a JDBC driver `taos-jdbcdriver-x.x.x.jar` for enterprise Java developers. TDengine's JDBC driver is implemented as a subset of the standard JDBC 3.0 specification and supports the most common Java development frameworks. The driver has been published to dependency repositories such as the Sonatype Maven Repository, and users can refer to the following `pom.xml` configuration file.
Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver:
* libtaos.so (Linux)
After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path.
* taos.dll (Windows)
After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path.
> Note: Please make sure that TDengine Windows client has been installed if developing on Windows.
Since TDengine is a time-series database, there are some differences from traditional databases when using the TDengine JDBC driver:
* TDengine doesn't allow deleting or modifying a single record, so the JDBC driver has no such methods
* No support for transactions
* No support for union between tables
* No support for nested queries: `there is at most one open ResultSet per Connection; the JDBC driver closes the current ResultSet if it is still open when a new query begins`
## Version list of TAOS-JDBCDriver and required TDengine and JDK
| taos-jdbcdriver | TDengine | JDK |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x or higher | 1.8.x |
| 1.0.2 | 1.6.1.x or higher | 1.8.x |
| 1.0.1 | 1.6.1.x or higher | 1.8.x |
## DataType in TDengine and Java
The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java:
| TDengine | Java |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to get TAOS-JDBC Driver
### maven repository
taos-jdbcdriver has been published to [Sonatype Repository][1]:
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
Use the following pom.xml for maven projects:
```xml
<repositories>
<repository>
<id>oss-sonatype</id>
<name>oss-sonatype</name>
<url>https://oss.sonatype.org/content/groups/public</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>1.0.1</version>
<version>1.0.3</version>
</dependency>
</dependencies>
```
Please note the JDBC driver itself relies on a native library written in C. On a Linux OS, the driver relies on a `libtaos.so` native library, where .so stands for "Shared Object". After the successful installation of TDengine on Linux, `libtaos.so` should be automatically copied to `/usr/local/lib/taos` and added to the system's default search path. On a Windows OS, the driver relies on a `taos.dll` native library, where .dll stands for "Dynamic Link Library". After the successful installation of the TDengine client on Windows, the `taos-jdbcdriver.jar` file can be found in `C:/TDengine/driver/JDBC`; the `taos.dll` file can be found in `C:/TDengine/driver/C` and should have been automatically copied to the system's search path `C:/Windows/System32`.
### JAR file from the source code
Developers can refer to the Oracle's official JDBC API documentation for detailed usage on classes and methods. However, there are some differences of connection configurations and supported methods in the driver implementation between TDengine and traditional relational databases.
After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated.
For database connections, TDengine's JDBC driver has the following configurable parameters in the JDBC URL. The standard format of a TDengine JDBC URL is:
## Usage
`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
where `{}` marks the required parameters and `[]` marks the optional. The usage of each parameter is pretty straightforward:
* user - login user name for TDengine; by default, it's `root`
* password - login password; by default, it's `taosdata`
* charset - the client-side charset; by default, it's the operation system's charset
* cfgdir - the directory of the TDengine client configuration file; by default it's `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows
* locale - the language environment of TDengine client; by default, it's the operation system's locale
* timezone - the timezone of the TDengine client; by default, it's the operation system's timezone
All parameters can be configured at the time when creating a connection using the java.sql.DriverManager class, for example:
### get the connection
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> `6030` is the default port and `log` is the default database for system monitor.
A normal JDBC URL looks as follows:
`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
values in `{}` are required while values in `[]` are optional. Each option in the above URL denotes:
* user: user name for login; by default root.
* password: password for login; by default taosdata.
* charset: charset for the client; by default the system charset.
* cfgdir: configuration directory for the client; by default _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows.
* locale: language for the client; by default the system locale.
* timezone: timezone for the client; by default the system timezone.
The options above can be configured (ordered by priority):
1. JDBC URL
As explained above.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata";
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
@@ -260,42 +308,294 @@ public Connection getConn() throws Exception{
}
```
Except `cfgdir`, all the parameters listed above can also be configured in the configuration file. The properties specified when calling DriverManager.getConnection() have the highest priority among all configuration methods. The JDBC URL has the second-highest priority, and the configuration file has the lowest priority. The explicitly configured parameters in a method with higher priority always overwrite the same parameter configured in methods with lower priority. For example, if `charset` is explicitly configured as "UTF-8" in the JDBC URL and "GBK" in the `taos.cfg` file, then "UTF-8" will be used (see the sketch after this list).
3. Configuration file (taos.cfg)
Although the JDBC driver is implemented following the JDBC standard as much as possible, there are major differences between TDengine and traditional databases in terms of data models that lead to differences in the driver implementation. Here is a list of heads-ups for developers who have plenty of experience with traditional databases but little with TDengine:
Default configuration file is _/var/lib/taos/taos.cfg_ On Linux and _C:\TDengine\cfg\taos.cfg_ on Windows
```properties
# client default username
# defaultUser root
* TDengine does NOT support updating or deleting a specific record, which leads to some unsupported methods in the JDBC driver
* TDengine currently does not support `join` or `union` operations, and thus lacks support for the associated methods in the JDBC driver
* TDengine supports batch insertions which are controlled at the level of SQL statement writing instead of API calls
* TDengine doesn't support nested queries and neither does the JDBC driver. Thus for each established connection to TDengine, there should be only one open result set associated with it
# client default password
# defaultPass taosdata
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
> More options can refer to [client configuration][13]
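As a quick sketch of the priority rules above (it assumes the driver exposes a `PROPERTY_KEY_CHARSET` constant alongside the USER/PASSWORD keys used earlier; treat the constant name as an assumption):
```java
// A charset passed via Properties overrides the charset in the JDBC URL,
// which in turn overrides the value in taos.cfg.
// The driver class is loaded with Class.forName as in the earlier example.
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata&charset=GBK";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); // wins over GBK in the URL
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
```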
### Create databases and tables
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: without a `use db` statement, the database name must be prefixed to the table name, as in _db.tb_, when operating on tables
### Insert data
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
> _now_ is the server time.
> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year).
### Query database
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
ts = resultSet.getTimestamp(1);
temperature = resultSet.getInt(2);
humidity = resultSet.getFloat("humidity");
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Queries are consistent with relational databases. Column subscripts start at 1 when retrieving results; using column names to retrieve results is recommended.
### Close all
```java
resultSet.close();
stmt.close();
conn.close();
```
> `please make sure the connection is closed to avoid errors like connection leakage`
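A simple way to guarantee this (a sketch using plain JDBC try-with-resources, with the hypothetical `jdbcUrl` and `tb` names from the examples above):
```java
// try-with-resources closes ResultSet, Statement and Connection automatically,
// even if an exception is thrown inside the block
try (Connection conn = DriverManager.getConnection(jdbcUrl);
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from tb")) {
    while (rs.next()) {
        System.out.println(rs.getTimestamp(1));
    }
} // all three resources are closed here
```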
## Using connection pool
**HikariCP**
* dependence in pom.xml
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait in milliseconds to get a connection from the pool
config.setIdleTimeout(60000); // maximum idle time before an idle connection is recycled
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool.
> More instructions can refer to [User Guide][5]
**Druid**
* dependency in pom.xml
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> More instructions can refer to [User Guide][6]
**Notice**
* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`.
As shown below, `1` is returned if `select server_status()` executes successfully.
```shell
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
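In application code the same check can be run through JDBC (a sketch; it assumes an open Connection `conn` as in the examples above):
```java
// Lightweight health check: server_status() returns 1 when the server is healthy
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select server_status()")) {
    if (rs.next() && rs.getInt(1) == 1) {
        System.out.println("TDengine server is alive");
    }
}
```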
## Integrated with framework
* Please refer to [SpringJdbcTemplate][11] if using taos-jdbcdriver in Spring JdbcTemplate
* Please refer to [springbootdemo][12] if using taos-jdbcdriver in Spring Boot
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: The application cannot find the native library _taos_
**Answer**: On Windows, copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\`; on Linux, make a soft link via `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**Cause**: Currently TDengine only supports 64-bit JDK
**Answer**: Reinstall a 64-bit JDK.
* For other questions, please refer to [Issues][7]
All the error codes and error messages can be found in `TSDBError.java` . For a more detailed coding example, please refer to the demo project `JDBCDemo` in TDengine's code examples.
## Python Connector
### Install TDengine Python client
### Pre-requirement
* TDengine installed, TDengine-client installed if on Windows [(Windows TDengine client installation)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
* python 2.7 or >= 3.4
* pip installed
Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use _pip_ command to install:
### Installation
#### Linux
Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use _pip_ command to install:
```cmd
pip install src/connector/python/[linux|Windows]/python2/
pip install src/connector/python/linux/python3/
```
or
```
pip install src/connector/python/[linux|Windows]/python3/
pip install src/connector/python/linux/python2/
```
#### Windows
Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then enter the _cmd_ Windows command interface
```
cd C:\TDengine\connector\python\windows
pip install python3\
```
or
```
cd C:\TDengine\connector\python\windows
pip install python2\
```
If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use.
### Python client interfaces
To use the TDengine Python client, import the TDengine module first:
### Usage
#### Examples
* import TDengine module
```python
import taos
```
* get the connection
```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* _host_ is the IP of the TDengine server, and _config_ is the directory where the TDengine client configuration file is located
* insert records into the database
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```
* query the database
```python
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
```
* close the connection
```python
c1.close()
conn.close()
```
#### Help information
Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:
@@ -569,3 +869,17 @@ An example of using the NodeJS connector to create a table with weather data and
An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE


@@ -1,6 +1,8 @@
# TAOS SQL
TDengine provides a SQL-like syntax. Users can run SQL statements in the TDengine Shell, and can also execute SQL statements from programs in C/C++, Java (JDBC), Python, Go, and so on.
This document describes the syntax rules supported by TAOS SQL, the main query features, the supported SQL query functions, and common tips. Reading this document requires a basic knowledge of SQL.
TAOS SQL is the main tool for writing data into and querying data from TDengine. To make it easy to get started, TAOS SQL provides, to a certain extent, a style and mode similar to standard SQL. Strictly speaking, TAOS SQL is not, and does not attempt to provide, standard SQL syntax. In addition, since TDengine targets time-series structured data and provides no modify or update capability, TAOS SQL provides no data update or data deletion features.
The SQL syntax in this chapter follows these conventions:
@@ -9,15 +11,46 @@ TDengine provides a SQL-like syntax; users can run SQL statements in the TDengine Shell
- | means one of several options; choose one, but do not enter | itself
- … means the preceding item can be repeated multiple times
To better illustrate the rules and characteristics of the SQL syntax, this document assumes a data set. The data set models two types of devices: temperature (humidity) sensors and pressure (altitude) sensors.
For temperature sensors, there is a super table temp_stable. Its data model is as follows:
```
taos> describe temp_stable;
Field | Type | Length | Note |
=======================================================================================================
ts |TIMESTAMP | 8 | |
temperature |FLOAT | 4 | |
humidity |TINYINT | 1 | |
status |TINYINT | 1 | |
deviceid |BIGINT | 12 |tag |
location |BINARY | 20 |tag |
```
The data set contains data from 2 temperature sensors, corresponding to 2 sub-tables under TDengine's modeling rules, named temp_tb_1 and temp_tb_2.
For pressure (altitude) sensors, there is a super table pressure_stable. Its data model is as follows:
The data set contains data from 2 pressure sensors, corresponding to 2 sub-tables, press_tb_1 and press_tb_2.
```text
taos> describe pressure_stable;
Field | Type | Length | Note |
=======================================================================================================
ts |TIMESTAMP | 8 | |
height |FLOAT | 4 | |
pressure |FLOAT | 4 | |
devstat |TINYINT | 1 | |
id |BIGINT | 8 |tag |
city |NCHAR | 20 |tag |
longitude |FLOAT | 4 |tag |
latitude |FLOAT | 4 |tag |
```
## Supported data types
The most important concept in using TDengine is the timestamp. A timestamp must be specified when creating tables and inserting records, and when querying historical records. Timestamps follow these rules:
- The time format is YYYY-MM-DD HH:mm:ss.MS; the default time resolution is milliseconds. For example: 2017-08-12 18:25:58.128
- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, with a default time resolution of milliseconds. For example: ```2017-08-12 18:25:58.128```
- The internal function now returns the current time of the server
- When inserting a record, a timestamp of 0 means the server's current time is used
- Epoch Time: a timestamp can also be a long integer holding the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted; for example, now-2h means 2 hours before the query moment (the last 2 hours). Time units after the number: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one full week of data from two weeks ago
- TDengine does not yet support time windows split by natural year or natural month. The conversion of time window units in a Where clause is: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
TDengine's default timestamp precision is milliseconds, but microseconds can be enabled via the configuration parameter enableMicrosecond.
@@ -26,13 +26,13 @@ TDengine's default timestamp precision is milliseconds, but microseconds can be enabled via the configuration parameter enableMicrosecond
| | Type | Bytes | Description |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp. Minimum precision: milliseconds. Counted from 1970-01-01 00:00:00.000 (UTC/GMT); times earlier than that are not allowed. |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^59, 2^59] |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 6 | BINARY | custom | Strings of up to 504 bytes. binary accepts string input only, with single quotes around the string; otherwise all letters are automatically converted to lowercase. A size must be specified: binary(20) defines a string of at most 20 characters, each occupying 1 byte of storage; longer strings are automatically truncated. A single quote inside a string is written with the escape sequence backslash plus single quote: **\'**. |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767] |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127] |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is used for NULL |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is used for NULL |
| 9 | BOOL | 1 | Boolean, {true, false} |
| 10 | NCHAR | custom | Non-ASCII strings such as Chinese characters. Each nchar character occupies 4 bytes of storage. Strings are quoted with single quotes; a single quote inside a string is written with the escape sequence **\'**. A size must be specified: a column of type nchar(10) stores at most 10 nchar characters and always occupies 40 bytes. Longer strings are automatically truncated. |
@@ -164,19 +197,172 @@ TDengine's default timestamp precision is milliseconds, but microseconds can be enabled via the configuration parameter enableMicrosecond
## Data query
###Query syntax
### Query syntax:
```mysql
SELECT {* | expr_list} FROM tb_name
[WHERE where_condition]
[ORDER BY _c0 { DESC | ASC }]
[LIMIT limit [, OFFSET offset]]
[>> export_file]
SELECT [DISTINCT] select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[HAVING expr_list]
[SLIMIT limit_val [, SOFFSET offset_val]]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
```
#### SELECT clause
A select clause may be part of a union query (UNION) or a subquery (SUBQUERY) of another query:
SELECT function_list FROM tb_name
[WHERE where_condition]
[LIMIT limit [, OFFSET offset]]
[>> export_file]
##### Wildcards
The wildcard * denotes all columns. For normal tables, the result contains normal columns only.
```
taos> select * from temp_tb_1;
ts | temperature |humidity|status|
============================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |
```
For super tables, the wildcard also includes the _tag columns_:
```
taos> select * from temp_stable;
ts | temperature |humidity|status| deviceid | location |
==============================================================================================
19-04-28 14:22:07.000| 21.00000 | 37 | 1 |54197 |beijing |
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |91234 |beijing |
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |91234 |beijing |
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |91234 |beijing |
```
The wildcard supports a table-name prefix; the following two SQL statements both return all columns:
```
select * from temp_tb_1;
select temp_tb_1.* from temp_tb_1;
```
In a Join query, a prefixed \* and an unprefixed \* return different results: \* returns all column data of all tables (excluding tags), while a prefixed wildcard returns only that table's column data.
```
taos> select * from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
ts | temperature |humidity|status| ts | temperature |humidity|status|
========================================================================================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 | 19-04-28 14:22:07.000| 21.00000 | 37 | 1 |
```
```
taos> select temp_tb_1.* from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
ts | temperature |humidity|status|
============================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
```
When running queries with SQL functions, some SQL functions support wildcards. The difference is:
the ```count(\*)``` function returns only one column, while ```first```, ```last``` and ```last_row``` return all columns.
```
taos> select count(*) from temp_tb_1;
count(*) |
======================
1 |
```
```
taos> select first(*) from temp_tb_1;
first(ts) | first(temperature) |first(humidity)|first(status)|
==========================================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
```
#### Result set column names
In a ```SELECT``` clause, if no column names are specified for the result set, the result set column names default to the expression names in the ```SELECT``` clause. Users can also use ```AS``` to rename columns in the result set. For example:
```
taos> select ts, ts as primary_key_ts from temp_tb_1;
ts | primary_key_ts |
==============================================
19-04-28 14:22:07.000| 19-04-28 14:22:07.000|
```
However, ```first(*)```, ```last(*)``` and ```last_row(*)``` do not support renaming of individual columns.
#### The DISTINCT modifier*
DISTINCT can only be applied to tag columns (TAGS), not to normal columns, to produce de-duplicated results; after applying ```DISTINCT```, only a single tag column can be output.
```count(distinct column_name)``` returns the number of distinct results; the result is an approximation.
#### Implicit result columns
```Select_exprs``` can be a column name of the table, or a function expression or calculation based on columns, up to a limit of 256. When an ```interval``` or ```group by tags``` clause is used, the timestamp column (the first column) and the tag columns of the group by clause are forcibly returned in the final result. A later version may support turning off the implicit output of group by columns, so that the output is fully controlled by the select clause.
#### Table (super table) list
The FROM keyword may be followed by a list of tables (super tables) or by the result of a subquery.
If the user's current database is not specified, the database name can be prefixed to the table name to indicate which database a table belongs to, e.g. ```sample.temp_tb_1``` for cross-database access.
```
SELECT * FROM sample.temp_tb_1;
------------------------------
use sample;
SELECT * FROM temp_tb_1;
```
Tables in the From clause can be given aliases to keep the SQL simpler:
```
SELECT t.ts FROM temp_tb_1 t ;
```
> Table aliases in the FROM clause are not yet supported
#### Special functions
Some special query functions can be run without a FROM clause. Get the current database with database():
```
taos> SELECT database();
database() |
=================================
sample |
```
If no default database was specified at login and the ```use``` command has not been used to switch databases, NULL is returned.
```
taos> select database();
database() |
=================================
NULL |
```
Get the server and client version numbers:
```
SELECT client_version()
SELECT server_version()
```
Server status check statement. If the server is healthy, a number is returned (e.g. 1); if the server is abnormal, an error code is returned. This SQL syntax is compatible with connection pools checking TDengine's status and with third-party tools checking the database server's status, and it avoids connection loss in connection pools caused by using a wrong heartbeat-check SQL statement.
```
SELECT server_status()
SELECT server_status() AS result
```
#### Special keywords in TAOS SQL
> TBNAME: in a super table query, TBNAME can be treated as a special tag holding the name of the sub-table a row comes from<br>
\_c0: denotes the first column of a table (super table)
#### Tips
Get all sub-table names and related tag information of a super table:
```
SELECT TBNAME, location FROM temp_stable
```
Count the number of sub-tables under a super table:
```
SELECT COUNT(TBNAME) FROM temp_stable
```
Both queries above only support filter conditions on tags (TAGS) in the Where clause. For example:
```
taos> select count(tbname) from temp_stable;
count(tbname) |
======================
2 |
taos> select count(tbname) from temp_stable where deviceid > 60000;
count(tbname) |
======================
1 |
```
- You can use * to return all columns, or specify column names. Arithmetic can be applied to numeric columns, and output columns can be given names


@@ -2,15 +2,15 @@
## Directory and file structure
After TDengine is installed, the following directories or files are generated in the operating system by default:
During installation, the installer creates the following directories or files in the operating system:
| Directory/File | Description |
| ---------------------- | :------------------------------------------------|
| /etc/taos/taos.cfg | TDengine default [configuration file] |
| /usr/local/taos/driver | TDengine dynamic library directory |
| /var/lib/taos | TDengine default data file directory; location configurable via the [configuration file]. |
| /var/log/taos | TDengine default log file directory; location configurable via the [configuration file] |
| /usr/local/taos/bin | TDengine executables directory |
| /etc/taos/taos.cfg | default [configuration file] |
| /usr/local/taos/driver | dynamic library directory |
| /var/lib/taos | default data file directory; location configurable via the [configuration file]. |
| /var/log/taos | default log file directory; location configurable via the [configuration file] |
| /usr/local/taos/bin | executables directory |
### Executables
@@ -19,33 +19,126 @@ All TDengine executables are stored in the _/usr/local/taos/bin_ directory by default
- _taosd_: TDengine server executable
- _taos_: TDengine Shell executable
- _taosdump_: data export tool
- *rmtaos*: a script to uninstall TDengine; run with caution
- *rmtaos*: a script to uninstall TDengine. It deletes all programs and data files. Run it with great caution; it is not recommended unless strictly necessary.
Different data and log directories can be configured by modifying the system configuration file taos.cfg.
## Server configuration
TDengine's background service is provided by taosd; its configuration parameters can be modified in the configuration file taos.cfg to suit different scenarios. The default location of the configuration file is the /etc/taos directory; a different directory can be specified with the taosd command-line argument -c, e.g. taosd -c /home/user puts the configuration file in /home/user.
TDengine's background service program is `taosd`, and the default directory for the configuration file it reads at startup is `/etc/taos`. The configuration file directory can be specified with the command-line argument -c, for example:
```
taosd -c /home/user
```
tells `taosd` to read the configuration file taos.cfg under `/home/user` at startup.
Only some important configuration parameters are listed below; see the comments in the configuration file for more, and the preceding chapters for detailed descriptions of each parameter. **Note: after a configuration change, the *taosd* service must be restarted for the change to take effect.**
- internalIp: the IP address used to provide services; defaults to the first IP address
- mgmtShellPort: TCP/UDP port for communication between management nodes and clients; default 6030. The 5 consecutive ports starting from this one are occupied by UDP communication, i.e. UDP occupies [6030-6034]; TCP communication also uses port [6030].
- vnodeShellPort: TCP/UDP port for communication between data nodes and clients; default 6035. The 5 consecutive ports starting from this one are occupied by UDP communication, i.e. UDP occupies [6035-6039]; TCP communication also uses port [6035]
- httpPort: TCP port used by data nodes for the RESTful service [6020]
- dataDir: data file directory; default /var/lib/taos
- maxUsers: maximum number of users
- maxDbs: maximum number of databases
- maxTables: maximum number of tables
- enableMonitor: system monitoring flag; 0: off, 1: on
- logDir: log file directory; default /var/log/taos
- numOfLogLines: maximum number of lines in a log file
- debugFlag: system debug log switch; 131: errors and alarms only, 135: everything
**privateIp**
- Default: the first IP address in the physical node's list of IP addresses
The IP address used to provide services.
**publicIp**
- Default: same as privateIp
On cloud platforms such as Alibaba Cloud, this is the public IP address, mapped internally to the corresponding privateIp address; enterprise edition only.
**masterIp**
- Default: same as privateIp
The privateIp address of the first physical node in the cluster; enterprise edition only.
**secondIp**
- Default: same as privateIp
The privateIp address of the second physical node in the cluster; enterprise edition only.
**mgmtShellPort**
- Default: _6030_
TCP/UDP port used by the database service for communication between management nodes and clients.
> Ports _6030_ - _6034_ are all used for UDP communication; port _6030_ is additionally used for TCP communication.
**vnodeShellPort**
- Default: _6035_
TCP/UDP port for communication between data nodes and clients.
> The 5 ports _6035_ - _6039_ are used for UDP communication; port _6035_ is additionally used for TCP communication.
**mgmtVnodePort**
- Default: _6040_
TCP/UDP port for communication between management nodes and data nodes; enterprise edition only.
> The 5 ports _6040_ - _6044_ are used for UDP communication; port _6040_ is additionally used for TCP communication.
**vnodeVnodePort**
- Default: _6045_
TCP/UDP port for communication between data nodes; enterprise edition only.
> The 5 ports _6045_ - _6049_ are used for UDP communication; port _6045_ is additionally used for TCP communication.
**mgmtMgmtPort**
- Default: _6050_
UDP port for communication between management nodes; enterprise edition only.
**mgmtSyncPort**
- Default: _6050_
TCP port for synchronization between management nodes; enterprise edition only.
**httpPort**
- Default: _6020_
Port used by the RESTful service; all HTTP requests (TCP) send query/write requests to this port.
**dataDir**
- Default: /var/lib/taos
Data file directory; all data files are written to this directory.
**logDir**
- Default: /var/log/taos
Log file directory; client and server run logs are written to this directory.
**maxUsers**
- Default: 10,000
Upper limit on the number of users that can be created.
**maxDbs**
- Default: 1,000
Upper limit on the number of databases that can be created.
**maxTables**
- Default: 650,000
Upper limit on the number of tables that can be created.
> The number of tables the system can create is constrained by many factors; simply increasing this parameter does not directly raise the number of tables that can be created. For example, since creating each table consumes some cache space, with a given amount of available memory the total number of tables that can be created has a fixed upper limit.
**monitor**
- Default: 1 (enabled)
The server's internal system monitoring switch. Monitoring collects the physical node's load, including CPU, memory, disk, network bandwidth, and HTTP request counts, and stores the records in the `LOG` database. 0 disables the monitoring service, 1 enables it.
**numOfLogLines**
- Default: 10,000,000
Maximum number of lines allowed in a single log file (10,000,000 lines).
**debugFlag**
- Default: 131 (errors and warnings only)
System (server and client) run log switch:
- 131: output errors and warnings only
- 135: output errors (ERROR), warnings (WARN), and information (Info)
Data from different scenarios often has different characteristics: retention days, replica count, collection frequency, record size, number of collection points, compression, and so on can all differ. For the best storage efficiency, TDengine provides the following storage-related configuration parameters:
- days: the time span covered by one data file, in days
- keep: the number of days data is kept in the database
- days: the time span of the data stored in one data file, in days
- keep: the number of days data is kept
- rows: the number of records in a file block
- comp: file compression flag; 0: off, 1: one-stage compression, 2: two-stage compression
- ctime: the maximum interval from when data is written to memory until it is written to disk, in seconds
@@ -66,19 +159,139 @@ TDengine's background service is provided by taosd; configuration parameters can be modified in taos.cfg
## Client configuration
TDengine's interactive client application is taos, which shares the configuration file taos.cfg with taosd. When running taos, use the argument -c to specify the configuration file directory, e.g. taos -c /home/cfg means taos uses the parameters in /home/cfg/taos.cfg; the default directory is /etc/taos. For more on using taos, see [Shell command line program](#_TDengine_Shell命令行程序). This section mainly explains the parameters the taos client uses in taos.cfg.
TDengine's interactive client application is taos (taos.exe on Windows). Like the server program, `taos` can be configured via taos.cfg. If no configuration file path is specified at startup, taos reads the `taos.cfg` file under `/etc/taos/` by default. The command to start `taos` with a specific configuration file is:
Client configuration parameter list and explanations:
```
taos -c /home/cfg/
```
**Note: what is set at startup is the directory containing the configuration file, not the configuration file itself**
- masterIP: the IP address of the server the client sends requests to by default
- charset: the character set used by the client; default UTF-8. TDengine stores nchar data in unicode, so the client must tell the server which character set it uses, i.e. the character set of the system the client runs on.
- locale: sets the system locale. Shared between client and server on Linux
- defaultUser: default login user; default root
- defaultPass: default login password; default taosdata
If there is no configuration file in the `/home/cfg/` directory, the program starts anyway and prints the following warning:
```plaintext
Welcome to the TDengine shell from linux, client version:1.6.4.0
option file:/home/cfg/taos.cfg not found, all options are set to system default
```
For more on using taos, see [Shell command line program](#_TDengine_Shell命令行程序). This section mainly explains the parameters the taos client application uses in taos.cfg.
The TCP/UDP port and log configuration parameters are exactly the same as the server's.
Client configuration parameter notes:
When starting taos you can also specify the IP address, port, user name, and password on the command line; otherwise they are read from taos.cfg.
**masterIP**
- Default: 127.0.0.1
The IP address of the TDengine server the client connects to; if not set, the client connects to the node at 127.0.0.1. The following two commands are equivalent:
```
taos
taos -h 127.0.0.1
```
where the IP address is the masterIP value read from the configuration file.
**locale**
- Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API
TDengine provides a dedicated field type, `nchar`, for storing wide characters in non-ASCII encodings such as Chinese, Japanese, and Korean. Data written to an `nchar` field is uniformly encoded as `UCS4-LE` and sent to the server. Note that **encoding correctness** is guaranteed by the client. Therefore, to use `nchar` fields to store non-ASCII characters such as Chinese, Japanese, or Korean correctly, the client's encoding format must be set correctly.
Characters entered on the client use the operating system's current default encoding, which on Linux is usually `UTF-8`, though on some Chinese systems it may be `GB18030` or `GBK`; in a docker environment the default encoding is `POSIX`; on Chinese-language Windows it is `CP936`. The client must correctly set its character set, i.e. the current encoding of the operating system it runs on, to guarantee that data in `nchar` is converted correctly to `UCS4-LE`.
In Linux, locales are named as:
`<language>_<region>.<charset>`
For example `zh_CN.UTF-8`: zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The character set tells the client how to do encoding conversion when parsing local strings. Linux and Mac OSX can determine the system character encoding by setting the locale. Since the Windows locale is not in the POSIX-standard locale format, Windows uses another configuration parameter, `charset`, to specify the character encoding; `charset` can also be used on Linux.
**charset**
- Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API
If `charset` is not set in the configuration file, on Linux taos automatically reads the system's current locale at startup and extracts the charset encoding from it. If reading the locale fails, it tries to read the charset configuration; if reading the charset configuration also fails, **startup is aborted**.
On Linux, the locale includes the character encoding, so after correctly setting the Linux locale there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows, the system's current encoding cannot be obtained from the locale. If the string encoding cannot be read from the configuration file either, `taos` defaults the character encoding to `CP936`, which is equivalent to adding the following to the configuration file:
```
charset CP936
```
To adjust the character encoding, look up the encoding used by the current operating system and set it correctly in the configuration file.
On Linux, if both locale and charset are set and they are inconsistent, the value set later overrides the one set earlier.
```
locale zh_CN.UTF-8
charset GBK
```
Then the effective `charset` is `GBK`.
```
charset GBK
locale zh_CN.UTF-8
```
And here the effective `charset` is `UTF-8`.
**sockettype**
- Default: UDP
The socket type the client uses to connect to the server; both `UDP` and `TCP` are supported.
When client-server communication has to cross a hostile network environment (public networks, the Internet), when the connection between the client and the database server is unstable, or when UDP packets are dropped because of MTU issues, the connection socket type can be switched to `TCP`.
> Note: the client socket type must match the server's socket type, otherwise the database cannot be connected.
**compressMsgSize**
- Default: -1 (no compression)
The threshold above which messages exchanged between client and server are compressed; the default -1 means no compression. To compress messages, a setting of 64330 bytes is recommended, i.e. only message bodies larger than 64330 bytes are compressed. Add the following line to the configuration file:
```
compressMsgSize 64330
```
If the option is set to 0 (`compressMsgSize 0`), all messages are compressed.
**timezone**
- Default: the current timezone setting, obtained dynamically from the system
The timezone of the system the client runs on. To handle data writes and queries across multiple timezones, TDengine uses Unix timestamps ([Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time)) to record and store time. The nature of Unix timestamps guarantees that a timestamp produced at any given moment is identical in any timezone. Note that the conversion to a Unix timestamp is performed on the client. To ensure that other time formats are converted to the correct Unix timestamp, the correct timezone must be set.
On Linux, the client automatically reads the timezone set by the system. The user can also set the timezone in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid formats for setting the UTC+8 timezone.
The timezone setting affects the parsing of non-Unix-timestamp content in query and write SQL statements (timestamp strings and the keyword `now`). For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In the UTC+8 timezone, the SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC timezone, the SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the uncertainty introduced by string time formats, Unix timestamps can be used directly. Timestamp strings with an explicit timezone can also be used in SQL statements, e.g. the RFC3339 timestamp string `2013-04-12T15:52:01.123+08:00` or the ISO-8601 string `2013-04-12T15:52:01.123+0800`; the conversion of these two strings to Unix timestamps is not affected by the system's timezone.
**defaultUser**
- Default: root
Login user name; if no user name is given at client login, this one is used automatically. By default, the following two commands are equivalent
```
taos
taos -u root
```
The user name is the `defaultUser` value read from the configuration. If it is changed to `defaultUser abc`, the following two commands become equivalent:
```
taos
taos -u abc
```
**defaultPass**
- Default: taosdata
Login password; if no password is given at client login, this one is used automatically. By default, the following two commands are equivalent
```
taos
taos -ptaosdata
```
The TCP/UDP port and log configuration parameters are exactly the same as the server's. Use `taos -?` to see the options `taos` accepts.
## User management
@@ -191,6 +404,6 @@ KILL STREAM <stream-id>
## System monitoring
After TDengine starts, it automatically creates a monitoring database, SYS, and periodically writes the server's CPU, memory, disk space, bandwidth, request counts, disk read/write speed, slow queries, and other information into it. TDengine also records logs of important system operations (such as logging in, and creating or dropping databases) and various error alarms in the SYS database. System administrators can view this database directly from the CLI, or view the monitoring information through a graphical interface on the web.
After TDengine starts, it automatically creates a monitoring database, `LOG`, and periodically writes the server's CPU, memory, disk space, bandwidth, request counts, disk read/write speed, slow queries, and other information into it. TDengine also records logs of important system operations (such as logging in, and creating or dropping databases) and various error alarms in the `LOG` database. System administrators can view the recorded load information through the client program, and (in the enterprise edition) can view graphical visualizations of the data through a browser.
Collection of this monitoring information is enabled by default, but it can be turned off or on via the enableMonitor option in the configuration file.
Collection of this monitoring information is enabled by default, but it can be turned off or on via the `monitor` option in the configuration file.

View File

@ -188,58 +188,107 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
## Java Connector
### JDBC Interface
To make TDengine easy to use from Java applications, a `taos-jdbcdriver` implementing the JDBC standard (3.0) API specification is provided; it can be searched for and downloaded from the [Sonatype Repository][1].
Since TDengine is developed in C, the taos-jdbcdriver package depends on the system's native library:
* libtaos.so
After TDengine is installed successfully on Linux, the native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux search path, so nothing further needs to be configured.
* taos.dll
After the client is installed on Windows, the taos.dll the driver depends on is automatically copied to the default system search path C:/Windows/System32, likewise needing no further configuration.
> Note: when developing on Windows you need to install the Windows version of the TDengine client; since no standalone Linux client is currently provided, the full TDengine installation is required on Linux.
The JDBC driver for TDengine stays as close to relational database drivers as possible, but the differences between a time-series database and a relational database mean that taos-jdbcdriver does not implement the full JDBC standard. Keep the following in mind:
* TDengine does not support deleting or updating individual records, so the driver provides no such methods.
* Because delete and update are not supported, transactions are not supported either.
* union operations between tables are currently not supported.
* Nested queries are currently not supported: `for each Connection instance at most one ResultSet may be open; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet`.
## TAOS-JDBCDriver Versions and Supported TDengine and JDK Versions
| taos-jdbcdriver version | TDengine version | JDK version |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
## TDengine DataType and Java DataType
TDengine currently supports timestamp, numeric, character and boolean types, which map to Java types as follows:
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT | java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to Obtain TAOS-JDBCDriver
### Maven repository
taos-jdbcdriver has been published to the [Sonatype Repository][1] and synchronized to the major mirrors:
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
In a maven project, add the following to pom.xml:
```xml
<repositories>
<repository>
<id>oss-sonatype</id>
<name>oss-sonatype</name>
<url>https://oss.sonatype.org/content/groups/public</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
      <version>1.0.3</version>
</dependency>
</dependencies>
```
### Building from source
After downloading the [TDengine][3] source code, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to produce the jar.
## Usage
### Obtaining a connection
A TDengine Connection can be obtained with the following configuration:
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> Port 6030 is the default connection port; the `log` in the JDBC URL is the system's own monitoring database.
The JDBC URL format for TDengine is:
`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
The parts in `{}` are required and those in `[]` are optional. The parameters are:
* user: user name for logging in to TDengine; default root.
* password: login password; default taosdata.
* charset: character set used by the client; defaults to the system character set.
* cfgdir: directory of the client configuration file; default /etc/taos on Linux and C:/TDengine/cfg on Windows.
* locale: client locale; defaults to the current system locale.
* timezone: time zone used by the client; defaults to the current system time zone.
These parameters can be configured in three places, with `priority from high to low` as follows:
1. JDBC URL parameters
As described above, the parameters can be specified in the JDBC URL itself.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
```java
public Connection getConn() throws Exception{
  Class.forName("com.taosdata.jdbc.TSDBDriver");
  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
  Properties connProps = new Properties();
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
  // ... additional properties such as charset, locale and timezone can be set here
  return DriverManager.getConnection(jdbcUrl, connProps);
}
```
3. The client configuration file taos.cfg
The default configuration file is /var/lib/taos/taos.cfg on Linux and C:\TDengine\cfg\taos.cfg on Windows.
```properties
# client default username
# defaultUser           root

# client default password
# defaultPass           taosdata

# default system charset
# charset               UTF-8

# system locale
# locale                en_US.UTF-8
```
> See the [client configuration][13] documentation for more details.
### Creating a database and table
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: without `use db`, subsequent table operations must prefix the table name with the database name, e.g. db.tb.
### Inserting data
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
> now is a built-in function that defaults to the current time on the server.
> `now + 1s` adds one second to the server's current time; the letter after the number denotes the unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
### Querying data
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
ts = resultSet.getTimestamp(1);
temperature = resultSet.getInt(2);
humidity = resultSet.getFloat("humidity");
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Queries work the same way as with a relational database; when retrieving returned fields by index the first column is 1, but using column names is recommended.
### Releasing resources
```java
resultSet.close();
stmt.close();
conn.close();
```
> `Be sure to close the connection`; otherwise connections will leak.
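One way to make the close calls hard to forget is Java 7's try-with-resources; the sketch below assumes the `conn` obtained earlier and closes the Statement and ResultSet automatically, even if the query throws:
```java
// Resources declared in the try header are closed automatically,
// in reverse order, when the block exits.
try (Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("select * from tb")) {
    while (rs.next()) {
        System.out.printf("%s, %d, %s%n",
                rs.getTimestamp(1), rs.getInt(2), rs.getFloat("humidity"));
    }
}
conn.close();
```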
## Using with Connection Pools
**HikariCP**
* Add the HikariCP maven dependency:
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* Usage example:
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
HikariDataSource ds = new HikariDataSource(config); //create datasource
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
    connection.close(); // put back to connection pool
}
```
> After obtaining a connection via HikariDataSource.getConnection(), call close() when you are done; this does not actually close the connection, it just returns it to the pool.
> See the [official documentation][5] for more on HikariCP.
**Druid**
* Add the Druid maven dependency:
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* Usage example:
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
    connection.close(); // put back to connection pool
}
```
> See the [official documentation][6] for more on Druid.
**Notes**
* Starting with TDengine `v1.6.4.1`, a dedicated heartbeat function, `select server_status()`, is available, and it is the recommended validation query when using connection pools.
As shown below, `select server_status()` returns `1` on success:
```shell
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
## Using with Frameworks
* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
* For Spring Boot + MyBatis, see [springbootdemo][12]
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: the program cannot find the native library taos.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on Linux, create the symlink `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**Cause**: TDengine currently supports only 64-bit JDKs.
**Solution**: reinstall a 64-bit JDK.
* For other questions, see [Issues][7]
## Python Connector
### Prerequisites
* TDengine is installed; if the client runs on Windows, the Windows version of the TDengine client must also be installed ([Windows TDengine client installation](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口))
* python 2.7 or >= 3.4 is installed
* pip is installed
### Installing the Python client
#### Linux
Installation packages for python2 and python3 can be found in the src/connector/python folder of the source code and installed with pip:
`pip install src/connector/python/linux/python2/`
or
`pip install src/connector/python/linux/python3/`
#### Windows
With the Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open a Windows <em>cmd</em> prompt:
```cmd
cd C:\TDengine\connector\python\windows
pip install python2\
```
```cmd
cd C:\TDengine\connector\python\windows
pip install python3\
```
* If pip is not available, copy the taos folder under src/connector/python/python3 or src/connector/python/python2 into the directory of your application. On Windows, after installing the TDengine Windows client, copying C:\TDengine\driver\taos.dll to the C:\windows\system32 directory is all that is needed.
### Usage
#### Code examples
* Import the TDengine client module:
```python
import taos
```
* Get the connection:
```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* <em>host</em> is the IP address of the machine where the TDengine server runs, and <em>config</em> is the directory containing the client configuration file.
* Write data:
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a single row
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# batch-insert rows
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```
* Query data:
```python
c1.execute('select * from tb')
# fetch all query results
data = c1.fetchall()
# the result is a list with one element per row
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# or iterate over the cursor directly to fetch the results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
```
* Close the connection:
```python
c1.close()
conn.close()
```
#### Getting help
Use Python's built-in help to inspect the module, or refer to the sample programs under code/examples/python. Some commonly used classes and methods are listed below:
### HTTP Request Format
```
http://<ip>:<PORT>/rest/sql
```
Parameters:
- IP: any host in the cluster
- PORT: the httpPort option in the configuration file, 6020 by default
For example, http://192.168.0.1:6020/rest/sql is the URL pointing to the host at 192.168.0.1.
The header of the HTTP request must carry authentication information. TDengine supports two mechanisms, Basic authentication and custom authentication; a standard, secure digital-signature mechanism will be provided for identity verification in a future release.
- Custom authentication information looks as follows (the <token> is explained later):
```
Authorization: Taosd <TOKEN>
```
- Basic authentication information looks as follows:
```
Authorization: Basic <TOKEN>
```
The body of the HTTP request is a complete SQL statement. Table names in the SQL must carry the database prefix, e.g. \<db-name>.\<tb-name>; if a table name has no database prefix the system returns an error, because the HTTP module is only a simple forwarder and has no notion of a current DB.
To issue an HTTP Request with curl using the custom authentication scheme, the syntax is:
```
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql
```
or with Basic authentication:
```
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
```
where `TOKEN` is the Base64-encoded form of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.
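The token can be produced with any Base64 encoder; for example, with nothing but the Java standard library:
```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class TokenDemo {
    public static void main(String[] args) {
        // Base64-encode "{username}:{password}" to build the Authorization token.
        String token = Base64.getEncoder()
                .encodeToString("root:taosdata".getBytes(StandardCharsets.UTF_8));
        System.out.println(token); // cm9vdDp0YW9zZGF0YQ==
    }
}
```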
### HTTP Response Format
The response is JSON of the following form:
```
{
    "status": "succ",
    "head": [...],
    "data": [...],
    "rows": ...
}
```
Field descriptions:
- status: whether the operation succeeded or failed
- head: the table's column definitions; if no result set is returned, a single "affected_rows" column
- data: the rows returned, one array per row; if no result set is returned, just [[affected_rows]]
- rows: the total number of rows
### Custom Authorization Token
HTTP requests must carry the authorization token `<TOKEN>` for identification. The token is normally supplied by the administrator, but it can also be obtained simply by sending an `HTTP GET` request:
```
curl http://<ip>:6020/rest/login/<username>/<password>
```
where `ip` is the IP address of the TDengine server, `username` the database user name, and `password` the database password. The response is in `JSON` format with the following fields:
- status: flag indicating the result of the request
- code: return code
- desc: the authorization token
Example of obtaining a token:
```
curl http://192.168.0.1:6020/rest/login/root/taosdata
```
Response:
```
{
  "status": "succ",
  "code": 0,
  "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
}
```
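The same login-then-query flow can be driven from any HTTP client; the Java sketch below uses only the standard library (Java 9+ for `readAllBytes`), with the host, port and credentials as placeholders taken from the examples above:
```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RestDemo {
    // Minimal helper: sends one HTTP request and returns the raw JSON body.
    static String http(String url, String method, String auth, String body) throws IOException {
        HttpURLConnection c = (HttpURLConnection) new URL(url).openConnection();
        c.setRequestMethod(method);
        if (auth != null) c.setRequestProperty("Authorization", auth);
        if (body != null) {
            c.setDoOutput(true);
            try (OutputStream os = c.getOutputStream()) {
                os.write(body.getBytes(StandardCharsets.UTF_8));
            }
        }
        try (InputStream is = c.getInputStream()) {
            return new String(is.readAllBytes(), StandardCharsets.UTF_8);
        }
    }

    public static void main(String[] args) throws IOException {
        String base = "http://192.168.0.1:6020";  // placeholder host and port
        // 1. obtain a token: the "desc" field of the returned JSON
        System.out.println(http(base + "/rest/login/root/taosdata", "GET", null, null));
        // 2. run a SQL statement with Basic authentication
        System.out.println(http(base + "/rest/sql", "POST",
                "Basic cm9vdDp0YW9zZGF0YQ==", "select * from demo.t1"));
    }
}
```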
### Usage Examples
- Query all records of table t1 in database demo:
```
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sql
```
Response:
```
{
    "status": "succ",
    "head": ["column1","column2","column3"],
    "data": [
        ["2017-12-12 22:44:25.728",4,5.60000],
        ["2017-12-12 23:44:25.730",1,2.30000]
    ],
    "rows": 2
}
```
- 创建库demo
`curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql`
```
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6020/rest/sql`
```
返回值:
返回值:
```
{
"status": "succ",
@ -390,11 +747,69 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
}
```
### Other Uses
#### Result sets with Unix timestamps
When the HTTP request URL uses `sqlt`, the timestamps in the returned result set are expressed as Unix timestamps, for example:
```
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sqlt
```
Response:
```
{
"status": "succ",
"head": ["column1","column2","column3"],
"data": [
[1513089865728,4,5.60000],
[1513093465730,1,2.30000]
],
"rows": 2
}
```
#### Result sets with UTC time strings
When the HTTP request URL uses `sqlutc`, the timestamps in the returned result set are expressed as UTC time strings, for example:
```
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6020/rest/sqlutc
```
Response:
```
{
"status": "succ",
"head": ["column1","column2","column3"],
"data": [
["2017-12-12T22:44:25.728+0800",4,5.60000],
["2017-12-12T23:44:25.730+0800",1,2.30000]
],
"rows": 2
}
```
### Important Configuration Options
Only parameters related to the RESTful interface are listed below; see the notes in the configuration file for the other system parameters. Note that the taosd service must be restarted after a configuration change for it to take effect.
- httpIp: IP address on which the RESTful service listens; binds to 0.0.0.0 by default
- httpPort: port of the RESTful service; 6020 by default
- httpMaxThreads: number of threads to start; 2 by default
- httpCacheSessions: number of cached connections; the number of concurrent requests must be less than 10 times this value; 100 by default
- restfulRowLimit: maximum number of rows returned in the JSON result set; 10240 by default
- httpEnableCompress: whether compression is supported; off by default; currently TDengine only supports the gzip format
- httpDebugFlag: log verbosity, 131 for errors and alarms only, 135 for everything; 131 by default
## Go Connector
#### Installing TDengine
The Go connector uses libtaos.so and taos.h, so before using it TDengine must be installed on the machine where the program runs in order to provide these driver files.
#### Importing the package in Go
TDengine provides the Go driver package "taosSql", an implementation of Go's "database/sql/driver" interface; the package can be fetched with `go get`.
3. Creating tables, writing and querying data
Once the database has been created, tables can be created and data written and queried. The basic approach to all of these operations is to assemble a SQL statement, execute it with db.Exec, then check the error information and handle it accordingly; see the sample code above.
## Node.js Connector
TDengine also provides a node.js connector, which can be installed via [npm](https://www.npmjs.com/) or from the source code under *src/connector/nodejs/*. [The detailed installation steps are here](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs).
First, install the node.js connector via [npm](https://www.npmjs.com/):
```cmd
npm install td-connector
```
Installing the node.js connector with npm is recommended; if npm is not available, copy *src/connector/nodejs/* into your nodejs project directory.
The connector uses [node-gyp](https://github.com/nodejs/node-gyp) to interact with the TDengine server. Before installing the node.js connector, the following software is also required:
### Unix
- `python` (`v2.7` recommended; `v3.x.x` is not yet supported)
- `make`
- a C compiler, such as [GCC](https://gcc.gnu.org)
### macOS
- `python` (`v2.7` recommended; `v3.x.x` is not yet supported)
- Xcode
- then install
```
Command Line Tools
```
through Xcode; the tool can be found under
```
Xcode -> Preferences -> Locations
```
or installed from a terminal with
```
xcode-select --install
```
- after this step, `gcc` and `make` are installed
### Windows
#### Option 1
Use Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools): run `npm install --global --production windows-build-tools` in a `cmd` prompt to install all the required tools.
#### Option 2
Manually install the following tools:
- Install the Visual Studio components: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community)
- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not yet supported) and run `npm config set python python2.7`
- In a `cmd` prompt, run `npm config set msvs_version 2017`
If the steps above fail, refer to Microsoft's Node.js guidelines for Windows: [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules)
When using ARM64 Node.js on Windows 10 ARM, also add "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64".
### Usage
Below are some basic usage patterns of the node.js connector; for the full details see [this document](http://docs.taosdata.com/node).
#### Connecting
To use the node.js connector, first <em>require</em> ```td-connector```, then use the ```taos.connect``` function. The ```host``` argument of ```taos.connect``` must be provided; the other arguments take the default values shown below when omitted. Finally, initialize a ```cursor``` to communicate with the TDengine server:
```javascript
const taos = require('td-connector');
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0})
var cursor = conn.cursor(); // Initializing a new cursor
```
To close the connection:
```javascript
conn.close();
```
#### Queries
The database can be queried through the ```cursor.query``` function.
```javascript
var query = cursor.query('show databases;')
```
The query result can be fetched and printed with the ```query.execute()``` function:
```javascript
var promise = query.execute();
promise.then(function(result) {
result.pretty();
});
```
Query statements can also be parameterized via the ```bind``` method of ```query```; in the example below, ```query``` automatically fills the supplied values into the ```?``` placeholders in the statement.
```javascript
var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
query.execute().then(function(result) {
result.pretty();
})
```
Passing ```true``` as the second argument of ```query``` makes the result available immediately, as follows:
```javascript
var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
promise.then(function(result) {
result.pretty();
})
```
#### Asynchronous functions
Asynchronous database operations work just like the synchronous ones above; simply append `_a` to functions such as `cursor.execute` and `TaosQuery.execute`.
```javascript
var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
promise1.then(function(result) {
result.pretty();
})
promise2.then(function(result) {
result.pretty();
})
```
### Examples
[This example](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) uses the NodeJS connector to create a table, insert weather data and query the inserted data.
[This example](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) does the same, but unlike the one above uses only the `cursor`.
## CSharp Connector
On Windows, C# applications can use TDengine's native C interface to perform all database operations; a driver for ORM frameworks such as Dapper will be provided in a future release.
#### Installing the TDengine client
The C# connector requires `libtaos.so` and `taos.h`, so before using it the TDengine Windows client must be installed in the Windows environment where the program runs, to provide the driver files.
After installation, the folder `C:/TDengine/examples/C#` contains two files:
- TDengineDriver.cs: native C bindings for the methods in taos.dll
- TDengineTest.cs: a reference example
and the folder `C:\Windows\System32` contains the `taos.dll` file.
#### Usage
- Add the C# interface file TDengineDriver.cs to the .NET project of your application
- Refer to TDengineTest.cs for how to define the connection parameters and how to insert, query and perform other operations
- Since the C# interface needs `taos.dll`, you may add `taos.dll` to the .NET solution
#### Notes
- `taos.dll` is compiled for x64, so when the .NET project generates its .exe, choose "x64" as the platform for both the solution and the project.
- This .NET interface has been verified with Visual Studio 2013/2015/2017; other VS versions have not been verified yet.
#### Third-party driver
Maikebing.Data.Taos is an ADO.Net provider built on TDengine's RESTful connector, contributed by `麦壳饼@@maikebing`; see:
```
https://gitee.com/maikebing/Maikebing.EntityFrameworkCore.Taos
```
## Windows Client and Programming Interfaces
### Client Installation
On Windows, TDengine provides a 64-bit Windows client ([download](https://www.taosdata.com/cn/all-downloads/#tdengine_win-list)). The installer is an .exe file; running it installs the client under C:\TDengine. The client runs on mainstream 64-bit Windows platforms, with the following directory layout:
```
├── cfg
├───└── taos.cfg
├── connector
├───├── go
├───├── grafana
├───├── jdbc
├───└── python
├── driver
├───├── taos.dll
├───├── taos.exp
├───└── taos.lib
├── examples
├───├── bash
├───├── c
├───├── C#
├───├── go
├───├── JDBC
├───├── lua
├───├── matlab
├───├── nodejs
├───├── python
├───├── R
├───└── rust
├── include
├───└── taos.h
└── taos.exe
```
The most commonly used files are:
+ client executable: C:/TDengine/taos.exe
+ configuration file: C:/TDengine/cfg/taos.cfg
+ C driver directory: C:/TDengine/driver
+ C driver header file: C:/TDengine/include
+ JDBC driver directory: C:/TDengine/connector/jdbc
+ Go driver directory: C:/TDengine/connector/go
+ Python driver directory: C:/TDengine/connector/python
+ C# driver and sample code: C:/TDengine/examples/C#
+ log directory (created on first run): C:/TDengine/log
### Notes
#### Notes on the Shell tool
Search for cmd in the Start menu and run taos.exe from the command line to open the TDengine client program, as shown below, where ServerIP is the IP address of the Linux server running TDengine:
```
taos -h <ServerIP>
```
Using taos in cmd is no different from on Linux, but note the following:
+ make sure the Windows firewall and any antivirus software are turned off or allow the ports TDengine uses for client-server communication (see the `server configuration` section)
+ confirm that the correct server IP address is specified when connecting
+ ping the server IP; if there is no response, check the network
#### Notes on the C++ interface
The API TDengine provides on Windows is identical to the Linux one: the application includes the TDengine header taos.h, links against taos.lib, and at run time places taos.dll in the directory of the executable.
#### Notes on the JDBC interface
On Windows, applications can use the JDBC interface to manipulate the database; note the following:
+ place the JDBC driver (JDBCDriver-1.0.0-dist.jar) on the current CLASS_PATH;
+ place the Windows native library (taos.dll) in the system32 directory.
#### Notes on the python interface
On Windows, applications can manipulate the database by importing the taos module; note the following:
+ make sure the TDengine client is installed on Windows;
+ place the Windows native library (taos.dll) in the system32 directory.
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
importSampleData/.gitignore vendored Normal file
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
.idea/
.vscode/
importSampleData/LICENSE Normal file
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

245
importSampleData/README.md Normal file
View File

@ -0,0 +1,245 @@
## Sample Data Import
This tool quickly imports user-provided sample data files in `json` or `csv` format into `TDengine`; it currently runs on Linux only.
To evaluate write and query performance, the sample data can be scaled horizontally and vertically. Horizontal scaling clones the data of one table (monitoring point) into multiple tables; vertical scaling replicates the data of a time range in the sample along the time axis. The tool can also keep importing continuously after the historical data reaches the current time, which allows testing scenarios where inserts and queries run in parallel, simulating a real-world environment.
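For example, with `-hnum 2 -vnum 3`, a sample consisting of one table with 100 rows becomes two tables with 300 rows each.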
## Download and Install
### Download the Executable
Since the tool is written in Go, a pre-built executable `bin/taosimport` is shipped with the project for convenience. Clone the repository with `git clone https://github.com/taosdata/TDengine.git` (or download and extract the `ZIP` archive), enter the sample import directory with `cd importSampleData`, and run `bin/taosimport`.
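Put together, the quick-start steps look like this:
```shell
git clone https://github.com/taosdata/TDengine.git
cd TDengine/importSampleData
bin/taosimport
```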
### Build from Go Source
As the tool is developed in Go, Go must be installed before building (see [Getting Started][2]), along with the TDengine Go Connector (see the [TDengine connector documentation][3]). Once both are installed, run the following commands to build the executable `bin/taosimport`:
```shell
go get github.com/taosdata/TDengine/importSampleData
cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData
go build -o bin/taosimport app/main.go
```
> Note: since the TDengine Go connector currently supports Linux only, the tool can run only on Linux for now.
> If `go get` fails, download the source manually, copy the `github.com/taosdata/TDengine/importSampleData` folder into the `src` directory under $GOPATH, and then run `go build -o bin/taosimport app/main.go`.
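A minimal sketch of that fallback, assuming the repository has already been cloned into the current directory:
```shell
# copy the sources into the import path expected by go build (hypothetical layout)
mkdir -p $GOPATH/src/github.com/taosdata/TDengine
cp -r TDengine/importSampleData $GOPATH/src/github.com/taosdata/TDengine/
cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData
go build -o bin/taosimport app/main.go
```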
## Usage
### Quick Start
Running `bin/taosimport` performs the following steps with the default configuration:
1. Create the database
A database named `test_yyyyMMdd` is created automatically.
2. Create the super table
The super table is created from the `sensor_info` scene defined in the configuration file `config/cfg.toml`.
> Table creation statement: create table s_sensor_info(ts timestamp, temperature int, humidity float) tags(location binary(20), color binary(16), devgroup int);
3. Create subtables and insert data
The `data/sensor_info.csv` sample data configured for the `sensor_info` scene in `config/cfg.toml` is scaled horizontally by a factor of `100` (set via the hnum parameter), so `10*100=1000` subtables are created automatically (the default sample data contains 10 subtables with 100 rows each), and `10` threads (set via the thread parameter) import each subtable in a loop `1000` times (set via the vnum parameter).
Enter the `taos shell` and run the following queries to verify the import:
* Count the total number of records
```shell
taos> use test_yyyyMMdd;
taos> select count(*) from s_sensor_info;
```
* Count the records of each group
```shell
taos> select count(*) from s_sensor_info group by devgroup;
```
* Query the aggregate metrics at 1-hour intervals
```shell
taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h);
```
* Query the latest metrics uploaded at a given location
```shell
taos> select last(*) from s_sensor_info where location = 'beijing';
```
> For more queries and functions, see [Data Query][4].
### Detailed Usage
Run `bin/taosimport -h` for a detailed description of all parameters:
* -cfg string
  Path of the import configuration file, which describes the sample data files and the corresponding TDengine settings. Defaults to `config/cfg.toml`.
* -cases string
  Names of the scenes to import, as listed under `[usecase]` in the configuration file given by -cfg. Multiple scenes can be imported at once, separated by commas, e.g. `sensor_info,camera_detection`. Defaults to `sensor_info`.
* -hnum int
  Horizontal scaling factor for the sample data. If the original sample contains one subtable `t_0`, setting hnum to 2 creates the two subtables `t_0` and `t_1` from the original table name. Defaults to 100.
* -vnum int
  Number of times the sample data is replicated vertically along the time axis. A value of 0 means the import keeps running at the specified interval after the historical data reaches the current time. Defaults to 1000, i.e. the sample data is replicated 1000 times on the time axis.
* -delay int
  Interval between imports during continuous import (vnum set to 0), in ms. Defaults to half of the smallest record interval among all scenes.
* -tick int
  Interval for printing statistics. Defaults to 2000 ms.
* -save int
  Whether to save statistics into TDengine's statistic table: 1 yes, 0 no. Defaults to 0.
* -savetb int
  Name of the table that stores the statistics when save is 1. Defaults to statistic.
* -auto int
  Whether to generate the primary-key timestamps of the sample data automatically: 1 yes, 0 no. Defaults to 0.
* -start string
  Start time of the imported records, in the format `"yyyy-MM-dd HH:mm:ss.SSS"`. If unset, the smallest timestamp in the sample data is used; if set, the primary-key timestamps in the sample data are ignored and the import starts from the given time. If auto is 1, start must be set. Defaults to empty.
* -interval int
  Interval between imported records, in milliseconds. Only takes effect when `auto=1`; otherwise the interval is computed from the sample data. Defaults to 1000.
* -thread int
  Number of threads used for the import. Defaults to 10.
* -batch int
  Batch size of the import, i.e. how many records one write operation contains. Defaults to 100.
* -host string
  IP address of the target TDengine server. Defaults to 127.0.0.1.
* -port int
  Port of the target TDengine server. Defaults to 6030.
* -user string
  TDengine user name. Defaults to root.
* -password string
  TDengine user password. Defaults to taosdata.
* -dropdb int
  Whether to drop the database before importing: 1 yes, 0 no. Defaults to 0.
* -db string
  Name of the target TDengine database. Defaults to test_yyyyMMdd.
* -dbparam string
  Optional parameters for creating the database automatically when it does not exist, e.g. `days 10 cache 16000 ablocks 4`. Defaults to empty.
### Common Examples
* `bin/taosimport -cfg config/cfg.toml -cases sensor_info,camera_detection -hnum 1 -vnum 10`
This imports the data of the sensor_info and camera_detection scenes 10 times each.
* `bin/taosimport -cfg config/cfg.toml -cases sensor_info -hnum 2 -vnum 0 -start "2019-12-12 00:00:00.000" -interval 5000`
This scales the sensor_info scene data horizontally by a factor of 2 and imports it starting at `2019-12-12 00:00:00.000` with a record interval of 5000 ms; once the import reaches the current time, it continues automatically.
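* `bin/taosimport -cfg config/cfg.toml -cases sensor_info -vnum 0 -save 1 -savetb statistic`
A hypothetical combination of the flags documented above: with vnum set to 0 the import keeps running after the historical data reaches the current time, and the per-tick statistics are saved into the statistic table.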
### The config/cfg.toml Configuration File
``` toml
# sensor scene
[sensor_info] # scene name
format = "csv" # sample data file format, json or csv; the file must contain at least the fields named by subTableName, tags and fields
filePath = "data/sensor_info.csv" # path of the sample data file; the program cycles through its data
separator = "," # field separator in the csv sample file, comma by default
stname = "sensor_info" # super table name
subTableName = "devid" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, horizontally extended tables t_subTableName_stname_i
timestamp = "ts" # which field in fields serves as the primary key; its type must be timestamp
timestampType="millisecond" # whether the primary-key time field of the sample data is in millisecond or dateTime format
#timestampTypeFormat = "2006-01-02 15:04:05.000" # datetime format of the primary key, required when timestampType is dateTime
tags = [
# tag list; name is the tag name, type the tag type
{ name = "location", type = "binary(20)" },
{ name = "color", type = "binary(16)" },
{ name = "devgroup", type = "int" },
]
fields = [
# field list; name is the field name, type the field type
{ name = "ts", type = "timestamp" },
{ name = "temperature", type = "int" },
{ name = "humidity", type = "float" },
]
# camera detection scene
[camera_detection] # scene name
format = "json" # sample data file format, json or csv; the file must contain at least the fields named by subTableName, tags and fields
filePath = "data/camera_detection.json" # path of the sample data file; the program cycles through its data
#separator = "," # field separator in csv sample files, comma by default; not needed for json files
stname = "camera_detection" # super table name
subTableName = "sensor_id" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, horizontally extended tables t_subTableName_stname_i
timestamp = "ts" # which field in fields serves as the primary key; its type must be timestamp
timestampType="dateTime" # whether the primary-key time field of the sample data is in millisecond or dateTime format
timestampTypeFormat = "2006-01-02 15:04:05.000" # datetime format of the primary key, required when timestampType is dateTime
tags = [
# tag list; name is the tag name, type the tag type
{ name = "home_id", type = "binary(30)" },
{ name = "object_type", type = "int" },
{ name = "object_kind", type = "binary(20)" },
]
fields = [
# field list; name is the field name, type the field type
{ name = "ts", type = "timestamp" },
{ name = "states", type = "tinyint" },
{ name = "battery_voltage", type = "float" },
]
# other cases
```
### Sample Data Formats
#### json
When a scene in `config/cfg.toml` sets format="json", the sample data file must supply values for the fields listed in tags and fields. The sample data looks like this:
```json
{"home_id": "603", "sensor_id": "s100", "ts": "2019-01-01 00:00:00.000", "object_type": 1, "object_kind": "night", "battery_voltage": 0.8, "states": 1}
{"home_id": "604", "sensor_id": "s200", "ts": "2019-01-01 00:00:00.000", "object_type": 2, "object_kind": "day", "battery_voltage": 0.6, "states": 0}
```
#### csv
When a scene in `config/cfg.toml` sets format="csv", the sample data file must supply a header row and the matching data; the field separator is given by the scene's `separator` setting, comma by default. For example:
```csv
devid,location,color,devgroup,ts,temperature,humidity
0, beijing, white, 0, 1575129600000, 16, 19.405091
0, beijing, white, 0, 1575129601000, 22, 14.377142
```
[1]: https://github.com/taosdata/TDengine
[2]: https://golang.org/doc/install
[3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector
[4]: https://www.taosdata.com/cn/documentation/taos-sql/#%E6%95%B0%E6%8D%AE%E6%9F%A5%E8%AF%A2

1080
importSampleData/app/main.go Normal file

File diff suppressed because it is too large

BIN
importSampleData/bin/taosimport Executable file

Binary file not shown.

View File

@ -0,0 +1,51 @@
# sensor scene
[sensor_info] # scene name
format = "csv" # sample data file format, json or csv; the file must contain at least the fields named by subTableName, tags and fields
filePath = "data/sensor_info.csv" # path of the sample data file; the program cycles through its data
separator = "," # field separator in the csv sample file, comma by default
stname = "sensor_info" # super table name
subTableName = "devid" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, horizontally extended tables t_subTableName_stname_i
timestamp = "ts" # which field in fields serves as the primary key; its type must be timestamp
timestampType="millisecond" # whether the primary-key time field of the sample data is in millisecond or dateTime format
#timestampTypeFormat = "2006-01-02 15:04:05.000" # datetime format of the primary key, required when timestampType is dateTime
tags = [
# tag list; name is the tag name, type the tag type
{ name = "location", type = "binary(20)" },
{ name = "color", type = "binary(16)" },
{ name = "devgroup", type = "int" },
]
fields = [
# field list; name is the field name, type the field type
{ name = "ts", type = "timestamp" },
{ name = "temperature", type = "int" },
{ name = "humidity", type = "float" },
]
# camera detection scene
[camera_detection] # scene name
format = "json" # sample data file format, json or csv; the file must contain at least the fields named by subTableName, tags and fields
filePath = "data/camera_detection.json" # path of the sample data file; the program cycles through its data
#separator = "," # field separator in csv sample files, comma by default; not needed for json files
stname = "camera_detection" # super table name
subTableName = "sensor_id" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, horizontally extended tables t_subTableName_stname_i
timestamp = "ts" # which field in fields serves as the primary key; its type must be timestamp
timestampType="dateTime" # whether the primary-key time field of the sample data is in millisecond or dateTime format
timestampTypeFormat = "2006-01-02 15:04:05.000" # datetime format of the primary key, required when timestampType is dateTime
tags = [
# tag list; name is the tag name, type the tag type
{ name = "home_id", type = "binary(30)" },
{ name = "object_type", type = "int" },
{ name = "object_kind", type = "binary(20)" },
]
fields = [
# field list; name is the field name, type the field type
{ name = "ts", type = "timestamp" },
{ name = "states", type = "tinyint" },
{ name = "battery_voltage", type = "float" },
]
# other cases

View File

@ -0,0 +1,380 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 7,
"links": [],
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": null,
"format": "celsius",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "lastest_temperature",
"refId": "A",
"sql": "select ts, temp from test.stream_temp_last where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "20,30",
"timeFrom": null,
"timeShift": null,
"title": "最新温度",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"datasource": null,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 8,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"decimals": 2,
"mappings": [],
"max": 100,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
],
"title": ""
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "maxHumidity",
"refId": "A",
"sql": "select ts, humidity from test.stream_humidity_max where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "最大湿度",
"type": "gauge"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 10,
"w": 12,
"x": 0,
"y": 8
},
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "avgTemperature",
"refId": "A",
"sql": "select ts, temp from test.stream_temp_avg where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "平均温度",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "celsius",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 10,
"w": 12,
"x": 12,
"y": 8
},
"id": 10,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "max",
"refId": "A",
"sql": "select ts, max_temp from test.stream_sensor where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "avg",
"refId": "B",
"sql": "select ts, avg_temp from test.stream_sensor where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "min",
"refId": "C",
"sql": "select ts, min_temp from test.stream_sensor where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "某传感器",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "celsius",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 20,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "sensor_info",
"uid": "dGSoaTLWz",
"version": 2
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,66 @@
package dataimport
import (
"encoding/json"
"fmt"
"path/filepath"
"sync"
"github.com/pelletier/go-toml"
)
var (
cfg Config
once sync.Once
)
// Config includes the import config for all scenes
type Config struct {
UserCases map[string]CaseConfig
}
// CaseConfig includes the sample data config and the TDengine config
type CaseConfig struct {
Format string
FilePath string
Separator string
Stname string
SubTableName string
Timestamp string
TimestampType string
TimestampTypeFormat string
Tags []FieldInfo
Fields []FieldInfo
}
// FieldInfo describes a single field or tag
type FieldInfo struct {
Name string
Type string
}
// LoadConfig loads the config from the specified file
func LoadConfig(filePath string) Config {
once.Do(func() {
filePath, err := filepath.Abs(filePath)
if err != nil {
panic(err)
}
fmt.Printf("parse toml file once. filePath: %s\n", filePath)
tree, err := toml.LoadFile(filePath)
if err != nil {
panic(err)
}
bytes, err := json.Marshal(tree.ToMap())
if err != nil {
panic(err)
}
err = json.Unmarshal(bytes, &cfg.UserCases)
if err != nil {
panic(err)
}
})
return cfg
}
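For illustration, a caller could load and inspect this configuration as follows (a minimal sketch; the import path is an assumption based on the repository layout and the `dataimport` package name above):
```go
package main

import (
	"fmt"

	// assumed import path, derived from the repository layout
	"github.com/taosdata/TDengine/importSampleData/dataimport"
)

func main() {
	// LoadConfig parses config/cfg.toml exactly once (guarded by sync.Once)
	// and returns the cached Config on every later call.
	cfg := dataimport.LoadConfig("config/cfg.toml")
	for name, c := range cfg.UserCases {
		fmt.Printf("case %s: format=%s file=%s stname=%s\n",
			name, c.Format, c.FilePath, c.Stname)
	}
}
```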

View File

@ -5,14 +5,39 @@
# #
########################################################
# master IP for TDengine system
# masterIp 127.0.0.1
# Internal IP address of the server, which can be acquired by using ifconfig command.
# internalIp 127.0.0.1
# second IP for TDengine system, for cluster version only
# secondIp 127.0.0.1
# IP address of the server
# privateIp 127.0.0.1
# public IP of server, on which the tdengine are deployed
# this IP is assigned by cloud service provider, for cluster version only
# publicIp 127.0.0.1
# network is bound to 0.0.0.0
# anyIp 1
# set socket type ("udp" and "tcp")
# the server and client should have the same socket type. Otherwise, connect will fail
# sockettype udp
# client local IP
# localIp 127.0.0.1
# data file's directory
# for the cluster version, data file's directory is configured this way
# option mount_path tier_level
# dataDir /mnt/disk1/taos 0
# dataDir /mnt/disk2/taos 0
# dataDir /mnt/disk3/taos 0
# dataDir /mnt/disk4/taos 0
# dataDir /mnt/disk5/taos 0
# dataDir /mnt/disk6/taos 1
# dataDir /mnt/disk7/taos 1
# for the stand-alone version, data file's directory is configured this way
# dataDir /var/lib/taos
# log file's directory
@ -27,6 +52,18 @@
# port for DNode connect to Client, default udp[6035-6039] tcp[6035]
# vnodeShellPort 6035
# port for MNode connect to VNode, default udp[6040-6044] tcp[6040], for cluster version only
# mgmtVnodePort 6040
# port for DNode connect to DNode, default tcp[6045], for cluster version only
# vnodeVnodePort 6045
# port for MNode connect to MNode, default udp[6050], for cluster version only
# mgmtMgmtPort 6050
# port sync file MNode and MNode, default tcp[6050], for cluster version only
# mgmtSyncPort 6050
# number of threads per CPU core
# numOfThreadsPerCore 1
@ -54,11 +91,7 @@
# interval of system monitor
# monitorInterval 60
# set socket type("udp" and "tcp").
# The server and client should have the same socket type. Otherwise, connect will fail.
# sockettype udp
# The compressed rpc message, option:
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
@ -73,12 +106,18 @@
# commit interval, unit is second
# ctime 3600
# interval of DNode report status to MNode, unit is Second
# interval of DNode report status to MNode, unit is Second, for cluster version only
# statusInterval 1
# interval of Shell send HB to MNode, unit is Second
# shellActivityTimer 3
# interval of DNode send HB to DNode, unit is Second, for cluster version only
# vnodePeerHBTimer 1
# interval of MNode send HB to MNode, unit is Second, for cluster version only
# mgmtPeerHBTimer 1
# time to keep MeterMeta in Cache, seconds
# meterMetaKeepTimer 7200
@ -94,12 +133,21 @@
# max number of tables
# maxTables 650000
# max number of Dnodes, for cluster version only
# maxDnodes 1000
# Max number of VGroups, for cluster version only
# maxVGroups 1000
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# enable/disable commit log
# clog 1
@ -115,6 +163,9 @@
# number of days to keep DB file
# keep 3650
# number of replications, for cluster version only
# replications 1
# client default database(database should be created)
# defaultDB
@ -136,18 +187,30 @@
# max connection to Vnode
# maxVnodeConnections 10000
# start http service in the cluster
# number of vnodes an mnode is counted as during load balancing, for cluster version only
# mgmtEqualVnodeNum 4
# number of seconds allowed for a dnode to be offline, for cluster version only
# offlineThreshold 864000
# start http service
# http 1
# start system monitor module in the cluster
# start system monitor module
# monitor 1
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# number of threads used to process http requests
# httpMaxThreads 2
# pre-allocated number of http sessions
# httpCacheSessions 100
# whether to enable HTTP compression transmission
# httpEnableCompress 0
# whether the telegraf table name contains the number of tags and the number of fields
# telegrafUseFieldNum 0

View File

@ -1,15 +1,20 @@
#!/bin/bash
#
# Generate deb package for ubuntu
# set -x
set -e
#set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"
#echo "curr_dir: ${curr_dir}"
@ -63,7 +68,25 @@ debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
debname="TDengine-"${tdengine_ver}".deb"
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "lite" ]; then
debname="TDengine-server-edge"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or lite"
exit 1
fi
if [ "$verType" == "beta" ]; then
debname=${debname}-${verType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# make deb package
dpkg -b ${pkg_dir} $debname
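With verMode=lite and verType=beta, for example, the package produced by this logic would be named TDengine-server-edge-&lt;version&gt;-&lt;osType&gt;-&lt;cpuType&gt;-beta.deb, the placeholders being filled in from the script arguments.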

View File

@ -5,11 +5,49 @@
set -e
# set -x
armver=$1
# release.sh -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta]
# set parameters by default value
verMode=lite # [cluster, lite]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
while getopts "hv:V:c:o:" arg
do
case $arg in
v)
#echo "verMode=$OPTARG"
verMode=$( echo $OPTARG )
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -v [cluster | lite] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta]"
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType}"
curr_dir=$(pwd)
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/..)"
top_dir="$(readlink -f ${script_dir}/..)"
versioninfo="${top_dir}/src/util/src/version.c"
csudo=""
@ -111,9 +149,19 @@ echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo
echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo}
echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
echo "" >> ${versioninfo}
tmp_version=$(echo $version | tr -s "." "_")
if [ "$verMode" == "cluster" ]; then
libtaos_info=${tmp_version}_${osType}_${cpuType}
else
libtaos_info=edge_${tmp_version}_${osType}_${cpuType}
fi
if [ "$verType" == "beta" ]; then
libtaos_info=${libtaos_info}_${verType}
fi
echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo}
# 2. cmake executable file
compile_dir="${top_dir}/debug"
if [ -d ${compile_dir} ]; then
${csudo} rm -rf ${compile_dir}
@ -122,16 +170,12 @@ fi
${csudo} mkdir -p ${compile_dir}
cd ${compile_dir}
# arm only support lite ver
if [ -z "$armver" ]; then
cmake ../
elif [ "$armver" == "arm64" ]; then
cmake ../ -DARMVER=arm64
elif [ "$armver" == "arm32" ]; then
cmake ../ -DARMVER=arm32
# check supported cpu types
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
cmake ../ -DCPUTYPE=${cpuType}
else
echo "input parameter error!!!"
return
echo "input cpuType=${cpuType} error!!!"
exit 1
fi
make
@ -143,28 +187,28 @@ cd ${curr_dir}
#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"
echo "do deb package for the ubuntu system"
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version}
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
echo "do rpm package for the centos system"
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version}
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
echo "do tar.gz package for all systems"
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${armver}
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${armver}
${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
# 4. Clean up temporary compile directories
#${csudo} rm -rf ${compile_dir}
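For reference, a typical invocation matching the usage string above might be:
```shell
./release.sh -v lite -c x64 -o Linux -V stable
```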

View File

@ -9,9 +9,13 @@
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/rpmworkroom"
spec_file="${script_dir}/tdengine.spec"
@ -54,9 +58,30 @@ ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, then clean temp dir
# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo} cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "lite" ]; then
rpmname="TDengine-server-edge"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or lite"
exit 1
fi
if [ "$verType" == "beta" ]; then
rpmname=${rpmname}-${verType}".rpm"
elif [ "$verType" == "stable" ]; then
rpmname=${rpmname}".rpm"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
cd ..
${csudo} rm -rf ${pkg_dir}

14
packaging/tools/get_os.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
#
# This script detects the name of the current operating system
# by parsing the NAME field of the /etc/*-release files.
set -e
# set -x
# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval
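As a quick illustration of the output, on a host whose /etc/os-release contains NAME="Ubuntu" the script prints:
```shell
$ ./get_os.sh
Ubuntu
```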

View File

@ -7,7 +7,7 @@ set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -m "$0"))
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"

View File

@ -7,7 +7,7 @@ set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -m "$0"))
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"

View File

@ -9,7 +9,7 @@ set -e
# -----------------------Variables definition---------------------
source_dir=$1
binary_dir=$2
script_dir=$(dirname $(readlink -m "$0"))
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
@ -47,21 +47,54 @@ initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which insserv &> /dev/null); then
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
service_config_dir="/etc/init.d"
elif $(which update-rc.d &> /dev/null); then
service_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
service_config_dir="/etc/init.d"
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
echo "this is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian" ; then
echo "this is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin" ; then
echo "this is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos" ; then
echo "this is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora" ; then
echo "this is fedora system"
os_type=2
else
echo "this is other linux system"
os_type=0
fi
function kill_taosd() {
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
${csudo} kill -9 ${pid} || :
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function install_main_path() {
@ -153,20 +186,26 @@ function install_examples() {
}
function clean_service_on_sysvinit() {
restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then
${csudo} service taosd stop || :
fi
${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
${csudo} rm -f ${service_config_dir}/taosd || :
if ((${initd_mod}==1)); then
${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || :
${csudo} chkconfig --del taosd || :
elif ((${initd_mod}==2)); then
${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || :
${csudo} insserv -r taosd || :
elif ((${initd_mod}==3)); then
${csudo} update-rc.d -f taosd remove || :
fi
# ${csudo} update-rc.d -f taosd remove || :
${csudo} rm -f ${service_config_dir}/taosd || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function install_service_on_sysvinit() {
@ -175,19 +214,26 @@ function install_service_on_sysvinit() {
sleep 1
# Install taosd service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
# TODO: for centos, change here
if ((${initd_mod}==1)); then
${csudo} insserv taosd || :
${csudo} chkconfig --add taosd || :
${csudo} chkconfig --level 2345 taosd on || :
elif ((${initd_mod}==2)); then
${csudo} insserv taosd || :
${csudo} insserv -d taosd || :
elif ((${initd_mod}==3)); then
${csudo} update-rc.d taosd defaults || :
fi
# ${csudo} update-rc.d taosd defaults
# chkconfig mysqld on
}
function clean_service_on_systemd() {
@ -237,7 +283,7 @@ function install_service() {
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
# must manual start taosd
# must manual stop taosd
kill_taosd
fi
}

View File

@ -1,17 +1,20 @@
#!/bin/bash
#
# Generate tar.gz package for linux client
# Generate tar.gz package for the linux client on all os systems
set -e
set -x
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
armver=$4
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
@ -19,7 +22,7 @@ code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
install_dir="${release_dir}/TDengine-client-${version}"
install_dir="${release_dir}/TDengine-client"
# Directories and files.
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
@ -42,12 +45,13 @@ cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install*
# Copy example code
mkdir -p ${install_dir}/examples
cp -r ${top_dir}/tests/examples/c ${install_dir}/examples
cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples
cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples
cp -r ${top_dir}/tests/examples/python ${install_dir}/examples
cp -r ${top_dir}/tests/examples/R ${install_dir}/examples
cp -r ${top_dir}/tests/examples/go ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
# Copy driver
mkdir -p ${install_dir}/driver
@ -67,12 +71,25 @@ cp -r ${connector_dir}/go ${install_dir}/connector
# exit 1
cd ${release_dir}
if [ -z "$armver" ]; then
tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm64" ]; then
tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm32" ]; then
tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "lite" ]; then
pkg_name=${install_dir}-edge-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or lite"
exit 1
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stable or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files
cd ${curr_dir}

View File

@ -1,15 +1,21 @@
#!/bin/bash
#
# Generate deb package for other os systems (not ubuntu or centos)
# Generate tar.gz package for all os systems
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
armver=$4
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
@ -17,7 +23,7 @@ code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
install_dir="${release_dir}/TDengine-${version}"
install_dir="${release_dir}/TDengine-server"
# Directories and files.
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh"
@ -78,12 +84,25 @@ cp -r ${connector_dir}/go ${install_dir}/connector
# exit 1
cd ${release_dir}
if [ -z "$armver" ]; then
tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm64" ]; then
tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm32" ]; then
tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "lite" ]; then
pkg_name=${install_dir}-edge-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or lite"
exit 1
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files
cd ${curr_dir}

View File

@ -4,7 +4,7 @@
# is required to use systemd to manage services at boot
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -m "$0"))
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"

View File

@ -17,7 +17,7 @@ done
declare -A dirHash
for linkFile in $(find -L $linkDir -xtype l); do
targetFile=$(readlink -m $linkFile)
targetFile=$(readlink -f $linkFile)
echo "targetFile: ${targetFile}"
# TODO : Extract directory part and basename part
dirName=$(dirname $(dirname ${targetFile}))

View File

@ -51,7 +51,9 @@ ELSEIF (TD_WINDOWS_64)
# generate dynamic library (*.dll)
ADD_LIBRARY(taos SHARED ${SRC})
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
TARGET_LINK_LIBRARIES(taos trpc)
ELSEIF (TD_DARWIN_64)

View File

@ -94,7 +94,7 @@ typedef struct SRetrieveSupport {
tOrderDescriptor *pOrderDescriptor;
tColModel * pFinalColModel; // colModel for final result
SSubqueryState * pState;
int32_t vnodeIdx; // index of current vnode in vnode list
int32_t subqueryIndex; // index of current vnode in vnode list
SSqlObj * pParentSqlObj;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times

View File

@ -23,14 +23,14 @@ extern "C" {
/*
* @date 2018/09/30
*/
#include <limits.h>
#include <stdio.h>
#include "os.h"
#include "textbuffer.h"
#include "tscSecondaryMerge.h"
#include "tsclient.h"
#include "tsdb.h"
#include "tscSecondaryMerge.h"
#define UTIL_METER_IS_METRIC(metaInfo) (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_METRIC(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo)))
#define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE))
@ -52,7 +52,6 @@ typedef struct SParsedDataColInfo {
typedef struct SJoinSubquerySupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
bool hasMore; // has data from vnode to fetch
int32_t subqueryIndex; // index of sub query
int64_t interval; // interval time
SLimitVal limit; // limit info
@ -62,26 +61,28 @@ typedef struct SJoinSubquerySupporter {
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupbyExpr;
struct STSBuf* pTSBuf;
FILE* f;
char path[PATH_MAX];
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path
} SJoinSubquerySupporter;
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
STableDataBlocks* tscCreateDataBlock(int32_t size);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
STableDataBlocks** dataBlocks);
void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
uint32_t offset);
SDataBlockList* tscCreateBlockArrayList();
void* tscDestroyBlockArrayList(SDataBlockList* pList);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
void tscFreeUnusedDataBlocks(SDataBlockList* pList);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList);
STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, char* tableId);
STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name);
int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, const char* tableId,
STableDataBlocks** dataBlocks);
SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx);
SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
@ -97,6 +98,8 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
bool tscIsPointInterpQuery(SSqlCmd* pCmd);
bool tscIsTWAQuery(SSqlCmd* pCmd);
bool tscProjectionQueryOnMetric(SSqlCmd* pCmd);
bool tscProjectionQueryOnTable(SSqlCmd* pCmd);
bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd);
bool tscQueryOnMetric(SSqlCmd* pCmd);
bool tscQueryMetricTags(SSqlCmd* pCmd);
@ -107,14 +110,6 @@ void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t
void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex);
//TODO refactor, remove
void SStringFree(SString* str);
void SStringCopy(SString* pDest, const SString* pSrc);
SString SStringCreate(const char* str);
int32_t SStringAlloc(SString* pStr, int32_t size);
int32_t SStringEnsureRemain(SString* pStr, int32_t size);
int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex);
void tscClearInterpInfo(SSqlCmd* pCmd);
@ -175,7 +170,6 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str);
void tscTagCondCopy(STagCond* dest, const STagCond* src);
void tscTagCondRelease(STagCond* pCond);
void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd);
@ -220,18 +214,20 @@ void tscDoQuery(SSqlObj* pSql);
* @param pPrevSql
* @return
*/
SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param,
SSqlObj* pPrevSql);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex);
void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex);
int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid);
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid);
TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void (*fp)(void*, TAOS_RES*, int),
void* param, void** taos);
void sortRemoveDuplicates(STableDataBlocks* dataBuf);
void tscPrintSelectClause(SSqlCmd* pCmd);
#ifdef __cplusplus
}
#endif

View File

@ -108,10 +108,8 @@ struct SSqlInfo;
typedef struct SSqlGroupbyExpr {
int16_t tableIndex;
int16_t numOfGroupCols;
SColIndexEx columnInfo[TSDB_MAX_TAGS]; // group by columns information
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SSqlGroupbyExpr;
@ -120,7 +118,12 @@ typedef struct SMeterMetaInfo {
SMeterMeta * pMeterMeta; // metermeta
SMetricMeta *pMetricMeta; // metricmeta
char name[TSDB_METER_ID_LEN + 1];
/*
* 1. keep the vnode index during the multi-vnode super table projection query
* 2. keep the vnode index for multi-vnode insertion
*/
int32_t vnodeIndex;
char name[TSDB_METER_ID_LEN + 1]; // table(super table) name
int16_t numOfTags; // total required tags in query, including groupby tags
int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection
} SMeterMetaInfo;
@ -188,7 +191,7 @@ typedef struct SString {
typedef struct SCond {
uint64_t uid;
SString cond;
char * cond;
} SCond;
typedef struct SJoinNode {
@ -228,17 +231,23 @@ typedef struct SParamInfo {
typedef struct STableDataBlocks {
char meterId[TSDB_METER_ID_LEN];
int8_t tsSource;
bool ordered;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgid; // virtual group id
int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending
int32_t numOfMeters; // number of tables in current submit block
int64_t vgid;
int64_t prevTS;
int32_t numOfMeters;
int32_t rowSize;
int32_t rowSize; // row size for current table
uint32_t nAllocSize;
uint32_t headerSize; // header for metadata (submit metadata)
uint32_t size;
/*
* the metermeta for current table, the metermeta will be used during submit stage, keep a ref
* to avoid it to be removed from cache
*/
SMeterMeta* pMeterMeta;
union {
char *filename;
char *pData;
@ -252,8 +261,8 @@ typedef struct STableDataBlocks {
typedef struct SDataBlockList {
int32_t idx;
int32_t nSize;
int32_t nAlloc;
uint32_t nSize;
uint32_t nAlloc;
char * userParam; /* user assigned parameters for async query */
void * udfp; /* user defined function pointer, used in async model */
STableDataBlocks **pData;
@ -262,16 +271,17 @@ typedef struct SDataBlockList {
typedef struct {
SOrderVal order;
int command;
int count;// TODO refactor
int count; // TODO refactor
union {
bool existsCheck; // check if the table exists
int8_t showType; // show command type
};
int8_t isParseFinish;
int8_t isInsertFromFile; // load data from file or not
bool import; // import/insert type
char msgType;
uint8_t msgType;
uint16_t type; // query type
char intervalTimeUnit;
int64_t etime, stime;
@ -296,7 +306,6 @@ typedef struct {
SLimitVal slimit;
int64_t globalLimit;
STagCond tagCond;
int16_t vnodeIdx; // vnode index in pMetricMeta for metric query
int16_t interpoType; // interpolate type
int16_t numOfTables;
@ -376,16 +385,14 @@ typedef struct _sql_obj {
char * sqlstr;
char retry;
char maxRetry;
char index;
uint8_t index;
char freed : 4;
char listed : 4;
tsem_t rspSem;
tsem_t emptyRspSem;
SSqlCmd cmd;
SSqlRes res;
char numOfSubs;
uint16_t numOfSubs;
char* asyncTblPos;
void* pTableHashList;
struct _sql_obj **pSubs;
@ -479,6 +486,8 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql);
void tscKillMetricQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(STscObj *pObj);
bool tscHasReachLimitation(SSqlObj* pSql);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
// transfer SSqlInfo to SqlCmd struct

View File

@ -239,7 +239,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
jniError("jobj:%p, connection is already closed", jobj);
return JNI_CONNECTION_NULL;
}
@ -252,6 +252,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
if (dst == NULL) {
jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon);
return JNI_OUT_OF_MEMORY;
}
@ -260,9 +261,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
//todo handle error
}
jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst);
int code = taos_query(tscon, dst);
if (code != 0) {
jniError("jobj:%p, conn:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst);
jniError("jobj:%p, conn:%p, code:%d, msg:%s", jobj, tscon, code, taos_errstr(tscon));
free(dst);
return JNI_TDENGINE_ERROR;
} else {
@ -271,9 +274,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
if (pSql->cmd.command == TSDB_SQL_INSERT) {
affectRows = taos_affected_rows(tscon);
jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst);
jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows);
} else {
jniTrace("jobj:%p, conn:%p, code:%d, sql:%s", jobj, tscon, code, dst);
jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code);
}
free(dst);
@ -307,7 +310,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(
if (tscIsUpdateQuery(tscon)) {
ret = 0; // for update query, no result pointer
jniTrace("jobj:%p, conn:%p, no result", jobj, tscon);
jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon);
} else {
ret = (jlong) taos_use_result(tscon);
jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret);
@ -463,11 +466,17 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i]));
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i]));
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
}
break;
case TSDB_DATA_TYPE_BINARY: {
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that the terminating null does not exist
@ -496,7 +505,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
jniError("jobj:%p, connection is already closed", jobj);
return JNI_CONNECTION_NULL;
} else {
jniTrace("jobj:%p, conn:%p, close connection success", jobj, tscon);
@ -612,11 +621,17 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i]));
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i]));
case TSDB_DATA_TYPE_DOUBLE:{
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
}
break;
case TSDB_DATA_TYPE_BINARY: {
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that the terminating null does not exist

File diff suppressed because it is too large

View File

@ -108,11 +108,11 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols,
return NULL;
}
int32_t i = 0;
size_t nodeSize = sizeof(tSQLSyntaxNode);
tSQLSyntaxNode *pNode = NULL;
if (pToken->type == TK_ID || pToken->type == TK_TBNAME) {
int32_t i = 0;
if (pToken->type == TK_ID) {
do {
size_t len = strlen(pSchema[i].name);
@ -269,7 +269,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
// get the operator of expr
uint8_t optr = getBinaryExprOptr(&t0);
if (optr <= 0) {
if (optr == 0) {
pError("not support binary operator:%d", t0.type);
tSQLSyntaxNodeDestroy(pLeft, NULL);
return NULL;
@ -324,8 +324,9 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
return pn;
} else {
uint8_t localOptr = getBinaryExprOptr(&t0);
if (localOptr <= 0) {
if (localOptr == 0) {
pError("not support binary operator:%d", t0.type);
free(pBinExpr);
return NULL;
}
@ -418,6 +419,7 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) {
if (pExpr == NULL) {
*dst = 0;
*len = 0;
return;
}
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType);
@ -490,12 +492,12 @@ static void setInitialValueForRangeQueryCondition(tSKipListQueryCond *q, int8_t
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY: {
q->upperBnd.nType = type;
q->upperBnd.pz = "\0";
q->upperBnd.pz = NULL;
q->upperBnd.nLen = -1;
q->lowerBnd.nType = type;
q->lowerBnd.pz = "\0";
q->lowerBnd.nLen = 0;
q->lowerBnd.pz = NULL;
q->lowerBnd.nLen = -1;
}
}
}
@ -641,16 +643,15 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults
}
/*
*
* traverse the result and apply the function to each item to check if the item is qualified or not
*/
void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipListNode *, void *),
tQueryResultset * pResult) {
static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) {
assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE);
// brutal force search
// brute-force scan the result list and check each item
int64_t num = pResult->num;
for (int32_t i = 0, j = 0; i < pResult->num; ++i) {
if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], pExpr->info) == true)) {
if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) {
pResult->pRes[j++] = pResult->pRes[i];
} else {
num--;

View File

@ -51,7 +51,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
}
int32_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string too long");
tscQueueAsyncError(fp, param);
return;
@ -95,7 +95,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sqlstr);
tscTrace("%p Async SQL: %s, pObj:%p", pSql, pSql->sqlstr, pObj);
tscDump("%p pObj:%p, Async SQL: %s", pSql, pObj, pSql->sqlstr);
int32_t code = tsParseSql(pSql, pObj->acctId, pObj->db, true);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
@ -121,7 +121,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf
// sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx
if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) {
// vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx
assert(pCmd->vnodeIdx >= 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->vnodeIndex >= 0);
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
@ -133,8 +134,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
if ((++(pCmd->vnodeIdx)) < tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) {
tscTrace("%p retrieve data from next vnode:%d", pSql, pCmd->vnodeIdx);
if ((++(pMeterMetaInfo->vnodeIndex)) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
tscTrace("%p retrieve data from next vnode:%d", pSql, pMeterMetaInfo->vnodeIndex);
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
@ -157,7 +158,6 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL) { // error
tscError("sql object is NULL");
tscQueueAsyncError(pSql->fetchFp, param);
return;
}
@ -272,7 +272,8 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
/*
* vnode is denoted by vnodeIdx; continue to query the vnode specified by vnodeIdx until all vnodes have been retrieved
*/
assert(pCmd->vnodeIdx >= 1);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->vnodeIndex >= 0);
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
@ -283,7 +284,7 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
if ((++pCmd->vnodeIdx) <= tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) {
if ((++pMeterMetaInfo->vnodeIndex) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
tscResetForNextRetrieve(pRes);
@ -405,8 +406,11 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
assert(!pCmd->isInsertFromFile && pSql->signature == pSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pCmd->numOfTables == 1);
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) {
if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) {
// restore user defined fp
pSql->fp = pSql->fetchFp;
tscTrace("%p Async insertion completed, destroy data block list", pSql);
@ -418,17 +422,17 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
(*pSql->fp)(pSql->param, tres, numOfRows);
} else {
do {
code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]);
code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pMeterMetaInfo->vnodeIndex++]);
if (code != TSDB_CODE_SUCCESS) {
tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d",
pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code);
pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize, code);
}
} while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize);
} while (code != TSDB_CODE_SUCCESS && pMeterMetaInfo->vnodeIndex < pDataBlocks->nSize);
// build submit msg may fail
if (code == TSDB_CODE_SUCCESS) {
tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize);
tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize);
tscProcessSql(pSql);
}
}
@ -484,11 +488,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
// check whether this is a sub-query of a metric query first; if so, enter another routine
if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx >= 0 && pSql->param != NULL);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL);
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSqlObj;
assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx &&
assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex &&
pMeterMetaInfo->pMeterMeta->numOfTags != 0);
tscTrace("%p get metricMeta during metric query successfully", pSql);
@ -503,9 +507,22 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
} else { // normal async query continues
if (pCmd->isParseFinish) {
tscTrace("%p resend data to vnode in metermeta callback since sql has been parsed completed", pSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
assert(code == TSDB_CODE_SUCCESS);
if (pMeterMetaInfo->pMeterMeta) {
code = tscSendMsgToServer(pSql);
if (code == TSDB_CODE_SUCCESS) return;
}
} else {
code = tsParseSql(pSql, pObj->acctId, pObj->db, false);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
}
} else { // stream computing
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);

View File

@ -13,8 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
#include "os.h"
#include "taosmsg.h"
#include "tast.h"
@ -71,7 +69,8 @@ for (int32_t i = 0; i < (ctx)->tagInfo.numOfTagCols; ++i) { \
} \
} while(0);
void noop(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {}
void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {}
void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {}
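
The single noop is split into noop1 and noop2 here because the aAggs dispatch table below stores function pointers of two different shapes, a per-block function taking only the context and a per-row function that also takes an index, and calling through a mismatched pointer type is undefined behavior in C. A minimal self-contained sketch of the pattern; Ctx, per_block_fn and per_row_fn are illustrative names, not the real types:

#include <stdio.h>

typedef struct Ctx { int size; } Ctx;

/* the two function-pointer shapes stored in the dispatch table */
typedef void (*per_block_fn)(Ctx *c);         /* like the xFunction slot  */
typedef void (*per_row_fn)(Ctx *c, int row);  /* like the xFunctionF slot */

/* one no-op per signature: never reuse one across incompatible slots */
static void noop_block(Ctx *c) { (void)c; }
static void noop_row(Ctx *c, int row) { (void)c; (void)row; }

typedef struct FuncElem {
  per_block_fn onBlock;
  per_row_fn   onRow;
} FuncElem;

int main(void) {
  Ctx ctx = { .size = 4 };
  FuncElem elem = { noop_block, noop_row };  /* types match exactly */
  elem.onBlock(&ctx);
  elem.onRow(&ctx, 0);
  puts("dispatch ok");
  return 0;
}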
typedef struct tValuePair {
tVariant v;
@ -509,10 +508,10 @@ static void do_sum(SQLFunctionCtx *pCtx) {
assert(pCtx->size >= pCtx->preAggVals.numOfNull);
if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) {
int64_t *retVal = pCtx->aOutputBuf;
int64_t *retVal = (int64_t*) pCtx->aOutputBuf;
*retVal += pCtx->preAggVals.sum;
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = pCtx->aOutputBuf;
double *retVal = (double*) pCtx->aOutputBuf;
*retVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.sum));
}
} else { // computing based on the true data block
@ -520,7 +519,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
notNullElems = 0;
if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) {
int64_t *retVal = pCtx->aOutputBuf;
int64_t *retVal = (int64_t*) pCtx->aOutputBuf;
if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
LIST_ADD_N(*retVal, pCtx, pData, int8_t, notNullElems, pCtx->inputType);
@ -532,10 +531,10 @@ static void do_sum(SQLFunctionCtx *pCtx) {
LIST_ADD_N(*retVal, pCtx, pData, int64_t, notNullElems, pCtx->inputType);
}
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
double *retVal = pCtx->aOutputBuf;
double *retVal = (double*) pCtx->aOutputBuf;
LIST_ADD_N(*retVal, pCtx, pData, double, notNullElems, pCtx->inputType);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = pCtx->aOutputBuf;
double *retVal = (double*) pCtx->aOutputBuf;
LIST_ADD_N(*retVal, pCtx, pData, float, notNullElems, pCtx->inputType);
}
}
@ -555,7 +554,7 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) {
}
SET_VAL(pCtx, 1, 1);
int64_t *res = pCtx->aOutputBuf;
int64_t *res = (int64_t*) pCtx->aOutputBuf;
if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) {
*res += GET_INT8_VAL(pData);
@ -566,10 +565,10 @@ static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) {
} else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
*res += GET_INT64_VAL(pData);
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
double *retVal = pCtx->aOutputBuf;
double *retVal = (double*) pCtx->aOutputBuf;
*retVal += GET_DOUBLE_VAL(pData);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = pCtx->aOutputBuf;
double *retVal = (double*) pCtx->aOutputBuf;
*retVal += GET_FLOAT_VAL(pData);
}
@ -696,7 +695,7 @@ static int32_t first_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY
return BLK_DATA_NO_NEEDED;
}
SFirstLastInfo *pInfo = (pCtx->aOutputBuf + pCtx->inputBytes);
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else { // data in current block is not earlier than current result
@ -710,7 +709,7 @@ static int32_t last_dist_data_req_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY
return BLK_DATA_NO_NEEDED;
}
SFirstLastInfo *pInfo = (pCtx->aOutputBuf + pCtx->inputBytes);
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else {
@ -844,7 +843,7 @@ static void avg_func_merge(SQLFunctionCtx *pCtx) {
static void avg_func_second_merge(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
double *sum = pCtx->aOutputBuf;
double *sum = (double*) pCtx->aOutputBuf;
char * input = GET_INPUT_CHAR(pCtx);
for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) {
@ -969,10 +968,10 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
TYPED_LOOPCHECK_N(int16_t, pOutput, p, pCtx, pCtx->inputType, isMin, *notNullElems);
} else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
int32_t *pData = p;
int32_t *retVal = pOutput;
int32_t *retVal = (int32_t*) pOutput;
for (int32_t i = 0; i < pCtx->size; ++i) {
if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*)&pData[i], pCtx->inputType)) {
continue;
}
@ -1217,27 +1216,27 @@ static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin
UPDATE_DATA(pCtx, *output, i, num, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) {
int16_t *output = pCtx->aOutputBuf;
int16_t *output = (int16_t*) pCtx->aOutputBuf;
int16_t i = GET_INT16_VAL(pData);
UPDATE_DATA(pCtx, *output, i, num, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_INT) {
int32_t *output = pCtx->aOutputBuf;
int32_t *output = (int32_t*) pCtx->aOutputBuf;
int32_t i = GET_INT32_VAL(pData);
UPDATE_DATA(pCtx, *output, i, num, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) {
int64_t *output = pCtx->aOutputBuf;
int64_t *output = (int64_t*) pCtx->aOutputBuf;
int64_t i = GET_INT64_VAL(pData);
UPDATE_DATA(pCtx, *output, i, num, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
float *output = pCtx->aOutputBuf;
float *output = (float*) pCtx->aOutputBuf;
float i = GET_FLOAT_VAL(pData);
UPDATE_DATA(pCtx, *output, i, num, isMin, key);
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) {
double *output = pCtx->aOutputBuf;
double *output = (double*) pCtx->aOutputBuf;
double i = GET_DOUBLE_VAL(pData);
UPDATE_DATA(pCtx, *output, i, num, isMin, key);
@ -1301,7 +1300,7 @@ static void stddev_function(SQLFunctionCtx *pCtx) {
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_INT: {
for (int32_t i = 0; i < pCtx->size; ++i) {
if (pCtx->hasNull && isNull(&((int32_t *)pData)[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) {
continue;
}
*retVal += POW2(((int32_t *)pData)[i] - avg);
@ -1585,7 +1584,7 @@ static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) {
assert(pCtx->resultInfo->superTableQ);
char * pData = GET_INPUT_CHAR(pCtx);
SFirstLastInfo *pInput = (pData + pCtx->outputBytes);
SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes);
if (pInput->hasResult != DATA_SET_FLAG) {
return;
}
@ -1668,7 +1667,7 @@ static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t ind
if (pInfo->hasResult != DATA_SET_FLAG || pInfo->ts < timestamp[index]) {
#if defined(_DEBUG_VIEW)
pTrace("assign index:%d, ts:%lld, val:%d, ", index, timestamp[index], *(int32_t *)pData);
pTrace("assign index:%d, ts:%" PRId64 ", val:%d, ", index, timestamp[index], *(int32_t *)pData);
#endif
memcpy(pCtx->aOutputBuf, pData, pCtx->inputBytes);
@ -1763,7 +1762,7 @@ static void last_dist_func_merge(SQLFunctionCtx *pCtx) {
static void last_dist_func_second_merge(SQLFunctionCtx *pCtx) {
char *pData = GET_INPUT_CHAR(pCtx);
SFirstLastInfo *pInput = (pData + pCtx->outputBytes);
SFirstLastInfo *pInput = (SFirstLastInfo*) (pData + pCtx->outputBytes);
if (pInput->hasResult != DATA_SET_FLAG) {
return;
}
@ -1871,7 +1870,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData,
val.i64Key >= pList[pInfo->num - 1]->v.i64Key) ||
((type >= TSDB_DATA_TYPE_FLOAT && type <= TSDB_DATA_TYPE_DOUBLE) &&
val.dKey >= pList[pInfo->num - 1]->v.dKey)) {
valuePairAssign(pList[pInfo->num], type, &val.i64Key, ts, pTags, pTagInfo, stage);
valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage);
} else {
int32_t i = pInfo->num - 1;
@ -1887,7 +1886,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData,
}
}
valuePairAssign(pList[i + 1], type, &val.i64Key, ts, pTags, pTagInfo, stage);
valuePairAssign(pList[i + 1], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage);
}
pInfo->num++;
@ -1909,7 +1908,7 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData,
}
}
valuePairAssign(pList[i], type, &val.i64Key, ts, pTags, pTagInfo, stage);
valuePairAssign(pList[i], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage);
}
}
}
@ -1923,7 +1922,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa
if (pInfo->num < maxLen) {
if (pInfo->num == 0) {
valuePairAssign(pList[pInfo->num], type, &val.i64Key, ts, pTags, pTagInfo, stage);
valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64Key, ts, pTags, pTagInfo, stage);
} else {
int32_t i = pInfo->num - 1;
@ -1939,7 +1938,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa
}
}
valuePairAssign(pList[i + 1], type, &val.i64Key, ts, pTags, pTagInfo, stage);
valuePairAssign(pList[i + 1], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage);
}
pInfo->num++;
@ -1961,7 +1960,7 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa
}
}
valuePairAssign(pList[i], type, &val.i64Key, ts, pTags, pTagInfo, stage);
valuePairAssign(pList[i], type, (const char*)&val.i64Key, ts, pTags, pTagInfo, stage);
}
}
}
@ -2101,7 +2100,7 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi
return true;
}
tValuePair *pRes = pTopBotInfo->res;
tValuePair *pRes = (tValuePair*) pTopBotInfo->res;
if (functionId == TSDB_FUNC_TOP) {
switch (pCtx->inputType) {
@ -2153,7 +2152,7 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
// only the first-stage merge writes data directly into the final output buffer
if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) {
return pCtx->aOutputBuf;
return (STopBotInfo*) pCtx->aOutputBuf;
} else { // for normal table query and super table at the secondary_stage, result is written to intermediate buffer
return pResInfo->interResultBuf;
}
@ -2167,14 +2166,14 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
*/
static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo);
pTopBotInfo->res = tmp;
pTopBotInfo->res = (tValuePair**) tmp;
tmp += POINTER_BYTES * pCtx->param[0].i64Key;
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
for (int32_t i = 0; i < pCtx->param[0].i64Key; ++i) {
pTopBotInfo->res[i] = tmp;
pTopBotInfo->res[i] = (tValuePair*) tmp;
pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair);
tmp += size;
}
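
buildTopBotStruct carves a single flat allocation into three regions: the STopBotInfo header, an array of tValuePair pointers, and the tValuePair payloads with their trailing tag bytes. A hedged sketch of the same carve-one-buffer layout using simplified stand-in types (Pair and TopInfo are illustrative, not the real structs):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

typedef struct Pair { long v; char *tags; } Pair;
typedef struct TopInfo { int num; Pair **res; } TopInfo;

/* one calloc holds: TopInfo | Pair*[k] | k * (Pair + tagLen bytes) */
static TopInfo *topInfoCreate(int k, int tagLen) {
  size_t slot = sizeof(Pair) + tagLen;
  TopInfo *info = calloc(1, sizeof(TopInfo) + k * sizeof(Pair *) + k * slot);
  if (info == NULL) return NULL;

  char *tmp = (char *)info + sizeof(TopInfo);
  info->res = (Pair **)tmp;            /* pointer array right after header */
  tmp += k * sizeof(Pair *);

  for (int i = 0; i < k; ++i) {        /* then the payload slots */
    info->res[i] = (Pair *)tmp;
    info->res[i]->tags = tmp + sizeof(Pair);
    tmp += slot;
  }
  return info;
}

int main(void) {
  TopInfo *info = topInfoCreate(3, 8);
  info->res[1]->v = 42;
  memcpy(info->res[1]->tags, "tagdata", 8);
  printf("%ld %s\n", info->res[1]->v, info->res[1]->tags);
  free(info);   /* one free releases everything */
  return 0;
}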
@ -2479,7 +2478,7 @@ static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
if (pResInfo->superTableQ && pCtx->currentStage != SECONDARY_STAGE_MERGE) {
return pCtx->aOutputBuf;
return (SAPercentileInfo*) pCtx->aOutputBuf;
} else {
return pResInfo->interResultBuf;
}
@ -2590,8 +2589,8 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx);
pInput->pHisto = (char *)pInput + sizeof(SAPercentileInfo);
pInput->pHisto->elems = (char *)pInput->pHisto + sizeof(SHistogramInfo);
pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo));
pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo));
if (pInput->pHisto->numOfElems <= 0) {
return;
@ -2604,13 +2603,13 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
if (pHisto->numOfElems <= 0) {
memcpy(pHisto, pInput->pHisto, size);
pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo);
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
} else {
pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo);
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo);
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
tHistogramDestroy(&pRes);
}
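
Every memcpy of the histogram blob above is followed by re-basing the elems pointer, because the blob stores an interior pointer: copying the bytes copies the old address too, which would otherwise keep pointing into the source buffer. A small standalone sketch of the rebase-after-copy rule, with hypothetical Histo and Bin types:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

typedef struct Bin { double val; } Bin;
typedef struct Histo { int numOfElems; Bin *elems; /* Bin[] follows */ } Histo;

#define HISTO_BYTES(n) (sizeof(Histo) + (n) * sizeof(Bin))

int main(void) {
  int n = 4;
  Histo *src = calloc(1, HISTO_BYTES(n));
  src->numOfElems = n;
  src->elems = (Bin *)((char *)src + sizeof(Histo));
  src->elems[0].val = 1.5;

  Histo *dst = calloc(1, HISTO_BYTES(n));
  memcpy(dst, src, HISTO_BYTES(n));
  /* dst->elems still points into src here; re-base it onto dst */
  dst->elems = (Bin *)((char *)dst + sizeof(Histo));

  printf("%f\n", dst->elems[0].val);  /* safe even after free(src) */
  free(src);
  free(dst);
  return 0;
}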
@ -2622,8 +2621,8 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) {
SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_CHAR(pCtx);
pInput->pHisto = (char *)pInput + sizeof(SAPercentileInfo);
pInput->pHisto->elems = (char *)pInput->pHisto + sizeof(SHistogramInfo);
pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo));
pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo));
if (pInput->pHisto->numOfElems <= 0) {
return;
@ -2634,9 +2633,9 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) {
if (pHisto->numOfElems <= 0) {
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo);
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
} else {
pHisto->elems = (char *)pHisto + sizeof(SHistogramInfo);
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
tHistogramDestroy(&pOutput->pHisto);
@ -2730,7 +2729,7 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) {
int32_t *p = pData;
// LEASTSQR_CAL_LOOP(pCtx, param, pParamData, p);
for (int32_t i = 0; i < pCtx->size; ++i) {
if (pCtx->hasNull && isNull(p, pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) p, pCtx->inputType)) {
continue;
}
@ -2872,6 +2871,10 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) {
*(int64_t *)(pCtx->aOutputBuf) = pCtx->nStartQueryTimestamp;
}
static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) {
date_col_output_function(pCtx);
}
static void col_project_function(SQLFunctionCtx *pCtx) {
INC_INIT_VAL(pCtx, pCtx->size);
@ -2982,7 +2985,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
int32_t *pOutput = (int32_t *)pCtx->aOutputBuf;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
@ -3014,7 +3017,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
int64_t *pOutput = (int64_t *)pCtx->aOutputBuf;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
@ -3046,7 +3049,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
double *pOutput = (double *)pCtx->aOutputBuf;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
@ -3076,7 +3079,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
float *pOutput = (float *)pCtx->aOutputBuf;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
@ -3107,7 +3110,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
int16_t *pOutput = (int16_t *)pCtx->aOutputBuf;
for (; i < pCtx->size && i >= 0; i += step) {
if (pCtx->hasNull && isNull(&pData[i], pCtx->inputType)) {
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) {
continue;
}
@ -3269,7 +3272,7 @@ char *arithmetic_callback_function(void *param, char *name, int32_t colId) {
static void arithmetic_function(SQLFunctionCtx *pCtx) {
GET_RES_INFO(pCtx)->numOfRes += pCtx->size;
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz;
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, pCtx->size, pCtx->aOutputBuf, sas, pCtx->order,
arithmetic_callback_function);
@ -3277,16 +3280,15 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
pCtx->aOutputBuf += pCtx->outputBytes * pCtx->size * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
}
static bool arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
INC_INIT_VAL(pCtx, 1);
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[0].pz;
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
sas->offset = index;
tSQLBinaryExprCalcTraverse(sas->pExpr->pBinExprInfo.pBinExpr, 1, pCtx->aOutputBuf, sas, pCtx->order,
arithmetic_callback_function);
pCtx->aOutputBuf += pCtx->outputBytes * GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
return true;
}
#define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \
@ -3709,7 +3711,7 @@ static void getStatics_i16(int64_t *primaryKey, int16_t *data, int32_t numOfRow,
// int16_t lastVal = TSDB_DATA_SMALLINT_NULL;
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull(&data[i], TSDB_DATA_TYPE_SMALLINT)) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_SMALLINT)) {
(*numOfNull) += 1;
continue;
}
@ -3749,7 +3751,7 @@ static void getStatics_i32(int64_t *primaryKey, int32_t *data, int32_t numOfRow,
// int32_t lastVal = TSDB_DATA_INT_NULL;
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull(&data[i], TSDB_DATA_TYPE_INT)) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_INT)) {
(*numOfNull) += 1;
continue;
}
@ -3786,7 +3788,7 @@ static void getStatics_i64(int64_t *primaryKey, int64_t *data, int32_t numOfRow,
assert(numOfRow <= INT16_MAX);
for (int32_t i = 0; i < numOfRow; ++i) {
if (isNull(&data[i], TSDB_DATA_TYPE_BIGINT)) {
if (isNull((const char*) &data[i], TSDB_DATA_TYPE_BIGINT)) {
(*numOfNull) += 1;
continue;
}
@ -3941,9 +3943,9 @@ void getStatistics(char *priData, char *data, int32_t size, int32_t numOfRow, in
} else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) {
getStatics_i64(primaryKey, (int64_t *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull);
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
getStatics_d(primaryKey, (double *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull);
getStatics_d(primaryKey, (double *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull);
} else if (type == TSDB_DATA_TYPE_FLOAT) {
getStatics_f(primaryKey, (float *)data, numOfRow, min, max, sum, minIndex, maxIndex, numOfNull);
getStatics_f(primaryKey, (float *)data, numOfRow, (double*) min, (double*) max, (double*) sum, minIndex, maxIndex, numOfNull);
}
}
}
@ -4061,10 +4063,10 @@ static void twa_function(SQLFunctionCtx *pCtx) {
// pCtx->numOfIteratedElems += notNullElems;
}
static bool twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return true;
return;
}
SET_VAL(pCtx, 1, 1);
@ -4072,7 +4074,7 @@ static bool twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
TSKEY *primaryKey = pCtx->ptsList;
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
STwaInfo * pInfo = pResInfo->interResultBuf;
STwaInfo *pInfo = pResInfo->interResultBuf;
if (pInfo->lastKey == INT64_MIN) {
pInfo->lastKey = pCtx->nStartQueryTimestamp;
@ -4097,8 +4099,6 @@ static bool twa_function_f(SQLFunctionCtx *pCtx, int32_t index) {
if (pResInfo->superTableQ) {
memcpy(pCtx->aOutputBuf, pResInfo->interResultBuf, sizeof(STwaInfo));
}
return true;
}
static void twa_func_merge(SQLFunctionCtx *pCtx) {
@ -4110,7 +4110,7 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) {
int32_t numOfNotNull = 0;
for (int32_t i = 0; i < pCtx->size; ++i, indicator += sizeof(STwaInfo)) {
STwaInfo *pInput = indicator;
STwaInfo *pInput = (STwaInfo*) indicator;
if (pInput->hasResult != DATA_SET_FLAG) {
continue;
@ -4212,7 +4212,7 @@ static void interp_function(SQLFunctionCtx *pCtx) {
if (pCtx->outputType == TSDB_DATA_TYPE_FLOAT) {
float v = GET_DOUBLE_VAL(pVal);
assignVal(pCtx->aOutputBuf, &v, pCtx->outputBytes, pCtx->outputType);
assignVal(pCtx->aOutputBuf, (const char*) &v, pCtx->outputBytes, pCtx->outputType);
} else {
assignVal(pCtx->aOutputBuf, pVal, pCtx->outputBytes, pCtx->outputType);
}
@ -4373,7 +4373,7 @@ SQLAggFuncElem aAggs[28] = {{
count_function,
count_function_f,
no_next_step,
noop,
noop1,
count_func_merge,
count_func_merge,
count_load_data_info,
@ -4449,8 +4449,8 @@ SQLAggFuncElem aAggs[28] = {{
stddev_function_f,
stddev_next_step,
stddev_finalizer,
noop,
noop,
noop1,
noop1,
data_req_load_info,
},
{
@ -4464,8 +4464,8 @@ SQLAggFuncElem aAggs[28] = {{
percentile_function_f,
no_next_step,
percentile_finalizer,
noop,
noop,
noop1,
noop1,
data_req_load_info,
},
{
@ -4494,8 +4494,8 @@ SQLAggFuncElem aAggs[28] = {{
first_function_f,
no_next_step,
function_finalizer,
noop,
noop,
noop1,
noop1,
first_data_req_info,
},
{
@ -4509,8 +4509,8 @@ SQLAggFuncElem aAggs[28] = {{
last_function_f,
no_next_step,
function_finalizer,
noop,
noop,
noop1,
noop1,
last_data_req_info,
},
{
@ -4522,10 +4522,10 @@ SQLAggFuncElem aAggs[28] = {{
TSDB_FUNCSTATE_SELECTIVITY,
first_last_function_setup,
last_row_function,
noop,
noop2,
no_next_step,
last_row_finalizer,
noop,
noop1,
last_dist_func_second_merge,
data_req_load_info,
},
@ -4602,8 +4602,8 @@ SQLAggFuncElem aAggs[28] = {{
leastsquares_function_f,
no_next_step,
leastsquares_finalizer,
noop,
noop,
noop1,
noop1,
data_req_load_info,
},
{
@ -4614,9 +4614,9 @@ SQLAggFuncElem aAggs[28] = {{
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
date_col_output_function,
date_col_output_function,
date_col_output_function_f,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
no_data_info,
@ -4628,10 +4628,10 @@ SQLAggFuncElem aAggs[28] = {{
TSDB_FUNC_TS_DUMMY,
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
function_setup,
noop,
noop,
noop1,
noop2,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
data_req_load_info,
@ -4644,9 +4644,9 @@ SQLAggFuncElem aAggs[28] = {{
TSDB_BASE_FUNC_SO,
function_setup,
tag_function,
noop,
noop2,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
no_data_info,
@ -4676,7 +4676,7 @@ SQLAggFuncElem aAggs[28] = {{
tag_function,
tag_function_f,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
no_data_info,
@ -4691,7 +4691,7 @@ SQLAggFuncElem aAggs[28] = {{
col_project_function,
col_project_function_f,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
data_req_load_info,
@ -4706,7 +4706,7 @@ SQLAggFuncElem aAggs[28] = {{
tag_project_function,
tag_project_function_f,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
no_data_info,
@ -4721,7 +4721,7 @@ SQLAggFuncElem aAggs[28] = {{
arithmetic_function,
arithmetic_function_f,
no_next_step,
noop,
noop1,
copy_function,
copy_function,
data_req_load_info,
@ -4736,9 +4736,9 @@ SQLAggFuncElem aAggs[28] = {{
diff_function,
diff_function_f,
no_next_step,
noop,
noop,
noop,
noop1,
noop1,
noop1,
data_req_load_info,
},
// distributed version used in two-stage aggregation processes
@ -4782,8 +4782,8 @@ SQLAggFuncElem aAggs[28] = {{
interp_function,
do_sum_f, // todo filter handle
no_next_step,
noop,
noop,
noop1,
noop1,
copy_function,
no_data_info,
}};

View File

@ -14,8 +14,8 @@
*/
#include "os.h"
#include "tcache.h"
#include "tscJoinProcess.h"
#include "tcache.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tscompression.h"
@ -45,8 +45,8 @@ static bool doCompare(int32_t order, int64_t left, int64_t right) {
}
}
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, SJoinSubquerySupporter* pSupporter2,
TSKEY* st, TSKEY* et) {
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1,
SJoinSubquerySupporter* pSupporter2, TSKEY* st, TSKEY* et) {
STSBuf* output1 = tsBufCreate(true);
STSBuf* output2 = tsBufCreate(true);
@ -88,7 +88,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
#ifdef _DEBUG_VIEW
// for debug purpose
tscPrint("%lld, tags:%d \t %lld, tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
tscPrint("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
#endif
if (elem1.tag < elem2.tag || (elem1.tag == elem2.tag && doCompare(order, elem1.ts, elem2.ts))) {
@ -150,22 +150,21 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
tsBufDestory(pSupporter1->pTSBuf);
tsBufDestory(pSupporter2->pTSBuf);
tscTrace("%p input1:%lld, input2:%lld, %lld for secondary query after ts blocks intersecting",
pSql, numOfInput1, numOfInput2, output1->numOfTotal);
tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks intersecting", pSql,
numOfInput1, numOfInput2, output1->numOfTotal);
return output1->numOfTotal;
}
//todo handle failed to create sub query
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) {
// todo handle failed to create sub query
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState,
/*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) {
SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter));
if (pSupporter == NULL) {
return NULL;
}
pSupporter->pObj = pSql;
pSupporter->hasMore = true;
pSupporter->pState = pState;
pSupporter->subqueryIndex = index;
@ -226,12 +225,6 @@ bool needSecondaryQuery(SSqlObj* pSql) {
* launch secondary stage query to fetch the result that contains timestamp in set
*/
int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
// TODO not launch secondary stage query
// if (!needSecondaryQuery(pSql)) {
// return;
// }
// sub query may not be necessary
int32_t numOfSub = 0;
SJoinSubquerySupporter* pSupporter = NULL;
@ -239,15 +232,22 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
pSupporter = pSql->pSubs[i]->param;
pSupporter->pState->numOfCompleted = 0;
/*
* If the columns are not involved in the final select clause, the secondary query will not be launched
* for the subquery.
*/
if (pSupporter->exprsInfo.numOfExprs > 0) {
++numOfSub;
}
}
// scan all subqueries; if a subquery contains only the ts column, ignore it
int32_t j = 0;
tscTrace("%p start to launch secondary subqueries: %d", pSql, pSql->numOfSubs);
tscTrace(
"%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in "
"select clause",
pSql, pSql->numOfSubs, numOfSub);
int32_t j = 0;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
pSupporter = pSub->param;
@ -259,15 +259,14 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
continue;
}
SSqlObj* pNew = createSubqueryObj(pSql, 0, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL);
SSqlObj* pNew = createSubqueryObj(pSql, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL);
if (pNew == NULL) {
pSql->numOfSubs = i; //revise the number of subquery
pSql->numOfSubs = i; // revise the number of subquery
pSupporter->pState->numOfTotal = i;
pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscDestroyJoinSupporter(pSupporter);
return NULL;
return 0;
}
tscFreeSqlCmdData(&pNew->cmd);
@ -282,7 +281,6 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE;
pNew->cmd.nAggTimeInterval = pSupporter->interval;
pNew->cmd.limit = pSupporter->limit;
pNew->cmd.groupbyExpr = pSupporter->groupbyExpr;
tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0);
@ -302,18 +300,27 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0);
/*
* When handling a projection query in a table-table join, the offset value is modified during the
* timestamp intersection, so the limit from the parent query must be re-applied here.
*/
pSupporter->limit = pSql->cmd.limit;
pNew->cmd.limit = pSupporter->limit;
// fetch the join tag column
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0);
assert(pNew->cmd.tagCond.joinInfo.hasJoin);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd.tagCond, pMeterMetaInfo->pMeterMeta->uid);
pExpr->param[0].i64Key = tagColIndex;
pExpr->numOfParams = 1;
addRequiredTagColumn(&pNew->cmd, tagColIndex, 0);
}
#ifdef _DEBUG_VIEW
tscPrintSelectClause(&pNew->cmd);
#endif
tscProcessSql(pNew);
}
@ -386,7 +393,7 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
if (numOfRows > 0) { // write the data into disk
fwrite(pSql->res.data, pSql->res.numOfRows, 1, pSupporter->f);
fflush(pSupporter->f);
fclose(pSupporter->f);
STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true);
if (pBuf == NULL) {
@ -401,7 +408,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path);
pSupporter->pTSBuf = pBuf;
} else {
tsBufMerge(pSupporter->pTSBuf, pBuf, pSql->cmd.vnodeIdx);
assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex);
tsBufDestory(pBuf);
}
@ -412,8 +422,21 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
taos_fetch_rows_a(tres, joinRetrieveCallback, param);
} else if (numOfRows == 0) { // no data from this vnode anymore
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
if (tscProjectionQueryOnMetric(&pParentSql->cmd)) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
assert(pSql->cmd.numOfTables == 1);
// for projection query, need to try next vnode
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->fp = tscJoinQueryCallback;
tscProcessSql(pSql);
return;
}
}
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres,
pSupporter->subqueryIndex);
@ -451,8 +474,33 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscError("%p retrieve failed, code:%d, index:%d", pSql, numOfRows, pSupporter->subqueryIndex);
}
if (numOfRows >= 0) {
pSql->res.numOfTotal += pSql->res.numOfRows;
}
if (tscProjectionQueryOnMetric(&pSql->cmd) && numOfRows == 0) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
assert(pSql->cmd.numOfTables == 1);
// for projection query, need to try next vnode if current vnode is exhausted
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSupporter->pState->numOfCompleted = 0;
pSupporter->pState->numOfTotal = 1;
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->fp = tscJoinQueryCallback;
tscProcessSql(pSql);
return;
}
}
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
tscTrace("%p secondary retrieve completed, global code:%d", tres, pParentSql->res.code);
assert(pSupporter->pState->numOfCompleted == pSupporter->pState->numOfTotal);
tscTrace("%p all %d secondary retrieves are completed, global code:%d", tres, pSupporter->pState->numOfTotal,
pParentSql->res.code);
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = abs(pSupporter->pState->code);
freeSubqueryObj(pParentSql);
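
Both retrieve paths above rely on the same completion idiom: each finishing subquery atomically increments a shared counter, and only the caller that observes the counter reaching numOfTotal proceeds to the final step, so the last subquery to complete drives the parent regardless of ordering. A hedged sketch of the idiom with C11 atomics and pthreads; SubState and subqueryDone are illustrative names (build with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

typedef struct SubState {
  atomic_int numOfCompleted;
  int        numOfTotal;
} SubState;

static SubState state = { .numOfTotal = 4 };

static void *subqueryDone(void *arg) {
  (void)arg;
  /* fetch_add returns the old value; only the last finisher sees numOfTotal */
  if (atomic_fetch_add(&state.numOfCompleted, 1) + 1 >= state.numOfTotal) {
    puts("all subqueries completed, launch final merge");
  }
  return NULL;
}

int main(void) {
  pthread_t t[4];
  for (int i = 0; i < 4; ++i) pthread_create(&t[i], NULL, subqueryDone, NULL);
  for (int i = 0; i < 4; ++i) pthread_join(t[i], NULL);
  return 0;
}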
@ -466,16 +514,31 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
int32_t numOfFetch = 0;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param;
assert(pSql->numOfSubs >= 1);
SSqlRes* pRes = &pSql->pSubs[i]->res;
if (pRes->row >= pRes->numOfRows && pSupporter->hasMore) {
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes *pRes = &pSql->pSubs[i]->res;
SSqlCmd *pCmd = &pSql->pSubs[i]->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (tscProjectionQueryOnMetric(pCmd)) {
if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes &&
(!tscHasReachLimitation(pSql->pSubs[i]))) {
numOfFetch++;
}
} else {
if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pSql->pSubs[i]))) {
numOfFetch++;
}
}
}
if (numOfFetch > 0) {
if (numOfFetch <= 0) {
return ;
}
// TODO multi-vnode retrieval for projection queries with a limit clause has bugs, since the global limitation is not handled
tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch);
SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param;
@ -492,11 +555,15 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
// wait for all subqueries completed
pSupporter->pState->numOfTotal = numOfFetch;
if (pRes1->row >= pRes1->numOfRows && pSupporter->hasMore) {
tscTrace("%p subquery:%p retrieve data from vnode, index:%d", pSql, pSql1, pSupporter->subqueryIndex);
assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0);
if (pRes1->row >= pRes1->numOfRows) {
tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1,
pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex);
tscResetForNextRetrieve(pRes1);
pSql1->fp = joinRetrieveCallback;
if (pCmd1->command < TSDB_SQL_LOCAL) {
@ -509,7 +576,6 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
// wait for all subquery completed
tsem_wait(&pSql->rspSem);
}
}
// all subqueries return, set the result output index
@ -519,6 +585,10 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
tscTrace("%p all subquery response, retrieve data", pSql);
if (pRes->pColumnIndex != NULL) {
return; // the column transfer support struct has been built
}
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
@ -608,6 +678,20 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
tscSetupOutputColumnIndex(pParentSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
/**
* if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of
* data instead of returning to its invoker
*/
if (pMeterMetaInfo->vnodeIndex > 0 && tscProjectionQueryOnMetric(&pSql->cmd)) {
assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes);
pSupporter->pState->numOfCompleted = 0; // reset the record value
pSql->fp = joinRetrieveCallback; // continue retrieve data
pSql->cmd.command = TSDB_SQL_FETCH;
tscProcessSql(pSql);
} else { // first retrieve from vnode during the secondary stage sub-query
if (pParentSql->fp == NULL) {
tsem_wait(&pParentSql->emptyRspSem);
tsem_wait(&pParentSql->emptyRspSem);
@ -626,6 +710,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
}
}
}
}
}
static int32_t getDataStartOffset() {
@ -731,8 +816,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
strncpy(pTSBuf->path, path, PATH_MAX);
pTSBuf->f = fopen(pTSBuf->path, "r");
pTSBuf->f = fopen(pTSBuf->path, "r+");
if (pTSBuf->f == NULL) {
free(pTSBuf);
return NULL;
}
@ -774,7 +860,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
size_t infoSize = sizeof(STSVnodeBlockInfo) * pTSBuf->numOfVnodes;
STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize);
int64_t pos = ftell(pTSBuf->f);
//int64_t pos = ftell(pTSBuf->f); //pos not used
fread(buf, infoSize, 1, pTSBuf->f);
// the length value for each vnode is not kept in the file, so the length value is not set here
@ -790,13 +877,17 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
struct stat fileStat;
fstat(fileno(pTSBuf->f), &fileStat);
pTSBuf->fileSize = (uint32_t) fileStat.st_size;
pTSBuf->fileSize = (uint32_t)fileStat.st_size;
tsBufResetPos(pTSBuf);
// ascending by default
pTSBuf->cur.order = TSQL_SO_ASC;
pTSBuf->autoDelete = autoDelete;
tscTrace("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f),
pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete);
return pTSBuf;
}
@ -814,12 +905,22 @@ void tsBufDestory(STSBuf* pTSBuf) {
fclose(pTSBuf->f);
if (pTSBuf->autoDelete) {
tscTrace("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path);
unlink(pTSBuf->path);
} else {
tscTrace("tsBuf %p destroyed, tmp file:%s, remains", pTSBuf, pTSBuf->path);
}
free(pTSBuf);
}
static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) {
int32_t last = pTSBuf->numOfVnodes - 1;
assert(last >= 0);
return &pTSBuf->pData[last];
}
static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) {
if (pTSBuf->numOfAlloc <= pTSBuf->numOfVnodes) {
uint32_t newSize = (uint32_t)(pTSBuf->numOfAlloc * 1.5);
@ -836,10 +937,10 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) {
}
if (pTSBuf->numOfVnodes > 0) {
STSVnodeBlockInfo* pPrevBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info;
STSVnodeBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf);
// update prev vnode length info in file
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pPrevBlockInfo);
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pPrevBlockInfoEx->info);
}
// set initial value for vnode block
@ -857,9 +958,9 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) {
// update the header info
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder};
STSBufUpdateHeader(pTSBuf, &header);
return &pTSBuf->pData[pTSBuf->numOfVnodes - 1];
STSBufUpdateHeader(pTSBuf, &header);
return tsBufGetLastVnodeInfo(pTSBuf);
}
static void shrinkBuffer(STSList* ptsData) {
@ -906,8 +1007,10 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
pTSBuf->tsData.len = 0;
pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.compLen += blockSize;
pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.numOfBlocks += 1;
STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf);
pVnodeBlockInfoEx->info.compLen += blockSize;
pVnodeBlockInfoEx->info.numOfBlocks += 1;
shrinkBuffer(&pTSBuf->tsData);
}
@ -1008,13 +1111,13 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData
STSVnodeBlockInfoEx* pBlockInfo = NULL;
STSList* ptsData = &pTSBuf->tsData;
if (pTSBuf->numOfVnodes == 0 || pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.vnode != vnodeId) {
if (pTSBuf->numOfVnodes == 0 || tsBufGetLastVnodeInfo(pTSBuf)->info.vnode != vnodeId) {
writeDataToDisk(pTSBuf);
shrinkBuffer(ptsData);
pBlockInfo = addOneVnodeInfo(pTSBuf, vnodeId);
} else {
pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1];
pBlockInfo = tsBufGetLastVnodeInfo(pTSBuf);
}
assert(pBlockInfo->info.vnode == vnodeId);
@ -1037,6 +1140,8 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData
pTSBuf->numOfTotal += len / TSDB_KEYSIZE;
// the size of the raw data may exceed the size of the default prepared buffer, so
// during getBufBlock the output buffer needs to be expanded to be large enough.
if (ptsData->len >= ptsData->threshold) {
writeDataToDisk(pTSBuf);
shrinkBuffer(ptsData);
@ -1053,10 +1158,10 @@ void tsBufFlush(STSBuf* pTSBuf) {
writeDataToDisk(pTSBuf);
shrinkBuffer(&pTSBuf->tsData);
STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info;
STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf);
// update prev vnode length info in file
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo);
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pBlockInfoEx->info);
// save the ts order into header
STSBufFileHeader header = {
@ -1157,11 +1262,22 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex
}
STSBlock* pBlock = &pTSBuf->block;
size_t s = pBlock->numOfElem * TSDB_KEYSIZE;
/*
* In order to accommodate all the qualified data, the actual buffer size for one block with an identical tag value
* may have exceeded the maximum allowed size in the *tsBufAppend* function (via the expandBuffer function)
*/
if (s > pTSBuf->tsData.allocSize) {
expandBuffer(&pTSBuf->tsData, s);
}
pTSBuf->tsData.len =
tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf,
pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize);
assert(pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem);
assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len));
pCur->vnodeIndex = vnodeIndex;
pCur->blockIndex = blockIndex;
@ -1251,6 +1367,10 @@ bool tsBufNextPos(STSBuf* pTSBuf) {
return false;
}
if (pBlockInfo == NULL) {
return false;
}
int32_t blockIndex = pCur->order == TSQL_SO_ASC ? 0 : pBlockInfo->numOfBlocks - 1;
tsBufGetBlock(pTSBuf, pCur->vnodeIndex + step, blockIndex);
break;
@ -1318,7 +1438,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) {
tsBufFlush(pDestBuf);
// compare with the last vnode id
if (vnodeId != pDestBuf->pData[pDestBuf->numOfVnodes - 1].info.vnode) {
if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) {
int32_t oldSize = pDestBuf->numOfVnodes;
int32_t newSize = oldSize + pSrcBuf->numOfVnodes;
@ -1345,36 +1465,49 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) {
pDestBuf->numOfVnodes = newSize;
} else {
STSVnodeBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[pDestBuf->numOfVnodes - 1];
STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf);
pBlockInfoEx->len += pSrcBuf->pData[0].len;
pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks;
pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen;
pBlockInfoEx->info.vnode = vnodeId;
}
int64_t r = fseek(pDestBuf->f, 0, SEEK_END);
int32_t r = fseek(pDestBuf->f, 0, SEEK_END);
assert(r == 0);
int64_t offset = getDataStartOffset();
int32_t size = pSrcBuf->fileSize - offset;
#ifdef LINUX
ssize_t rc = sendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size);
ssize_t rc = tsendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size);
#else
ssize_t rc = fsendfile(pDestBuf->f, pSrcBuf->f, &offset, size);
#endif
if (rc == -1) {
printf("%s\n", strerror(errno));
tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno));
return -1;
}
if (rc != size) {
printf("%s\n", strerror(errno));
tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno));
return -1;
}
pDestBuf->numOfTotal += pSrcBuf->numOfTotal;
int32_t oldSize = pDestBuf->fileSize;
struct stat fileStat;
fstat(fileno(pDestBuf->f), &fileStat);
pDestBuf->fileSize = (uint32_t)fileStat.st_size;
assert(pDestBuf->fileSize == oldSize + size);
tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf,
pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete);
return 0;
}
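
tsBufMerge appends the data region of the source file onto the destination and then re-checks the destination size with fstat, which catches short writes immediately. A portable sketch of the append-and-verify step using plain stdio; appendFile is a hypothetical helper, whereas the real code uses a sendfile wrapper on Linux:

#include <stdio.h>
#include <sys/stat.h>

/* copy everything from `off` to EOF of src onto the end of dst;
 * returns bytes copied or -1 on error */
static long appendFile(FILE *dst, FILE *src, long off) {
  char buf[4096];
  long total = 0;
  size_t n;
  if (fseek(dst, 0, SEEK_END) != 0) return -1;
  if (fseek(src, off, SEEK_SET) != 0) return -1;
  while ((n = fread(buf, 1, sizeof(buf), src)) > 0) {
    if (fwrite(buf, 1, n, dst) != n) return -1;
    total += n;
  }
  fflush(dst);
  return total;
}

int main(void) {
  FILE *src = fopen("src.bin", "rb"), *dst = fopen("dst.bin", "ab+");
  if (!src || !dst) return 1;

  long copied = appendFile(dst, src, 0);

  struct stat st;                       /* verify, as tsBufMerge does */
  fstat(fileno(dst), &st);
  printf("copied:%ld, dst size now:%ld\n", copied, (long)st.st_size);
  fclose(src);
  fclose(dst);
  return 0;
}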
@ -1391,7 +1524,7 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo);
fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET);
fwrite((void*) pData, 1, len, pTSBuf->f);
fwrite((void*)pData, 1, len, pTSBuf->f);
pTSBuf->fileSize += len;
pTSBuf->tsOrder = order;
@ -1485,7 +1618,7 @@ void tsBufDisplay(STSBuf* pTSBuf) {
while (tsBufNextPos(pTSBuf)) {
STSElem elem = tsBufGetElem(pTSBuf);
printf("%d-%lld-%lld\n", elem.vnode, elem.tag, elem.ts);
printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, *(int64_t*) elem.tag, elem.ts);
}
pTSBuf->cur.order = old;
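
Several logging calls in this change move from %lld to the PRId64 macro from <inttypes.h>, since int64_t is long on LP64 Linux but long long on other platforms, so %lld is not a portable conversion for it. A minimal example of the idiom:

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t ts = 1580443200000LL;
  /* the macro expands to the correct conversion for this platform */
  printf("ts:%" PRId64 ", tag:%" PRId64 "\n", ts, (int64_t)7);
  return 0;
}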

View File

@ -39,32 +39,24 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
case TSDB_DATA_TYPE_NCHAR:
return length;
case TSDB_DATA_TYPE_DOUBLE: {
#ifdef _TD_ARM_32_
double dv = 0;
*(int64_t *)(&dv) = *(int64_t *)pData;
len = sprintf(buf, "%f", dv);
#else
len = sprintf(buf, "%lf", *(double *)pData);
#endif
dv = GET_DOUBLE_VAL(pData);
len = sprintf(buf, "%lf", dv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_FLOAT: {
#ifdef _TD_ARM_32_
float fv = 0;
*(int32_t *)(&fv) = *(int32_t *)pData;
fv = GET_FLOAT_VAL(pData);
len = sprintf(buf, "%f", fv);
#else
len = sprintf(buf, "%f", *(float *)pData);
#endif
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%lld", *(int64_t *)pData);
len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
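
The hunks above collapse the #ifdef _TD_ARM_32_ branches into unconditional GET_DOUBLE_VAL/GET_FLOAT_VAL calls, centralizing the safe way to read a possibly misaligned float or double out of a byte buffer. A sketch of the underlying idea, assuming the macros behave like a memcpy-based load (their actual definitions live elsewhere in the tree):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* memcpy compiles to a plain load on x86 and avoids unaligned-access
 * traps on 32-bit ARM, unlike dereferencing *(double *)p directly */
static double get_double_val(const void *p) {
  double v;
  memcpy(&v, p, sizeof(v));
  return v;
}

static float get_float_val(const void *p) {
  float v;
  memcpy(&v, p, sizeof(v));
  return v;
}

int main(void) {
  unsigned char buf[1 + sizeof(double)];
  double d = 3.25;
  memcpy(buf + 1, &d, sizeof(d));       /* deliberately misaligned */
  printf("%lf %f\n", get_double_val(buf + 1), get_float_val(&(float){1.5f}));
  return 0;
}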
@ -201,22 +193,14 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
taosUcs4ToMbs(pTagValue, pSchema[i].bytes, target);
break;
case TSDB_DATA_TYPE_FLOAT: {
#ifdef _TD_ARM_32_
float fv = 0;
*(int32_t *)(&fv) = *(int32_t *)pTagValue;
fv = GET_FLOAT_VAL(pTagValue);
sprintf(target, "%f", fv);
#else
sprintf(target, "%f", *(float *)pTagValue);
#endif
} break;
case TSDB_DATA_TYPE_DOUBLE: {
#ifdef _TD_ARM_32_
double dv = 0;
*(int64_t *)(&dv) = *(int64_t *)pTagValue;
dv = GET_DOUBLE_VAL(pTagValue);
sprintf(target, "%lf", dv);
#else
sprintf(target, "%lf", *(double *)pTagValue);
#endif
} break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(target, "%d", *(int8_t *)pTagValue);
@ -228,7 +212,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
sprintf(target, "%d", *(int32_t *)pTagValue);
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(target, "%lld", *(int64_t *)pTagValue);
sprintf(target, "%" PRId64 "", *(int64_t *)pTagValue);
break;
case TSDB_DATA_TYPE_BOOL: {
char *val = (*((int8_t *)pTagValue) == 0) ? "false" : "true";

View File

@ -18,9 +18,6 @@
#define _XOPEN_SOURCE
#pragma GCC diagnostic ignored "-Woverflow"
#pragma GCC diagnostic ignored "-Wunused-variable"
#include "os.h"
#include "ihash.h"
#include "tscSecondaryMerge.h"
@ -39,7 +36,7 @@ enum {
TSDB_USE_CLI_TS = 1,
};
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize);
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
int32_t numType = isValidNumber(pToken);
@ -74,8 +71,8 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
}
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
char * token;
int tokenlen;
//char * token; //fang not used
//int tokenlen; //fang not used
int32_t index = 0;
SSQLToken sToken;
int64_t interval;
@ -315,6 +312,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
}
strncpy(payload, pToken->z, pToken->n);
if (pToken->n < pSchema->bytes) {
payload[pToken->n] = 0; // add the null terminator if the string is shorter than the available space
}
}
break;
@ -392,9 +393,9 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
}
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
int16_t timePrec) {
int16_t timePrec, int32_t *code, char* tmpTokenBuf) {
int32_t index = 0;
bool isPrevOptr;
//bool isPrevOptr; //fang, never used
SSQLToken sToken = {0};
char * payload = pDataBlocks->pData + pDataBlocks->size;
@ -418,6 +419,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
strcpy(error, "client out of memory");
*code = TSDB_CODE_CLI_OUT_OF_MEMORY;
return -1;
}
@ -425,23 +427,45 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
(sToken.type != TK_FLOAT) && (sToken.type != TK_BOOL) && (sToken.type != TK_NULL)) ||
(sToken.n == 0) || (sToken.type == TK_RP)) {
tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z);
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
// Remove quotation marks
if (TK_STRING == sToken.type) {
sToken.z++;
sToken.n -= 2;
// remove escape sequences: \\, \', \"
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
for (int32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
if (sToken.z[k + 1] == delim) {
cnt++;
tmpTokenBuf[j] = sToken.z[k + 1];
j++;
k++;
continue;
}
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
}
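
The block above strips the surrounding quotes and collapses doubled delimiters and backslash escapes into tmpTokenBuf, so that a literal like 'it''s' is stored as it's. A standalone sketch of the same unescaping rule; unquote is a hypothetical helper:

#include <stdio.h>
#include <string.h>

/* copy token without its outer quotes, resolving \' \" '' "" pairs;
 * returns the unescaped length */
static int unquote(const char *z, int n, char *out) {
  char delim = z[0];
  int j = 0;
  for (int k = 1; k < n - 1; ++k) {
    if ((z[k] == delim || z[k] == '\\') && z[k + 1] == delim) {
      out[j++] = z[k + 1];   /* keep the escaped delimiter ... */
      k++;                   /* ... and skip the escape character */
      continue;
    }
    out[j++] = z[k];
  }
  out[j] = '\0';
  return j;
}

int main(void) {
  char buf[64];
  const char *tok = "'it''s \\'ok\\''";
  unquote(tok, (int)strlen(tok), buf);
  printf("%s\n", buf);   /* prints: it's 'ok' */
  return 0;
}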
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
*code = TSDB_CODE_INVALID_SQL;
return -1; // NOTE: the negative return value indicates an error
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z);
*code = TSDB_CODE_INVALID_TIME_STAMP;
return -1;
}
}
@ -476,7 +500,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows,
SParsedDataColInfo *spd, char *error) {
SParsedDataColInfo *spd, char *error, int32_t *code, char* tmpTokenBuf) {
int32_t index = 0;
SSQLToken sToken;
@ -487,6 +511,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
if (spd->hasVal[0] == false) {
strcpy(error, "primary timestamp column can not be null");
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
@ -497,15 +522,18 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
*str += index;
if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) {
int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize);
if (0 == tSize) {
int32_t tSize;
int32_t retcode = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize, &tSize);
if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(error, "client out of memory");
*code = retcode;
return -1;
}
maxRows += tSize;
ASSERT(tSize > maxRows);
maxRows = tSize;
}
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision);
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
}
@ -517,6 +545,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscInvalidSQLErrMsg(error, ") expected", *str);
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
@ -525,6 +554,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
if (numOfRows <= 0) {
strcpy(error, "no any data points");
*code = TSDB_CODE_INVALID_SQL;
return -1;
} else {
return numOfRows;
@ -545,10 +575,11 @@ static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema,
}
}
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) {
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) {
size_t remain = pDataBlock->nAllocSize - pDataBlock->size;
const int factor = 5;
uint32_t nAllocSizeOld = pDataBlock->nAllocSize;
assert(pDataBlock->headerSize >= 0);
// expand the allocated size
if (remain < rowSize * factor) {
@ -565,11 +596,13 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) {
//assert(false);
// do nothing
pDataBlock->nAllocSize = nAllocSizeOld;
return 0;
*numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize;
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
}
return (int32_t)(pDataBlock->nAllocSize - pDataBlock->size) / rowSize;
*numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize;
return TSDB_CODE_SUCCESS;
}
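
tscAllocateMemIfNeed now reports the row capacity through an out parameter and returns a proper error code, instead of overloading a 0 return as both "no rows" and "out of memory". A hedged sketch of the grow-by-factor reallocation with the same out-parameter convention; Block, ensureCapacity and the code values are illustrative:

#include <stdlib.h>
#include <stdio.h>

enum { SUCCESS = 0, ENOMEM_CODE = -1 };

typedef struct Block { char *data; int size, alloc, header; } Block;

/* ensure room for at least `factor` more rows; report capacity via *rows */
static int ensureCapacity(Block *b, int rowSize, int *rows) {
  const int factor = 5;
  if (b->alloc - b->size < rowSize * factor) {
    int newAlloc = b->alloc ? b->alloc * 2 : rowSize * factor + b->header;
    char *p = realloc(b->data, newAlloc);
    if (p == NULL) {   /* keep the old buffer and report current capacity */
      *rows = (b->alloc - b->header) / rowSize;
      return ENOMEM_CODE;
    }
    b->data = p;
    b->alloc = newAlloc;
  }
  *rows = (b->alloc - b->header) / rowSize;
  return SUCCESS;
}

int main(void) {
  Block b = {0};
  int rows = 0;
  if (ensureCapacity(&b, 16, &rows) == SUCCESS)
    printf("room for %d rows\n", rows);
  free(b.data);
  return 0;
}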
static void tsSetBlockInfo(SShellSubmitBlock *pBlocks, const SMeterMeta *pMeterMeta, int32_t numOfRows) {
@ -628,18 +661,29 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta;
STableDataBlocks *dataBuf =
tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name);
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name, &dataBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
int32_t maxNumOfRows = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize);
if (0 == maxNumOfRows) {
int32_t maxNumOfRows;
ret = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize, &maxNumOfRows);
if (TSDB_CODE_SUCCESS != ret) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload);
int32_t code = TSDB_CODE_INVALID_SQL;
char* tmpTokenBuf = calloc(1, 4096); // used to strip escape characters: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf);
free(tmpTokenBuf);
if (numOfRows <= 0) {
return TSDB_CODE_INVALID_SQL;
return code;
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
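doParseInsertStatement now allocates a single 4 KB scratch buffer, threads it through tsParseValues for unescaping quoted tokens, and frees it on every exit path, instead of paying an allocation per token. A sketch of the borrowed-scratch idea (helper name is illustrative, not part of the tree):

#include <stddef.h>

/* Copy a quoted token into caller-provided scratch space, dropping
 * backslash escapes (\\, \', \"); returns the unescaped length. One
 * buffer serves every row parsed by the statement. */
static size_t unescapeToken(const char *tok, size_t n, char *scratch) {
  size_t w = 0;
  for (size_t r = 0; r < n; ++r) {
    if (tok[r] == '\\' && r + 1 < n) ++r; /* skip the escape byte */
    scratch[w++] = tok[r];
  }
  return w;
}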
@ -730,20 +774,84 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
char * tagVal = pTag->data;
SSchema *pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.type != TK_TAGS) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sql);
SParsedDataColInfo spd = {0};
uint8_t numOfTags = pMeterMetaInfo->pMeterMeta->numOfTags;
spd.numOfCols = numOfTags;
// if some tag columns are specified explicitly
if (sToken.type != TK_LP) {
tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags);
} else {
/* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen) tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
int16_t offset[TSDB_MAX_COLUMNS] = {0};
for (int32_t t = 1; t < numOfTags; ++t) {
offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes;
}
while (1) {
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (TK_STRING == sToken.type) {
sToken.n = strdequote(sToken.z);
strtrim(sToken.z);
sToken.n = (uint32_t)strlen(sToken.z);
}
if (sToken.type == TK_RP) {
break;
}
bool findColumnIndex = false;
// todo: speed up by using a hash list
for (int32_t t = 0; t < numOfTags; ++t) {
if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) {
SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
pElem->offset = offset[t];
pElem->colIndex = t;
if (spd.hasVal[t] == true) {
return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z);
}
spd.hasVal[t] = true;
findColumnIndex = true;
break;
}
}
if (!findColumnIndex) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z);
}
}
if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) {
return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z);
}
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
}
if (sToken.type != TK_TAGS) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
int32_t numOfTagValues = 0;
uint32_t ignoreTokenTypes = TK_LP;
uint32_t numOfIgnoreToken = 1;
while (1) {
for (int i = 0; i < spd.numOfAssignedCols; ++i) {
char* tagVal = pTag->data + spd.elems[i].offset;
int16_t colIndex = spd.elems[i].colIndex;
index = 0;
sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
sql += index;
@ -759,26 +867,39 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
sToken.n -= 2;
}
code = tsParseOneColumnData(&pTagSchema[numOfTagValues], &sToken, tagVal, pCmd->payload, &sql, false,
pMeterMetaInfo->pMeterMeta->precision);
code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, pMeterMetaInfo->pMeterMeta->precision);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if ((pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_BINARY ||
pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[numOfTagValues].bytes) {
if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY ||
pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[colIndex].bytes) {
return tscInvalidSQLErrMsg(pCmd->payload, "string too long", sToken.z);
}
tagVal += pTagSchema[numOfTagValues++].bytes;
}
if (numOfTagValues != pMeterMetaInfo->pMeterMeta->numOfTags) {
return tscInvalidSQLErrMsg(pCmd->payload, "number of tags mismatch", sql);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z);
}
// 2. set null for the tag columns that were not assigned values
if (spd.numOfAssignedCols < spd.numOfCols) {
char *ptr = pTag->data;
for (int32_t i = 0; i < spd.numOfCols; ++i) {
if (!spd.hasVal[i]) { // this tag column has no value to insert, set it to null
setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
}
ptr += pTagSchema[i].bytes;
}
}
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", sql);
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
int32_t ret = setMeterID(pSql, &tableToken, 0);
@ -788,6 +909,7 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
createTable = true;
code = tscGetMeterMetaEx(pSql, pMeterMetaInfo->name, true);
if (TSDB_CODE_ACTION_IN_PROGRESS == code) return code;
} else {
if (cstart != NULL) {
sql = cstart;
@ -840,6 +962,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
return code;
}
ASSERT(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList))
|| ((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)));
if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) {
pSql->pTableHashList = taosInitIntHash(128, POINTER_BYTES, taosHashInt);
pSql->cmd.pDataBlocks = tscCreateBlockArrayList();
@ -848,10 +973,11 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
} else {
ASSERT((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList));
str = pSql->asyncTblPos;
}
tscTrace("%p create data block list for submit data, %p", pSql, pSql->cmd.pDataBlocks);
tscTrace("%p create data block list for submit data:%p, asyncTblPos:%p, pTableHashList:%p", pSql, pSql->cmd.pDataBlocks, pSql->asyncTblPos, pSql->pTableHashList);
while (1) {
int32_t index = 0;
@ -883,10 +1009,17 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
}
void *fp = pSql->fp;
ptrdiff_t pos = pSql->asyncTblPos - pSql->sqlstr;
if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) {
if (fp != NULL) {
//goto _clean;
if (TSDB_CODE_ACTION_IN_PROGRESS == code) {
tscTrace("async insert and waiting to get meter meta, then continue parse sql from offset: %" PRId64, pos);
return code;
}
tscError("async insert parse error, code:%d, %s", code, tsError[code]);
pSql->asyncTblPos = NULL;
goto _error_clean; // TODO: should the async flow use _clean or _error_clean?
} else {
/*
* for async insert, the free data block operations, which is tscDestroyBlockArrayList,
@ -933,7 +1066,6 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
if (code != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
} else if (sToken.type == TK_FILE) {
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 1;
@ -964,8 +1096,12 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
strcpy(fname, full_path.we_wordv[0]);
wordfree(&full_path);
STableDataBlocks *pDataBlock = tscCreateDataBlockEx(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize,
sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
STableDataBlocks *pDataBlock = NULL;
int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize, sizeof(SShellSubmitBlock),
pMeterMetaInfo->name, &pDataBlock);
if (ret != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock);
strcpy(pDataBlock->filename, fname);
@ -1071,8 +1207,10 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// set the index of the next data vnode to send to in the data block array list
pCmd->vnodeIdx = 1;
pMeterMetaInfo->vnodeIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}
@ -1087,6 +1225,7 @@ _clean:
taosCleanUpIntHash(pSql->pTableHashList);
pSql->pTableHashList = NULL;
pSql->asyncTblPos = NULL;
pCmd->isParseFinish = 1;
return code;
}
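When metadata must be fetched asynchronously, the parser records where it stopped in pSql->asyncTblPos, returns TSDB_CODE_ACTION_IN_PROGRESS, and the completion callback re-enters doParserInsertSql, which resumes from the saved position rather than the start of the statement. A stripped-down sketch of that continuation, with illustrative types:

enum { PARSE_OK = 0, PARSE_IN_PROGRESS = 1 };

typedef struct {
  const char *sqlstr;   /* full statement text                   */
  const char *resumeAt; /* NULL = fresh parse, else restart here */
} ParseState;

static int parseInsert(ParseState *st, int metaReady) {
  const char *cur = st->resumeAt ? st->resumeAt : st->sqlstr;
  if (!metaReady) {
    st->resumeAt = cur;  /* remember the position for the callback */
    return PARSE_IN_PROGRESS;
  }
  st->resumeAt = NULL;   /* done: the next statement parses fresh */
  return PARSE_OK;
}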
@ -1123,10 +1262,12 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) {
tscRemoveAllMeterMetaInfo(&pSql->cmd, false);
if (NULL == pSql->asyncTblPos) {
tscTrace("continue parse sql: %s", pSql->asyncTblPos);
tscCleanSqlCmd(&pSql->cmd);
} else {
tscTrace("continue parse sql: %s", pSql->asyncTblPos);
}
if (tscIsInsertOrImportData(pSql->sqlstr)) {
/*
* only for async multi-vnode insertion
@ -1190,12 +1331,12 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
return TSDB_CODE_SUCCESS;
}
static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
size_t readLen = 0;
char * line = NULL;
size_t n = 0;
int len = 0;
uint32_t maxRows = 0;
int32_t maxRows = 0;
SSqlCmd * pCmd = &pSql->cmd;
int numOfRows = 0;
int32_t code = 0;
@ -1205,13 +1346,17 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
int32_t rowSize = pMeterMeta->rowSize;
pCmd->pDataBlocks = tscCreateBlockArrayList();
STableDataBlocks *pTableDataBlock =
tscCreateDataBlockEx(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock),
pMeterMetaInfo->name, &pTableDataBlock);
if (ret != TSDB_CODE_SUCCESS) {
return -1;
}
tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock);
maxRows = tscAllocateMemIfNeed(pTableDataBlock, rowSize);
if (maxRows < 1) return -1;
code = tscAllocateMemIfNeed(pTableDataBlock, rowSize, &maxRows);
if (TSDB_CODE_SUCCESS != code) return -1;
int count = 0;
SParsedDataColInfo spd = {.numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns};
@ -1222,21 +1367,15 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
while ((readLen = getline(&line, &n, fp)) != -1) {
// line[--readLen] = '\0';
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0;
if (readLen <= 0) continue;
if (readLen == 0) continue; // readLen is unsigned, so test == 0 rather than <= 0
char *lineptr = line;
strtolower(line, line);
if (numOfRows >= maxRows || pTableDataBlock->size + pMeterMeta->rowSize >= pTableDataBlock->nAllocSize) {
uint32_t tSize = tscAllocateMemIfNeed(pTableDataBlock, pMeterMeta->rowSize);
if (0 == tSize) return (-TSDB_CODE_CLI_OUT_OF_MEMORY);
maxRows += tSize;
}
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision);
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
pSql->res.code = TSDB_CODE_INVALID_SQL;
return -1;
pSql->res.code = code;
return (-code);
}
pTableDataBlock->size += len;
@ -1296,19 +1435,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
int32_t code = TSDB_CODE_SUCCESS;
/* the first block has been sent to server in processSQL function */
assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL);
assert(pCmd->isInsertFromFile != -1 && pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL);
if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) {
if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) {
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
for (int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) {
for (int32_t i = pMeterMetaInfo->vnodeIndex; i < pDataBlocks->nSize; ++i) {
pDataBlock = pDataBlocks->pData[i];
if (pDataBlock == NULL) {
continue;
}
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize);
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, pDataBlocks->nSize);
continue;
}
@ -1366,7 +1505,15 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
continue;
}
int nrows = tscInsertDataFromFile(pSql, fp);
char* tmpTokenBuf = calloc(1, 4096); // used to strip escape characters: \\, \', \"
if (NULL == tmpTokenBuf) {
tscError("%p calloc failed", pSql);
continue;
}
int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf);
free(tmpTokenBuf);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (nrows < 0) {

View File

@ -75,7 +75,6 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_
if (isParam) {
++stmt->numParams;
}
return TSDB_CODE_SUCCESS;
}
@ -122,11 +121,11 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
break;
case TSDB_DATA_TYPE_FLOAT:
var->dKey = *(float*)tb->buffer;
var->dKey = GET_FLOAT_VAL(tb->buffer);
break;
case TSDB_DATA_TYPE_DOUBLE:
var->dKey = *(double*)tb->buffer;
var->dKey = GET_DOUBLE_VAL(tb->buffer);
break;
case TSDB_DATA_TYPE_BINARY:
@ -409,7 +408,9 @@ static int insertStmtReset(STscStmt* pStmt) {
}
}
pCmd->batchSize = 0;
pCmd->vnodeIdx = 0;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
pMeterMetaInfo->vnodeIndex = 0;
return TSDB_CODE_SUCCESS;
}
@ -422,6 +423,8 @@ static int insertStmtExecute(STscStmt* stmt) {
++pCmd->batchSize;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (pCmd->pDataBlocks->nSize > 0) {
// merge according to vgid
int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks);
@ -436,7 +439,7 @@ static int insertStmtExecute(STscStmt* stmt) {
}
// set the index of the next data vnode to send to in the data block array list
pCmd->vnodeIdx = 1;
pMeterMetaInfo->vnodeIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}
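The tscPrepare.c change replaces direct casts like *(float*)tb->buffer with GET_FLOAT_VAL/GET_DOUBLE_VAL. Dereferencing a potentially misaligned pointer is undefined behaviour and can fault on strict-alignment CPUs; a memcpy-based accessor is the usual remedy. A sketch of what such a macro plausibly reduces to (the tree's actual definition may differ):

#include <string.h>

static inline float get_float_val(const void *p) {
  float v;
  memcpy(&v, p, sizeof(v)); /* byte copy is safe for any alignment */
  return v;
}

static inline double get_double_val(const void *p) {
  double v;
  memcpy(&v, p, sizeof(v));
  return v;
}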

View File

@ -93,10 +93,10 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L;
if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return;
tscTrace("%p query time:%lld sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
char *sql = malloc(200);
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %lld, %lld, '", tsMonitorDbName,
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName,
pSql->pTscObj->user, pSql->stime, pSql->res.useconds);
int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr);
if (sqlLen > TSDB_SHOW_SQL_LEN - 1) {
@ -198,7 +198,9 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
pthread_mutex_unlock(&pObj->mutex);
if (pStream) {
tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
}
taos_close_stream(pStream);
if (pStream->callback) {
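The logging fixes in this file trade %lld for PRId64 from <inttypes.h>, which expands to the correct conversion specifier for int64_t on every platform (int64_t is long long on 32-bit Linux but plain long on LP64 systems, where %lld is technically mismatched). For example:

#include <inttypes.h>
#include <stdio.h>

static void logQueryTime(int64_t useconds) {
  printf("query time:%" PRId64 " us\n", useconds); /* portable int64_t print */
}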

File diff suppressed because it is too large

View File

@ -83,6 +83,13 @@ struct SSchema* tsGetColumnSchema(SMeterMeta* pMeta, int32_t startCol) {
return (SSchema*)(((char*)pMeta + sizeof(SMeterMeta)) + startCol * sizeof(SSchema));
}
struct SSchema tsGetTbnameColumnSchema() {
struct SSchema s = {.colId = TSDB_TBNAME_COLUMN_INDEX, .type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
/**
* the MeterMeta data format in memory is as follows:
*
@ -123,36 +130,40 @@ bool tsMeterMetaIdentical(SMeterMeta* p1, SMeterMeta* p2) {
return memcmp(p1, p2, size) == 0;
}
//todo refactor
static FORCE_INLINE char* skipSegments(char* input, char delimiter, int32_t num) {
// todo refactor
static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delimiter) {
while (*input != 0 && *input++ != delim) {
};
}
return input;
}
static FORCE_INLINE void copySegment(char* dst, char* src, char delimiter) {
static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
size_t len = 0;
while (*src != delimiter && *src != 0) {
*dst++ = *src++;
len++;
}
return len;
}
/**
* extract meter name from meterid, which is in the format userid.dbname.metername
* extract table name from meterid, which is in the format userid.dbname.metername
* @param meterId
* @return
*/
void extractMeterName(char* meterId, char* name) {
void extractTableName(char* meterId, char* name) {
char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 2);
copySegment(name, r, TS_PATH_DELIMITER[0]);
copy(name, r, TS_PATH_DELIMITER[0]);
}
SSQLToken extractDBName(char* meterId, char* name) {
char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 1);
copySegment(name, r, TS_PATH_DELIMITER[0]);
size_t len = copy(name, r, TS_PATH_DELIMITER[0]);
SSQLToken token = {.z = name, .n = strlen(name), .type = TK_STRING};
SSQLToken token = {.z = name, .n = len, .type = TK_STRING};
return token;
}
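extractDBName used to size its token with strlen(name), but copySegment never NUL-terminates the destination, so that read was only safe if the buffer happened to be zero-initialized. Having copy() return the copied length, as above, removes the dependency on a terminator. The same contract in a self-contained sketch (names illustrative):

#include <stddef.h>

/* Copy src up to (not including) delim or NUL into dst; the returned
 * length is authoritative because dst is deliberately not terminated. */
static size_t copyUntil(char *dst, const char *src, char delim) {
  size_t n = 0;
  while (src[n] != delim && src[n] != '\0') {
    dst[n] = src[n];
    ++n;
  }
  return n;
}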

View File

@ -440,13 +440,16 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
tscTrace("%p waiting for delete procedure, status: %d", pSql, status);
}
tfree(pLocalReducer->interpolationInfo.prevValues);
tfree(pLocalReducer->interpolationInfo.pTags);
taosDestoryInterpoInfo(&pLocalReducer->interpolationInfo);
if (pLocalReducer->pCtx != NULL) {
for(int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
tVariantDestroy(&pCtx->tag);
if (pCtx->tagInfo.pTagCtxList != NULL) {
tfree(pCtx->tagInfo.pTagCtxList);
}
}
tfree(pLocalReducer->pCtx);
@ -1314,8 +1317,10 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
tscTrace("%s call the drop local reducer", __FUNCTION__);
tscDestroyLocalReducer(pSql);
if (pRes) {
pRes->numOfRows = 0;
pRes->row = 0;
}
return 0;
}
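The leak fixes above lean on tfree for the newly freed pTags and pTagCtxList members. Assuming tfree is the usual free-and-reset macro (a sketch; the tree's definition may differ), repeated cleanup passes stay harmless:

#include <stdlib.h>

/* free and NULL the pointer so a second cleanup pass is a no-op */
#define tfree(p)          \
  do {                    \
    if (p) {              \
      free((void *)(p));  \
      (p) = NULL;         \
    }                     \
  } while (0)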

View File

@ -183,7 +183,7 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) {
#ifdef CLUSTER
connInit.peerIp = tscMgmtIpList.ipstr[pSql->index];
#else
connInit.peerIp = tsServerIpStr;
connInit.peerIp = tsMasterIp;
#endif
thandle = taosOpenRpcConn(&connInit, pCode);
}
@ -211,7 +211,6 @@ void tscGetConnToMgmt(SSqlObj *pSql, uint8_t *pCode) {
}
void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) {
char ipstr[40] = {0};
SVPeerDesc *pVPeersDesc = NULL;
static int vidIndex = 0;
STscObj * pTscObj = pSql->pTscObj;
@ -222,7 +221,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) {
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // multiple vnode query
SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pCmd->vnodeIdx);
SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex);
if (vnodeList != NULL) {
pVPeersDesc = vnodeList->vpeerDesc;
}
@ -244,6 +243,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) {
while (pSql->retry < pSql->maxRetry) {
(pSql->retry)++;
#ifdef CLUSTER
char ipstr[40] = {0};
if (pVPeersDesc[pSql->index].ip == 0) {
(pSql->index) = (pSql->index + 1) % TSDB_VNODES_SUPPORT;
continue;
@ -291,7 +291,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) {
connInit.peerId = htonl((pVPeersDesc[0].vnode << TSDB_SHELL_VNODE_BITS));
connInit.shandle = pVnodeConn;
connInit.ahandle = pSql;
connInit.peerIp = tsServerIpStr;
connInit.peerIp = tsMasterIp;
connInit.peerPort = tsVnodeShellPort;
thandle = taosOpenRpcConn(&connInit, pCode);
vidIndex = (vidIndex + 1) % tscNumOfThreads;
@ -481,7 +481,8 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) {
msg = NULL;
} else if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID ||
rspCode == TSDB_CODE_INVALID_VNODE_ID || rspCode == TSDB_CODE_NOT_ACTIVE_VNODE ||
rspCode == TSDB_CODE_NETWORK_UNAVAIL || rspCode == TSDB_CODE_NOT_ACTIVE_SESSION) {
rspCode == TSDB_CODE_NETWORK_UNAVAIL || rspCode == TSDB_CODE_NOT_ACTIVE_SESSION ||
rspCode == TSDB_CODE_TABLE_ID_MISMATCH) {
/*
* not_active_table: 1. the virtual node may fail to create the table, since table creation is asynchronous;
*                   the virtual node may not have created the table yet, so try again using the new metermeta.
@ -492,24 +493,15 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) {
* removed. So, renew metermeta and try again.
* not_active_session: the db has been moved to another node; the vnode no longer exists on this dnode.
*/
#else
if (rspCode == TSDB_CODE_NOT_ACTIVE_TABLE || rspCode == TSDB_CODE_INVALID_TABLE_ID ||
rspCode == TSDB_CODE_INVALID_VNODE_ID || rspCode == TSDB_CODE_NOT_ACTIVE_VNODE ||
rspCode == TSDB_CODE_NETWORK_UNAVAIL) {
rspCode == TSDB_CODE_NOT_ACTIVE_VNODE || rspCode == TSDB_CODE_INVALID_VNODE_ID ||
rspCode == TSDB_CODE_TABLE_ID_MISMATCH || rspCode == TSDB_CODE_NETWORK_UNAVAIL) {
#endif
pSql->thandle = NULL;
taosAddConnIntoCache(tscConnCache, thandle, pSql->ip, pSql->vnode, pObj->user);
if ((pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) &&
(rspCode == TSDB_CODE_INVALID_TABLE_ID || rspCode == TSDB_CODE_INVALID_VNODE_ID)) {
/*
* In case of the insert/select operations, the invalid table(vnode) id means
* the submit/query msg is invalid, renew meter meta will not help to fix this problem,
* so return the invalid_query_msg to client directly.
*/
code = TSDB_CODE_INVALID_QUERY_MSG;
} else if (pCmd->command == TSDB_SQL_CONNECT) {
if (pCmd->command == TSDB_SQL_CONNECT) {
code = TSDB_CODE_NETWORK_UNAVAIL;
} else if (pCmd->command == TSDB_SQL_HB) {
code = TSDB_CODE_NOT_READY;
@ -542,7 +534,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) {
if (pMeterMetaInfo->pMeterMeta) // it may be deleted
pMeterMetaInfo->pMeterMeta->index = pSql->index;
} else {
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pSql->cmd.vnodeIdx);
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex);
pVnodeSidList->index = pSql->index;
}
} else {
@ -653,7 +645,7 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu
static int tscLaunchMetricSubQueries(SSqlObj *pSql);
// todo merge with callback
int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeIdx, SJoinSubquerySupporter *pSupporter) {
int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySupporter *pSupporter) {
SSqlCmd *pCmd = &pSql->cmd;
pSql->res.qhandle = 0x1;
@ -666,12 +658,13 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId
}
}
SSqlObj *pNew = createSubqueryObj(pSql, vnodeIdx, tableIndex, tscJoinQueryCallback, pSupporter, NULL);
SSqlObj *pNew = createSubqueryObj(pSql, tableIndex, tscJoinQueryCallback, pSupporter, NULL);
if (pNew == NULL) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
pSql->pSubs[pSql->numOfSubs++] = pNew;
assert(pSql->numOfSubs <= pSupporter->pState->numOfTotal);
if (QUERY_IS_JOIN_QUERY(pCmd->type)) {
addGroupInfoForSubquery(pSql, pNew, tableIndex);
@ -703,13 +696,11 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId
SSqlExpr *pExpr = tscSqlExprGet(&pNew->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pSupporter->tagCond, pMeterMetaInfo->pMeterMeta->uid);
pExpr->param->i64Key = tagColIndex;
pExpr->numOfParams = 1;
addRequiredTagColumn(pCmd, tagColIndex, 0);
// add the filter tag column
for (int32_t i = 0; i < pSupporter->colList.numOfCols; ++i) {
SColumnBase *pColBase = &pSupporter->colList.pColList[i];
@ -722,6 +713,10 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId
pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY;
}
#ifdef _DEBUG_VIEW
tscPrintSelectClause(&pNew->cmd);
#endif
return tscProcessSql(pNew);
}
@ -788,7 +783,7 @@ int tscProcessSql(SSqlObj *pSql) {
pSql->index = pMeterMetaInfo->pMeterMeta->index;
} else { // it must be the parent SSqlObj for super table query
if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) != 0) {
int32_t idx = pSql->cmd.vnodeIdx;
int32_t idx = pMeterMetaInfo->vnodeIndex;
SVnodeSidList *pSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx);
pSql->index = pSidList->index;
}
@ -816,7 +811,7 @@ int tscProcessSql(SSqlObj *pSql) {
return pSql->res.code;
}
int32_t code = tscLaunchJoinSubquery(pSql, i, 0, pSupporter);
int32_t code = tscLaunchJoinSubquery(pSql, i, pSupporter);
if (code != TSDB_CODE_SUCCESS) { // failed to create subquery object, quit query
tscDestroyJoinSupporter(pSupporter);
pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY;
@ -875,28 +870,26 @@ int tscProcessSql(SSqlObj *pSql) {
return doProcessSql(pSql);
}
static void doCleanupSubqueries(SSqlObj *pSql, int32_t vnodeIndex, int32_t numOfVnodes, SRetrieveSupport *pTrs,
tOrderDescriptor *pDesc, tColModel *pModel, tExtMemBuffer **pMemoryBuf,
SSubqueryState *pState) {
pSql->cmd.command = TSDB_SQL_RETRIEVE_METRIC;
pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY;
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState* pState) {
assert(numOfSubs <= pSql->numOfSubs && numOfSubs >= 0 && pState != NULL);
/*
* if i > 0, at least one subquery has been issued, and the allocated
* resources are freed by it when that subquery completes.
*/
if (vnodeIndex == 0) {
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfVnodes);
tfree(pState);
for(int32_t i = 0; i < numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
assert(pSub != NULL);
if (pTrs != NULL) {
tfree(pTrs->localBuffer);
SRetrieveSupport* pSupport = pSub->param;
pthread_mutex_unlock(&pTrs->queryMutex);
pthread_mutex_destroy(&pTrs->queryMutex);
tfree(pTrs);
}
tfree(pSupport->localBuffer);
pthread_mutex_unlock(&pSupport->queryMutex);
pthread_mutex_destroy(&pSupport->queryMutex);
tfree(pSupport);
tscFreeSqlObj(pSub);
}
free(pState);
}
int tscLaunchMetricSubQueries(SSqlObj *pSql) {
@ -916,8 +909,9 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
const uint32_t nBufferSize = (1 << 16); // 64KB
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
int32_t numOfVnodes = pMeterMetaInfo->pMetricMeta->numOfVnodes;
assert(numOfVnodes > 0);
int32_t numOfSubQueries = pMeterMetaInfo->pMetricMeta->numOfVnodes;
assert(numOfSubQueries > 0);
int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize);
if (ret != 0) {
@ -928,37 +922,34 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
return pRes->code;
}
pSql->pSubs = malloc(POINTER_BYTES * numOfVnodes);
pSql->numOfSubs = numOfVnodes;
pSql->pSubs = calloc(numOfSubQueries, POINTER_BYTES);
pSql->numOfSubs = numOfSubQueries;
tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfVnodes);
tscTrace("%p retrieved query data from %d vnode(s)", pSql, numOfSubQueries);
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
pState->numOfTotal = numOfVnodes;
pState->numOfTotal = numOfSubQueries;
pRes->code = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < numOfVnodes; ++i) {
if (pRes->code == TSDB_CODE_QUERY_CANCELLED || pRes->code == TSDB_CODE_CLI_OUT_OF_MEMORY) {
/*
* during launch sub queries, if the master query is cancelled. the remain is ignored and set the retrieveDoneRec
* to the value of remaining not built sub-queries. So, the already issued sub queries can successfully free
* allocated resources.
*/
pState->numOfCompleted = (numOfVnodes - i);
doCleanupSubqueries(pSql, i, numOfVnodes, NULL, pDesc, pModel, pMemoryBuf, pState);
if (i == 0) {
return pSql->res.code;
}
int32_t i = 0;
for (; i < numOfSubQueries; ++i) {
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
if (trs == NULL) {
tscError("%p failed to malloc buffer for SRetrieveSupport, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
break;
}
SRetrieveSupport *trs = (SRetrieveSupport *)calloc(1, sizeof(SRetrieveSupport));
trs->pExtMemBuffer = pMemoryBuf;
trs->pOrderDescriptor = pDesc;
trs->pState = pState;
trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
trs->vnodeIdx = i;
if (trs->localBuffer == NULL) {
tscError("%p failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
tfree(trs);
break;
}
trs->subqueryIndex = i;
trs->pParentSqlObj = pSql;
trs->pFinalColModel = pModel;
@ -968,15 +959,10 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
pthread_mutexattr_destroy(&mutexattr);
SSqlObj *pNew = tscCreateSqlObjForSubquery(pSql, trs, NULL);
if (pNew == NULL) {
pState->numOfCompleted = (numOfVnodes - i);
doCleanupSubqueries(pSql, i, numOfVnodes, trs, pDesc, pModel, pMemoryBuf, pState);
if (i == 0) {
return pSql->res.code;
}
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));
tfree(trs->localBuffer);
tfree(trs);
break;
}
@ -985,8 +971,30 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) {
pNew->cmd.tsBuf = tsBufClone(pSql->cmd.tsBuf);
}
tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, pNew->cmd.vnodeIdx);
tscProcessSql(pNew);
tscTrace("%p sub:%p create subquery success. orderOfSub:%d", pSql, pNew, trs->subqueryIndex);
}
if (i < numOfSubQueries) {
tscError("%p failed to prepare subquery structure and launch subqueries", pSql);
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries);
doCleanupSubqueries(pSql, i, pState);
return pRes->code; // free all allocated resource
}
if (pRes->code == TSDB_CODE_QUERY_CANCELLED) {
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, numOfSubQueries);
doCleanupSubqueries(pSql, i, pState);
return pRes->code;
}
for(int32_t j = 0; j < numOfSubQueries; ++j) {
SSqlObj* pSub = pSql->pSubs[j];
SRetrieveSupport* pSupport = pSub->param;
tscTrace("%p sub:%p launch subquery, orderOfSub:%d.", pSql, pSub, pSupport->subqueryIndex);
tscProcessSql(pSub);
}
return TSDB_CODE_SUCCESS;
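tscLaunchMetricSubQueries is restructured into two phases: build every SRetrieveSupport and subquery object first, and only once all allocations have succeeded start issuing them in the final loop above. A mid-build failure then frees exactly the i objects constructed so far, with no in-flight query racing the cleanup. The shape of the pattern, reduced to essentials:

#include <stdlib.h>

typedef struct Sub { int id; } Sub;

static int launchAll(Sub **subs, int n) {
  int i = 0;
  for (; i < n; ++i) {                /* phase 1: allocate only */
    subs[i] = calloc(1, sizeof(Sub));
    if (subs[i] == NULL) break;
  }
  if (i < n) {                        /* unwind: nothing was started yet */
    while (i-- > 0) free(subs[i]);
    return -1;
  }
  for (int j = 0; j < n; ++j) {
    subs[j]->id = j;                  /* phase 2: stand-in for tscProcessSql */
  }
  return 0;
}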
@ -1034,13 +1042,16 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) {
SSqlObj *pPObj = trsupport->pParentSqlObj;
int32_t idx = trsupport->vnodeIdx;
int32_t subqueryIndex = trsupport->subqueryIndex;
assert(pSql != NULL);
SSubqueryState* pState = trsupport->pState;
assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
pPObj->numOfSubs == pState->numOfTotal);
/* retrieved in subquery failed. OR query cancelled in retrieve phase. */
if (trsupport->pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) {
trsupport->pState->code = -(int)pPObj->res.code;
if (pState->code == TSDB_CODE_SUCCESS && pPObj->res.code != TSDB_CODE_SUCCESS) {
pState->code = -(int)pPObj->res.code;
/*
* kill current sub-query connection, which may retrieve data from vnodes;
@ -1049,34 +1060,34 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
pSql->res.numOfRows = 0;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts
tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql,
trsupport->vnodeIdx, trsupport->pState->code);
subqueryIndex, pState->code);
}
if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query.
tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, idx);
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, idx,
trsupport->pState->code);
tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, subqueryIndex);
tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql,
subqueryIndex, pState->code);
} else {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && trsupport->pState->code == TSDB_CODE_SUCCESS) {
if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && pState->code == TSDB_CODE_SUCCESS) {
/*
* the current query failed and the retry count is still below the limit:
* clear the previously retrieved data, then launch a new subquery
*/
tExtMemBufferClear(trsupport->pExtMemBuffer[idx]);
tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]);
// clear local saved number of results
trsupport->localBuffer->numOfElems = 0;
pthread_mutex_unlock(&trsupport->queryMutex);
tscTrace("%p sub:%p retrieve failed, code:%d, orderOfSub:%d, retry:%d", trsupport->pParentSqlObj, pSql, numOfRows,
idx, trsupport->numOfRetry);
subqueryIndex, trsupport->numOfRetry);
SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql);
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery sqlobj due to out of memory, abort retry",
trsupport->pParentSqlObj, pSql);
trsupport->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
return;
}
@ -1084,26 +1095,28 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
tscProcessSql(pNew);
return;
} else { // reach the maximum retry count, abort
atomic_val_compare_exchange_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows);
atomic_val_compare_exchange_32(&pState->code, TSDB_CODE_SUCCESS, numOfRows);
tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql,
numOfRows, idx, trsupport->pState->code);
numOfRows, subqueryIndex, pState->code);
}
}
int32_t numOfTotal = trsupport->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1);
if (finished < numOfTotal) { // pState may be released by other threads, so keep the value in a local variable.
int32_t numOfTotal = pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1);
if (finished < numOfTotal) {
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished);
return tscFreeSubSqlObj(trsupport, pSql);
}
// all subqueries are failed
tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, trsupport->pState->numOfTotal,
trsupport->pState->code);
pPObj->res.code = -(trsupport->pState->code);
tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, pState->numOfTotal,
pState->code);
pPObj->res.code = -(pState->code);
// release allocated resource
tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel,
trsupport->pState->numOfTotal);
pState->numOfTotal);
tfree(trsupport->pState);
tscFreeSubSqlObj(trsupport, pSql);
@ -1131,21 +1144,24 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq
void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
SRetrieveSupport *trsupport = (SRetrieveSupport *)param;
int32_t idx = trsupport->vnodeIdx;
int32_t idx = trsupport->subqueryIndex;
SSqlObj * pPObj = trsupport->pParentSqlObj;
tOrderDescriptor *pDesc = trsupport->pOrderDescriptor;
SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL) {
/* sql object has been released in error process, return immediately */
if (pSql == NULL) { // sql object has been released in error process, return immediately
tscTrace("%p subquery has been released, idx:%d, abort", pPObj, idx);
return;
}
SSubqueryState* pState = trsupport->pState;
assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
pPObj->numOfSubs == pState->numOfTotal);
// the query and its cancellation may execute at the same time
pthread_mutex_lock(&trsupport->queryMutex);
if (numOfRows < 0 || trsupport->pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) {
if (numOfRows < 0 || pState->code < 0 || pPObj->res.code != TSDB_CODE_SUCCESS) {
return tscHandleSubRetrievalError(trsupport, pSql, numOfRows);
}
@ -1158,10 +1174,10 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
if (numOfRows > 0) {
assert(pRes->numOfRows == numOfRows);
atomic_add_fetch_64(&trsupport->pState->numOfRetrievedRows, numOfRows);
atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);
tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql,
pRes->numOfRows, trsupport->pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
#ifdef _DEBUG_VIEW
printf("received data from vnode: %d rows\n", pRes->numOfRows);
@ -1188,7 +1204,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
} else { // all data has been retrieved to client
/* data from the current vnode is staged in cache and on disk */
uint32_t numOfRowsFromVnode =
trsupport->pExtMemBuffer[pCmd->vnodeIdx]->numOfAllElems + trsupport->localBuffer->numOfElems;
trsupport->pExtMemBuffer[idx]->numOfAllElems + trsupport->localBuffer->numOfElems;
tscTrace("%p sub:%p all data retrieved from ip:%u,vid:%d, numOfRows:%d, orderOfSub:%d", pPObj, pSql, pSvd->ip,
pSvd->vnode, numOfRowsFromVnode, idx);
@ -1220,11 +1236,11 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
// keep this value in a local variable, since pState may be released by other threads if the atomic_add operation
// increases the finished value up to pState->numOfTotal, which means all subqueries are completed.
// In that case, the comparison between the finished value and the released pState->numOfTotal is not safe.
int32_t numOfTotal = trsupport->pState->numOfTotal;
int32_t numOfTotal = pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&trsupport->pState->numOfCompleted, 1);
int32_t finished = atomic_add_fetch_32(&pState->numOfCompleted, 1);
if (finished < numOfTotal) {
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->vnodeIdx, finished);
tscTrace("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pPObj, pSql, trsupport->subqueryIndex, finished);
return tscFreeSubSqlObj(trsupport, pSql);
}
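Both completion paths above copy pState->numOfTotal into a local before the atomic increment, because the subquery that observes finished == numOfTotal frees the shared state; reading pState afterwards would be a use-after-free. The idiom in isolation, using C11 atomics for illustration (the tree has its own atomic_* wrappers):

#include <stdatomic.h>

typedef struct {
  atomic_int completed;
  int        total;
} SubqueryState;

/* Returns 1 iff the caller is the last subquery to finish and therefore
 * owns freeing the shared state; total is read before the increment
 * that can hand ownership to another thread. */
static int finishOne(SubqueryState *st) {
  int total = st->total;                           /* local copy first */
  int done  = atomic_fetch_add(&st->completed, 1) + 1;
  return done == total;
}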
@ -1232,10 +1248,10 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
pDesc->pSchema->maxCapacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
tscTrace("%p retrieve from %d vnodes completed.final NumOfRows:%d,start to build loser tree", pPObj,
trsupport->pState->numOfTotal, trsupport->pState->numOfRetrievedRows);
pState->numOfTotal, pState->numOfRetrievedRows);
tscClearInterpInfo(&pPObj->cmd);
tscCreateLocalReducer(trsupport->pExtMemBuffer, trsupport->pState->numOfTotal, pDesc, trsupport->pFinalColModel,
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfTotal, pDesc, trsupport->pFinalColModel,
&pPObj->cmd, &pPObj->res);
tscTrace("%p build loser tree completed", pPObj);
@ -1244,7 +1260,8 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) {
pPObj->res.row = 0;
// only free once
free(trsupport->pState);
tfree(trsupport->pState);
tscFreeSubSqlObj(trsupport, pSql);
if (pPObj->fp == NULL) {
@ -1308,10 +1325,16 @@ void tscKillMetricQuery(SSqlObj *pSql) {
static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int retCode);
static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) {
SSqlObj *pNew = createSubqueryObj(pSql, trsupport->vnodeIdx, 0, tscRetrieveDataRes, trsupport, prevSqlObj);
SSqlObj *pNew = createSubqueryObj(pSql, 0, tscRetrieveDataRes, trsupport, prevSqlObj);
if (pNew != NULL) { // the sub query of two-stage super table query
pNew->cmd.type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY;
pSql->pSubs[trsupport->vnodeIdx] = pNew;
assert(pNew->cmd.numOfTables == 1);
// one subquery is launched per vnode, so the subquery index equals the vnode index.
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0);
pMeterMetaInfo->vnodeIndex = trsupport->subqueryIndex;
pSql->pSubs[trsupport->subqueryIndex] = pNew;
}
return pNew;
@ -1321,8 +1344,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
SRetrieveSupport *trsupport = (SRetrieveSupport *)param;
SSqlObj * pSql = (SSqlObj *)tres;
int32_t idx = pSql->cmd.vnodeIdx;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
int32_t idx = pMeterMetaInfo->vnodeIndex;
SVnodeSidList *vnodeInfo = NULL;
SVPeerDesc * pSvd = NULL;
@ -1331,16 +1354,20 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
pSvd = &vnodeInfo->vpeerDesc[vnodeInfo->index];
}
if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || trsupport->pState->code != TSDB_CODE_SUCCESS) {
SSubqueryState* pState = trsupport->pState;
assert(pState->numOfCompleted < pState->numOfTotal && pState->numOfCompleted >= 0 &&
trsupport->pParentSqlObj->numOfSubs == pState->numOfTotal);
if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS || pState->code != TSDB_CODE_SUCCESS) {
// metric query is killed, Note: code must be less than 0
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
if (trsupport->pParentSqlObj->res.code != TSDB_CODE_SUCCESS) {
code = -(int)(trsupport->pParentSqlObj->res.code);
} else {
code = trsupport->pState->code;
code = pState->code;
}
tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql,
trsupport->vnodeIdx, code);
trsupport->subqueryIndex, code);
}
/*
@ -1353,16 +1380,16 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
if (code != TSDB_CODE_SUCCESS) {
if (trsupport->numOfRetry++ >= MAX_NUM_OF_SUBQUERY_RETRY) {
tscTrace("%p sub:%p reach the max retry count,set global code:%d", trsupport->pParentSqlObj, pSql, code);
atomic_val_compare_exchange_32(&trsupport->pState->code, 0, code);
atomic_val_compare_exchange_32(&pState->code, 0, code);
} else { // does not reach the maximum retry count, go on
tscTrace("%p sub:%p failed code:%d, retry:%d", trsupport->pParentSqlObj, pSql, code, trsupport->numOfRetry);
SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql);
if (pNew == NULL) {
tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d",
trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->vnodeIdx);
trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->subqueryIndex);
trsupport->pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY;
pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY;
trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY;
} else {
assert(pNew->cmd.pMeterInfo[0]->pMeterMeta != NULL && pNew->cmd.pMeterInfo[0]->pMetricMeta != NULL);
@ -1372,21 +1399,21 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
}
}
if (trsupport->pState->code != TSDB_CODE_SUCCESS) { // failed, abort
if (pState->code != TSDB_CODE_SUCCESS) { // failed, abort
if (vnodeInfo != NULL) {
tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql,
vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode,
trsupport->vnodeIdx, trsupport->pState->code);
trsupport->subqueryIndex, pState->code);
} else {
tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql,
trsupport->vnodeIdx, trsupport->pState->code);
trsupport->subqueryIndex, pState->code);
}
tscRetrieveFromVnodeCallBack(param, tres, trsupport->pState->code);
tscRetrieveFromVnodeCallBack(param, tres, pState->code);
} else { // success, proceed to retrieve data from dnode
tscTrace("%p sub:%p query complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql,
vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode,
trsupport->vnodeIdx);
trsupport->subqueryIndex);
taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param);
}
@ -1461,7 +1488,7 @@ void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) {
pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode);
} else { // query on metric
SMetricMeta * pMetricMeta = pMeterMetaInfo->pMetricMeta;
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx);
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex);
pQueryMsg->vnode = htons(pVnodeSidList->vpeerDesc[pSql->index].vnode);
}
}
@ -1484,7 +1511,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) {
SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta;
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx);
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex);
int32_t meterInfoSize = (pMetricMeta->tagLen + sizeof(SMeterSidExtInfo)) * pVnodeSidList->numOfSids;
int32_t outputColumnSize = pCmd->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg);
@ -1497,6 +1524,46 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) {
return size;
}
static char* doSerializeTableInfo(SSqlObj* pSql, int32_t numOfMeters, int32_t vnodeId, char* pMsg) {
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta;
SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta;
tscTrace("%p vid:%d, query on %d meters", pSql, htons(vnodeId), numOfMeters);
if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
#ifdef _DEBUG_VIEW
tscTrace("%p sid:%d, uid:%lld", pSql, pMeterMetaInfo->pMeterMeta->sid, pMeterMetaInfo->pMeterMeta->uid);
#endif
SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg;
pMeterInfo->sid = htonl(pMeterMeta->sid);
pMeterInfo->uid = htobe64(pMeterMeta->uid);
pMsg += sizeof(SMeterSidExtInfo);
} else {
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex);
for (int32_t i = 0; i < numOfMeters; ++i) {
SMeterSidExtInfo *pMeterInfo = (SMeterSidExtInfo *)pMsg;
SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i);
pMeterInfo->sid = htonl(pQueryMeterInfo->sid);
pMeterInfo->uid = htobe64(pQueryMeterInfo->uid);
pMsg += sizeof(SMeterSidExtInfo);
memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen);
pMsg += pMetricMeta->tagLen;
#ifdef _DEBUG_VIEW
tscTrace("%p sid:%d, uid:%lld", pSql, pQueryMeterInfo->sid, pQueryMeterInfo->uid);
#endif
}
}
return pMsg;
}
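doSerializeTableInfo emits each meter record as fixed-width integers normalized with htonl/htobe64, followed by the raw tag bytes; tscProcessMetricMetaRsp applies the matching conversion on the way back. One record, sketched under the assumption that htobe64 comes from <endian.h> as on glibc:

#include <arpa/inet.h> /* htonl */
#include <endian.h>    /* htobe64 (glibc; assumption) */
#include <stdint.h>
#include <string.h>

/* Append one (sid, uid) pair in network byte order and return the
 * advanced write pointer, mirroring the function's cursor style. */
static char *putMeterInfo(char *p, int32_t sid, uint64_t uid) {
  uint32_t nsid = htonl((uint32_t)sid);
  uint64_t nuid = htobe64(uid);
  memcpy(p, &nsid, sizeof(nsid)); p += sizeof(nsid);
  memcpy(p, &nuid, sizeof(nuid)); p += sizeof(nuid);
  return p;
}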
int tscBuildQueryMsg(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
@ -1527,14 +1594,13 @@ int tscBuildQueryMsg(SSqlObj *pSql) {
pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pMeterMeta->index].vnode);
pQueryMsg->uid = pMeterMeta->uid;
pQueryMsg->numOfTagsCols = 0;
} else { // query on metric
SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta;
if (pCmd->vnodeIdx < 0) {
tscError("%p error vnodeIdx:%d", pSql, pCmd->vnodeIdx);
} else { // query on super table
if (pMeterMetaInfo->vnodeIndex < 0) {
tscError("%p error vnodeIdx:%d", pSql, pMeterMetaInfo->vnodeIndex);
return -1;
}
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx);
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex);
uint32_t vnodeId = pVnodeSidList->vpeerDesc[pVnodeSidList->index].vnode;
numOfMeters = pVnodeSidList->numOfSids;
@ -1715,34 +1781,8 @@ int tscBuildQueryMsg(SSqlObj *pSql) {
pQueryMsg->colNameLen = htonl(len);
// set sids list
tscTrace("%p vid:%d, query on %d meters", pSql, pSql->cmd.vnodeIdx, numOfMeters);
if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
#ifdef _DEBUG_VIEW
tscTrace("%p %d", pSql, pMeterMetaInfo->pMeterMeta->sid);
#endif
SMeterSidExtInfo *pSMeterTagInfo = (SMeterSidExtInfo *)pMsg;
pSMeterTagInfo->sid = htonl(pMeterMeta->sid);
pMsg += sizeof(SMeterSidExtInfo);
} else {
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx);
for (int32_t i = 0; i < numOfMeters; ++i) {
SMeterSidExtInfo *pMeterTagInfo = (SMeterSidExtInfo *)pMsg;
SMeterSidExtInfo *pQueryMeterInfo = tscGetMeterSidInfo(pVnodeSidList, i);
pMeterTagInfo->sid = htonl(pQueryMeterInfo->sid);
pMsg += sizeof(SMeterSidExtInfo);
#ifdef _DEBUG_VIEW
tscTrace("%p %d", pSql, pQueryMeterInfo->sid);
#endif
memcpy(pMsg, pQueryMeterInfo->tags, pMetricMeta->tagLen);
pMsg += pMetricMeta->tagLen;
}
}
// serialize the table info (sid, uid, tags)
pMsg = doSerializeTableInfo(pSql, numOfMeters, htons(pQueryMsg->vnode), pMsg);
// only include the required tag column schema. If a tag is not required, it won't be sent to vnode
if (pMeterMetaInfo->numOfTags > 0) {
@ -1797,7 +1837,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) {
int32_t numOfBlocks = 0;
if (pCmd->tsBuf != NULL) {
STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pCmd->vnodeIdx);
STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pMeterMetaInfo->vnodeIndex);
assert(QUERY_IS_JOIN_QUERY(pCmd->type) && pBlockInfo != NULL); // this query should not be sent
// todo refactor
@ -2198,7 +2238,8 @@ int tscBuildShowMsg(SSqlObj *pSql) {
pShowMsg = (SShowMsg *)pMsg;
pShowMsg->type = pCmd->showType;
if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC || pShowMsg->type == TSDB_MGMT_TABLE_VNODES ) && pCmd->payloadLen != 0) {
if ((pShowMsg->type == TSDB_MGMT_TABLE_TABLE || pShowMsg->type == TSDB_MGMT_TABLE_METRIC || pShowMsg->type == TSDB_MGMT_TABLE_VNODES || pShowMsg->type == TSDB_MGMT_TABLE_VGROUP)
&& pCmd->payloadLen != 0) {
// only these SHOW types support wildcard queries
pShowMsg->payloadLen = htons(pCmd->payloadLen);
memcpy(pShowMsg->payload, payload, pCmd->payloadLen);
@ -2333,6 +2374,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql) {
size = tscEstimateCreateTableMsgLength(pSql);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for create table msg", pSql);
free(tmpData);
return -1;
}
@ -2665,6 +2707,8 @@ int tscBuildConnectMsg(SSqlObj *pSql) {
db = (db == NULL) ? pObj->db : db + 1;
strcpy(pConnect->db, db);
strcpy(pConnect->clientVersion, version);
pMsg += sizeof(SConnectMsg);
msgLen = pMsg - pStart;
@ -2764,10 +2808,14 @@ static int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
int32_t n = 0;
for (int32_t i = 0; i < pCmd->tagCond.numOfTagCond; ++i) {
n += pCmd->tagCond.cond[i].cond.n;
n += strlen(pCmd->tagCond.cond[i].cond);
}
int32_t tagLen = n * TSDB_NCHAR_SIZE;
if (pCmd->tagCond.tbnameCond.cond != NULL) {
tagLen += strlen(pCmd->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE;
}
int32_t tagLen = n * TSDB_NCHAR_SIZE + pCmd->tagCond.tbnameCond.cond.n * TSDB_NCHAR_SIZE;
int32_t joinCondLen = (TSDB_METER_ID_LEN + sizeof(int16_t)) * 2;
int32_t elemSize = sizeof(SMetricMetaElemMsg) * pCmd->numOfTables;
@ -2839,8 +2887,9 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) {
if (pTagCond->numOfTagCond > 0) {
SCond *pCond = tsGetMetricQueryCondPos(pTagCond, uid);
if (pCond != NULL) {
condLen = pCond->cond.n + 1;
bool ret = taosMbsToUcs4(pCond->cond.z, pCond->cond.n, pMsg, pCond->cond.n * TSDB_NCHAR_SIZE);
condLen = strlen(pCond->cond) + 1;
bool ret = taosMbsToUcs4(pCond->cond, condLen, pMsg, condLen * TSDB_NCHAR_SIZE);
if (!ret) {
tscError("%p mbs to ucs4 failed:%s", pSql, tsGetMetricQueryCondPos(pTagCond, uid));
return 0;
@ -2859,15 +2908,17 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) {
offset = pMsg - (char *)pMetaMsg;
pElem->tableCond = htonl(offset);
pElem->tableCondLen = htonl(pTagCond->tbnameCond.cond.n);
memcpy(pMsg, pTagCond->tbnameCond.cond.z, pTagCond->tbnameCond.cond.n);
pMsg += pTagCond->tbnameCond.cond.n;
uint32_t len = strlen(pTagCond->tbnameCond.cond);
pElem->tableCondLen = htonl(len);
memcpy(pMsg, pTagCond->tbnameCond.cond, len);
pMsg += len;
}
SSqlGroupbyExpr *pGroupby = &pCmd->groupbyExpr;
if (pGroupby->tableIndex != i) {
if (pGroupby->tableIndex != i && pGroupby->numOfGroupCols > 0) {
pElem->orderType = 0;
pElem->orderIndex = 0;
pElem->numOfGroupCols = 0;
@ -2885,15 +2936,14 @@ int tscBuildMetricMetaMsg(SSqlObj *pSql) {
pElem->groupbyTagColumnList = htonl(offset);
for (int32_t j = 0; j < pCmd->groupbyExpr.numOfGroupCols; ++j) {
SColIndexEx *pCol = &pCmd->groupbyExpr.columnInfo[j];
SColIndexEx* pDestCol = (SColIndexEx*) pMsg;
*((int16_t *)pMsg) = pCol->colId;
pMsg += sizeof(pCol->colId);
pDestCol->colIdxInBuf = 0;
pDestCol->colIdx = htons(pCol->colIdx);
pDestCol->colId = htons(pCol->colId);
pDestCol->flag = htons(pCol->flag);
*((int16_t *)pMsg) += pCol->colIdx;
pMsg += sizeof(pCol->colIdx);
*((int16_t *)pMsg) += pCol->flag;
pMsg += sizeof(pCol->flag);
pMsg += sizeof(SColIndexEx);
}
}
}
@ -3235,44 +3285,47 @@ int tscProcessMetricMetaRsp(SSqlObj *pSql) {
size += pMeta->numOfVnodes * sizeof(SVnodeSidList *) + pMeta->numOfMeters * sizeof(SMeterSidExtInfo *);
char *pStr = calloc(1, size);
if (pStr == NULL) {
char *pBuf = calloc(1, size);
if (pBuf == NULL) {
pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY;
goto _error_clean;
}
SMetricMeta *pNewMetricMeta = (SMetricMeta *)pStr;
SMetricMeta *pNewMetricMeta = (SMetricMeta *)pBuf;
metricMetaList[k] = pNewMetricMeta;
pNewMetricMeta->numOfMeters = pMeta->numOfMeters;
pNewMetricMeta->numOfVnodes = pMeta->numOfVnodes;
pNewMetricMeta->tagLen = pMeta->tagLen;
pStr = pStr + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *);
pBuf = pBuf + sizeof(SMetricMeta) + pNewMetricMeta->numOfVnodes * sizeof(SVnodeSidList *);
for (int32_t i = 0; i < pMeta->numOfVnodes; ++i) {
SVnodeSidList *pSidLists = (SVnodeSidList *)rsp;
memcpy(pStr, pSidLists, sizeof(SVnodeSidList));
memcpy(pBuf, pSidLists, sizeof(SVnodeSidList));
pNewMetricMeta->list[i] = pStr - (char *)pNewMetricMeta; // offset value
SVnodeSidList *pLists = (SVnodeSidList *)pStr;
pNewMetricMeta->list[i] = pBuf - (char *)pNewMetricMeta; // offset value
SVnodeSidList *pLists = (SVnodeSidList *)pBuf;
tscTrace("%p metricmeta:vid:%d,numOfMeters:%d", pSql, i, pLists->numOfSids);
pStr += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids;
pBuf += sizeof(SVnodeSidList) + sizeof(SMeterSidExtInfo *) * pSidLists->numOfSids;
rsp += sizeof(SVnodeSidList);
size_t sidSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen;
size_t elemSize = sizeof(SMeterSidExtInfo) + pNewMetricMeta->tagLen;
for (int32_t j = 0; j < pSidLists->numOfSids; ++j) {
pLists->pSidExtInfoList[j] = pStr - (char *)pLists;
memcpy(pStr, rsp, sidSize);
pLists->pSidExtInfoList[j] = pBuf - (char *)pLists;
memcpy(pBuf, rsp, elemSize);
rsp += sidSize;
pStr += sidSize;
((SMeterSidExtInfo*) pBuf)->uid = htobe64(((SMeterSidExtInfo*) pBuf)->uid);
((SMeterSidExtInfo*) pBuf)->sid = htonl(((SMeterSidExtInfo*) pBuf)->sid);
rsp += elemSize;
pBuf += elemSize;
}
}
sizes[k] = pStr - (char *)pNewMetricMeta;
sizes[k] = pBuf - (char *)pNewMetricMeta;
}
for (int32_t i = 0; i < num; ++i) {
@ -3364,7 +3417,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
}
int tscProcessConnectRsp(SSqlObj *pSql) {
char temp[TSDB_METER_ID_LEN];
char temp[TSDB_METER_ID_LEN*2];
SConnectRsp *pConnect;
STscObj *pObj = pSql->pTscObj;
@ -3372,8 +3425,11 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
pConnect = (SConnectRsp *)pRes->pRsp;
strcpy(pObj->acctId, pConnect->acctId); // copy acctId from response
sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);
strcpy(pObj->db, temp);
int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);
assert(len <= tListLen(pObj->db));
strncpy(pObj->db, temp, tListLen(pObj->db));
#ifdef CLUSTER
SIpList * pIpList;
char *rsp = pRes->pRsp + sizeof(SConnectRsp);
@ -3648,7 +3704,7 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) {
*/
if (pMeterMetaInfo->pMeterMeta == NULL || !tscQueryOnMetric(pCmd)) {
if (pMeterMetaInfo->pMeterMeta) {
tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%lld, addr:%p", pSql,
tscTrace("%p update meter meta, old: numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
pMeterMetaInfo->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid, pMeterMetaInfo->pMeterMeta);
}
tscWaitingForCreateTable(&pSql->cmd);
@ -3656,7 +3712,7 @@ int tscRenewMeterMeta(SSqlObj *pSql, char *meterId) {
code = tscDoGetMeterMeta(pSql, meterId, 0); // todo ??
} else {
tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%lld, addr:%p", pSql,
tscTrace("%p metric query not update metric meta, numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
pMeterMetaInfo->pMeterMeta->numOfTags, pCmd->numOfCols, pMeterMetaInfo->pMeterMeta->uid,
pMeterMetaInfo->pMeterMeta);
}

View File

@ -16,21 +16,22 @@
#include "os.h"
#include "tcache.h"
#include "tlog.h"
#include "tnote.h"
#include "trpc.h"
#include "tscJoinProcess.h"
#include "tscProfile.h"
#include "tscSQLParser.h"
#include "tscSecondaryMerge.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tscompression.h"
#include "tsocket.h"
#include "tscSQLParser.h"
#include "ttimer.h"
#include "tutil.h"
#include "tnote.h"
#include "ihash.h"
TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos) {
TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) {
STscObj *pObj;
taos_init();
@ -69,8 +70,8 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
}
#else
if (ip && ip[0]) {
if (ip != tsServerIpStr) {
strcpy(tsServerIpStr, ip);
if (ip != tsMasterIp) {
strcpy(tsMasterIp, ip);
}
tsServerIp = inet_addr(ip);
}
@ -152,51 +153,21 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
if (ip == NULL || (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0))) {
#ifdef CLUSTER
ip = tsMasterIp;
#else
ip = tsServerIpStr;
#endif
}
tscTrace("try to create a connection to %s", ip);
void *taos = taos_connect_imp(ip, user, pass, db, port, NULL, NULL, NULL);
if (taos != NULL) {
STscObj* pObj = (STscObj*) taos;
STscObj *pObj = (STscObj *)taos;
// version compare only requires the first 3 segments of the version string
int32_t comparedSegments = 3;
char client_version[64] = {0};
char server_version[64] = {0};
int clientVersionNumber[4] = {0};
int serverVersionNumber[4] = {0};
strcpy(client_version, version);
strcpy(server_version, taos_get_server_info(taos));
if (!taosGetVersionNumber(client_version, clientVersionNumber)) {
tscError("taos:%p, invalid client version:%s", taos, client_version);
pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION;
int code = taosCheckVersion(version, taos_get_server_info(taos), 3);
if (code != 0) {
pObj->pSql->res.code = code;
taos_close(taos);
return NULL;
}
if (!taosGetVersionNumber(server_version, serverVersionNumber)) {
tscError("taos:%p, invalid server version:%s", taos, server_version);
pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION;
taos_close(taos);
return NULL;
}
for(int32_t i = 0; i < comparedSegments; ++i) {
if (clientVersionNumber[i] != serverVersionNumber[i]) {
tscError("taos:%p, the %d-th number of server version:%s not matched with client version:%s, close connection",
taos, i, server_version, version);
pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION;
taos_close(taos);
return NULL;
}
}
}
return taos;
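Reviewer note: the manual three-segment comparison deleted above is collapsed into a single call to taosCheckVersion(version, taos_get_server_info(taos), 3). The real helper lives in the util library and is not shown in this diff; a hedged sketch of what a first-n-segments check can look like:

#include <stdio.h>

/* return 0 when the first n dotted segments of two version strings match */
static int checkVersionSketch(const char *client, const char *server, int n) {
  int c[4] = {0}, s[4] = {0};
  sscanf(client, "%d.%d.%d.%d", &c[0], &c[1], &c[2], &c[3]);
  sscanf(server, "%d.%d.%d.%d", &s[0], &s[1], &s[2], &s[3]);
  for (int i = 0; i < n && i < 4; ++i) {
    if (c[i] != s[i]) return -1;  /* mismatch within the compared prefix */
  }
  return 0;
}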
@ -206,7 +177,7 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
void *param, void **taos) {
#ifndef CLUSTER
if (ip == NULL) {
ip = tsServerIpStr;
ip = tsMasterIp;
}
#endif
return taos_connect_imp(ip, user, pass, db, port, fp, param, taos);
@ -225,7 +196,7 @@ void taos_close(TAOS *taos) {
}
}
int taos_query_imp(STscObj* pObj, SSqlObj* pSql) {
int taos_query_imp(STscObj *pObj, SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
pRes->numOfRows = 1;
@ -236,7 +207,7 @@ int taos_query_imp(STscObj* pObj, SSqlObj* pSql) {
pSql->pTableHashList = NULL;
}
tscTrace("%p SQL: %s pObj:%p", pSql, pSql->sqlstr, pObj);
tscDump("%p pObj:%p, SQL: %s", pSql, pObj, pSql->sqlstr);
pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false);
@ -276,8 +247,9 @@ int taos_query(TAOS *taos, const char *sqlstr) {
SSqlRes *pRes = &pSql->res;
size_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
pRes->code = tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql
if (sqlLen > tsMaxSQLStringLen) {
pRes->code =
tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql
tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
return pRes->code;
@ -457,25 +429,56 @@ static void **getOneRowFromBuf(SSqlObj *pSql) {
return pRes->tsrow;
}
static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
bool hasData = true;
SSqlCmd *pCmd = &pSql->cmd;
if (tscProjectionQueryOnMetric(pCmd)) {
bool allSubqueryExhausted = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd;
SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfo(pCmd1, 0);
assert(pCmd1->numOfTables == 1);
/*
* if the global limitation has not been reached, and the current result has not been exhausted, or more
* vnodes are still available, go on
*/
if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows &&
(!tscHasReachLimitation(pSql->pSubs[i]))) {
allSubqueryExhausted = false;
break;
}
}
hasData = !allSubqueryExhausted;
} else { // otherwise, in case of an inner join, if any subquery is exhausted, the query is completed.
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pSql->pSubs[i]) &&
tscProjectionQueryOnTable(&pSql->pSubs[i]->cmd)) ||
(pRes1->numOfRows == 0)) {
hasData = false;
break;
}
}
}
return hasData;
}
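Reviewer note: the new tscHashRemainDataInSubqueryResultSet above encodes two different completion rules: for a projection query on a super table the join continues while any subquery still has buffered rows, unvisited vnodes, and limit headroom; for everything else (inner join) a single exhausted subquery finishes the query. The first rule, isolated as a self-contained sketch with a stand-in struct:

#include <stdbool.h>

typedef struct {
  int  row, numOfRows;          /* cursor and size of the buffered result */
  int  vnodeIndex, numOfVnodes; /* progress through this subquery's vnodes */
  bool reachedLimit;            /* tscHasReachLimitation() result, precomputed */
} SSubSketch;

/* keep fetching while any subquery can still produce rows */
static bool anySubqueryHasMore(const SSubSketch *subs, int n) {
  for (int i = 0; i < n; ++i) {
    if (subs[i].vnodeIndex < subs[i].numOfVnodes &&
        subs[i].row < subs[i].numOfRows && !subs[i].reachedLimit) {
      return true;
    }
  }
  return false;
}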
static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
while (1) {
bool hasData = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
// in case inner join, if any subquery exhausted, query completed
if (pRes1->numOfRows == 0) {
hasData = false;
break;
}
}
if (!hasData) { // free all sub sqlobj
tscTrace("%p one subquery exhausted, free other %d subquery", pSql, pSql->numOfSubs - 1);
if (!tscHashRemainDataInSubqueryResultSet(pSql)) { // free all sub sqlobj
tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
SSubqueryState *pState = NULL;
@ -493,41 +496,32 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
}
if (pRes->tsrow == NULL) {
pRes->tsrow = malloc(sizeof(void *) * pCmd->exprsInfo.numOfExprs);
pRes->tsrow = malloc(POINTER_BYTES * pCmd->exprsInfo.numOfExprs);
}
bool success = false;
if (pSql->numOfSubs >= 2) {
// do merge result
if (pSql->numOfSubs >= 2) { // do merge result
SSqlRes *pRes1 = &pSql->pSubs[0]->res;
SSqlRes *pRes2 = &pSql->pSubs[1]->res;
while (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) {
if (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) {
doSetResultRowData(pSql->pSubs[0]);
doSetResultRowData(pSql->pSubs[1]);
TSKEY key1 = *(TSKEY *)pRes1->tsrow[0];
TSKEY key2 = *(TSKEY *)pRes2->tsrow[0];
if (key1 == key2) {
// TSKEY key1 = *(TSKEY *)pRes1->tsrow[0];
// TSKEY key2 = *(TSKEY *)pRes2->tsrow[0];
// printf("first:%" PRId64 ", second:%" PRId64 "\n", key1, key2);
success = true;
pRes1->row++;
pRes2->row++;
break;
} else if (key1 < key2) {
pRes1->row++;
} else if (key1 > key2) {
pRes2->row++;
}
}
} else {
} else { // only one subquery
SSqlRes *pRes1 = &pSql->pSubs[0]->res;
doSetResultRowData(pSql->pSubs[0]);
success = (pRes1->row++ < pRes1->numOfRows);
}
if (success) {
if (success) { // current row of final output has been built, return to app
for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
int32_t tableIndex = pRes->pColumnIndex[i].tableIndex;
int32_t columnIndex = pRes->pColumnIndex[i].columnIndex;
@ -537,7 +531,7 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
}
break;
} else {
} else { // continue retrieve data from vnode
tscFetchDatablockFromSubquery(pSql);
if (pRes->code != TSDB_CODE_SUCCESS) {
return NULL;
@ -559,9 +553,12 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) {
if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
if (pRes->code == TSDB_CODE_SUCCESS) {
tscTrace("%p data from all subqueries have been retrieved to client", pSql);
return tscJoinResultsetFromBuf(pSql);
} else {
tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code);
return NULL;
}
@ -602,7 +599,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// reach the maximum number of output rows, abort
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
if (tscHasReachLimitation(pSql)) {
return NULL;
}
@ -615,7 +612,15 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
if ((++pCmd->vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
/*
* For a projection query with super table join, numOfSubs equals the number of all subqueries, so
* we need to reset numOfSubs to 0.
*
* For a super table join with projection query, if any one of the subqueries is exhausted, the query is completed.
*/
pSql->numOfSubs = 0;
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pCmd->command = TSDB_SQL_SELECT;
assert(pSql->fp == NULL);
tscProcessSql(pSql);
@ -623,7 +628,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
}
// check!!!
if (rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
if (rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
break;
}
}
@ -649,7 +654,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
nRows = taos_fetch_block_impl(res, rows);
while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) {
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
if (tscHasReachLimitation(pSql)) {
return 0;
}
@ -659,8 +664,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
if ((++pSql->cmd.vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT;
assert(pSql->fp == NULL);
tscProcessSql(pSql);
@ -668,7 +672,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
}
// check!!!
if (*rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
if (*rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
break;
}
}
@ -792,7 +796,6 @@ int taos_errno(TAOS *taos) {
char *taos_errstr(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
uint8_t code;
// char temp[256] = {0};
if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode];
@ -803,12 +806,14 @@ char *taos_errstr(TAOS *taos) {
// for invalid sql, additional information is attached to explain why the sql is invalid
if (code == TSDB_CODE_INVALID_SQL) {
// snprintf(temp, tListLen(temp), "invalid SQL: %s", pObj->pSql->cmd.payload);
// strcpy(pObj->pSql->cmd.payload, temp);
return pObj->pSql->cmd.payload;
} else {
if (code < 0 || code > TSDB_CODE_MAX_ERROR_CODE) {
return tsError[TSDB_CODE_SUCCESS];
} else {
return tsError[code];
}
}
}
void taos_config(int debug, char *log_path) {
@ -875,15 +880,21 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
break;
case TSDB_DATA_TYPE_BIGINT:
len += sprintf(str + len, "%lld ", *((int64_t *)row[i]));
len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
len += sprintf(str + len, "%f ", *((float *)row[i]));
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
len += sprintf(str + len, "%f ", fv);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
len += sprintf(str + len, "%lf ", *((double *)row[i]));
case TSDB_DATA_TYPE_DOUBLE:{
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
len += sprintf(str + len, "%lf ", dv);
}
break;
case TSDB_DATA_TYPE_BINARY:
@ -893,15 +904,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
size_t xlen = strlen(row[i]);
size_t trueLen = MIN(xlen, fields[i].bytes);
memcpy(str + len, (char*) row[i], trueLen);
memcpy(str + len, (char *)row[i], trueLen);
str[len + trueLen] = ' ';
len += (trueLen + 1);
}
break;
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
len += sprintf(str + len, "%lld ", *((int64_t *)row[i]));
len += sprintf(str + len, "%" PRId64 " ", *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_BOOL:
@ -930,7 +940,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
int32_t sqlLen = strlen(sql);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql);
pRes->code = TSDB_CODE_INVALID_SQL;
return pRes->code;
@ -961,7 +971,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return code;
}
static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t tblListLen) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must before clean the sqlcmd object
tscRemoveAllMeterMetaInfo(&pSql->cmd, false);
tscCleanSqlCmd(&pSql->cmd);
@ -972,11 +982,11 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t
pCmd->count = 0;
int code = TSDB_CODE_INVALID_METER_ID;
char *str = (char*) tblNameList;
char *str = (char *)tblNameList;
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd);
if ((code = tscAllocPayload(pCmd, tblListLen+16)) != TSDB_CODE_SUCCESS) {
if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) {
return code;
}
@ -1042,7 +1052,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t
}
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
const int32_t MAX_TABLE_NAME_LENGTH = 12*1024*1024; // 12MB list
const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
@ -1066,7 +1076,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
return pRes->code;
}
char* str = calloc(1, tblListLen + 1);
char *str = calloc(1, tblListLen + 1);
if (str == NULL) {
pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("%p failed to malloc sql string buffer", pSql);
@ -1074,7 +1084,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
}
strtolower(str, tableNameList);
pRes->code = (uint8_t) tscParseTblNameList(pSql, str, tblListLen);
pRes->code = (uint8_t)tscParseTblNameList(pSql, str, tblListLen);
/*
* set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query.

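Reviewer note: earlier in this file taos_print_row switches its FLOAT/DOUBLE cases from dereferencing a cast pointer to reading through GET_FLOAT_VAL / GET_DOUBLE_VAL. The macro bodies are not part of this diff; one common, assumption-labeled way to implement such alignment-safe reads is a memcpy:

#include <string.h>

/* read a float/double from a possibly unaligned row buffer without UB */
static float getFloatValSketch(const void *p) {
  float v;
  memcpy(&v, p, sizeof(v));  /* byte copy instead of *(float *)p */
  return v;
}

static double getDoubleValSketch(const void *p) {
  double v;
  memcpy(&v, p, sizeof(v));
  return v;
}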
View File

@ -85,7 +85,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p,get metermeta failed, retry in %lldms", pStream->pSql, pStream, retryDelayTime);
tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
return;
@ -136,7 +136,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, query data failed, code:%d, retry in %lldms", pStream->pSql, pStream, numOfRows,
tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0);
@ -158,7 +158,7 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
if (timestamp != actualTimestamp) {
// reset the timestamp of each agg point by using start time of each interval
*((int64_t *)pRes->data) = actualTimestamp;
tscWarn("%p stream:%p, timestamp of points is:%lld, reset to %lld", pSql, pStream, timestamp, actualTimestamp);
tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp);
}
}
@ -169,7 +169,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %lldms", pSql, pStream, numOfRows, retryDelayTime);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscClearMeterMetaInfo(pMeterMetaInfo, true);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@ -235,7 +235,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
/* no results in the query range, retry */
// todo set retry dynamic time
int32_t retry = tsProjectExecInterval;
tscError("%p stream:%p, retrieve no data, code:%d, retry in %lldms", pSql, pStream, numOfRows, retry);
tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry);
tscClearSqlMetaInfoForce(&(pStream->pSql->cmd));
tscSetRetryTimer(pStream, pStream->pSql, retry);
@ -265,7 +265,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
/*
* the current time window will be closed, since it is too early to exceed the maxRetentWindow value
*/
tscTrace("%p stream:%p, etime:%lld is too old, exceeds the max retention time window:%lld, stop the stream",
tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream",
pStream->pSql, pStream, pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
@ -276,10 +276,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
return;
}
tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream,
tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
now + timer, timer, pStream->stime, etime);
} else {
tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream,
tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1);
}
@ -299,7 +299,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
*/
timer = pStream->slidingTime;
if (pStream->stime > pStream->etime) {
tscTrace("%p stream:%p, stime:%lld is larger than end time: %lld, stop the stream", pStream->pSql, pStream,
tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
@ -353,7 +353,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
if (pCmd->nAggTimeInterval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%lld", pSql, pStream,
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream,
pCmd->nAggTimeInterval, minIntervalTime);
pCmd->nAggTimeInterval = minIntervalTime;
}
@ -368,20 +368,21 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pCmd->nSlidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%lld too small, reset to:%lld", pSql, pStream, pCmd->nSlidingTime,
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, pCmd->nSlidingTime,
minSlidingTime);
pCmd->nSlidingTime = minSlidingTime;
}
if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) {
tscWarn("%p stream:%p, sliding value:%lld can not be larger than interval range, reset to:%lld", pSql, pStream,
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream,
pCmd->nSlidingTime, pCmd->nAggTimeInterval);
pCmd->nSlidingTime = pCmd->nAggTimeInterval;
}
pStream->slidingTime = pCmd->nSlidingTime;
pCmd->nAggTimeInterval = 0; // clear the interval value to avoid the force time window split by query processor
}
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
@ -401,11 +402,11 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
tscWarn("%p stream:%p, last timestamp:0, reset to:%lld", pSql, pStream, stime);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime);
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%lld, reset to:%lld", pSql, pStream, stime, newStime);
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime);
stime = newStime;
}
}
@ -447,7 +448,10 @@ static void setErrorInfo(STscObj* pObj, int32_t code, char* info) {
SSqlCmd* pCmd = &pObj->pSql->cmd;
pObj->pSql->res.code = code;
if (info != NULL) {
strncpy(pCmd->payload, info, pCmd->payloadLen);
}
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
@ -537,7 +541,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
int64_t starttime = tscGetLaunchTimestamp(pStream);
taosTmrReset(tscProcessStreamTimer, starttime, pStream, tscTmr, &pStream->pTimer);
tscTrace("%p stream:%p is opened, query on:%s, interval:%lld, sliding:%lld, first launched in:%lld, sql:%s", pSql,
tscTrace("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pMeterMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, sqlstr);
return pStream;
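Reviewer note: nearly every hunk in this file swaps %lld (and one %ld) for the inttypes.h PRId64 macro. int64_t is long on LP64 Linux but long long on 32-bit and Windows builds, so hard-coding %lld is not portable; PRId64 expands to the right conversion everywhere. Minimal example:

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t retryDelay = 1500;
  printf("retry in %" PRId64 "ms\n", retryDelay);  /* portable int64_t printing */
  return 0;
}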

View File

@ -56,7 +56,7 @@ TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, c
if (pSub->taos == NULL) {
tfree(pSub);
} else {
char qstr[128];
char qstr[256] = {0};
sprintf(qstr, "use %s", db);
int res = taos_query(pSub->taos, qstr);
if (res != 0) {
@ -64,7 +64,7 @@ TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, c
taos_close(pSub->taos);
tfree(pSub);
} else {
sprintf(qstr, "select * from %s where _c0 > now+1000d", pSub->name);
snprintf(qstr, tListLen(qstr), "select * from %s where _c0 > now+1000d", pSub->name);
if (taos_query(pSub->taos, qstr)) {
tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos));
taos_close(pSub->taos);
@ -106,7 +106,7 @@ TAOS_ROW taos_consume(TAOS_SUB *tsub) {
pSub->stime = taosGetTimestampMs();
sprintf(qstr, "select * from %s where _c0 > %lld order by _c0 asc", pSub->name, pSub->lastKey);
sprintf(qstr, "select * from %s where _c0 > %" PRId64 " order by _c0 asc", pSub->name, pSub->lastKey);
if (taos_query(pSub->taos, qstr)) {
tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos));
return NULL;
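Reviewer note: this file grows the query scratch buffer from 128 to 256 bytes and bounds the second format with snprintf(qstr, tListLen(qstr), ...), so an unexpectedly long subscription name truncates instead of smashing the stack. The pattern in isolation, with tListLen assumed to be the usual element-count macro:

#include <stdio.h>

#define LIST_LEN(x) (sizeof(x) / sizeof((x)[0]))  /* stand-in for tListLen */

int main(void) {
  char qstr[256] = {0};
  const char *name = "a.rather.long.subscription.table.name";
  /* truncates instead of overflowing when name is unexpectedly long */
  snprintf(qstr, LIST_LEN(qstr), "select * from %s where _c0 > now+1000d", name);
  puts(qstr);
  return 0;
}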

View File

@ -198,7 +198,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
switch (option) {
case TSDB_OPTION_CONFIGDIR:
cfg = tsGetConfigOption("configDir");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
strncpy(configDir, pStr, TSDB_FILENAME_LEN);
cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION;
tscPrint("set config file directory:%s", pStr);
@ -210,7 +212,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_SHELL_ACTIVITY_TIMER:
cfg = tsGetConfigOption("shellActivityTimer");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
tsShellActivityTimer = atoi(pStr);
if (tsShellActivityTimer < 1) tsShellActivityTimer = 1;
if (tsShellActivityTimer > 3600) tsShellActivityTimer = 3600;
@ -224,13 +228,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_LOCALE: { // set locale
cfg = tsGetConfigOption("locale");
assert(cfg != NULL);
size_t len = strlen(pStr);
if (len == 0 || len > TSDB_LOCALE_LEN) {
tscPrint("Invalid locale:%s, use default", pStr);
return -1;
}
if (cfg && cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
char sep = '.';
if (strlen(tsLocale) == 0) { // locale does not set yet
@ -285,13 +291,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_CHARSET: {
/* set charset will override the value of charset, assigned during system locale changed */
cfg = tsGetConfigOption("charset");
assert(cfg != NULL);
size_t len = strlen(pStr);
if (len == 0 || len > TSDB_LOCALE_LEN) {
tscPrint("failed to set charset:%s", pStr);
return -1;
}
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (taosValidateEncodec(pStr)) {
if (strlen(tsCharset) == 0) {
tscPrint("charset is set:%s", pStr);
@ -314,7 +322,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_TIMEZONE:
cfg = tsGetConfigOption("timezone");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
strcpy(tsTimezone, pStr);
tsSetTimeZone();
cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION;
@ -327,7 +337,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_SOCKET_TYPE:
cfg = tsGetConfigOption("sockettype");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_UDP) != 0 && strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_TCP) != 0) {
tscError("only 'tcp' or 'udp' allowed for configuring the socket type");
return -1;
@ -340,6 +352,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
break;
default:
// TODO return the correct error code to client in the format for taos_errstr()
tscError("Invalid option %d", option);
return -1;
}

View File

@ -22,8 +22,8 @@
#include "tscJoinProcess.h"
#include "tscProfile.h"
#include "tscSecondaryMerge.h"
#include "tscUtil.h"
#include "tschemautil.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tsqldef.h"
#include "ttimer.h"
@ -51,7 +51,6 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) {
assert(len < tListLen(tagIdBuf));
const int32_t maxKeySize = TSDB_MAX_TAGS_LEN; // allowed max key size
char* tmp = calloc(1, TSDB_MAX_SQL_LEN);
SCond* cond = tsGetMetricQueryCondPos(pTagCond, uid);
@ -60,12 +59,23 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) {
sprintf(join, "%s,%s", pTagCond->joinInfo.left.meterId, pTagCond->joinInfo.right.meterId);
}
int32_t keyLen =
snprintf(tmp, TSDB_MAX_SQL_LEN, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name,
(cond != NULL ? cond->cond.z : NULL), pTagCond->tbnameCond.cond.n > 0 ? pTagCond->tbnameCond.cond.z : NULL,
// estimate the buffer size
size_t tbnameCondLen = pTagCond->tbnameCond.cond != NULL ? strlen(pTagCond->tbnameCond.cond) : 0;
size_t redundantLen = 20;
size_t bufSize = strlen(pMeterMetaInfo->name) + tbnameCondLen + strlen(join) + strlen(tagIdBuf);
if (cond != NULL) {
bufSize += strlen(cond->cond);
}
bufSize = (size_t)((bufSize + redundantLen) * 1.5);
char* tmp = calloc(1, bufSize);
int32_t keyLen = snprintf(tmp, bufSize, "%s,%s,%s,%d,%s,[%s],%d", pMeterMetaInfo->name,
(cond != NULL ? cond->cond : NULL), (tbnameCondLen > 0 ? pTagCond->tbnameCond.cond : NULL),
pTagCond->relType, join, tagIdBuf, pCmd->groupbyExpr.orderType);
assert(keyLen <= TSDB_MAX_SQL_LEN);
assert(keyLen <= bufSize);
if (keyLen < maxKeySize) {
strcpy(str, tmp);
@ -73,7 +83,7 @@ void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* str, uint64_t uid) {
MD5_CTX ctx;
MD5Init(&ctx);
MD5Update(&ctx, (uint8_t*) tmp, keyLen);
MD5Update(&ctx, (uint8_t*)tmp, keyLen);
char* pStr = base64_encode(ctx.digest, tListLen(ctx.digest));
strcpy(str, pStr);
}
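Reviewer note: the cache-key builder above stops formatting into a fixed TSDB_MAX_SQL_LEN buffer; it now sums the lengths of the variable parts, adds ~20 bytes of slack, scales by 1.5, and allocates exactly that, keeping the MD5-plus-base64 fallback for keys longer than TSDB_MAX_TAGS_LEN. The estimation step as a standalone sketch (parameter names are illustrative):

#include <stdlib.h>
#include <string.h>

/* size a buffer generously from variable-length key components */
static char *allocKeyBuf(const char *name, const char *cond, const char *join,
                         const char *tagIds, size_t *bufSize) {
  size_t redundantLen = 20;  /* slack for delimiters and small integer fields */
  size_t need = strlen(name) + (cond ? strlen(cond) : 0) + strlen(join) + strlen(tagIds);
  *bufSize = (size_t)((need + redundantLen) * 1.5);
  return calloc(1, *bufSize);
}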
@ -99,7 +109,7 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str) {
SCond* pDest = &pTagCond->cond[pTagCond->numOfTagCond];
pDest->uid = uid;
pDest->cond = SStringCreate(str);
pDest->cond = strdup(str);
pTagCond->numOfTagCond += 1;
}
@ -231,12 +241,22 @@ bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) {
return false;
}
//for project query, only the following two function is allowed
// for a projection query, only the following functions are allowed
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
int32_t functionId = pExpr->functionId;
if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ &&
functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
int32_t functionId = tscSqlExprGet(pCmd, i)->functionId;
if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG &&
functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
return false;
}
}
return true;
}
bool tscProjectionQueryOnTable(SSqlCmd* pCmd) {
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
int32_t functionId = tscSqlExprGet(pCmd, i)->functionId;
if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TS) {
return false;
}
}
@ -430,15 +450,6 @@ void tscFreeSqlObj(SSqlObj* pSql) {
free(pSql);
}
STableDataBlocks* tscCreateDataBlock(int32_t size) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
dataBuf->nAllocSize = (uint32_t)size;
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
return dataBuf;
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
if (pDataBlock == NULL) {
return;
@ -446,6 +457,9 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
tfree(pDataBlock->pData);
tfree(pDataBlock->params);
// free the refcount for metermeta
taosRemoveDataFromCache(tscCacheHandle, (void**)&(pDataBlock->pMeterMeta), false);
tfree(pDataBlock);
}
@ -492,11 +506,11 @@ SDataBlockList* tscCreateBlockArrayList() {
void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks) {
if (pList->nSize >= pList->nAlloc) {
pList->nAlloc = pList->nAlloc << 1;
pList->pData = realloc(pList->pData, sizeof(void*) * (size_t)pList->nAlloc);
pList->nAlloc = (pList->nAlloc) << 1U;
pList->pData = realloc(pList->pData, POINTER_BYTES * (size_t)pList->nAlloc);
// reset allocated memory
memset(pList->pData + pList->nSize, 0, sizeof(void*) * (pList->nAlloc - pList->nSize));
memset(pList->pData + pList->nSize, 0, POINTER_BYTES * (pList->nAlloc - pList->nSize));
}
pList->pData[pList->nSize++] = pBlocks;
@ -519,10 +533,21 @@ void* tscDestroyBlockArrayList(SDataBlockList* pList) {
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
SSqlCmd* pCmd = &pSql->cmd;
assert(pDataBlock->pMeterMeta != NULL);
pCmd->count = pDataBlock->numOfMeters;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// set the correct metermeta object, the metermeta has been locked in pDataBlocks, so it must be in the cache
if (pMeterMetaInfo->pMeterMeta != pDataBlock->pMeterMeta) {
strcpy(pMeterMetaInfo->name, pDataBlock->meterId);
taosRemoveDataFromCache(tscCacheHandle, (void**)&(pMeterMetaInfo->pMeterMeta), false);
pMeterMetaInfo->pMeterMeta = pDataBlock->pMeterMeta;
pDataBlock->pMeterMeta = NULL; // delegate the ownership of metermeta to pMeterMetaInfo
} else {
assert(strncmp(pMeterMetaInfo->name, pDataBlock->meterId, tListLen(pDataBlock->meterId)) == 0);
}
/*
* the submit message consists of : [RPC header|message body|digest]
@ -530,7 +555,10 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
* additional space.
*/
int ret = tscAllocPayload(pCmd, pDataBlock->nAllocSize + sizeof(STaosDigest));
if (TSDB_CODE_SUCCESS != ret) return ret;
if (TSDB_CODE_SUCCESS != ret) {
return ret;
}
memcpy(pCmd->payload, pDataBlock->pData, pDataBlock->nAllocSize);
/*
@ -540,7 +568,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
pCmd->payloadLen = pDataBlock->nAllocSize - tsRpcHeadSize;
assert(pCmd->allocSize >= pCmd->payloadLen + tsRpcHeadSize + sizeof(STaosDigest));
return tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
return TSDB_CODE_SUCCESS;
}
void tscFreeUnusedDataBlocks(SDataBlockList* pList) {
@ -552,33 +580,72 @@ void tscFreeUnusedDataBlocks(SDataBlockList* pList) {
}
}
STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name) {
STableDataBlocks* dataBuf = tscCreateDataBlock(size);
/**
* create the in-memory buffer for each table to keep the submitted data block
* @param initialSize
* @param rowSize
* @param startOffset
* @param name
* @param dataBlocks
* @return
*/
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
tscError("failed to allocated memory, reason:%s", strerror(errno));
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
dataBuf->nAllocSize = (uint32_t)initialSize;
dataBuf->headerSize = startOffset; // the header size will always be the startOffset value, reserved for the submit block header
dataBuf->pData = calloc(1, dataBuf->nAllocSize);
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
dataBuf->rowSize = rowSize;
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
strncpy(dataBuf->meterId, name, TSDB_METER_ID_LEN);
return dataBuf;
/*
* The metermeta may have been released, since the metermeta cache can be completely cleaned by another thread
* due to operations such as drop database.
*/
dataBuf->pMeterMeta = taosGetDataFromCache(tscCacheHandle, dataBuf->meterId);
assert(initialSize > 0);
if (dataBuf->pMeterMeta == NULL) {
tfree(dataBuf);
return TSDB_CODE_QUERY_CACHE_ERASED;
} else {
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
}
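Reviewer note: tscCreateDataBlock now returns an int32_t status and hands the block back through an STableDataBlocks** out-parameter, which lets the new cache-miss path report TSDB_CODE_QUERY_CACHE_ERASED separately from out-of-memory. The shape of that API change, sketched with stand-in types and illustrative codes:

#include <stdlib.h>

enum { CODE_OK = 0, CODE_OOM = 1, CODE_CACHE_ERASED = 2 };  /* illustrative codes */

typedef struct { char *pData; } SBlockSketch;

/* status code out front, object through the out-parameter */
static int createBlockSketch(size_t size, SBlockSketch **out) {
  SBlockSketch *b = calloc(1, sizeof(*b));
  if (b == NULL) return CODE_OOM;
  b->pData = calloc(1, size);
  if (b->pData == NULL) {
    free(b);
    return CODE_OOM;
  }
  *out = b;
  return CODE_OK;
}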
STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, char* tableId) {
STableDataBlocks* dataBuf = NULL;
int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, const char* tableId,
STableDataBlocks** dataBlocks) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosGetIntHashData(pHashList, id);
STableDataBlocks** t1 = (STableDataBlocks**) taosGetIntHashData(pHashList, id);
if (t1 != NULL) {
dataBuf = *t1;
*dataBlocks = *t1;
}
if (dataBuf == NULL) {
dataBuf = tscCreateDataBlockEx((size_t)size, rowSize, startOffset, tableId);
dataBuf = *(STableDataBlocks**)taosAddIntHash(pHashList, id, (char*)&dataBuf);
tscAppendDataBlock(pDataBlockList, dataBuf);
if (*dataBlocks == NULL) {
int32_t ret = tscCreateDataBlock((size_t) size, rowSize, startOffset, tableId, dataBlocks);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
return dataBuf;
*dataBlocks = *(STableDataBlocks**)taosAddIntHash(pHashList, id, (char*)dataBlocks);
tscAppendDataBlock(pDataBlockList, *dataBlocks);
}
return TSDB_CODE_SUCCESS;
}
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockList) {
@ -590,9 +657,16 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi
for (int32_t i = 0; i < pTableDataBlockList->nSize; ++i) {
STableDataBlocks* pOneTableBlock = pTableDataBlockList->pData[i];
STableDataBlocks* dataBuf =
tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgid, TSDB_PAYLOAD_SIZE,
tsInsertHeadSize, 0, pOneTableBlock->meterId);
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgid,
TSDB_PAYLOAD_SIZE, tsInsertHeadSize, 0, pOneTableBlock->meterId, &dataBuf);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
taosCleanUpIntHash(pVnodeDataBlockHashList);
tscDestroyBlockArrayList(pVnodeDataBlockList);
return ret;
}
int64_t destSize = dataBuf->size + pOneTableBlock->size;
if (dataBuf->nAllocSize < destSize) {
@ -618,8 +692,10 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pTableDataBlockLi
SShellSubmitBlock* pBlocks = (SShellSubmitBlock*)pOneTableBlock->pData;
sortRemoveDuplicates(pOneTableBlock);
tscTrace("%p meterId:%s, sid:%d, rows:%d, sversion:%d", pSql, pOneTableBlock->meterId, pBlocks->sid,
pBlocks->numOfRows, pBlocks->sversion);
char* e = (char*)pBlocks->payLoad + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscTrace("%p meterId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->meterId, pBlocks->sid,
pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->payLoad), GET_INT64_VAL(e));
pBlocks->sid = htonl(pBlocks->sid);
pBlocks->uid = htobe64(pBlocks->uid);
@ -1107,7 +1183,7 @@ SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* pColIndex) {
}
void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src) {
assert (src != NULL && dst != NULL);
assert(src != NULL && dst != NULL);
assert(src->filterOnBinary == 0 || src->filterOnBinary == 1);
if (src->lowerRelOptr == TSDB_RELATION_INVALID && src->upperRelOptr == TSDB_RELATION_INVALID) {
@ -1116,14 +1192,15 @@ void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* sr
*dst = *src;
if (dst->filterOnBinary) {
size_t len = (size_t) dst->len + 1;
dst->pz = calloc(1, len);
memcpy((char*) dst->pz, (char*) src->pz, (size_t) len);
size_t len = (size_t)dst->len + 1;
char* pTmp = calloc(1, len);
dst->pz = (int64_t)pTmp;
memcpy((char*)dst->pz, (char*)src->pz, (size_t)len);
}
}
void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src) {
assert (src != NULL && dst != NULL);
assert(src != NULL && dst != NULL);
*dst = *src;
@ -1181,7 +1258,8 @@ void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo) {
assert(pColBase->filterInfo[j].filterOnBinary == 0 || pColBase->filterInfo[j].filterOnBinary == 1);
if (pColBase->filterInfo[j].filterOnBinary) {
tfree(pColBase->filterInfo[j].pz);
free((char*)pColBase->filterInfo[j].pz);
pColBase->filterInfo[j].pz = 0;
}
}
}
@ -1341,13 +1419,19 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId) {
void tscTagCondCopy(STagCond* dest, const STagCond* src) {
memset(dest, 0, sizeof(STagCond));
SStringCopy(&dest->tbnameCond.cond, &src->tbnameCond.cond);
if (src->tbnameCond.cond != NULL) {
dest->tbnameCond.cond = strdup(src->tbnameCond.cond);
}
dest->tbnameCond.uid = src->tbnameCond.uid;
memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo));
for (int32_t i = 0; i < src->numOfTagCond; ++i) {
SStringCopy(&dest->cond[i].cond, &src->cond[i].cond);
if (src->cond[i].cond != NULL) {
dest->cond[i].cond = strdup(src->cond[i].cond);
}
dest->cond[i].uid = src->cond[i].uid;
}
@ -1356,10 +1440,9 @@ void tscTagCondCopy(STagCond* dest, const STagCond* src) {
}
void tscTagCondRelease(STagCond* pCond) {
SStringFree(&pCond->tbnameCond.cond);
free(pCond->tbnameCond.cond);
for (int32_t i = 0; i < pCond->numOfTagCond; ++i) {
SStringFree(&pCond->cond[i].cond);
free(pCond->cond[i].cond);
}
memset(pCond, 0, sizeof(STagCond));
@ -1458,7 +1541,11 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) {
* data blocks have been submit to vnode.
*/
SDataBlockList* pDataBlocks = pCmd->pDataBlocks;
if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
assert(pSql->cmd.numOfTables == 1);
if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) {
tscTrace("%p object should be release since all data blocks have been submit", pSql);
return true;
} else {
@ -1471,10 +1558,11 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) {
}
SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index) {
if (pCmd == NULL || index >= pCmd->numOfTables || index < 0) {
if (pCmd == NULL || pCmd->numOfTables == 0) {
return NULL;
}
assert(index >= 0 && index <= pCmd->numOfTables && pCmd->pMeterInfo != NULL);
return pCmd->pMeterInfo[index];
}
@ -1517,7 +1605,7 @@ SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta*
pMeterMetaInfo->numOfTags = numOfTags;
if (tags != NULL) {
memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(int16_t) * numOfTags);
memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(pMeterMetaInfo->tagColumnIndex[0]) * numOfTags);
}
pCmd->numOfTables += 1;
@ -1571,130 +1659,13 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
pRes->numOfRows = 0;
}
SString SStringCreate(const char* str) {
size_t len = strlen(str);
SString dest = {.n = len, .alloc = len + 1};
dest.z = calloc(1, dest.alloc);
strcpy(dest.z, str);
return dest;
}
void SStringCopy(SString* pDest, const SString* pSrc) {
if (pSrc->n > 0) {
pDest->n = pSrc->n;
pDest->alloc = pDest->n + 1; // one additional space for null terminate
pDest->z = calloc(1, pDest->alloc);
memcpy(pDest->z, pSrc->z, pDest->n);
} else {
memset(pDest, 0, sizeof(SString));
}
}
void SStringFree(SString* pStr) {
if (pStr->alloc > 0) {
tfree(pStr->z);
pStr->alloc = 0;
}
}
void SStringShrink(SString* pStr) {
if (pStr->alloc > (pStr->n + 1) && pStr->alloc > (pStr->n * 2)) {
pStr->z = realloc(pStr->z, pStr->n + 1);
assert(pStr->z != NULL);
pStr->alloc = pStr->n + 1;
}
}
int32_t SStringAlloc(SString* pStr, int32_t size) {
if (pStr->alloc >= size) {
return TSDB_CODE_SUCCESS;
}
size = ALIGN8(size);
char* tmp = NULL;
if (pStr->z != NULL) {
tmp = realloc(pStr->z, size);
memset(pStr->z + pStr->n, 0, size - pStr->n);
} else {
tmp = calloc(1, size);
}
if (tmp == NULL) {
#ifdef WINDOWS
LPVOID lpMsgBuf;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL,
GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
(LPTSTR)&lpMsgBuf, 0, NULL);
tscTrace("failed to allocate memory, reason:%s", lpMsgBuf);
LocalFree(lpMsgBuf);
#else
char errmsg[256] = {0};
strerror_r(errno, errmsg, tListLen(errmsg));
tscTrace("failed to allocate memory, reason:%s", errmsg);
#endif
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
pStr->z = tmp;
pStr->alloc = size;
return TSDB_CODE_SUCCESS;
}
#define MIN_ALLOC_SIZE 8
int32_t SStringEnsureRemain(SString* pStr, int32_t size) {
if (pStr->alloc - pStr->n > size) {
return TSDB_CODE_SUCCESS;
}
// remain space is insufficient, allocate more spaces
int32_t inc = (size >= MIN_ALLOC_SIZE) ? size : MIN_ALLOC_SIZE;
if (inc < (pStr->alloc >> 1)) {
inc = (pStr->alloc >> 1);
}
// get the new size
int32_t newsize = pStr->alloc + inc;
char* tmp = realloc(pStr->z, newsize);
if (tmp == NULL) {
#ifdef WINDOWS
LPVOID lpMsgBuf;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL,
GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
(LPTSTR)&lpMsgBuf, 0, NULL);
tscTrace("failed to allocate memory, reason:%s", lpMsgBuf);
LocalFree(lpMsgBuf);
#else
char errmsg[256] = {0};
strerror_r(errno, errmsg, tListLen(errmsg));
tscTrace("failed to allocate memory, reason:%s", errmsg);
#endif
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
memset(tmp + pStr->n, 0, inc);
pStr->alloc = newsize;
pStr->z = tmp;
return TSDB_CODE_SUCCESS;
}
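Reviewer note: the entire SString helper family deleted above (create/copy/free/shrink/alloc/ensure-remain) is retired in favor of plain heap strings — tscTagCondCopy now uses strdup and tscTagCondRelease a plain free, as the earlier hunks in this file show. The whole replacement idiom fits in a few lines:

#include <stdlib.h>
#include <string.h>

/* copy a tag condition string, tolerating NULL like the patched tscTagCondCopy */
static char *copyCond(const char *src) {
  return (src != NULL) ? strdup(src) : NULL;
}

/* release: free the string and forget the pointer */
static void releaseCond(char **cond) {
  free(*cond);
  *cond = NULL;
}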
SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param,
SSqlObj* pPrevSql) {
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql) {
SSqlCmd* pCmd = &pSql->cmd;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex);
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) {
tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex);
tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex);
return NULL;
}
@ -1703,7 +1674,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex
pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex);
tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex);
free(pNew);
return NULL;
@ -1728,15 +1699,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex
tscTagCondCopy(&pNew->cmd.tagCond, &pCmd->tagCond);
if (tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) {
tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex);
tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex);
tscFreeSqlObj(pNew);
return NULL;
}
tscColumnBaseInfoCopy(&pNew->cmd.colList, &pCmd->colList, (int16_t)tableIndex);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex);
// set the correct query type
if (pPrevSql != NULL) {
pNew->cmd.type = pPrevSql->cmd.type;
@ -1765,13 +1734,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex
}
pNew->fp = fp;
pNew->param = param;
pNew->cmd.vnodeIdx = vnodeIndex;
SMeterMetaInfo* pMetermetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex);
char key[TSDB_MAX_TAGS_LEN + 1] = {0};
tscGetMetricMetaCacheKey(pCmd, key, pMetermetaInfo->pMeterMeta->uid);
tscGetMetricMetaCacheKey(pCmd, key, uid);
#ifdef _DEBUG_VIEW
printf("the metricmeta key is:%s\n", key);
#endif
char* name = pMeterMetaInfo->name;
SMeterMetaInfo* pFinalInfo = NULL;
@ -1792,12 +1762,12 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex
}
assert(pFinalInfo->pMeterMeta != NULL);
if (UTIL_METER_IS_METRIC(pMetermetaInfo)) {
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
assert(pFinalInfo->pMetricMeta != NULL);
}
tscTrace("%p new subquery %p, vnodeIdx:%d, tableIndex:%d, type:%d", pSql, pNew, vnodeIndex, tableIndex,
pNew->cmd.type);
tscTrace("%p new subquery %p, tableIndex:%d, vnodeIdx:%d, type:%d", pSql, pNew, tableIndex,
pMeterMetaInfo->vnodeIndex, pNew->cmd.type);
return pNew;
}
@ -1822,9 +1792,7 @@ void tscDoQuery(SSqlObj* pSql) {
}
}
int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid) {
STagCond* pTagCond = &pCmd->tagCond;
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid) {
if (pTagCond->joinInfo.left.uid == uid) {
return pTagCond->joinInfo.left.tagCol;
} else {
@ -1840,13 +1808,15 @@ bool tscIsUpdateQuery(STscObj* pObj) {
SSqlCmd* pCmd = &pObj->pSql->cmd;
return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) ||
TSDB_SQL_USE_DB == pCmd->command) ? 1 : 0;
TSDB_SQL_USE_DB == pCmd->command)
? 1
: 0;
}
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql) {
const char *msgFormat1 = "invalid SQL: %s";
const char *msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)";
const char *msgFormat3 = "invalid SQL: syntax error near \"%s\"";
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s";
const char* msgFormat2 = "invalid SQL: syntax error near \"%s\" (%s)";
const char* msgFormat3 = "invalid SQL: syntax error near \"%s\"";
const int32_t BACKWARD_CHAR_STEP = 0;
@ -1868,3 +1838,11 @@ int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *s
return TSDB_CODE_INVALID_SQL;
}
bool tscHasReachLimitation(SSqlObj* pSql) {
assert(pSql != NULL && pSql->cmd.globalLimit != 0);
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
return (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit);
}
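Reviewer note: the new tscHasReachLimitation above replaces the repeated inline `pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit` checks that taos_fetch_row and taos_fetch_block used to carry. Its logic, restated with abbreviated stand-in structs:

#include <stdbool.h>
#include <stdint.h>

typedef struct { int64_t globalLimit; } SCmdSketch;
typedef struct { int64_t numOfTotal; } SResSketch;
typedef struct { SCmdSketch cmd; SResSketch res; } SSqlSketch;

/* true once the user-specified LIMIT has been fully delivered */
static bool hasReachLimitation(const SSqlSketch *pSql) {
  return pSql->cmd.globalLimit > 0 && pSql->res.numOfTotal >= pSql->cmd.globalLimit;
}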

View File

@ -27,11 +27,11 @@ import "C"
import (
"database/sql/driver"
"errors"
"strconv"
"unsafe"
"fmt"
"io"
"strconv"
"time"
"unsafe"
)
/******************************************************************************
@ -44,7 +44,7 @@ func (mc *taosConn) readColumns(count int) ([]taosSqlField, error) {
var result unsafe.Pointer
result = C.taos_use_result(mc.taos)
if result == nil {
return nil , errors.New("invalid result")
return nil, errors.New("invalid result")
}
pFields := (*C.struct_taosField)(C.taos_fetch_fields(result))
@ -52,7 +52,7 @@ func (mc *taosConn) readColumns(count int) ([]taosSqlField, error) {
// TODO: Optimized rewriting !!!!
fields := (*[1 << 30]C.struct_taosField)(unsafe.Pointer(pFields))
for i := 0; i<count; i++ {
for i := 0; i < count; i++ {
//columns[i].tableName = ms.taos.
//fmt.Println(reflect.TypeOf(fields[i].name))
var charray []byte
@ -60,7 +60,7 @@ func (mc *taosConn) readColumns(count int) ([]taosSqlField, error) {
//fmt.Println("fields[i].name[j]: ", fields[i].name[j])
if fields[i].name[j] != 0 {
charray = append(charray, byte(fields[i].name[j]))
}else {
} else {
break
}
}
@ -91,6 +91,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error {
row := C.taos_fetch_row(result)
if row == nil {
rows.rs.done = true
C.taos_free_result(result)
rows.mc = nil
return io.EOF
}
@ -98,7 +99,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error {
// because sizeof(void*) == sizeof(int*) == 8
// notes: sizeof(int) == 8 in go, but sizeof(int) == 4 in C.
for i := range dest {
currentRow := (unsafe.Pointer)(uintptr(*((*int)(unsafe.Pointer(uintptr(unsafe.Pointer(row)) + uintptr(i) * unsafe.Sizeof(int(0)))))))
currentRow := (unsafe.Pointer)(uintptr(*((*int)(unsafe.Pointer(uintptr(unsafe.Pointer(row)) + uintptr(i)*unsafe.Sizeof(int(0)))))))
if currentRow == nil {
dest[i] = nil
@ -107,7 +108,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error {
switch rows.rs.columns[i].fieldType {
case C.TSDB_DATA_TYPE_BOOL:
if (*((*byte)(currentRow))) != 0{
if (*((*byte)(currentRow))) != 0 {
dest[i] = true
} else {
dest[i] = false
@ -142,7 +143,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error {
charLen := rows.rs.columns[i].length
var index uint32
binaryVal := make([]byte, charLen)
for index=0; index < charLen; index++ {
for index = 0; index < charLen; index++ {
binaryVal[index] = *((*byte)(unsafe.Pointer(uintptr(currentRow) + uintptr(index))))
}
dest[i] = string(binaryVal[:])
@ -152,7 +153,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error {
if mc.cfg.parseTime == true {
timestamp := (int64)(*((*int64)(currentRow)))
dest[i] = timestampConvertToString(timestamp, int(C.taos_result_precision(result)))
}else {
} else {
dest[i] = (int64)(*((*int64)(currentRow)))
}
break

src/connector/grafana/tdengine/.gitignore
View File

@ -0,0 +1,37 @@
node_modules
npm-debug.log
coverage/
.aws-config.json
awsconfig
/emails/dist
/public_gen
/tmp
vendor/phantomjs/phantomjs
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
docs/GITCOMMIT
docs/changed-files
docs/changed-files
# locally required config files
public/css/*.min.css
# Editor junk
*.sublime-workspace
*.swp
.idea/
*.iml
/data/*
/bin/*
conf/custom.ini
fig.yml
profile.cov
grafana
.notouch
# Test artifacts
/dist/test/

View File

@ -0,0 +1,14 @@
{
"esnext": true,
"disallowImplicitTypeConversion": ["string"],
"disallowKeywords": ["with"],
"disallowMultipleLineBreaks": true,
"disallowMixedSpacesAndTabs": true,
"disallowTrailingWhitespace": true,
"requireSpacesInFunctionExpression": {
"beforeOpeningCurlyBrace": true
},
"disallowSpacesInsideArrayBrackets": true,
"disallowSpacesInsideParentheses": true,
"validateIndentation": 2
}

View File

@ -0,0 +1,85 @@
module.exports = function(grunt) {
require('load-grunt-tasks')(grunt);
grunt.loadNpmTasks('grunt-execute');
grunt.loadNpmTasks('grunt-contrib-clean');
grunt.initConfig({
clean: ["dist"],
copy: {
src_to_dist: {
cwd: 'src',
expand: true,
src: ['**/*', '!**/*.js', '!**/*.scss'],
dest: 'dist'
},
dashboard_to_dist: {
expand: true,
src: ['dashboard/*'],
dest: 'dist'
},
pluginDef: {
expand: true,
src: ['README.md'],
dest: 'dist'
}
},
watch: {
rebuild_all: {
files: ['src/**/*'],
tasks: ['default'],
options: {spawn: false}
}
},
babel: {
options: {
sourceMap: true,
presets: ['env'],
plugins: ['transform-object-rest-spread']
},
dist: {
files: [{
cwd: 'src',
expand: true,
src: ['**/*.js'],
dest: 'dist',
ext:'.js'
}]
},
distTestNoSystemJs: {
files: [{
cwd: 'src',
expand: true,
src: ['**/*.js'],
dest: 'dist/test',
ext:'.js'
}]
},
distTestsSpecsNoSystemJs: {
files: [{
expand: true,
cwd: 'spec',
src: ['**/*.js'],
dest: 'dist/test/spec',
ext:'.js'
}]
}
},
mochaTest: {
test: {
options: {
reporter: 'spec'
},
src: ['dist/test/spec/test-main.js', 'dist/test/spec/*_spec.js']
}
}
});
grunt.registerTask('default', ['clean', 'copy:src_to_dist', 'copy:dashboard_to_dist', 'copy:pluginDef', 'babel', 'mochaTest']);
};

View File

@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

View File

@ -61,7 +61,7 @@ Example response
Example request
``` javascript
<null> get request
GET request to /heartbeat
```
Example response
@ -70,3 +70,27 @@ Example response
"message": "Grafana server receive a quest from you!"
}
```
### Dev setup
This plugin requires Node.js 6.10.0.
``` bash
npm install -g yarn
yarn install
npm run build
```
### Import Dashboard
After logging in at `http://localhost:3000`, you can import the TDengine demo dashboard to monitor system metrics.
Import `dashboard/tdengine-grafana.json`:
![import_dashboard](dashboard/import_dashboard.png)
After the import finishes:
![import_dashboard](dashboard/tdengine_dashboard.png)

Binary image file not shown (98 KiB)

View File

@ -0,0 +1,588 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 3,
"links": [],
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "TDengine",
"description": "total select request per minute last hour",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 6,
"w": 12,
"x": 0,
"y": 0
},
"id": 8,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "次数/min",
"postfixFontSize": "20%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": true,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "req_select",
"refId": "A",
"sql": "select sum(req_select) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "120,240",
"timeFrom": null,
"timeShift": null,
"title": "req select",
"type": "singlestat",
"valueFontSize": "150%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "TDengine",
"description": "total insert request per minute for last hour",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 6,
"w": 12,
"x": 12,
"y": 0
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "次数/min",
"postfixFontSize": "20%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "req_insert",
"refId": "A",
"sql": "select sum(req_insert) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "110,240",
"timeFrom": null,
"timeShift": null,
"title": "req insert",
"type": "singlestat",
"valueFontSize": "150%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
},
{
"datasource": "TDengine",
"description": "taosd max memery last 10 minutes",
"gridPos": {
"h": 6,
"w": 8,
"x": 0,
"y": 6
},
"id": 12,
"options": {
"fieldOptions": {
"calcs": [
"mean"
],
"defaults": {
"mappings": [],
"max": 4096,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
},
{
"color": "#EAB839",
"value": 2048
}
],
"unit": "decmbytes"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "mem_taosd",
"refId": "A",
"sql": "select max(mem_taosd) from log.dn where ts >= now -10m and ts < now",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "taosd memery",
"type": "gauge"
},
{
"datasource": "TDengine",
"description": "max System Memory last 1 hour",
"gridPos": {
"h": 6,
"w": 8,
"x": 8,
"y": 6
},
"id": 10,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"mappings": [],
"max": 4,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "semi-dark-orange",
"value": 60
},
{
"color": "dark-red",
"value": 80
}
],
"title": "",
"unit": "decmbytes"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "mem_system",
"refId": "A",
"sql": "select max(mem_system) from log.dn where ts >= now -10h and ts < now",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "system memory",
"type": "gauge"
},
{
"datasource": "TDengine",
"description": "avg band speed last one minute",
"gridPos": {
"h": 6,
"w": 8,
"x": 16,
"y": 6
},
"id": 14,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"mappings": [],
"max": 8192,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "#EAB839",
"value": 4916
},
{
"color": "red",
"value": 6554
}
],
"unit": "Kbits"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "band_speed",
"refId": "A",
"sql": "select avg(band_speed) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "band speed",
"type": "gauge"
},
{
"aliasColors": {},
"bars": false,
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
"datasource": "TDengine",
"description": "monitor system cpu",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 12
},
"hideTimeOverride": true,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pluginVersion": "6.4.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "cpu_system11",
"hide": false,
"refId": "A",
"sql": "select avg(cpu_system) from log.dn where ts >= now-1h and ts < now interval(1s)",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "cpu_taosd",
"hide": false,
"refId": "B",
"sql": "select avg(cpu_taosd) from log.dn where ts >= now-1h and ts < now interval(1s)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": "1h",
"timeRegions": [],
"timeShift": "30s",
"title": "cpu_system",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "percent",
"label": "使用占比",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "TDengine",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 12
},
"id": 18,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "",
"refId": "A",
"sql": "select avg(disk_used) disk_used from log.dn where ts >= $from and ts < $to interval(1s) group by ipaddr",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "avg_disk_used",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "decgbytes",
"label": "",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 20,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "TDengine",
"uid": "FE-vpe0Wk",
"version": 1
}

Binary image file not shown (173 KiB)

View File

@ -1,170 +0,0 @@
'use strict';
System.register(['lodash'], function (_export, _context) {
"use strict";
var _, _createClass, GenericDatasource;
function strTrim(str) {
return str.replace(/^\s+|\s+$/gm,'');
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
return {
setters: [function (_lodash) {
_ = _lodash.default;
}],
execute: function () {
_createClass = function () {
function defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);
if (staticProps) defineProperties(Constructor, staticProps);
return Constructor;
};
}();
_export('GenericDatasource', GenericDatasource = function () {
function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) {
_classCallCheck(this, GenericDatasource);
this.type = instanceSettings.type;
this.url = instanceSettings.url;
this.name = instanceSettings.name;
this.q = $q;
this.backendSrv = backendSrv;
this.templateSrv = templateSrv;
//this.withCredentials = instanceSettings.withCredentials;
this.headers = { 'Content-Type': 'application/json' };
var taosuser = instanceSettings.jsonData.user;
var taospwd = instanceSettings.jsonData.password;
if (taosuser == null || taosuser == undefined || taosuser == "") {
taosuser = "root";
}
if (taospwd == null || taospwd == undefined || taospwd == "") {
taospwd = "taosdata";
}
this.headers.Authorization = "Basic " + this.encode(taosuser + ":" + taospwd);
}
_createClass(GenericDatasource, [{
key: 'encode',
value: function encode(input) {
var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var output = "";
var chr1, chr2, chr3, enc1, enc2, enc3, enc4;
var i = 0;
while (i < input.length) {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4);
}
return output;
}
}, {
key: 'generateSql',
value: function generateSql(sql, queryStart, queryEnd, intervalMs) {
if (queryStart == undefined || queryStart == null) {
queryStart = "now-1h";
}
if (queryEnd == undefined || queryEnd == null) {
queryEnd = "now";
}
if (intervalMs == undefined || intervalMs == null) {
intervalMs = "20000";
}
intervalMs += "a";
sql = sql.replace(/^\s+|\s+$/gm, '');
sql = sql.replace("$from", "'" + queryStart + "'");
sql = sql.replace("$begin", "'" + queryStart + "'");
sql = sql.replace("$to", "'" + queryEnd + "'");
sql = sql.replace("$end", "'" + queryEnd + "'");
sql = sql.replace("$interval", intervalMs);
return sql;
}
}, {
key: 'query',
value: function query(options) {
var querys = new Array;
for (var i = 0; i < options.targets.length; ++i) {
var query = new Object;
query.refId = options.targets[i].refId;
query.alias = options.targets[i].alias;
if (query.alias == null || query.alias == undefined) {
query.alias = "";
}
//query.sql = this.generateSql(options.targets[i].sql, options.range.raw.from, options.range.raw.to, options.intervalMs);
query.sql = this.generateSql(options.targets[i].sql, options.range.from.toISOString(), options.range.to.toISOString(), options.intervalMs);
console.log(query.sql);
querys.push(query);
}
if (querys.length <= 0) {
return this.q.when({ data: [] });
}
return this.doRequest({
url: this.url + '/grafana/query',
data: querys,
method: 'POST'
});
}
}, {
key: 'testDatasource',
value: function testDatasource() {
return this.doRequest({
url: this.url + '/grafana/heartbeat',
method: 'GET'
}).then(function (response) {
if (response.status === 200) {
return { status: "success", message: "TDengine Data source is working", title: "Success" };
}
});
}
}, {
key: 'doRequest',
value: function doRequest(options) {
options.headers = this.headers;
//console.log(options);
return this.backendSrv.datasourceRequest(options);
}
}]);
return GenericDatasource;
}());
_export('GenericDatasource', GenericDatasource);
}
};
});
//# sourceMappingURL=datasource.js.map

View File

@ -0,0 +1,96 @@
TDengine Datasource - built by Taosdata Inc. (www.taosdata.com)
The TDengine backend server implements two URLs:
* `/heartbeat` returns 200 OK; used for "Test connection" on the datasource config page.
* `/query` returns data for the given SQL statements.
## Installation
To install this plugin:
Copy the data source to /var/lib/grafana/plugins/, then restart grafana-server. The new data source will be available in the data-source type dropdown in the Add Data Source view.
```
cp -r <tdengine-extract-dir>/connector/grafana/tdengine /var/lib/grafana/plugins/
sudo service grafana-server restart
```
### Query API
Example request
``` javascript
[{
"refId": "A",
"alias": "taosd-memory",
"sql": "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)"
},
{
"refId": "B",
"alias": "system-memory",
"sql": "select avg(mem_system) from sys.dn where ts > now-5m and ts < now interval(500a)"
}]
```
Example response
``` javascript
[{
"datapoints": [
[206.488281, 1538137825000],
[206.488281, 1538137855000],
[206.488281, 1538137885500],
[210.609375, 1538137915500],
[210.867188, 1538137945500]
],
"refId": "A",
"target": "taosd-memory"
},
{
"datapoints": [
[2910.218750, 1538137825000],
[2912.265625, 1538137855000],
[2912.437500, 1538137885500],
[2916.644531, 1538137915500],
[2917.066406, 1538137945500]
],
"refId": "B",
"target": "system-memory"
}]
```
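For reference, here is a minimal Node.js sketch (not part of the plugin) that posts such a request straight to the backend, the same way the datasource's `doRequest()` does. The host, port, and credentials below are assumptions; adjust them for your deployment.
``` javascript
// Post a query to the plugin backend with Basic auth (assumed host/port/credentials).
var http = require('http');

var body = JSON.stringify([{
  refId: 'A',
  alias: 'taosd-memory',
  sql: "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)"
}]);

var req = http.request({
  host: 'localhost',
  port: 6020, // assumed port of the TDengine HTTP service
  path: '/grafana/query',
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    // the same "user:password" Basic auth header the datasource builds
    'Authorization': 'Basic ' + Buffer.from('root:taosdata').toString('base64')
  }
}, function (res) {
  var data = '';
  res.on('data', function (chunk) { data += chunk; });
  res.on('end', function () { console.log(data); });
});
req.write(body);
req.end();
```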
### Heartbeat API
Example request
``` javascript
GET request to /heartbeat
```
Example response
``` javascript
{
"message": "Grafana server receive a quest from you!"
}
```
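The same check can be scripted; a minimal sketch, again assuming the host and port of the TDengine HTTP service:
``` javascript
// A 200 status here is what the datasource's testDatasource() treats as "working".
var http = require('http');
http.get({ host: 'localhost', port: 6020, path: '/grafana/heartbeat' }, function (res) {
  console.log('heartbeat status:', res.statusCode);
});
```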
### Dev setup
This plugin requires Node.js 6.10.0.
``` bash
npm install -g yarn
yarn install
npm run build
```
### Import Dashboard
After logging in at `http://localhost:3000`, you can import the TDengine demo dashboard to monitor system metrics.
Import `dashboard/tdengine-grafana.json`:
![import_dashboard](dashboard/import_dashboard.png)
After the import finishes:
![import_dashboard](dashboard/tdengine_dashboard.png)

Binary image file not shown (98 KiB)

View File

@ -0,0 +1,588 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 3,
"links": [],
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "TDengine",
"description": "total select request per minute last hour",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 6,
"w": 12,
"x": 0,
"y": 0
},
"id": 8,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "次数/min",
"postfixFontSize": "20%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": true,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "req_select",
"refId": "A",
"sql": "select sum(req_select) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "120,240",
"timeFrom": null,
"timeShift": null,
"title": "req select",
"type": "singlestat",
"valueFontSize": "150%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "TDengine",
"description": "total insert request per minute for last hour",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 6,
"w": 12,
"x": 12,
"y": 0
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "次数/min",
"postfixFontSize": "20%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "req_insert",
"refId": "A",
"sql": "select sum(req_insert) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "110,240",
"timeFrom": null,
"timeShift": null,
"title": "req insert",
"type": "singlestat",
"valueFontSize": "150%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
},
{
"datasource": "TDengine",
"description": "taosd max memery last 10 minutes",
"gridPos": {
"h": 6,
"w": 8,
"x": 0,
"y": 6
},
"id": 12,
"options": {
"fieldOptions": {
"calcs": [
"mean"
],
"defaults": {
"mappings": [],
"max": 4096,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
},
{
"color": "#EAB839",
"value": 2048
}
],
"unit": "decmbytes"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "mem_taosd",
"refId": "A",
"sql": "select max(mem_taosd) from log.dn where ts >= now -10m and ts < now",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "taosd memery",
"type": "gauge"
},
{
"datasource": "TDengine",
"description": "max System Memory last 1 hour",
"gridPos": {
"h": 6,
"w": 8,
"x": 8,
"y": 6
},
"id": 10,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"mappings": [],
"max": 4,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "semi-dark-orange",
"value": 60
},
{
"color": "dark-red",
"value": 80
}
],
"title": "",
"unit": "decmbytes"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "mem_system",
"refId": "A",
"sql": "select max(mem_system) from log.dn where ts >= now -10h and ts < now",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "system memory",
"type": "gauge"
},
{
"datasource": "TDengine",
"description": "avg band speed last one minute",
"gridPos": {
"h": 6,
"w": 8,
"x": 16,
"y": 6
},
"id": 14,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"mappings": [],
"max": 8192,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "#EAB839",
"value": 4916
},
{
"color": "red",
"value": 6554
}
],
"unit": "Kbits"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "band_speed",
"refId": "A",
"sql": "select avg(band_speed) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "band speed",
"type": "gauge"
},
{
"aliasColors": {},
"bars": false,
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
"datasource": "TDengine",
"description": "monitor system cpu",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 12
},
"hideTimeOverride": true,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pluginVersion": "6.4.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "cpu_system11",
"hide": false,
"refId": "A",
"sql": "select avg(cpu_system) from log.dn where ts >= now-1h and ts < now interval(1s)",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "cpu_taosd",
"hide": false,
"refId": "B",
"sql": "select avg(cpu_taosd) from log.dn where ts >= now-1h and ts < now interval(1s)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": "1h",
"timeRegions": [],
"timeShift": "30s",
"title": "cpu_system",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "percent",
"label": "使用占比",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "TDengine",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 12
},
"id": 18,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "",
"refId": "A",
"sql": "select avg(disk_used) disk_used from log.dn where ts >= $from and ts < $to interval(1s) group by ipaddr",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "avg_disk_used",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "decgbytes",
"label": "",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 20,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "TDengine",
"uid": "FE-vpe0Wk",
"version": 1
}

Binary image file not shown (173 KiB)

View File

@ -0,0 +1,156 @@
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.GenericDatasource = undefined;
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _lodash = require('lodash');
var _lodash2 = _interopRequireDefault(_lodash);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
var GenericDatasource = exports.GenericDatasource = function () {
function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) {
_classCallCheck(this, GenericDatasource);
this.type = instanceSettings.type;
this.url = instanceSettings.url;
this.name = instanceSettings.name;
this.q = $q;
this.backendSrv = backendSrv;
this.templateSrv = templateSrv;
this.headers = { 'Content-Type': 'application/json' };
this.headers.Authorization = this.getAuthorization(instanceSettings.jsonData);
}
_createClass(GenericDatasource, [{
key: 'query',
value: function query(options) {
var targets = this.buildQueryParameters(options);
if (targets.length <= 0) {
return this.q.when({ data: [] });
}
return this.doRequest({
url: this.url + '/grafana/query',
data: targets,
method: 'POST'
});
}
}, {
key: 'testDatasource',
value: function testDatasource() {
return this.doRequest({
url: this.url + '/grafana/heartbeat',
method: 'GET'
}).then(function (response) {
if (response.status === 200) {
return { status: "success", message: "TDengine Data source is working", title: "Success" };
}
});
}
}, {
key: 'doRequest',
value: function doRequest(options) {
options.headers = this.headers;
return this.backendSrv.datasourceRequest(options);
}
}, {
key: 'buildQueryParameters',
value: function buildQueryParameters(options) {
var _this = this;
var targets = _lodash2.default.map(options.targets, function (target) {
return {
refId: target.refId,
alias: _this.generateAlias(options, target),
sql: _this.generateSql(options, target)
};
});
return targets;
}
}, {
key: 'encode',
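// Minimal base64 encoder, used below by getAuthorization to build the HTTP Basic auth header.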
value: function encode(input) {
var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var output = "";
var chr1, chr2, chr3, enc1, enc2, enc3, enc4;
var i = 0;
while (i < input.length) {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = (chr1 & 3) << 4 | chr2 >> 4;
enc3 = (chr2 & 15) << 2 | chr3 >> 6;
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4);
}
return output;
}
}, {
key: 'getAuthorization',
value: function getAuthorization(jsonData) {
jsonData = jsonData || {};
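// Fall back to TDengine's stock credentials (root/taosdata) when the datasource config omits them.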
var defaultUser = jsonData.user || "root";
var defaultPassword = jsonData.password || "taosdata";
return "Basic " + this.encode(defaultUser + ":" + defaultPassword);
}
}, {
key: 'generateAlias',
value: function generateAlias(options, target) {
var alias = target.alias || "";
alias = this.templateSrv.replace(alias, options.scopedVars, 'csv');
return alias;
}
}, {
key: 'generateSql',
value: function generateSql(options, target) {
var sql = target.sql;
if (sql == null || sql == "") {
return sql;
}
var queryStart = "now-1h";
if (options != null && options.range != null && options.range.from != null) {
queryStart = options.range.from.toISOString();
}
var queryEnd = "now";
if (options != null && options.range != null && options.range.to != null) {
queryEnd = options.range.to.toISOString();
}
var intervalMs = options.intervalMs || "20000";
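// TDengine interval syntax needs an explicit unit; 'a' denotes milliseconds, so 20000 becomes 20000a.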
intervalMs += "a";
sql = sql.replace(/^\s+|\s+$/gm, '');
sql = sql.replace("$from", "'" + queryStart + "'");
sql = sql.replace("$begin", "'" + queryStart + "'");
sql = sql.replace("$to", "'" + queryEnd + "'");
sql = sql.replace("$end", "'" + queryEnd + "'");
sql = sql.replace("$interval", intervalMs);
sql = this.templateSrv.replace(sql, options.scopedVars, 'csv');
return sql;
}
}]);
return GenericDatasource;
}();
//# sourceMappingURL=datasource.js.map
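For reference, here is a minimal standalone sketch of the placeholder substitution that generateSql applies. The resolveSql helper and the sample window are illustrative only, not part of the committed file:

// Illustrative helper mirroring generateSql's placeholder handling
// ($begin/$end are accepted as aliases for $from/$to in the real code).
function resolveSql(sql, from, to, intervalMs) {
  var interval = String(intervalMs || 20000) + "a"; // 'a' marks milliseconds in TDengine intervals
  return sql
    .replace("$from", "'" + from + "'")
    .replace("$to", "'" + to + "'")
    .replace("$interval", interval);
}

// The query editor's placeholder SQL resolved against a concrete one-hour window:
console.log(resolveSql(
  "select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)",
  "2020-01-31T03:00:00.000Z",
  "2020-01-31T04:00:00.000Z",
  20000));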

File diff suppressed because one or more lines are too long

View File

Image changed (before: 3.1 KiB, after: 3.1 KiB).

View File

@ -0,0 +1,37 @@
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.AnnotationsQueryCtrl = exports.QueryOptionsCtrl = exports.ConfigCtrl = exports.QueryCtrl = exports.Datasource = undefined;
var _datasource = require('./datasource');
var _query_ctrl = require('./query_ctrl');
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
var GenericConfigCtrl = function GenericConfigCtrl() {
_classCallCheck(this, GenericConfigCtrl);
};
GenericConfigCtrl.templateUrl = 'partials/config.html';
var GenericQueryOptionsCtrl = function GenericQueryOptionsCtrl() {
_classCallCheck(this, GenericQueryOptionsCtrl);
};
GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html';
var GenericAnnotationsQueryCtrl = function GenericAnnotationsQueryCtrl() {
_classCallCheck(this, GenericAnnotationsQueryCtrl);
};
GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html';
exports.Datasource = _datasource.GenericDatasource;
exports.QueryCtrl = _query_ctrl.GenericDatasourceQueryCtrl;
exports.ConfigCtrl = GenericConfigCtrl;
exports.QueryOptionsCtrl = GenericQueryOptionsCtrl;
exports.AnnotationsQueryCtrl = GenericAnnotationsQueryCtrl;
//# sourceMappingURL=module.js.map
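module.js above wires the build to the entry points a Grafana datasource plugin exports: Datasource, QueryCtrl, ConfigCtrl, QueryOptionsCtrl and AnnotationsQueryCtrl, with each controller pointing at its partial template.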

View File

@ -0,0 +1 @@
{"version":3,"sources":["../src/module.js"],"names":["GenericConfigCtrl","templateUrl","GenericQueryOptionsCtrl","GenericAnnotationsQueryCtrl","Datasource","GenericDatasource","QueryCtrl","GenericDatasourceQueryCtrl","ConfigCtrl","QueryOptionsCtrl","AnnotationsQueryCtrl"],"mappings":";;;;;;;AAAA;;AACA;;;;IAEMA,iB;;;;AACNA,kBAAkBC,WAAlB,GAAgC,sBAAhC;;IAEMC,uB;;;;AACNA,wBAAwBD,WAAxB,GAAsC,6BAAtC;;IAEME,2B;;;;AACNA,4BAA4BF,WAA5B,GAA0C,kCAA1C;;QAGuBG,U,GAArBC,6B;QAC8BC,S,GAA9BC,sC;QACqBC,U,GAArBR,iB;QAC2BS,gB,GAA3BP,uB;QAC+BQ,oB,GAA/BP,2B","file":"module.js","sourcesContent":["import {GenericDatasource} from './datasource';\nimport {GenericDatasourceQueryCtrl} from './query_ctrl';\n\nclass GenericConfigCtrl {}\nGenericConfigCtrl.templateUrl = 'partials/config.html';\n\nclass GenericQueryOptionsCtrl {}\nGenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html';\n\nclass GenericAnnotationsQueryCtrl {}\nGenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html'\n\nexport {\n GenericDatasource as Datasource,\n GenericDatasourceQueryCtrl as QueryCtrl,\n GenericConfigCtrl as ConfigCtrl,\n GenericQueryOptionsCtrl as QueryOptionsCtrl,\n GenericAnnotationsQueryCtrl as AnnotationsQueryCtrl\n};\n"]}

View File

@ -3,7 +3,7 @@
<div class="gf-form-inline">
<div class="gf-form gf-form--grow">
<label class="gf-form-label query-keyword width-7">INPUT SQL</label>
<input type="text" class="gf-form-input" ng-model="ctrl.target.sql" spellcheck='false' placeholder="select count(*) from sys.cpu where ts >= $from and ts < $to interval($interval)" ng-blur="ctrl.panelCtrl.refresh()" data-mode="sql"></input>
<input type="text" class="gf-form-input" ng-model="ctrl.target.sql" spellcheck='false' placeholder="select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)" ng-blur="ctrl.panelCtrl.refresh()" data-mode="sql"></input>
</div>
</div>

View File

@ -9,10 +9,9 @@
"metrics": true,
"annotations": false,
"alerting": true,
"info": {
"description": "TDengine datasource",
"description": "grafana datasource plugin for tdengine",
"author": {
"name": "Taosdata Inc.",
"url": "https://www.taosdata.com"
@ -21,8 +20,12 @@
"small": "img/taosdata_logo.png",
"large": "img/taosdata_logo.png"
},
"links": [
{"name": "GitHub", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"},
{"name": "AGPL 3.0", "url": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine/LICENSE"}
],
"version": "1.6.0",
"updated": "2019-07-01"
"updated": "2019-11-12"
},
"dependencies": {

View File

@ -0,0 +1,51 @@
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.GenericDatasourceQueryCtrl = undefined;
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _sdk = require('app/plugins/sdk');
require('./css/query-editor.css!');
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var GenericDatasourceQueryCtrl = exports.GenericDatasourceQueryCtrl = function (_QueryCtrl) {
_inherits(GenericDatasourceQueryCtrl, _QueryCtrl);
function GenericDatasourceQueryCtrl($scope, $injector) {
_classCallCheck(this, GenericDatasourceQueryCtrl);
var _this = _possibleConstructorReturn(this, (GenericDatasourceQueryCtrl.__proto__ || Object.getPrototypeOf(GenericDatasourceQueryCtrl)).call(this, $scope, $injector));
_this.scope = $scope;
_this.target.target = _this.target.target || 'select metric';
_this.target.type = _this.target.type || 'timeserie';
return _this;
}
_createClass(GenericDatasourceQueryCtrl, [{
key: 'onChangeInternal',
value: function onChangeInternal() {
this.panelCtrl.refresh(); // Asks the panel to refresh data.
}
}, {
key: 'generateSQL',
value: function generateSQL(query) {
this.lastGenerateSQL = this.datasource.generateSql(this.panelCtrl, this.target);
this.showGenerateSQL = !this.showGenerateSQL;
}
}]);
return GenericDatasourceQueryCtrl;
}(_sdk.QueryCtrl);
GenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html';
//# sourceMappingURL=query_ctrl.js.map
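Note that generateSQL hands the panel controller itself to datasource.generateSql as the options argument; generateSql only reads options.range and options.intervalMs, both of which the panel controller exposes, so the SQL it previews matches what the panel actually sends.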

View File

@ -0,0 +1 @@
{"version":3,"sources":["../src/query_ctrl.js"],"names":["GenericDatasourceQueryCtrl","$scope","$injector","scope","target","type","panelCtrl","refresh","query","lastGenerateSQL","datasource","generateSql","showGenerateSQL","QueryCtrl","templateUrl"],"mappings":";;;;;;;;;AAAA;;AACA;;;;;;;;IAEaA,0B,WAAAA,0B;;;AAEX,sCAAYC,MAAZ,EAAoBC,SAApB,EAAgC;AAAA;;AAAA,wJACxBD,MADwB,EAChBC,SADgB;;AAG9B,UAAKC,KAAL,GAAaF,MAAb;AACA,UAAKG,MAAL,CAAYA,MAAZ,GAAqB,MAAKA,MAAL,CAAYA,MAAZ,IAAsB,eAA3C;AACA,UAAKA,MAAL,CAAYC,IAAZ,GAAmB,MAAKD,MAAL,CAAYC,IAAZ,IAAoB,WAAvC;AAL8B;AAM/B;;;;uCAEkB;AACjB,WAAKC,SAAL,CAAeC,OAAf,GADiB,CACS;AAC3B;;;gCAEWC,K,EAAO;AACjB,WAAKC,eAAL,GAAuB,KAAKC,UAAL,CAAgBC,WAAhB,CAA6B,KAAKL,SAAlC,EAA6C,KAAKF,MAAlD,CAAvB;AACA,WAAKQ,eAAL,GAAuB,CAAC,KAAKA,eAA7B;AACD;;;;EAjB6CC,c;;AAqBhDb,2BAA2Bc,WAA3B,GAAyC,4BAAzC","file":"query_ctrl.js","sourcesContent":["import {QueryCtrl} from 'app/plugins/sdk';\nimport './css/query-editor.css!'\n\nexport class GenericDatasourceQueryCtrl extends QueryCtrl {\n\n constructor($scope, $injector) {\n super($scope, $injector);\n\n this.scope = $scope;\n this.target.target = this.target.target || 'select metric';\n this.target.type = this.target.type || 'timeserie';\n }\n\n onChangeInternal() {\n this.panelCtrl.refresh(); // Asks the panel to refresh data.\n }\n\n generateSQL(query) {\n this.lastGenerateSQL = this.datasource.generateSql( this.panelCtrl, this.target);\n this.showGenerateSQL = !this.showGenerateSQL;\n }\n\n}\n\nGenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html';"]}

View File

@ -1,51 +0,0 @@
'use strict';
System.register(['./datasource', './query_ctrl'], function (_export, _context) {
"use strict";
var GenericDatasource, GenericDatasourceQueryCtrl, GenericConfigCtrl, GenericQueryOptionsCtrl, GenericAnnotationsQueryCtrl;
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
return {
setters: [function (_datasource) {
GenericDatasource = _datasource.GenericDatasource;
}, function (_query_ctrl) {
GenericDatasourceQueryCtrl = _query_ctrl.GenericDatasourceQueryCtrl;
}],
execute: function () {
_export('ConfigCtrl', GenericConfigCtrl = function GenericConfigCtrl() {
_classCallCheck(this, GenericConfigCtrl);
});
GenericConfigCtrl.templateUrl = 'partials/config.html';
_export('QueryOptionsCtrl', GenericQueryOptionsCtrl = function GenericQueryOptionsCtrl() {
_classCallCheck(this, GenericQueryOptionsCtrl);
});
GenericQueryOptionsCtrl.templateUrl = 'partials/query.options.html';
_export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl = function GenericAnnotationsQueryCtrl() {
_classCallCheck(this, GenericAnnotationsQueryCtrl);
});
GenericAnnotationsQueryCtrl.templateUrl = 'partials/annotations.editor.html';
_export('Datasource', GenericDatasource);
_export('QueryCtrl', GenericDatasourceQueryCtrl);
_export('ConfigCtrl', GenericConfigCtrl);
_export('QueryOptionsCtrl', GenericQueryOptionsCtrl);
_export('AnnotationsQueryCtrl', GenericAnnotationsQueryCtrl);
}
};
});
//# sourceMappingURL=module.js.map
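(The file above, registered via SystemJS's System.register, is the old build of module.js; this commit replaces it with the Babel CommonJS build shown earlier. The old SystemJS build of query_ctrl.js is removed further below for the same reason.)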

File diff suppressed because it is too large.

View File

@ -0,0 +1,46 @@
{
"name": "TDengine",
"private": true,
"version": "1.0.0",
"description": "grafana datasource plugin for tdengine",
"main": "index.js",
"scripts": {
"build": "./node_modules/grunt-cli/bin/grunt",
"test": "./node_modules/grunt-cli/bin/grunt mochaTest"
},
"repository": {
"type": "git",
"url": "git+https://github.com/taosdata/TDengine.git"
},
"author": "",
"license": "AGPL 3.0",
"bugs": {
"url": "https://github.com/taosdata/TDengine/issues"
},
"engineStrict": true,
"devDependencies": {
"babel": "^6.23.0",
"babel-plugin-transform-object-rest-spread": "^6.26.0",
"babel-preset-env": "^1.7.0",
"chai": "~3.5.0",
"grunt": "^1.0.4",
"grunt-babel": "~6.0.0",
"grunt-cli": "^1.2.0",
"grunt-contrib-clean": "^1.1.0",
"grunt-contrib-copy": "^1.0.0",
"grunt-contrib-uglify": "^2.3.0",
"grunt-contrib-watch": "^1.0.0",
"grunt-mocha-test": "^0.13.2",
"grunt-systemjs-builder": "^1.0.0",
"jsdom": "~9.12.0",
"load-grunt-tasks": "^3.5.2",
"mocha": "^6.2.2",
"prunk": "^1.3.0",
"q": "^1.5.0"
},
"dependencies": {
"lodash": "^4.17.13",
"yarn": "^1.21.1"
},
"homepage": "https://github.com/taosdata/TDengine/tree/develop/src/connector/grafana/tdengine"
}
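Given these scripts, npm install followed by npm run build invokes grunt through the grunt-cli devDependency, and npm test runs the mocha suite via grunt-mocha-test; the exact build tasks live in the Gruntfile, which this diff does not show.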

View File

@ -1,91 +0,0 @@
'use strict';
System.register(['app/plugins/sdk'], function (_export, _context) {
"use strict";
var QueryCtrl, _createClass, GenericDatasourceQueryCtrl;
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
function _possibleConstructorReturn(self, call) {
if (!self) {
throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
}
return call && (typeof call === "object" || typeof call === "function") ? call : self;
}
function _inherits(subClass, superClass) {
if (typeof superClass !== "function" && superClass !== null) {
throw new TypeError("Super expression must either be null or a function, not " + typeof superClass);
}
subClass.prototype = Object.create(superClass && superClass.prototype, {
constructor: {
value: subClass,
enumerable: false,
writable: true,
configurable: true
}
});
if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass;
}
return {
setters: [function (_appPluginsSdk) {
QueryCtrl = _appPluginsSdk.QueryCtrl;
}, function (_cssQueryEditorCss) {}],
execute: function () {
_createClass = function () {
function defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);
if (staticProps) defineProperties(Constructor, staticProps);
return Constructor;
};
}();
_export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl = function (_QueryCtrl) {
_inherits(GenericDatasourceQueryCtrl, _QueryCtrl);
function GenericDatasourceQueryCtrl($scope, $injector) {
_classCallCheck(this, GenericDatasourceQueryCtrl);
var _this = _possibleConstructorReturn(this, (GenericDatasourceQueryCtrl.__proto__ || Object.getPrototypeOf(GenericDatasourceQueryCtrl)).call(this, $scope, $injector));
_this.scope = $scope;
return _this;
}
_createClass(GenericDatasourceQueryCtrl, [{
key: 'generateSQL',
value: function generateSQL(query) {
//this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.raw.from, this.panelCtrl.range.raw.to, this.panelCtrl.intervalMs);
this.lastGenerateSQL = this.datasource.generateSql(this.target.sql, this.panelCtrl.range.from.toISOString(), this.panelCtrl.range.to.toISOString(), this.panelCtrl.intervalMs);
this.showGenerateSQL = !this.showGenerateSQL;
}
}]);
return GenericDatasourceQueryCtrl;
}(QueryCtrl));
_export('GenericDatasourceQueryCtrl', GenericDatasourceQueryCtrl);
GenericDatasourceQueryCtrl.templateUrl = 'partials/query.editor.html';
}
};
});
//# sourceMappingURL=query_ctrl.js.map

View File

@ -0,0 +1,22 @@
import {Datasource} from "../module";
import Q from "q";
describe('GenericDatasource', function() {
var ctx = {};
beforeEach(function() {
ctx.$q = Q;
ctx.backendSrv = {};
ctx.templateSrv = {};
ctx.ds = new Datasource({}, ctx.$q, ctx.backendSrv, ctx.templateSrv);
});
it('should return an empty array when no targets are set', function(done) {
ctx.ds.query({targets: []}).then(function(result) {
expect(result.data).to.have.length(0);
done();
});
});
});
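A natural follow-up test, sketched here under the same harness (the stubs for backendSrv.datasourceRequest and templateSrv.replace are hypothetical, not part of the committed spec), would check that query() POSTs the built targets to the /grafana/query endpoint; it belongs inside the describe block above:

it('should POST built targets to /grafana/query', function(done) {
  ctx.templateSrv.replace = function(sql) { return sql; }; // identity stub
  ctx.backendSrv.datasourceRequest = function(options) {
    expect(options.method).to.equal('POST');
    expect(options.url).to.equal('/grafana/query');
    expect(options.data).to.have.length(1); // one built target
    return ctx.$q.when({data: []});
  };
  var ds = new Datasource({url: ''}, ctx.$q, ctx.backendSrv, ctx.templateSrv);
  ds.query({targets: [{refId: 'A', sql: 'select 1'}], range: null}).then(function() {
    done();
  });
});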

Some files were not shown because too many files have changed in this diff.