Merge branch 'develop' into feature/dockerfile

Tao Liu 2020-02-26 12:00:33 +08:00 committed by GitHub
commit da642c48ce
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
293 changed files with 37086 additions and 12659 deletions


@ -26,10 +26,17 @@ SET(CMAKE_VERBOSE_MAKEFILE ON)
# open the file named TDengine.sln
#
SET(TD_GODLL FALSE)
IF (${DLLTYPE} MATCHES "go")
ADD_DEFINITIONS(-D_TD_GO_DLL_)
MESSAGE(STATUS "input dll type: " ${DLLTYPE})
SET(TD_GODLL TRUE)
ENDIF ()
IF (NOT DEFINED TD_CLUSTER)
MESSAGE(STATUS "Build the Lite Version")
SET(TD_CLUSTER FALSE)
SET(TD_LITE TRUE)
SET(TD_EDGE TRUE)
SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})
MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR})
@ -41,34 +48,49 @@ IF (NOT DEFINED TD_CLUSTER)
SET(TD_ARM FALSE)
SET(TD_ARM_64 FALSE)
SET(TD_ARM_32 FALSE)
SET(TD_MIPS FALSE)
SET(TD_MIPS_64 FALSE)
SET(TD_MIPS_32 FALSE)
SET(TD_DARWIN_64 FALSE)
SET(TD_WINDOWS_64 FALSE)
SET(TD_PAGMODE_LITE FALSE)
IF (${PAGMODE} MATCHES "lite")
SET(TD_PAGMODE_LITE TRUE)
ENDIF ()
# if generate ARM version:
# cmake -DARMVER=arm32 .. or cmake -DARMVER=arm64
IF (${ARMVER} MATCHES "arm32")
# cmake -DCPUTYPE=aarch32 .. or cmake -DCPUTYPE=aarch64
IF (${CPUTYPE} MATCHES "aarch32")
SET(TD_ARM TRUE)
SET(TD_ARM_32 TRUE)
SET(TD_PAGMODE_LITE TRUE)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ELSEIF (${ARMVER} MATCHES "arm64")
ELSEIF (${CPUTYPE} MATCHES "aarch64")
SET(TD_ARM TRUE)
SET(TD_ARM_64 TRUE)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-D_TD_ARM_64_)
ELSEIF (${CPUTYPE} MATCHES "mips64")
SET(TD_MIPS TRUE)
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS(-D_TD_MIPS_)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ELSEIF (${CPUTYPE} MATCHES "x64")
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
ELSEIF (${CPUTYPE} MATCHES "x86")
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
ELSE ()
MESSAGE(STATUS "input cpuType: " ${CPUTYPE})
ENDIF ()
IF (TD_ARM)
ADD_DEFINITIONS(-D_TD_ARM_)
IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ELSEIF (TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_64_)
ELSE ()
EXIT ()
ENDIF ()
ENDIF ()
#
# Get OS information and store in variable TD_OS_INFO.
#
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
MESSAGE(STATUS "The current os is " ${TD_OS_INFO})
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF (${CMAKE_SIZEOF_VOID_P} MATCHES 8)
@ -141,37 +163,51 @@ IF (NOT DEFINED TD_CLUSTER)
SET(RELEASE_FLAGS "-O0")
IF (NOT TD_ARM)
IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ELSE ()
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
ELSE ()
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
IF (${TD_OS_INFO} MATCHES "Alpine")
MESSAGE(STATUS "The current OS is Alpine, append extra flags")
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
ENDIF ()
ELSEIF (TD_LINUX_32)
IF (NOT TD_ARM)
EXIT ()
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
IF (${TD_OS_INFO} MATCHES "Alpine")
MESSAGE(STATUS "The current OS is Alpine, add extra flags")
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
ENDIF ()
ELSEIF (TD_WINDOWS_64)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX- /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /GL")
ENDIF ()
ADD_DEFINITIONS(-DWINDOWS)
ADD_DEFINITIONS(-D__CLEANUP_C)
ADD_DEFINITIONS(-DPTW32_STATIC_LIB)
ADD_DEFINITIONS(-DPTW32_BUILD)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
ELSEIF (TD_DARWIN_64)
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-unused-variable -Wno-bitfield-constant-conversion")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
ADD_DEFINITIONS(-DDARWIN)
@ -230,6 +266,7 @@ IF (NOT DEFINED TD_CLUSTER)
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR})")
ELSEIF (TD_WINDOWS_64)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
IF (NOT TD_GODLL)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
@ -245,6 +282,15 @@ IF (NOT DEFINED TD_CLUSTER)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-1.0.2-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSE ()
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
ENDIF ()
ELSEIF (TD_DARWIN_64)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin)")
ENDIF ()
ENDIF ()


@ -39,12 +39,24 @@ sudo apt-get install maven
```
Build TDengine:
```cmd
```
mkdir build && cd build
cmake .. && cmake --build .
```
To compile on an ARM processor (aarch64 or aarch32), please add option CPUTYPE as below:
aarch64:
```cmd
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
aarch32:
```cmd
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
# Quick Run
To quickly start a TDengine server after building, run the command below in terminal:
```cmd
@ -118,3 +130,8 @@ The TDengine community has also kindly built some of their own connectors! Follo
# Contribute to TDengine
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
# Join TDengine WeChat Group
Add the WeChat account “tdengine” to join the group; there you can communicate with other users.

deps/iconv/iconv.c vendored

@ -175,7 +175,10 @@ static const struct alias sysdep_aliases[] = {
#ifdef __GNUC__
__inline
#endif
const struct alias *
// gcc -O0 bug fix
// see http://git.savannah.gnu.org/gitweb/?p=libiconv.git;a=blobdiff;f=lib/iconv.c;h=31853a7f1c47871221189dbf597473a16d8a8da7;hp=5a1a32597fa3efc5f69624d37a2eb96f308cd241;hb=b29089d8b43abc8fba073da7e6dccaeba56b2b70;hpb=0a04404c90d6a725b8b6bbcd65e10c5fcf5993e9
static const struct alias *
aliases2_lookup (register const char *str)
{
const struct alias * ptr;


@ -114,23 +114,84 @@ public Connection getConn() throws Exception{
</ul>
<p>For error messages from TDengine operations, users can use the enumeration class TSDBError.java provided in the JDBC driver package to look up the list of error messages and error codes. For more code covering specific operations, please refer to the demo project <code>JDBCDemo</code> provided with TDengine.</p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Python客户端安装'></a><h3>Install the Python client</h3>
<p>Users can find the python2 and python3 installation packages in the src/connector/python directory of the source code and install them with the pip command:</p>
<p> <code>pip install src/connector/python/python2/</code></p>
<p>or</p>
<p> <code>pip install src/connector/python/python3/</code></p>
<p>If the pip command is not available on the machine, users can copy the taos directory under src/connector/python/python3 or src/connector/python/python2 into the application directory and use it from there.</p>
<a class='anchor' id='Python客户端接口'></a><h3>Python client interfaces</h3>
<p>To use TDengine's python interfaces, import the TDengine client module first:</p>
<pre><code>import taos </code></pre>
<p>Users can view module usage directly through python's help, or refer to the sample programs in code/examples/python. Some commonly used classes and methods are listed below:</p>
<a class='anchor' id='安装准备'></a><h3>Preparation</h3>
<li>TDengine installed; if the client is on Windows, the Windows version of the TDengine client must be installed</li>
<li>python 2.7 or >= 3.4 installed</li>
<li>pip installed</li>
<a class='anchor' id='安装'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>Users can find the python2 and python3 installation packages in the src/connector/python directory of the source code, then install them with the pip command:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>With the Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\windows\system32" directory, then open the Windows <em>cmd</em> command-line interface</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If the <em>pip</em> command is not available on the machine, users can copy the taos directory under src/connector/python/windows/python3 or src/connector/python/windows/python2 into the application directory and use it from there.</p>
<a class='anchor' id='使用'></a><h3>Usage</h3>
<a class='anchor' id='代码示例'></a><h4>Examples</h4>
<li>Import the TDengine client module</li>
<pre><code class="python language-python">import taos </code></pre>
<li>Get the connection</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host is the IP of the TDengine server, and config is the directory where the client configuration file is located</em></p>
<li>Insert records</li>
<pre><code>
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>Query the database</li>
<pre><code>
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# the returned result is a list; each row is one element of the list
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
# use the cursor directly as an iterator to retrieve the results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
</code></pre>
<li>Close the connection</li>
<pre><code>
c1.close()
conn.close()
</code></pre>
<a class='anchor' id='帮助信息'></a><h4>Help information</h4>
<p>Users can view module usage directly through python's help, or refer to the sample programs in code/examples/python. Some commonly used classes and methods are listed below:</p>
<ul>
<li><p><em>TaosConnection</em></p>
<p>See help(taos.TaosConnection) in python.</p></li>
<p>See <code>help(taos.TDengineConnection)</code> in python.</p></li>
<li><p><em>TaosCursor</em></p>
<p>See help(taos.TaosCursor) in python.</p></li>
<li><p>The <em>connect</em> method</p>
<p>Used to create an instance of taos.TaosConnection.</p></li>
<p>See <code>help(taos.TDengineCursor)</code> in python.</p></li>
<li><p>The connect method</p>
<p>Used to create an instance of taos.TDengineConnection.</p></li>
</ul>
<a class='anchor' id='RESTful-Connector'></a><h2>RESTful Connector</h2>
<p>To support development on all kinds of platforms, TDengine provides an API conforming to REST principles: the RESTful API. To minimize the learning cost, and unlike other databases' RESTful API designs, TDengine operates the database directly through the SQL statement contained in the BODY of an HTTP POST request; only a URL is needed.</p>


@ -28,7 +28,7 @@
<p>In the TDengine shell, users can create and drop databases and tables and run insert and query operations via SQL commands. A SQL statement in the shell must end with a semicolon to execute. Example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, cdata int);
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;


@ -32,7 +32,7 @@ tags (location binary(20), type int)</code></pre>
<p>It lists all STables in the database and related information, including name, creation time, number of columns, number of tags, and the number of tables created via each STable.</p></li>
<li><p>Drop a STable</p>
<pre><code class="mysql language-mysql">DROP TABLE &lt;stable_name&gt;</code></pre>
<p>Note: Dropping a STable does not cascade to the tables created via the STable; instead, dropping a STable requires that all tables created via it have been dropped first.</p></li>
<p>Note: When a STable is dropped, all tables created via the STable are dropped as well.</p></li>
<li><p>List the tables that belong to a STable and satisfy the query conditions</p>
<pre><code class="mysql language-mysql">SELECT TBNAME,[TAG_NAME,…] FROM &lt;stable_name&gt; WHERE &lt;tag_name&gt; &lt;[=|=&lt;|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] …)</code></pre>
<p>It lists the tables that belong to the STable and satisfy the query conditions. Note: TBNAME is a keyword; it displays the names of the child tables created via the STable. Conditions on tags may be used in the query.</p>


@ -122,15 +122,76 @@ public Connection getConn() throws Exception{
</ul>
<p>All the error codes and error messages can be found in <code>TSDBError.java</code> . For a more detailed coding example, please refer to the demo project <code>JDBCDemo</code> in TDengine's code examples. </p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Install-TDengine-Python-client'></a><h3>Install TDengine Python client</h3>
<p>Users can find python client packages in our source code directory <em>src/connector/python</em>. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use the <em>pip</em> command to install:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/python2/</code></pre>
<a class='anchor' id='Pre-requirement'></a><h3>Pre-requirement</h3>
<li>TDengine installed, TDengine-client installed if on Windows</li>
<li>python 2.7 or >= 3.4</li>
<li>pip installed </li>
<a class='anchor' id='Installation'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>Users can find python client packages in our source code directory <em>src/connector/python</em>. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use <em>pip</em> command to install:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/python3/</code></pre>
<p>If <em>pip</em> command is not installed on the system, users can choose to install pip or just copy the <em>taos</em> directory in the python client directory to the application directory to use.</p>
<a class='anchor' id='Python-client-interfaces'></a><h3>Python client interfaces</h3>
<p>To use TDengine Python client, import TDengine module at first:</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then open the <em>cmd</em> Windows command interface</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If <em>pip</em> command is not installed on the system, users can choose to install pip or just copy the <em>taos</em> directory in the python client directory to the application directory to use.</p>
<a class='anchor' id='Usage'></a><h3>Usage</h3>
<a class='anchor' id='Examples'></a><h4>Examples</h4>
<li>import TDengine module at first:</li>
<pre><code class="python language-python">import taos </code></pre>
<li>get the connection</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host is the IP of the TDengine server, and config is the directory where the TDengine client configuration file is located</em></p>
<li>insert records into the database</li>
<pre><code>
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>query the database</li>
<pre><code>
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
</code></pre>
<li>close the connection</li>
<pre><code>
c1.close()
conn.close()
</code></pre>
<a class='anchor' id='Help information'></a><h4>Help information</h4>
<p>Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:</p>
<ul>
<li><p><em>TaosConnection</em> class</p>


@ -28,7 +28,7 @@
<p>In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, cdata int);
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 10:00:00', 10);
insert into t values ('2019-07-15 10:01:05', 20);
select * from t;


@ -73,7 +73,7 @@ INTERVAL(10M)</code></pre>
<p>It lists the STable's schema and tags</p>
<a class='anchor' id='Drop-a-STable'></a><h3>Drop a STable</h3>
<pre><code class="mysql language-mysql">DROP TABLE &lt;stable_name&gt;</code></pre>
<p>To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.</p>
<p>To delete a STable, all the tables created via this STable will be deleted.</p>
<a class='anchor' id='List-the-Associated-Tables-of-a-STable'></a><h3>List the Associated Tables of a STable</h3>
<pre><code class="mysql language-mysql">SELECT TBNAME,[TAG_NAME, ...] FROM &lt;stable_name&gt; WHERE &lt;tag_name&gt; &lt;[=|=&lt;|&gt;=|&lt;&gt;] values..&gt; ([AND|OR] ...)</code></pre>
<p>It will list all the tables which satisfy the tag filter conditions. The tables are all created from this specific STable. TBNAME is a new keyword introduced, it is the table name associated with the STable. </p>

13 binary image files changed (8 added, 5 removed); contents not shown.


@ -34,7 +34,7 @@ TDengine can integrate with the open-source data visualization system [Grafana](https://www.grafana.com/)
### Install Grafana
Currently TDengine supports Grafana 5.2.4 and above. Users can download an installation package for their operating system from the Grafana website and install it. Download link: https://grafana.com/grafana/download
### Configure Grafana
@ -44,41 +44,58 @@ TDengine's Grafana plugin is in the /usr/local/taos/connector/grafana directory of the installation package
### Use Grafana
Users can log in to the Grafana server directly at localhost:3000 (username/password: admin/admin) and configure the TDengine data source as shown below; the TDengine data source then appears in the drop-down list.
#### Configure the data source
![img](../assets/clip_image001.png)
Users can log in to the Grafana server directly at localhost:3000 (username/password: admin/admin) and add a data source via `Configuration -> Data Sources` on the left, as shown below:
In the HTTP configuration of the TDengine data source, the Host address must be set to the IP address of any server in the TDengine cluster together with the port of the TDengine RESTful interface (6020). Assuming the TDengine database and Grafana are deployed on the same machine, enter http://localhost:6020.
![img](../assets/add_datasource1.jpg)
In addition, the username and password for logging in to TDengine must be configured; then click the Save&Test button shown below to save.
Click `Add data source` to open the new data source page, type TDengine in the search box and select it to add, as shown below:
![img](../assets/clip_image001-2474914.png)
![img](../assets/add_datasource2.jpg)
Open the data source configuration page and adjust the settings following the default prompts:
![img](../assets/add_datasource3.jpg)
The newly created TDengine data source then appears in Grafana's data source list:
* Host: the IP address of any server in the TDengine cluster together with the port of the TDengine RESTful interface (6020); http://localhost:6020 by default.
* User: TDengine user name.
* Password: TDengine user password.
![img](../assets/clip_image001-2474939.png)
Click `Save & Test` to run a test; on success you will see the following prompt:
![img](../assets/add_datasource4.jpg)
#### Create a Dashboard
With the steps above, the TDengine data source can be used when creating a Dashboard, as shown below:
Return to the main page to create a Dashboard, and click Add Query to open the panel query page:
![img](../assets/clip_image001-2474961.png)
![img](../assets/create_dashboard1.jpg)
As shown above, select the `TDengine` data source in Query, and enter the SQL to run in the query box below. Details:
* INPUT SQL: the statement to query (the result set of the SQL statement should be two columns and multiple rows), e.g. `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to and interval are built-in variables of the TDengine plugin, representing the query range and time interval obtained from the Grafana panel. Besides the built-in variables, custom template variables are also supported.
* ALIAS BY: an alias for the current query.
* GENERATE SQL: clicking this button automatically substitutes the variables and generates the statement that is actually executed.
Then you can click the Add Query button to add another query.
Following the default prompts, query the average system memory usage over the specified interval on the server where the current TDengine deployment is located, as follows:
In the INPUT SQL box, enter the statement to query (the result set of the SQL statement should be curve data of two rows and multiple columns), e.g. SELECT count(*) FROM sys.cpu WHERE ts>=from and ts<to interval(interval), where from, to and interval are built-in variables of the TDengine plugin, representing the query range and time interval obtained from the Grafana panel.
![img](../assets/create_dashboard2.jpg)
The ALIAS BY box takes an alias for the query; clicking the GENERATE SQL button shows the SQL statement sent to TDengine, as shown below:
> For how to use Grafana to create monitoring dashboards and for more information on using Grafana, please refer to the official Grafana [documentation](https://grafana.com/docs/).
![img](../assets/clip_image001-2474987.png)
#### Import a Dashboard
An importable dashboard `tdengine-grafana.json` is provided in the Grafana plugin directory /usr/local/taos/connector/grafana/tdengine/dashboard/.
Click the `Import` button on the left and upload the `tdengine-grafana.json` file:
![img](../assets/import_dashboard1.jpg)
After the import completes, the result looks as follows:
![img](../assets/import_dashboard2.jpg)
For how to use Grafana to create monitoring dashboards and for more information on using Grafana, please refer to the official Grafana [documentation](https://grafana.com/docs/).
## Matlab


@ -175,79 +175,135 @@ TDengine provides APIs for continuous query driven by time, which run queries pe
### C/C++ subscription API
For the time being, TDengine supports subscription on one table. It is implemented through periodic pulling from a TDengine server.
For the time being, TDengine supports subscription on one or multiple tables. It is implemented through periodic pulling from a TDengine server.
- `TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds)`
The API is used to start a subscription session given a handle. The parameters required are _host_ (IP address of a TDengine server), _user_ (username), _pass_ (password), _db_ (database to use), _table_ (name of the table to subscribe to), _time_ (start time of the subscription, 0 for now), _mseconds_ (pulling period). If it fails to open a subscription session, a _NULL_ pointer is returned.
* `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
The API is used to start a subscription session. It returns the subscription object on success and `NULL` in case of failure. The parameters are:
* **taos**: The database connection, which must be established already.
* **restart**: `Zero` to continue a subscription if it already exists, any other value to start from the beginning.
* **topic**: The unique identifier of a subscription.
* **sql**: An SQL statement for data query; it can only be a `select` statement, can only query raw data, and can only query data in ascending order of the timestamp field.
* **fp**: A callback function to receive query results, only used in asynchronous mode; it should be `NULL` in synchronous mode. Please refer below for its prototype.
* **param**: User-provided additional parameter for the callback function.
* **interval**: Pulling interval in milliseconds. In asynchronous mode, the API calls the callback function `fp` at this interval; system performance will be impacted if this interval is too short. In synchronous mode, if the duration between two calls to `taos_consume` is less than this interval, the second call blocks until the duration exceeds this interval.
- `TAOS_ROW taos_consume(TAOS_SUB *tsub)`
The API used to get the new data from a TDengine server. It should be put in an infinite loop. The parameter _tsub_ is the handle returned by _taos_subscribe_. If new data are updated, the API will return a row of the result. Otherwise, the API is blocked until new data arrives. If _NULL_ pointer is returned, it means an error occurs.
* `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
Prototype of the callback function; the parameters are:
* tsub: The subscription object.
* res: The query result.
* param: User provided additional parameter when calling `taos_subscribe`.
* code: Error code in case of failures.
- `void taos_unsubscribe(TAOS_SUB *tsub)`
Stop a subscription session by the handle returned by _taos_subscribe_.
* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
The API used to get new data from a TDengine server. It should be put in a loop. The parameter `tsub` is the handle returned by `taos_subscribe`. This API should only be called in synchronous mode. If the duration between two calls to `taos_consume` is less than the pulling interval, the second call blocks until the duration exceeds the interval. The API returns the new rows if new data arrives, or an empty row set otherwise; if there is an error, it returns `NULL`.
- `int taos_num_subfields(TAOS_SUB *tsub)`
The API used to get the number of fields in a row.
* `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
- `TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub)`
The API used to get the description of each column.
Stop a subscription session via the handle returned by `taos_subscribe`. If `keepProgress` is **not** zero, the subscription progress information is kept and can be reused in a later call to `taos_subscribe`; otherwise the information is removed.
## Java Connector
### JDBC Interface
For Java developers, TDengine provides `taos-jdbcdriver` according to the JDBC (3.0) API. Users can find and download it through the [Sonatype Repository][1].
TDengine provides a JDBC driver `taos-jdbcdriver-x.x.x.jar` for enterprise Java developers. TDengine's JDBC driver is implemented as a subset of the standard JDBC 3.0 specification and supports the most common Java development frameworks. The driver has been published to dependency repositories such as the Sonatype Maven Repository, and users can refer to the following `pom.xml` configuration file.
Since the native language of TDengine is C, the necessary TDengine library should be present before using the taos-jdbcdriver:
* libtaos.so (Linux)
After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path.
* taos.dll (Windows)
After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path.
> Note: Please make sure that the [TDengine Windows client][14] has been installed if developing on Windows. Although the TDengine client is now installed together with the TDengine server by default, it can also be installed [alone][15].
Since TDengine is a time-series database, there are still some differences from traditional databases when using the TDengine JDBC driver:
* TDengine doesn't allow deleting or modifying a single record, so the JDBC driver has no such methods
* No support for transactions
* No support for unions between tables
* No support for nested queries: `There is at most one open ResultSet for each Connection; the TSDB JDBC driver therefore closes the current ResultSet if it is not closed when a new query begins.`
## Version list of TAOS-JDBCDriver and required TDengine and JDK
| taos-jdbcdriver | TDengine | JDK |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x or higher | 1.8.x |
| 1.0.2 | 1.6.1.x or higher | 1.8.x |
| 1.0.1 | 1.6.1.x or higher | 1.8.x |
## DataType in TDengine and Java
The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java:
| TDengine | Java |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to get TAOS-JDBC Driver
### maven repository
taos-jdbcdriver has been published to [Sonatype Repository][1]:
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
Using the following pom.xml for maven projects
```xml
<repositories>
<repository>
<id>oss-sonatype</id>
<name>oss-sonatype</name>
<url>https://oss.sonatype.org/content/groups/public</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>1.0.1</version>
<version>1.0.3</version>
</dependency>
</dependencies>
```
Please note the JDBC driver itself relies on a native library written in C. On a Linux OS, the driver relies on a `libtaos.so` native library, where .so stands for "Shared Object". After the successful installation of TDengine on Linux, `libtaos.so` should be automatically copied to `/usr/local/lib/taos` and added to the system's default search path. On a Windows OS, the driver relies on a `taos.dll` native library, where .dll stands for "Dynamic Link Library". After the successful installation of the TDengine client on Windows, the `taos-jdbcdriver.jar` file can be found in `C:/TDengine/driver/JDBC`; the `taos.dll` file can be found in `C:/TDengine/driver/C` and should have been automatically copied to the system's searching path `C:/Windows/System32`.
### JAR file from the source code
Developers can refer to the Oracle's official JDBC API documentation for detailed usage on classes and methods. However, there are some differences of connection configurations and supported methods in the driver implementation between TDengine and traditional relational databases.
After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated.
For database connections, TDengine's JDBC driver has the following configurable parameters in the JDBC URL. The standard format of a TDengine JDBC URL is:
## Usage
`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
where `{}` marks the required parameters and `[]` marks the optional. The usage of each parameter is pretty straightforward:
* user - login user name for TDengine; by default, it's `root`
* password - login password; by default, it's `taosdata`
* charset - the client-side charset; by default, it's the operation system's charset
* cfgdir - the directory of TDengine client configuration file; by default it's `/etc/taos` on Linux and `C:\TDengine/cfg` on Windows
* locale - the language environment of TDengine client; by default, it's the operation system's locale
* timezone - the timezone of the TDengine client; by default, it's the operation system's timezone
All parameters can be configured at the time when creating a connection using the java.sql.DriverManager class, for example:
### get the connection
```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> `6030` is the default port and `log` is the default database for system monitor.
A normal JDBC URL looks as follows:
`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
values in `{}` are required while values in `[]` are optional. Each option in the above URL denotes:
* user: user name for login; `root` by default.
* password: password for login; `taosdata` by default.
* charset: charset for the client; the system charset by default.
* cfgdir: configuration directory of the TDengine client; _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows by default.
* locale: language for the client; the system locale by default.
* timezone: timezone for the client; the system timezone by default.
The options above can be configured in the following ways (`ordered by priority`):
1. JDBC URL
As explained above.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
```java
public Connection getConn() throws Exception{
  Class.forName("com.taosdata.jdbc.TSDBDriver");
  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata";
  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
  Properties connProps = new Properties();
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
  connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
@ -260,42 +316,316 @@ public Connection getConn() throws Exception{
}
```
Except `cfgdir`, all the parameters listed above can also be configured in the configuration file. The properties specified when calling DriverManager.getConnection() has the highest priority among all configuration methods. The JDBC URL has the second-highest priority, and the configuration file has the lowest priority. The explicitly configured parameters in a method with higher priorities always overwrite that same parameter configured in methods with lower priorities. For example, if `charset` is explicitly configured as "UTF-8" in the JDBC URL and "GKB" in the `taos.cfg` file, then "UTF-8" will be used.
3. Configuration file (taos.cfg)
Although the JDBC driver is implemented following the JDBC standard as much as possible, there are major differences between TDengine and traditional databases in terms of data models that lead to the differences in the driver implementation. Here is a list of head-ups for developers who have plenty of experience on traditional databases but little on TDengine:
The default configuration file is _/var/lib/taos/taos.cfg_ on Linux and _C:\TDengine\cfg\taos.cfg_ on Windows
```properties
# client default username
# defaultUser root
* TDengine does NOT support updating or deleting a specific record, which leads to some unsupported methods in the JDBC driver
* TDengine currently does not support `join` or `union` operations, and thus, is lack of support for associated methods in the JDBC driver
* TDengine supports batch insertions which are controlled at the level of SQL statement writing instead of API calls
* TDengine doesn't support nested queries and neither does the JDBC driver. Thus for each established connection to TDengine, there should be only one open result set associated with it
# client default password
# defaultPass taosdata
# default system charset
# charset UTF-8
# system locale
# locale en_US.UTF-8
```
> For more options, refer to [client configuration][13]
### Create databases and tables
```java
Statement stmt = conn.createStatement();
// create database
stmt.executeUpdate("create database if not exists db");
// use database
stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: without a step like `use db`, the database name must be prefixed to the table name, as in _db.tb_, when operating on tables
### Insert data
```java
// insert data
int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
System.out.println("insert " + affectedRows + " rows.");
```
> _now_ is the server time.
> _now+1s_ is 1 second later than current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year).
### Query database
```java
// query data
ResultSet resultSet = stmt.executeQuery("select * from tb");
Timestamp ts = null;
int temperature = 0;
float humidity = 0;
while(resultSet.next()){
    ts = resultSet.getTimestamp(1);
    temperature = resultSet.getInt(2);
    humidity = resultSet.getFloat("humidity");
    System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Queries work as in a relational database. Subscripts start from 1 when retrieving returned results; using column names to retrieve results is recommended.
### Close all
```java
resultSet.close();
stmt.close();
conn.close();
```
> `please make sure the connection is closed to avoid connection leaks`
## Using connection pool
**HikariCP**
* dependence in pom.xml
```xml
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.1</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws SQLException {
    HikariConfig config = new HikariConfig();
    config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
    config.setUsername("root");
    config.setPassword("taosdata");
    config.setMinimumIdle(3);           //minimum number of idle connections
    config.setMaximumPoolSize(10);      //maximum number of connections in the pool
    config.setConnectionTimeout(10000); //maximum wait in milliseconds to get a connection from the pool
    config.setIdleTimeout(60000);       //maximum idle time before an idle connection is recycled
    config.setConnectionTestQuery("describe log.dn"); //validation query
    config.setValidationTimeout(3000);  //validation query timeout
    HikariDataSource ds = new HikariDataSource(config); //create the datasource
    Connection connection = ds.getConnection(); // get a connection
    Statement statement = connection.createStatement(); // get a statement
    //query or insert
    // ...
    connection.close(); // put back into the connection pool
}
```
> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool.
> For more instructions, refer to the [User Guide][5]
**Druid**
* dependency in pom.xml
```xml
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.20</version>
</dependency>
```
* Examples
```java
public static void main(String[] args) throws Exception {
    Properties properties = new Properties();
    properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
    properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
    properties.put("username","root");
    properties.put("password","taosdata");
    properties.put("maxActive","10"); //maximum number of connections in the pool
    properties.put("initialSize","3");//initial number of connections
    properties.put("maxWait","10000");//maximum wait in milliseconds to get a connection from the pool
    properties.put("minIdle","3");//minimum number of connections in the pool
    properties.put("timeBetweenEvictionRunsMillis","3000");// the interval in milliseconds between connection tests
    properties.put("minEvictableIdleTimeMillis","60000");//the minimum time in milliseconds to keep a connection idle
    properties.put("maxEvictableIdleTimeMillis","90000");//the maximum time in milliseconds to keep a connection idle
    properties.put("validationQuery","describe log.dn"); //validation query
    properties.put("testWhileIdle","true"); // test connections while idle
    properties.put("testOnBorrow","false"); // not needed while testWhileIdle is true
    properties.put("testOnReturn","false"); // not needed while testWhileIdle is true
    //create the druid datasource
    DataSource ds = DruidDataSourceFactory.createDataSource(properties);
    Connection connection = ds.getConnection(); // get a connection
    Statement statement = connection.createStatement(); // get a statement
    //query or insert
    // ...
    connection.close(); // put back into the connection pool
}
```
> For more instructions, refer to the [User Guide][6]
**Notice**
* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`.
As follows, `1` will be returned if `select server_status()` executes successfully.
```shell
taos> select server_status();
server_status()|
================
1 |
Query OK, 1 row(s) in set (0.000141s)
```
## Integrated with framework
* Please refer to [SpringJdbcTemplate][11] if using taos-jdbcdriver in Spring JdbcTemplate
* Please refer to [springbootdemo][12] if using taos-jdbcdriver in Spring Boot
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: The application cannot find the native library _taos_
**Answer**: On Windows, copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\`; on Linux, make a soft link via `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
**Cause**: Currently TDengine only supports 64-bit JDK
**Answer**: Reinstall a 64-bit JDK.
* For other questions, please refer to [Issues][7]
All the error codes and error messages can be found in `TSDBError.java` . For a more detailed coding example, please refer to the demo project `JDBCDemo` in TDengine's code examples.
## Python Connector
### Install TDengine Python client
### Pre-requirement
* TDengine installed, TDengine-client installed if on Windows [(Windows TDengine client installation)](https://www.taosdata.com/cn/documentation/connector/#Windows客户端及程序接口)
* python 2.7 or >= 3.4
* pip installed
Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use the _pip_ command to install:
### Installation
#### Linux
Users can find python client packages in our source code directory _src/connector/python_. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use _pip_ command to install:
```cmd
pip install src/connector/python/[linux|Windows]/python2/
pip install src/connector/python/linux/python3/
```
or
```
pip install src/connector/python/[linux|Windows]/python3/
pip install src/connector/python/linux/python2/
```
#### Windows
Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then open the _cmd_ Windows command interface
```
cd C:\TDengine\connector\python\windows
pip install python3\
```
or
```
cd C:\TDengine\connector\python\windows
pip install python2\
```
*If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use.
If _pip_ command is not installed on the system, users can choose to install pip or just copy the _taos_ directory in the python client directory to the application directory to use.
### Python client interfaces
To use TDengine Python client, import TDengine module at first:
### Usage
#### Examples
* import TDengine module
```python
import taos
```
* get the connection
```python
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
```
* <em>host</em> is the IP of the TDengine server, and <em>config</em> is the directory where the TDengine client configuration file is located
* insert records into the database
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
```
* query the database
```python
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
```
* create a subscription
```python
# Create a subscription with topic 'test' and consumption interval 1000ms.
# The first argument is True means to restart the subscription;
# if the subscription with topic 'test' has already been created, then pass
# False to this argument means to continue the existing subscription.
sub = conn.subscribe(True, "test", "select * from meters;", 1000)
```
* consume a subscription
```python
data = sub.consume()
for d in data:
    print(d)
```
* close the subscription
```python
sub.close()
```
* close the connection
```python
c1.close()
conn.close()
```
#### Help information
Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:
@ -569,3 +899,18 @@ An example of using the NodeJS connector to create a table with weather data and
An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js)
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/
[5]: https://github.com/brettwooldridge/HikariCP
[6]: https://github.com/alibaba/druid
[7]: https://github.com/taosdata/TDengine/issues
[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/documentation/connector/#Windows%E5%AE%A2%E6%88%B7%E7%AB%AF%E5%8F%8A%E7%A8%8B%E5%BA%8F%E6%8E%A5%E5%8F%A3
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B


@ -1,6 +1,6 @@
# STable: Aggregation across Multiple Tables
TDengine requires a separate table for each data collection point. This greatly improves insert/query performance, but it makes the number of tables surge, making it hard for applications to maintain tables and to run aggregations and statistics. To reduce application development difficulty, TDengine introduces the concept of the STable (Super Table).
TDengine requires a separate table for each data collection point. Independent tables avoid locking during writes and can therefore greatly improve insert/query performance. But independent tables also mean the number of tables in the system is of the same order as the number of collection points; with many collection points, the number of tables becomes very large, making it hard for applications to maintain tables and to run aggregations and statistics. To reduce application development difficulty, TDengine introduces the concept of the Super Table (STable for short).
## What is a STable
@ -12,11 +12,46 @@ TDengine extends standard SQL syntax to define a STable, using the keyword tags to specify tags
CREATE TABLE <stable_name> (<field_name> TIMESTAMP, field_name1 field_type,…) TAGS(tag_name tag_type, …)
```
where tag_name is the tag name and tag_type is the tag data type. Tags can use any data type supported by TDengine except timestamp. There can be at most 6 tags, and tag names must not collide with system keywords or with other column names. For example:
where tag_name is the tag name and tag_type is the tag data type. Tags can use any data type supported by TDengine except timestamp. There can be at most 32 tags, and tag names must not collide with system keywords or with other column names. For example:
```mysql
create table thermometer (ts timestamp, degree float)
tags (location binary(20), type int)
CREATE TABLE thermometer (ts timestamp, degree float)
TAGS (location binary(20), type int)
```
The SQL above creates a STable named thermometer with the tags location and type.
@ -30,7 +30,7 @@ CREATE TABLE <tb_name> USING <stb_name> TAGS (tag_value1,...)
Following the thermometer example above, the statement that creates the table for a single thermometer from the STable thermometer is:
```mysql
create table t1 using thermometer tags ('beijing', 10)
CREATE TABLE t1 USING thermometer TAGS ('beijing', 10)
```
The SQL above creates a table named t1 using thermometer as the template; the table's schema is thermometer's schema, with the tag location set to 'beijing' and the tag type set to 10.
@ -72,7 +72,7 @@ A STable belongs to a database; one STable belongs to exactly one database, but a database can contain more than one
DROP TABLE <stable_name>
```
Note: Dropping a STable does not cascade to the tables created via the STable; instead, dropping a STable requires that all tables created via it have been dropped first.
Note: When a STable is dropped, all tables created via the STable are dropped as well.
- List the tables that belong to a STable and satisfy the query conditions


@ -142,7 +142,7 @@ It lists the STable's schema and tags
DROP TABLE <stable_name>
```
To delete a STable, all the tables created via this STable shall be deleted first, otherwise, it will fail.
To delete a STable, all the tables created via this STable will be deleted.
### List the Associated Tables of a STable


@ -1,6 +1,8 @@
# TAOS SQL
TDengine provides a SQL-like syntax. Users can run SQL statements in the TDengine Shell, or execute SQL from C/C++, Java (JDBC), Python, Go and other programs.
This document describes the syntax rules supported by TAOS SQL, the main query features, the supported SQL query functions, and common techniques. Reading this document requires basic knowledge of SQL.
TAOS SQL is the main tool for writing data into and querying data from TDengine. To help users get started quickly, TAOS SQL provides, to a certain extent, a style and pattern similar to standard SQL; strictly speaking, TAOS SQL is not, and does not try to be, SQL-standard syntax. In addition, since TDengine targets time-series structured data and does not offer modification or update capabilities, TAOS SQL provides no data update or data deletion features.
This chapter's SQL syntax follows these conventions:
@ -9,15 +11,46 @@ TDengine provides a SQL-like syntax; users can run SQL statements in the TDengine Shell
- | means choose one of several items, but do not enter | itself
- … means the preceding item can be repeated multiple times
To better illustrate the rules and characteristics of the SQL syntax, this document assumes a sample data set. The data set models two kinds of devices: temperature/humidity sensors and pressure/altitude sensors.
For temperature sensors there is a super table (super table) temp_stable. Its data model is as follows:
```
taos> describe temp_stable;
Field | Type | Length | Note |
=======================================================================================================
ts |TIMESTAMP | 8 | |
temperature |FLOAT | 4 | |
humidity |TINYINT | 1 | |
status |TINYINT | 1 | |
deviceid |BIGINT | 12 |tag |
location |BINARY | 20 |tag |
```
The data set contains data from 2 temperature sensors, which by TDengine's modeling rules correspond to 2 child tables named temp_tb_1 and temp_tb_2.
For pressure (altitude) sensors there is a super table (super table) pressure_stable. Its data model is as follows:
The data set contains data from 2 pressure sensors, corresponding to 2 child tables named press_tb_1 and press_tb_2.
```text
taos> describe pressure_stable;
Field | Type | Length | Note |
=======================================================================================================
ts |TIMESTAMP | 8 | |
height |FLOAT | 4 | |
pressure |FLOAT | 4 | |
devstat |TINYINT | 1 | |
id |BIGINT | 8 |tag |
city |NCHAR | 20 |tag |
longitude |FLOAT | 4 |tag |
latitude |FLOAT | 4 |tag |
```
## Supported data types
The most important concept in TDengine is the timestamp. A timestamp must be specified when creating tables and inserting records, and when querying historical records. Timestamps follow these rules:
- The time format is YYYY-MM-DD HH:mm:ss.MS; the default time resolution is milliseconds. For example: 2017-08-12 18:25:58.128
- The time format is ```YYYY-MM-DD HH:mm:ss.MS```, and the default time resolution is milliseconds. For example: ```2017-08-12 18:25:58.128```
- The internal function now returns the server's current time
- When inserting a record, a timestamp of 0 means the server's current time is used
- Epoch time: the timestamp can also be a long integer holding the number of milliseconds since 1970-01-01 08:00:00.000
- Time can be added and subtracted; for example now-2h pushes the query time back 2 hours (the most recent 2 hours). Time units after the number are: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one full week of data from two weeks ago
- TDengine does not yet support splitting time windows by natural year and natural month. Time-window units in a WHERE condition convert as follows: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
TDengine's default timestamp precision is milliseconds, but microseconds can be enabled by changing the configuration parameter enableMicrosecond.
@ -26,13 +59,13 @@ TDengine's default timestamp precision is milliseconds, but microseconds can be enabled via the configuration parameter enableMicrosecond
| | Type | Bytes | Description |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | Timestamp. Minimum precision is milliseconds. Counted from Greenwich time 1970-01-01 00:00:00.000 (UTC/GMT); earlier times are not allowed. |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used for NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^59, 2^59] |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used for NULL |
| 4 | FLOAT | 4 | Float, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 6 | BINARY | custom | Strings of at most 504 bytes. binary accepts only string input, quoted with single quotes; otherwise English letters are automatically converted to lowercase. A size must be given; e.g. binary(20) defines a string of at most 20 characters, each taking 1 byte of storage, and longer strings are truncated automatically. A single quote inside a string is written with the escape character backslash plus single quote: **\'**. |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767] |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127] |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is used for NULL |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is used for NULL |
| 9 | BOOL | 1 | Boolean, {true, false} |
| 10 | NCHAR | custom | Non-ASCII strings, e.g. Chinese characters. Each nchar character occupies 4 bytes of storage. Single quotes quote the string; a single quote inside it is escaped as **\'**. A size must be given; a column of type nchar(10) stores at most 10 nchar characters, always occupying 40 bytes. Longer strings are truncated automatically. |
@ -158,25 +191,179 @@ TDengine's default timestamp precision is milliseconds, but microseconds can be enabled via the configuration parameter enableMicrosecond
```
Insert multiple records by column into tables tb1_name and tb2_name at the same time
Note: the timestamps of new records inserted into the same table must be increasing; otherwise the record is skipped. If the timestamp is 0, the system automatically uses the server's current time as the record's timestamp.
Note: 1. The timestamps of new records inserted into the same table must be increasing; otherwise the record is skipped. If the timestamp is 0, the system automatically uses the server's current time as the record's timestamp.
2. The oldest record timestamp allowed for insertion is the current server time minus the configured keep value (the number of days data is retained); the newest record timestamp allowed is the current server time plus the configured days value (the time span, in days, over which one data file stores data). Both keep and days can be specified when creating the database; the defaults are 3650 days and 10 days respectively.
**IMPORT**: to write records whose timestamps are smaller than the last record's time, use the IMPORT command in place of INSERT; IMPORT's syntax is exactly the same as INSERT's. When importing multiple records at once, the batch must be sorted by timestamp.
**IMPORT**: to write records whose timestamps are smaller than the last record's time, use the IMPORT command in place of INSERT; IMPORT's syntax is exactly the same as INSERT's.
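As an illustration, a minimal sketch of back-filling one out-of-order record into the sample table temp_tb_1 described above (the values are hypothetical):
```mysql
-- INSERT would skip this timestamp because newer records already exist;
-- IMPORT accepts it, using exactly the same syntax as INSERT
IMPORT INTO temp_tb_1 VALUES ('2019-04-28 14:22:06.000', 19.8, 33, 1);
```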
## Data query
### Query syntax:
```mysql
SELECT {* | expr_list} FROM tb_name
SELECT [DISTINCT] select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[ORDER BY _c0 { DESC | ASC }]
[LIMIT limit [, OFFSET offset]]
[INTERVAL [interval_offset,] interval_val]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[HAVING expr_list]
[SLIMIT limit_val [, SOFFSET offset_val]]
[LIMIT limit_val [, OFFSET offset_val]]
[>> export_file]
```
#### SELECT clause
A select clause can be a union query (UNION) or a subquery (SUBQUERY) of another query.
SELECT function_list FROM tb_name
[WHERE where_condition]
[LIMIT limit [, OFFSET offset]]
[>> export_file]
##### Wildcards
The wildcard * stands for all columns. For regular tables, the result contains only the regular columns.
```
taos> select * from temp_tb_1;
ts | temperature |humidity|status|
============================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |
```
For super tables, the wildcard also includes the _tag columns_.
```
taos> select * from temp_stable;
ts | temperature |humidity|status| deviceid | location |
==============================================================================================
19-04-28 14:22:07.000| 21.00000 | 37 | 1 |54197 |beijing |
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |91234 |beijing |
19-04-28 14:22:08.000| 21.50000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:09.000| 21.30000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:10.000| 21.20000 | 38 | 1 |91234 |beijing |
19-04-28 14:22:11.000| 21.30000 | 35 | 0 |91234 |beijing |
19-04-28 14:22:12.000| 22.00000 | 34 | 0 |91234 |beijing |
```
Wildcards support table-name prefixes; the following two SQL statements both return all columns:
```
select * from temp_tb_1;
select temp_tb_1.* from temp_tb_1;
```
In a JOIN query, a prefixed \* and an unprefixed \* return different results: \* returns all columns of all tables (excluding tags), while a prefixed wildcard returns only that table's columns.
```
taos> select * from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
ts | temperature |humidity|status| ts | temperature |humidity|status|
========================================================================================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 | 19-04-28 14:22:07.000| 21.00000 | 37 | 1 |
```
```
taos> select temp_tb_1.* from temp_tb_1,temp_tb_2 where temp_tb_1.ts=temp_tb_2.ts;
ts | temperature |humidity|status|
============================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
```
Some SQL functions support wildcards in queries. The difference is:
the ```count(*)``` function returns only one column, while ```first```, ```last``` and ```last_row``` return all columns.
```
taos> select count(*) from temp_tb_1;
count(*) |
======================
1 |
```
```
taos> select first(*) from temp_tb_1;
first(ts) | first(temperature) |first(humidity)|first(status)|
==========================================================================
19-04-28 14:22:07.000| 20.00000 | 34 | 1 |
```
#### Result set column names
In the ```SELECT``` clause, if result set column names are not specified, they default to the expression names in the ```SELECT``` clause. Users can also use ```AS``` to rename columns in the result set. For example:
```
taos> select ts, ts as primary_key_ts from temp_tb_1;
ts | primary_key_ts |
==============================================
19-04-28 14:22:07.000| 19-04-28 14:22:07.000|
```
但是针对```first(*)```、```last(*)```、```last_row(*)```不支持针对单列的重命名。
#### DISTINCT Modifier*
It can only be applied to tag columns (TAGS) to obtain deduplicated values; it cannot be applied to regular columns. Also, once ```DISTINCT``` is applied, only a single tag column can be output.
```count(distinct column_name)``` returns the approximate number of distinct values; the result is an approximation.
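A hedged sketch, reusing the super table and the location tag from the examples above; per the note, the count is an approximation:
```mysql
SELECT DISTINCT location FROM temp_stable;
SELECT COUNT(DISTINCT location) FROM temp_stable;
```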
#### Implicit Result Columns
```select_exprs``` can be column names of the table, function expressions over columns, or arithmetic expressions, up to a maximum of 256. When an ```interval``` or ```group by tags``` clause is used, the returned result always includes the timestamp column (as the first column) and the tag columns of the group by clause. A later version may support disabling the implicit columns of the group by clause, so that the output columns are controlled entirely by the select clause.
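For example, in the following sketch (table name from the examples above), the timestamp column is returned implicitly as the first column even though it is not selected:
```mysql
SELECT AVG(temperature) FROM temp_tb_1 INTERVAL(1h);
```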
#### Table (Super Table) List
The FROM keyword may be followed by a list of tables (super tables) or by the result of a subquery.
If the user's current database is not specified, a table name can be prefixed with its database name to use the table across databases, e.g. ```sample.temp_tb_1```:
```
SELECT * FROM sample.temp_tb_1;
------------------------------
use sample;
SELECT * FROM temp_tb_1;
```
Tables in the FROM clause can be given aliases to keep the SQL simpler:
```
SELECT t.ts FROM temp_tb_1 t ;
```
> Table aliases in the FROM clause are not yet supported
#### Special Functions
Some special query functions can be executed without a FROM clause. To get the current database, use database():
```
taos> SELECT database();
database() |
=================================
sample |
```
If no default database was specified at login and the ```use``` command has not been used to switch databases, NULL is returned:
```
taos> select database();
database() |
=================================
NULL |
```
To get the server and client version numbers:
```
SELECT client_version()
SELECT server_version()
```
Server status check statement. If the server is healthy, a number (e.g. 1) is returned; if the server is abnormal, an error code is returned. This SQL syntax is compatible with connection pools' checks on TDengine's status and with third-party tools' checks on the database server's status, and it avoids the connection losses in connection pools caused by using a wrong heartbeat-check SQL statement.
```
SELECT server_status()
SELECT server_status() AS result
```
#### Special Keywords in TAOS SQL
> TBNAME: in a super table query, TBNAME can be treated as a special tag column holding the name of each sub table involved in the query<br>
\_c0: denotes the first column of a table (super table)
#### Tips
To get the names of all sub tables of a super table along with their tag values:
```
SELECT TBNAME, location FROM temp_stable
```
To count the sub tables under a super table:
```
SELECT COUNT(TBNAME) FROM temp_stable
```
Both of the queries above only allow filter conditions on tag columns (TAGS) in the WHERE clause. For example:
```
taos> select count(tbname) from temp_stable;
count(tbname) |
======================
2 |
taos> select count(tbname) from temp_stable where deviceid > 60000;
count(tbname) |
======================
1 |
```
- Use * to return all columns, or specify column names explicitly. Arithmetic can be applied to numeric columns, and output columns can be given aliases.
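As a hedged sketch (the alias is illustrative), converting the temperature column to Fahrenheit under an alias:
```mysql
SELECT ts, temperature * 1.8 + 32 AS temp_f FROM temp_tb_1;
```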
@ -237,7 +424,7 @@ SELECT function_list FROM tb_name
### Aggregation Functions
TDengine supports aggregation queries over data. The supported aggregation and selection functions are as follows:
- **COUNT**
@ -260,13 +447,14 @@ TDengine supports aggregation queries over data. The supported aggregation and extraction functions
Applies to: tables, super tables.
- **TWA**
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause
```
Description: time-weighted average function; computes the time-weighted average of a column of a table/super table over a period of time.
Return type: double-precision floating point (Double).
Applicable fields: all fields except timestamp, binary, nchar, and bool.
Note: a time-weighted average (TWA) query must specify the _start time_ and _end time_ of the query window.
Applies to: tables, super tables.
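A hedged sketch (table and time range reuse the earlier examples), with the required time window given in the WHERE clause:
```mysql
SELECT TWA(temperature) FROM temp_tb_1
  WHERE ts >= '2019-04-28 14:22:07.000' AND ts <= '2019-04-28 14:22:12.000';
```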
@ -370,6 +558,14 @@ TDengine supports aggregation queries over data. The supported aggregation and extraction functions
Applicable fields: all fields except timestamp, binary, nchar, and bool.
Note: the value of *P* ranges over 0 ≤ *P* ≤ 100; *P* = 0 is equivalent to MIN, and *P* = 100 to MAX.
- **APERCENTILE**
```mysql
SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
功能说明统计表中某列的值百分比分位数与PERCENTILE函数相似但是返回近似结果。
返回结果数据类型: 双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
说明:*k*值取值范围0≤*k*≤100为0的时候等同于MIN为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数
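A hedged comparison on the example table; both statements compute the 90th percentile, with the approximate form being the recommended one:
```mysql
SELECT PERCENTILE(temperature, 90) FROM temp_tb_1;
SELECT APERCENTILE(temperature, 90) FROM temp_tb_1;
```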
- **LAST_ROW**

View File

@ -181,9 +181,10 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
```
Note: 1. For a table, a new record must have a timestamp larger than that of the last stored record; otherwise it is discarded and not inserted. If the timestamp is 0, the timestamp is set to the system time on the server.
2. The timestamp of the oldest record allowed to be inserted is the current server time minus the configured keep value (the number of days the data is retained), and the timestamp of the latest record allowed to be inserted is the current server time plus the configured days value (the time span, in days, over which a data file stores data). Both keep and days can be specified when creating the database; the default values are 3650 days and 10 days, respectively.
**IMPORT**: If you do want to insert a historical data record into a table, use the IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT.
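A minimal sketch (the table name and values are illustrative, assuming a two-column table of timestamp and reading):
```mysql
IMPORT INTO device_t1 VALUES (1554955268000, 20.5);
```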
## Data Query

View File

@ -2,15 +2,15 @@
## Directory Structure
During installation, the installer creates the following directories and files in the operating system:
| Directory/File | Description |
| ---------------------- | :------------------------------------------------|
| /etc/taos/taos.cfg | default [configuration file] |
| /usr/local/taos/driver | dynamic library directory |
| /var/lib/taos | default data directory; the location can be changed via the [configuration file] |
| /var/log/taos | default log directory; the location can be changed via the [configuration file] |
| /usr/local/taos/bin | executable directory |
### Executables
@ -19,33 +19,126 @@ All TDengine executables are stored in the _/usr/local/taos/bin_ directory by default
- _taosd_: the TDengine server executable
- _taos_: the TDengine shell executable
- _taosdump_: the data export tool
- *rmtaos*: a script that uninstalls TDengine. It removes all program and data files; run it with great care, and avoid it unless strictly necessary.
You can configure different data and log directories by modifying the system configuration file taos.cfg.
## Server Configuration
The TDengine background service program is `taosd`; by default it reads its configuration file from the directory `/etc/taos`. The directory can be specified with the command-line argument -c, for example:
```
taosd -c /home/user
```
which makes `taosd` read the configuration file taos.cfg from the `/home/user` directory at startup.
Only some important configuration parameters are listed below; for more, see the notes in the configuration file. For a detailed description of each parameter and its effect, see the preceding chapters. **Note: after changing the configuration, the *taosd* service must be restarted for the change to take effect.**
**privateIp**
- Default: the first IP address in the physical node's IP address list
The IP address on which services are provided.
**publicIp**
- Default: same as privateIp
On cloud platforms such as Alibaba Cloud, this is the public IP address; publicIp is mapped internally to the corresponding privateIp address (Enterprise Edition only).
**masterIp**
- Default: same as privateIp
The privateIp address of the first physical node in the cluster (Enterprise Edition only).
**secondIp**
- Default: same as privateIp
The privateIp address of the second physical node in the cluster (Enterprise Edition only).
**mgmtShellPort**
- Default: _6030_
TCP/UDP port used for communication between the management nodes of the database service and clients.
> Ports _6030_ - _6034_ are used for UDP communication; port _6030_ is also used for TCP.
**vnodeShellPort**
- Default: _6035_
TCP/UDP port used for communication between data nodes and clients.
> The five ports _6035_ - _6039_ are used for UDP communication; port _6035_ is also used for TCP.
**mgmtVnodePort**
- Default: _6040_
TCP/UDP port used for communication between management nodes and data nodes (Enterprise Edition only).
> The five ports _6040_ - _6044_ are used for UDP communication; port _6040_ is also used for TCP.
**vnodeVnodePort**
- Default: _6045_
TCP/UDP port used for communication between data nodes (Enterprise Edition only).
> The five ports _6045_ - _6049_ are used for UDP communication; port _6045_ is also used for TCP.
**mgmtMgmtPort**
- Default: _6050_
UDP port used for communication between management nodes (Enterprise Edition only).
**mgmtSyncPort**
- Default: _6050_
TCP port used for synchronization between management nodes (Enterprise Edition only).
**httpPort**
- Default: _6020_
Port used by the RESTful service; all HTTP requests (TCP) issue query/write requests to this port.
**dataDir**
- Default: /var/lib/taos
Data directory; all data files are written to this directory.
**logDir**
- Default: /var/log/taos
Log directory; the run logs of the client and server are written to this directory.
**maxUsers**
- Default: 10,000
Upper limit on the number of users that can be created.
**maxDbs**
- Default: 1,000
Upper limit on the number of databases that can be created.
**maxTables**
- Default: 650,000
Upper limit on the number of tables that can be created.
> The number of tables the system can create is bounded by several factors, so simply increasing this parameter does not directly raise it. For example, since creating each table consumes some cache space, with a fixed amount of available memory the total number of tables that can be created has a fixed upper bound.
**monitor**
- Default: 1 (enabled)
Switch for the server's internal system monitoring. Monitoring collects the load status of the physical node, including CPU, memory, disk, network bandwidth, and HTTP request counts, and stores the records in the `LOG` database. 0 disables the monitoring service; 1 enables it.
**numOfLogLines**
- Default: 10,000,000
Maximum number of lines allowed in a single log file.
**debugFlag**
- Default: 131 (output errors and warnings only)
Run-log switch for the system (server and client):
- 131: output errors and warnings only
- 135: output errors (ERROR), warnings (WARN), and information (Info)
Data from different application scenarios often has different characteristics: retention days, replica count, collection frequency, record size, number of collection points, compression, and so on can all differ. To achieve the highest storage efficiency, TDengine provides the following storage-related configuration parameters (a sketch of setting keep and days at database creation follows this list):
- days: the time span, in days, of the data stored in one data file
- keep: the number of days data is retained
- rows: the number of records in a file block
- comp: file compression flag, 0: off, 1: one-stage compression, 2: two-stage compression
- ctime: the maximum interval, in seconds, between data being written to memory and being flushed to disk
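As noted in the SQL documentation above, keep and days can also be specified when a database is created, overriding the values in taos.cfg. A minimal sketch (the database name is illustrative):
```mysql
CREATE DATABASE demo KEEP 365 DAYS 10;
```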
@ -66,19 +159,139 @@ The TDengine background service is provided by taosd; configuration parameters can be modified in taos.cfg
## Client Configuration
The TDengine interactive client application is taos (taos.exe on Windows). Like the server program, `taos` reads its settings from taos.cfg. If no configuration file path is specified at startup, taos reads `taos.cfg` from the default path `/etc/taos/`. To start `taos` with a specified configuration file:
```
taos -c /home/cfg/
```
**Note: the startup option specifies the directory containing the configuration file, not the configuration file itself**
If there is no configuration file under `/home/cfg/`, the program still starts and prints the following warning:
```plaintext
Welcome to the TDengine shell from linux, client version:1.6.4.0
option file:/home/cfg/taos.cfg not found, all options are set to system default
```
For more on using taos, see [Shell command-line program](#_TDengine_Shell命令行程序). This section mainly explains the parameters the taos client reads from the configuration file taos.cfg.
Client configuration parameters:
When starting taos, the IP address, port, user name, and password can also be given on the command line; otherwise they are read from taos.cfg.
**masterIP**
- Default: 127.0.0.1
The IP address of the TDengine server the client connects to; if unset, the client connects to the node at 127.0.0.1 by default. The following two commands are equivalent:
```
taos
taos -h 127.0.0.1
```
The IP address used is the masterIP value read from the configuration file.
**locale**
- Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API
TDengine provides a dedicated field type, `nchar`, for storing wide characters in non-ASCII encodings such as Chinese, Japanese, and Korean. Data written to an `nchar` field is uniformly encoded as `UCS4-LE` and sent to the server in that form. Note that **encoding correctness** is guaranteed by the client. Therefore, to store non-ASCII characters such as Chinese, Japanese, or Korean in `nchar` fields correctly, the client's encoding format must be set correctly.
Characters entered on the client use the operating system's current default encoding, which on Linux is mostly `UTF-8`, though on some Chinese systems it may be `GB18030` or `GBK`. In a docker environment the default encoding is `POSIX`; on Chinese-language Windows it is `CP936`. The client must set the character set it actually uses, i.e. the current encoding of the operating system it runs on, for the data in `nchar` fields to be converted correctly to `UCS4-LE`.
On Linux, locales are named as:
`<language>_<region>.<character set encoding>`
e.g. `zh_CN.UTF-8`, where zh means Chinese, CN means mainland China, and UTF-8 is the character set. The character set encoding tells the client how to convert and parse local strings. Linux and Mac OSX can determine the system's character encoding by setting the locale; since Windows locales do not follow the POSIX locale format, Windows uses a separate parameter, `charset`, to specify the encoding. `charset` can also be used on Linux.
**charset**
- Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API
If `charset` is not set in the configuration file, on Linux taos automatically reads the system's current locale at startup and extracts the charset encoding from it. If reading the locale fails, it tries to read the charset setting; if that also fails, **startup is aborted**.
On Linux the locale already contains the encoding information, so once the Linux locale is set correctly there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows the current encoding cannot be obtained from the locale. If the encoding cannot be read from the configuration file, `taos` defaults the encoding to `CP936`, which is equivalent to adding the following to the configuration file:
```
charset CP936
```
To adjust the character encoding, check the encoding your operating system currently uses and set it correctly in the configuration file.
On Linux, if both the locale and the charset are set and they disagree, the later setting overrides the earlier one.
```
locale zh_CN.UTF-8
charset GBK
```
Here the effective `charset` is `GBK`.
```
charset GBK
locale zh_CN.UTF-8
```
Here the effective `charset` is `UTF-8`.
**sockettype**
- Default: UDP
The socket type the client uses to connect to the server, either `UDP` or `TCP`.
When client-server communication has to cross a hostile network environment (public networks, the internet), when the client's connection to the database server is unstable, or when MTU issues cause UDP packet loss, the connection's socket type can be switched to `TCP`.
> Note: the client's socket type must match the server's; otherwise the client cannot connect to the database.
**compressMsgSize**
- Default: -1 (no compression)
The size threshold above which messages exchanged between client and server are compressed; the default of -1 disables compression. To compress messages, a value of 64330 bytes is recommended, i.e. only message bodies larger than 64330 bytes are compressed. Add the following option to the configuration file:
```
compressMsgSize 64330
```
Setting the option to 0 (`compressMsgSize 0`) compresses all messages.
**timezone**
- Default: the current time zone, obtained dynamically from the system
The time zone of the system the client runs on. To handle data written and queried across multiple time zones, TDengine uses Unix timestamps ([Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time)) to record and store time. The nature of Unix timestamps guarantees that, at any given moment, the timestamp is identical regardless of the time zone it was produced in. Note that the conversion to Unix timestamps is performed on the client; to ensure that other time representations convert to correct Unix timestamps, the correct time zone must be set.
On Linux, the client automatically reads the time zone configured in the system. The time zone can also be set in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid ways of setting the UTC+8 (China Standard Time) time zone.
The time zone setting affects the parsing of non-Unix-timestamp content in queried and written SQL statements (timestamp strings, the keyword `now`). For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In UTC+8, this SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC time zone, the SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the uncertainty of string time formats, Unix timestamps can be used directly. In addition, timestamp strings with an explicit time zone can be used in SQL statements, such as RFC3339-format strings (`2013-04-12T15:52:01.123+08:00`) or ISO-8601-format strings (`2013-04-12T15:52:01.123+0800`); converting these two strings to Unix timestamps is not affected by the system's time zone.
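A hedged sketch (reusing table_name from the examples above): the offset-qualified string below identifies the same instant regardless of the client's time zone:
```mysql
SELECT count(*) FROM table_name WHERE TS < '2019-04-11T12:01:08.000+08:00';
```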
**defaultUser**
- Default: root
The login user name; if none is specified when the client logs in, this user name is used automatically. By default, the following two commands are equivalent:
```
taos
taos -u root
```
The user name is the `defaultUser` option read from the configuration. If it is changed to `defaultUser abc`, the following two commands become equivalent:
```
taos
taos -u abc
```
**defaultPass**
- Default: taosdata
The login password; if none is specified when the client logs in, this password is used automatically. By default, the following two commands are equivalent:
```
taos
taos -ptaosdata
```
The TCP/UDP port and log configuration parameters are exactly the same as the server's. The command `taos -?` lists the options `taos` accepts.
## User Management
@ -124,6 +337,8 @@ TDengine also supports importing data into an existing table from a CSV file in the shell:
insert into tb1 file a.csv b.csv tb2 c.csv …
import into tb1 file a.csv b.csv tb2 c.csv …
```
> 注意导入的CSV文件不能够带表头, 且表的列与CSV文件的列需要严格对应。
> 同样还可以使用[样例数据导入工具][1]对数据进行横向和纵向扩展导入。
## Data Export
@ -191,6 +406,9 @@ KILL STREAM <stream-id>
## System Monitoring
After TDengine starts, it automatically creates a monitoring database, `LOG`, and periodically writes the server's CPU, memory, disk space, bandwidth, request count, disk read/write speed, slow queries, and other information into it. TDengine also records important system operations (such as logins and the creation and deletion of databases), along with all error and alarm messages, in the `LOG` database. System administrators can view the recorded load information through the client program and, in the Enterprise Edition, view visualized charts of the data in a browser.
Collection of this monitoring information is enabled by default, but it can be turned off or on via the `monitor` option in the configuration file.
[1]: https://github.com/taosdata/TDengine/tree/develop/importSampleData

View File

@ -63,28 +63,11 @@ CREATE TABLE QUERY_RES
## Data Subscription (Publisher/Subscriber)
Because the data is naturally time-series, a TDengine write (insert) is logically consistent with a publish (pub) in a messaging system: both can be viewed as inserting a new timestamped record. Internally, TDengine stores data strictly in monotonically increasing time order, so essentially every table in TDengine can be viewed as a standard message queue.
TDengine has built-in lightweight message subscription and push services. Using the APIs provided by the system, users can subscribe to one or more tables in a database with ordinary query statements. The subscription logic and operating state are maintained by the client, which periodically polls the server for new records; when new records arrive, the results are delivered to the client.
The state of TDengine's subscription and push services is maintained by the client, not by the TDengine server. Therefore, if an application restarts, it decides from which point in time to resume fetching the latest data.
For the subscription API documentation, see the [C/C++ data subscription interface](https://www.taosdata.com/cn/documentation/connector/#C/C++-%E6%95%B0%E6%8D%AE%E8%AE%A2%E9%98%85%E6%8E%A5%E5%8F%A3); the article [Usage of subscriptions in TDengine](https://www.taosdata.com/blog/2020/02/12/1277.html) walks through these APIs with a detailed example.
## Cache
TDengine uses a time-driven, First-In-First-Out (FIFO) cache management strategy, also called write-driven cache management. It differs from read-driven caching (Least-Recently-Used, LRU) in that it keeps the most recently written data directly in the system's cache; when the cache reaches a threshold, the earliest data is written to disk in batches. Generally speaking, for IoT data usage, users care most about the most recently produced data, i.e. the current state. TDengine takes full advantage of this characteristic by keeping the most recently arrived, current-state data in the cache.
@ -93,7 +76,7 @@ TDengine provides millisecond-level data retrieval through its query functions. Directly
TDengine allocates a fixed amount of memory as cache space, which can be sized according to application needs and hardware resources. With the cache space set appropriately, TDengine delivers extremely high write and query performance. Each virtual node (vnode) in TDengine is allocated an independent cache pool when it is created; each vnode manages its own pool, pools are not shared across vnodes, and all tables belonging to a vnode share that vnode's cache pool.
TDengine manages the memory pool in blocks, with data stored column-wise within a block. A vnode's memory pool is allocated block by block when the vnode is created, and each block is managed on a first-in, first-out basis. The blocks a table needs are allocated from its vnode's pool; the block size is set by the configuration parameter cache, the maximum number of blocks per table by tblocks, and the average number of blocks per table by ablocks. Hence for one vnode the total memory is `cache * ablocks * tables`. The cache block size should not be too small; a cache block needs to hold at least several tens of records to be efficient. The minimum for ablocks is 2, guaranteeing each table at least two blocks on average.
The last_row function quickly retrieves the last record of a table or super table, which is handy for showing each device's current status or latest reading on a dashboard.
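A minimal sketch, reusing the super table from the query examples earlier in this document:
```mysql
SELECT LAST_ROW(*) FROM temp_stable;
```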

View File

@ -62,7 +62,7 @@ Time series data is a sequence of data points over time. Inside a table, the dat
To reduce development complexity and improve data consistency, TDengine provides pub/sub functionality. To publish a message, you simply insert a record into a table. Compared with the popular messaging tool Kafka, you subscribe to a table or a SQL query statement instead of a topic. Once new data points arrive, TDengine notifies the application. The process works just like Kafka's.
The API documentation is in the [C/C++ subscription API](https://www.taosdata.com/en/documentation/connector/#C/C++-subscription-API) section, and you can find more information in the blog article (Chinese version only at present) [The usage of subscription](https://www.taosdata.com/blog/2020/02/12/1277.html).
## Caching
TDengine allocates a fixed-size buffer in memory, and newly arrived data is written into the buffer first. Every device or table gets one or more memory blocks. For typical IoT scenarios, the hot data is always the newly arrived data, which matters most for timely analysis. Based on this observation, TDengine manages the cache blocks with a First-In-First-Out strategy. If there is not enough space in the buffer, the oldest data is saved to disk first and then overwritten by newly arrived data. TDengine also guarantees that every device keeps at least one block of data in the buffer.

File diff suppressed because it is too large Load Diff

17
importSampleData/.gitignore vendored Normal file
View File

@ -0,0 +1,17 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
.idea/
.vscode/

661
importSampleData/LICENSE Normal file
View File

@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

245
importSampleData/README.md Normal file
View File

@ -0,0 +1,245 @@
## Sample Data Import
This tool quickly imports user-provided sample data files in `json` or `csv` format into `TDengine`; it currently runs only on Linux.
To exercise write and query performance, the sample data can be scaled horizontally and vertically. Horizontal scaling clones one table's (one collection point's) data into many tables; vertical scaling replicates the data within a time range of the sample along the time axis. The tool can also keep importing after the historical data catches up to the current time, which allows testing concurrent inserts and queries to simulate a production environment.
## Download and Install
### Download the executable
The tool is written in Go; for convenience, a prebuilt executable `bin/taosimport` is included in the project. Run `git clone https://github.com/taosdata/TDengine.git` (or download and extract the `ZIP` file), enter the sample import directory with `cd importSampleData`, and execute `bin/taosimport`.
### Build from Go source
Since the tool is written in Go, Go must be installed first (see [Getting Started][2]), along with TDengine's Go connector (see the [TDengine connector documentation][3]). After installation, the following commands build the executable `bin/taosimport`:
```shell
go get https://github.com/taosdata/TDengine/importSampleData
cd $GOPATH/src/github.com/taosdata/TDengine/importSampleData
go build -o bin/taosimport app/main.go
```
> Note: since TDengine's Go connector currently supports only Linux, the tool can for now run only on Linux systems.
> If go get fails, you can instead download and copy the `github.com/taosdata/TDengine/importSampleData` folder into the src directory under $GOPATH and then run `go build -o bin/taosimport app/main.go`.
## Usage
### Quick start
Running the command `bin/taosimport` performs the following actions with the default configuration:
1. Create the database
A database named `test_yyyyMMdd` is created automatically.
2. Create the super table
The super table is created according to the `sensor_info` scenario specified in the configuration file `config/cfg.toml`.
> Table creation statement: create table s_sensor_info(ts timestamp, temperature int, humidity float) tags(location binary(20), color binary(16), devgroup int);
3. Create sub tables automatically and insert data
Based on the `data/sensor_info.csv` sample data referenced by the `sensor_info` scenario in `config/cfg.toml`, the data is scaled horizontally `100` times (set via the hnum parameter), automatically creating `10*100=1000` sub tables (the default sample contains 10 sub tables with 100 rows each); `10` threads (set via the thread parameter) then import each sub table in `1000` loops (set via the vnum parameter).
Enter the `taos shell` and run the following queries to verify the import:
* Count the records
```shell
taos> use test_yyyyMMdd;
taos> select count(*) from s_sensor_info;
```
* Count the records per group
```shell
taos> select count(*) from s_sensor_info group by devgroup;
```
* Query aggregate metrics at 1h intervals
```shell
taos> select count(temperature), sum(temperature), avg(temperature) from s_sensor_info interval(1h);
```
* Query the latest metrics uploaded from a given location
```shell
taos> select last(*) from s_sensor_info where location = 'beijing';
```
> For more on queries and functions, see [Data Query][4]
### Detailed usage
Run `bin/taosimport -h` to see the detailed parameter usage:
* -cfg string
Path of the import configuration file, describing the sample data files and the corresponding TDengine settings. Default: `config/cfg.toml`.
* -cases string
Names of the scenarios to import, as listed under `[usecase]` in the file given by -cfg. Several scenarios can be imported at once, separated by commas, e.g. `sensor_info,camera_detection`. Default: `sensor_info`.
* -hnum int
Horizontal scaling factor for the sample data. If the sample data contains one sub table `t_0`, setting hnum to 2 creates the two sub tables `t_0, t_1` based on the original table names. Default: 100.
* -vnum int
Number of vertical replications of the sample data; 0 means that after the historical data reaches the current time, the import continues at the specified interval. Default: 1000, i.e. the sample data is replicated 1000 times along the time axis.
* -delay int
Interval of the continuous import when vnum is 0, in ms. Default: half of the smallest record interval across all scenarios.
* -tick int
Interval at which statistics are printed. Default: 2000 ms.
* -save int
Whether to save statistics into the statistic table in tdengine: 1 yes, 0 no. Default: 0.
* -savetb int
Table name for the statistics when save is 1. Default: statistic.
* -auto int
Whether to generate the primary-key timestamps of the sample data automatically: 1 yes, 0 no. Default: 0.
* -start string
Start time of the imported records, in the format `"yyyy-MM-dd HH:mm:ss.SSS"`. If unset, the smallest time in the sample data is used; if set, the primary-key times in the sample data are ignored and the import proceeds from the given start. If auto is 1, start must be set. Default: empty.
* -interval int
Time interval between imported records; only effective with `auto=1`, otherwise the interval is computed from the sample data. In milliseconds. Default: 1000.
* -thread int
Number of threads performing the import. Default: 10.
* -batch int
Batch size of the import, i.e. how many records one write operation contains. Default: 100.
* -host string
IP of the target TDengine server. Default: 127.0.0.1.
* -port int
Port of the target TDengine server. Default: 6030.
* -user string
TDengine user name for the import. Default: root.
* -password string
Password of the TDengine user. Default: taosdata.
* -dropdb int
Whether to drop the database before importing: 1 yes, 0 no. Default: 0.
* -db string
Name of the target TDengine database. Default: test_yyyyMMdd.
* -dbparam string
Optional parameters for creating the database automatically when it does not exist, e.g. `days 10 cache 16000 ablocks 4`. Default: empty.
### Common usage examples
* `bin/taosimport -cfg config/cfg.toml -cases sensor_info,camera_detection -hnum 1 -vnum 10`
Imports the data of the sensor_info and camera_detection scenarios 10 times each.
* `bin/taosimport -cfg config/cfg.toml -cases sensor_info -hnum 2 -vnum 0 -start "2019-12-12 00:00:00.000" -interval 5000`
Scales the sensor_info scenario data horizontally by a factor of 2 and imports it starting from `2019-12-12 00:00:00.000` with a record interval of 5000 ms; after reaching the current time, the import continues automatically.
### The config/cfg.toml configuration file
``` toml
# sensor scenario
[sensor_info] # scenario name
format = "csv" # sample data file format, json or csv; the data must at least contain the fields referenced by subTableName, tags and fields.
filePath = "data/sensor_info.csv" # sample data file path; the program cycles through the data in this file
separator = "," # field separator of the csv sample file, comma by default
stname = "sensor_info" # super table name
subTableName = "devid" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, extended tables t_subTableName_stname_i.
timestamp = "ts" # field in fields that serves as the primary key; its type must be timestamp
timestampType="millisecond" # whether the primary-key time field of the sample data is in millisecond or dateTime format
#timestampTypeFormat = "2006-01-02 15:04:05.000" # date-time layout of the primary key (Go's reference time layout); required when timestampType is dateTime
tags = [
# tag list: name is the tag name, type is the tag type
{ name = "location", type = "binary(20)" },
{ name = "color", type = "binary(16)" },
{ name = "devgroup", type = "int" },
]
fields = [
# field list: name is the field name, type is the field type
{ name = "ts", type = "timestamp" },
{ name = "temperature", type = "int" },
{ name = "humidity", type = "float" },
]
# camera detection scenario
[camera_detection] # scenario name
format = "json" # sample data file format, json or csv; the data must at least contain the fields referenced by subTableName, tags and fields.
filePath = "data/camera_detection.json" # sample data file path; the program cycles through the data in this file
#separator = "," # field separator of the csv sample file, comma by default; can be omitted for json files
stname = "camera_detection" # super table name
subTableName = "sensor_id" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, extended tables t_subTableName_stname_i.
timestamp = "ts" # field in fields that serves as the primary key; its type must be timestamp
timestampType="dateTime" # whether the primary-key time field of the sample data is in millisecond or dateTime format
timestampTypeFormat = "2006-01-02 15:04:05.000" # date-time layout of the primary key (Go's reference time layout); required when timestampType is dateTime
tags = [
# tag list: name is the tag name, type is the tag type
{ name = "home_id", type = "binary(30)" },
{ name = "object_type", type = "int" },
{ name = "object_kind", type = "binary(20)" },
]
fields = [
# field list: name is the field name, type is the field type
{ name = "ts", type = "timestamp" },
{ name = "states", type = "tinyint" },
{ name = "battery_voltage", type = "float" },
]
# other cases
```
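For orientation, the sensor_info section above should map onto a super table roughly like the following; this is a hand-written sketch of the expected schema, not output generated by the tool:
```shell
taos> create table if not exists sensor_info (ts timestamp, temperature int, humidity float)
      tags (location binary(20), color binary(16), devgroup int);
```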
### Sample data format
#### json
When format="json" is set for a scenario in `config/cfg.toml`, the sample data file must provide the values of all fields listed in tags and fields. The sample data looks like:
```json
{"home_id": "603", "sensor_id": "s100", "ts": "2019-01-01 00:00:00.000", "object_type": 1, "object_kind": "night", "battery_voltage": 0.8, "states": 1}
{"home_id": "604", "sensor_id": "s200", "ts": "2019-01-01 00:00:00.000", "object_type": 2, "object_kind": "day", "battery_voltage": 0.6, "states": 0}
```
#### csv
When format="csv" is set for a scenario in `config/cfg.toml`, the sample data file must contain a header row and the corresponding data rows; the field separator is given by the scenario's `separator` setting, comma by default. For example:
```csv
devid,location,color,devgroup,ts,temperature,humidity
0, beijing, white, 0, 1575129600000, 16, 19.405091
0, beijing, white, 0, 1575129601000, 22, 14.377142
```
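After an import run, the result can be spot-checked from the taos shell. A minimal sketch, assuming the default test_yyyyMMdd database naming described earlier and subtable names derived from the devid field (e.g. t_0_sensor_info):
```shell
taos> use test_20191212;
taos> show tables;
taos> select count(*) from sensor_info;
```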
[1]: https://github.com/taosdata/TDengine
[2]: https://golang.org/doc/install
[3]: https://www.taosdata.com/cn/documentation/connector/#Go-Connector
[4]: https://www.taosdata.com/cn/documentation/taos-sql/#%E6%95%B0%E6%8D%AE%E6%9F%A5%E8%AF%A2

importSampleData/app/main.go Normal file

File diff suppressed because it is too large

importSampleData/bin/taosimport Executable file

Binary file not shown.

View File

@ -0,0 +1,51 @@
# sensor scenario
[sensor_info] # scenario name
format = "csv" # sample data file format, json or csv; the data must at least contain the fields referenced by subTableName, tags and fields.
filePath = "data/sensor_info.csv" # sample data file path; the program cycles through the data in this file
separator = "," # field separator of the csv sample file, comma by default
stname = "sensor_info" # super table name
subTableName = "devid" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, extended tables t_subTableName_stname_i.
timestamp = "ts" # field in fields that serves as the primary key; its type must be timestamp
timestampType="millisecond" # whether the primary-key time field of the sample data is in millisecond or dateTime format
#timestampTypeFormat = "2006-01-02 15:04:05.000" # date-time layout of the primary key (Go's reference time layout); required when timestampType is dateTime
tags = [
# tag list: name is the tag name, type is the tag type
{ name = "location", type = "binary(20)" },
{ name = "color", type = "binary(16)" },
{ name = "devgroup", type = "int" },
]
fields = [
# field list: name is the field name, type is the field type
{ name = "ts", type = "timestamp" },
{ name = "temperature", type = "int" },
{ name = "humidity", type = "float" },
]
# camera detection scenario
[camera_detection] # scenario name
format = "json" # sample data file format, json or csv; the data must at least contain the fields referenced by subTableName, tags and fields.
filePath = "data/camera_detection.json" # sample data file path; the program cycles through the data in this file
#separator = "," # field separator of the csv sample file, comma by default; can be omitted for json files
stname = "camera_detection" # super table name
subTableName = "sensor_id" # field of the sample data used as part of the subtable name; subtables are named t_subTableName_stname, extended tables t_subTableName_stname_i.
timestamp = "ts" # field in fields that serves as the primary key; its type must be timestamp
timestampType="dateTime" # whether the primary-key time field of the sample data is in millisecond or dateTime format
timestampTypeFormat = "2006-01-02 15:04:05.000" # date-time layout of the primary key (Go's reference time layout); required when timestampType is dateTime
tags = [
# tag list: name is the tag name, type is the tag type
{ name = "home_id", type = "binary(30)" },
{ name = "object_type", type = "int" },
{ name = "object_kind", type = "binary(20)" },
]
fields = [
# field list: name is the field name, type is the field type
{ name = "ts", type = "timestamp" },
{ name = "states", type = "tinyint" },
{ name = "battery_voltage", type = "float" },
]
# other case

View File

@ -0,0 +1,380 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 7,
"links": [],
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": null,
"format": "celsius",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "lastest_temperature",
"refId": "A",
"sql": "select ts, temp from test.stream_temp_last where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "20,30",
"timeFrom": null,
"timeShift": null,
"title": "最新温度",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"datasource": null,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 8,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"decimals": 2,
"mappings": [],
"max": 100,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
],
"title": ""
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "maxHumidity",
"refId": "A",
"sql": "select ts, humidity from test.stream_humidity_max where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "最大湿度",
"type": "gauge"
},
{
"aliasColors": {},
"bars": true,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 10,
"w": 12,
"x": 0,
"y": 8
},
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": false,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "avgTemperature",
"refId": "A",
"sql": "select ts, temp from test.stream_temp_avg where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "平均温度",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "celsius",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 10,
"w": 12,
"x": 12,
"y": 8
},
"id": 10,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "max",
"refId": "A",
"sql": "select ts, max_temp from test.stream_sensor where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "avg",
"refId": "B",
"sql": "select ts, avg_temp from test.stream_sensor where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "min",
"refId": "C",
"sql": "select ts, min_temp from test.stream_sensor where ts >= $from and ts < $to",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "某传感器",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "celsius",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 20,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "sensor_info",
"uid": "dGSoaTLWz",
"version": 2
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,66 @@
package dataimport
import (
"encoding/json"
"fmt"
"path/filepath"
"sync"
"github.com/pelletier/go-toml"
)
var (
cfg Config
once sync.Once
)
// Config includes all scene import configs
type Config struct {
UserCases map[string]CaseConfig
}
// CaseConfig includes the sample data config and the TDengine config
type CaseConfig struct {
Format string
FilePath string
Separator string
Stname string
SubTableName string
Timestamp string
TimestampType string
TimestampTypeFormat string
Tags []FieldInfo
Fields []FieldInfo
}
// FieldInfo is field or tag info
type FieldInfo struct {
Name string
Type string
}
// LoadConfig loads the config from the specified file
func LoadConfig(filePath string) Config {
once.Do(func() {
filePath, err := filepath.Abs(filePath)
if err != nil {
panic(err)
}
fmt.Printf("parse toml file once. filePath: %s\n", filePath)
tree, err := toml.LoadFile(filePath)
if err != nil {
panic(err)
}
bytes, err := json.Marshal(tree.ToMap())
if err != nil {
panic(err)
}
err = json.Unmarshal(bytes, &cfg.UserCases)
if err != nil {
panic(err)
}
})
return cfg
}

View File

@ -5,14 +5,39 @@
# #
########################################################
# master IP for TDengine system
# masterIp 127.0.0.1
# Internal IP address of the server, which can be acquired by using ifconfig command.
# internalIp 127.0.0.1
# second IP for TDengine system, for cluster version only
# secondIp 127.0.0.1
# IP address of the server
# privateIp 127.0.0.1
# public IP of the server on which TDengine is deployed
# this IP is assigned by the cloud service provider, for cluster version only
# publicIp 127.0.0.1
# network is bound to 0.0.0.0
# anyIp 1
# set socket type ("udp" and "tcp")
# the server and client should have the same socket type. Otherwise, connect will fail
# sockettype udp
# client local IP
# localIp 127.0.0.1
# data file's directory
# for the cluster version, data file's directory is configured this way
# option mount_path tier_level
# dataDir /mnt/disk1/taos 0
# dataDir /mnt/disk2/taos 0
# dataDir /mnt/disk3/taos 0
# dataDir /mnt/disk4/taos 0
# dataDir /mnt/disk5/taos 0
# dataDir /mnt/disk6/taos 1
# dataDir /mnt/disk7/taos 1
# for the stand-alone version, data file's directory is configured this way
# dataDir /var/lib/taos
# log file's directory
@ -27,6 +52,18 @@
# port for DNode connect to Client, default udp[6035-6039] tcp[6035]
# vnodeShellPort 6035
# port for MNode connect to VNode, default udp[6040-6044] tcp[6040], for cluster version only
# mgmtVnodePort 6040
# port for DNode connect to DNode, default tcp[6045], for cluster version only
# vnodeVnodePort 6045
# port for MNode connect to MNode, default udp[6050], for cluster version only
# mgmtMgmtPort 6050
# port sync file MNode and MNode, default tcp[6050], for cluster version only
# mgmtSyncPort 6050
# number of threads per CPU core
# numOfThreadsPerCore 1
@ -54,11 +91,7 @@
# interval of system monitor
# monitorInterval 60
# set socket type("udp" and "tcp").
# The server and client should have the same socket type. Otherwise, connect will fail.
# sockettype udp
# The compressed rpc message, option:
# the compressed rpc message, option:
# -1 (no compression)
# 0 (all message compressed),
# > 0 (rpc message body which larger than this value will be compressed)
@ -73,12 +106,18 @@
# commit interval, unit is second
# ctime 3600
# interval of DNode report status to MNode, unit is Second
# interval of DNode report status to MNode, unit is Second, for cluster version only
# statusInterval 1
# interval of Shell send HB to MNode, unit is Second
# shellActivityTimer 3
# interval of DNode send HB to DNode, unit is Second, for cluster version only
# vnodePeerHBTimer 1
# interval of MNode send HB to MNode, unit is Second, for cluster version only
# mgmtPeerHBTimer 1
# time to keep MeterMeta in Cache, seconds
# meterMetaKeepTimer 7200
@ -94,12 +133,21 @@
# max number of tables
# maxTables 650000
# max number of Dnodes, for cluster version only
# maxDnodes 1000
# Max number of VGroups, for cluster version only
# maxVGroups 1000
# system locale
# locale en_US.UTF-8
# default system charset
# charset UTF-8
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# enable/disable commit log
# clog 1
@ -115,6 +163,9 @@
# number of days to keep DB file
# keep 3650
# number of replications, for cluster version only
# replications 1
# client default database(database should be created)
# defaultDB
@ -136,18 +187,36 @@
# max connection to Vnode
# maxVnodeConnections 10000
# start http service in the cluster
# number of vnodes an mnode is counted as during load balancing, for cluster version only
# mgmtEqualVnodeNum 4
# number of seconds allowed for a dnode to be offline, for cluster version only
# offlineThreshold 864000
# start http service
# http 1
# start system monitor module in the cluster
# start system monitor module
# monitor 1
# maximum number of rows returned by the restful interface
# restfulRowLimit 10240
# number of threads used to process http requests
# httpMaxThreads 2
# pre-allocated number of http sessions
# httpCacheSessions 100
# whether to enable HTTP compression transmission
# httpEnableCompress 0
# the delayed time for launching each continuous query. 10% of the whole computing time window by default.
# streamCompDelayRatio 0.1
# the max allowed delayed time for launching continuous query. 20ms by default
# tsMaxStreamComputDelay 20000
# whether the telegraf table name contains the number of tags and the number of fields
# telegrafUseFieldNum 0

View File

@ -1,15 +1,20 @@
#!/bin/bash
#
# Generate deb package for ubuntu
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/debworkroom"
#echo "curr_dir: ${curr_dir}"
@ -63,7 +68,25 @@ debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
debname="TDengine-"${tdengine_ver}".deb"
if [ "$verMode" == "cluster" ]; then
debname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
debname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
debname=${debname}-${verType}".deb"
elif [ "$verType" == "stable" ]; then
debname=${debname}".deb"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
# make deb package
dpkg -b ${pkg_dir} $debname

View File

@ -5,11 +5,67 @@
set -e
#set -x
armver=$1
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
# -V [stable | beta]
# -l [full | lite]
# set parameters by default value
verMode=edge # [cluster, edge]
verType=stable # [stable, beta]
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...]
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...]
pagMode=full # [full | lite]
while getopts "hv:V:c:o:l:" arg
do
case $arg in
v)
#echo "verMode=$OPTARG"
verMode=$( echo $OPTARG )
;;
V)
#echo "verType=$OPTARG"
verType=$(echo $OPTARG)
;;
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
l)
#echo "pagMode=$OPTARG"
pagMode=$(echo $OPTARG)
;;
o)
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -v [cluster | edge] -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | ...] -V [stable | beta] -l [full | lite]"
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode}"
curr_dir=$(pwd)
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/..)"
top_dir="$(readlink -f ${script_dir}/..)"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/..
fi
versioninfo="${top_dir}/src/util/src/version.c"
csudo=""
@ -109,29 +165,50 @@ build_time=$(date +"%F %R")
echo "char version[64] = \"${version}\";" > ${versioninfo}
echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo}
echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
if [ "$verMode" != "cluster" ]; then
echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo}
else
enterprise_dir="${top_dir}/../enterprise"
cd ${enterprise_dir}
echo "char gitinfoOfInternal[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
cd ${curr_dir}
fi
echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
echo "" >> ${versioninfo}
tmp_version=$(echo $version | tr -s "." "_")
if [ "$verMode" == "cluster" ]; then
libtaos_info=${tmp_version}_${osType}_${cpuType}
else
libtaos_info=edge_${tmp_version}_${osType}_${cpuType}
fi
if [ "$verType" == "beta" ]; then
libtaos_info=${libtaos_info}_${verType}
fi
echo "void libtaos_${libtaos_info}() {};" >> ${versioninfo}
# 2. cmake executable file
compile_dir="${top_dir}/debug"
if [ -d ${compile_dir} ]; then
${csudo} rm -rf ${compile_dir}
fi
if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${compile_dir}
else
mkdir -p ${compile_dir}
fi
cd ${compile_dir}
# arm only support lite ver
if [ -z "$armver" ]; then
cmake ../
elif [ "$armver" == "arm64" ]; then
cmake ../ -DARMVER=arm64
elif [ "$armver" == "arm32" ]; then
cmake ../ -DARMVER=arm32
# check support cpu type
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then
if [ "$verMode" != "cluster" ]; then
cmake ../ -DCPUTYPE=${cpuType} -DPAGMODE=${pagMode}
else
echo "input parameter error!!!"
return
cmake ../../ -DCPUTYPE=${cpuType}
fi
else
echo "input cpuType=${cpuType} error!!!"
exit 1
fi
make
@ -143,28 +220,36 @@ cd ${curr_dir}
#osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"
echo "do deb package for the ubuntu system"
if [ "$osType" != "Darwin" ]; then
if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]]; then
echo "====do deb package for the ubuntu system===="
output_dir="${top_dir}/debs"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version}
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
echo "do rpm package for the centos system"
echo "====do rpm package for the centos system===="
output_dir="${top_dir}/rpms"
if [ -d ${output_dir} ]; then
${csudo} rm -rf ${output_dir}
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version}
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${cpuType} ${osType} ${verMode} ${verType}
fi
echo "do tar.gz package for all systems"
echo "====do tar.gz package for all systems===="
cd ${script_dir}/tools
${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${armver}
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${armver}
${csudo} ./makepkg.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
${csudo} ./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode}
else
cd ${script_dir}/tools
./makeclient.sh ${compile_dir} ${version} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType}
fi
# 4. Clean up temporary compile directories
#${csudo} rm -rf ${compile_dir}

View File

@ -2,16 +2,20 @@
#
# Generate rpm package for centos
#set -e
set -e
# set -x
#curr_dir=$(pwd)
compile_dir=$1
output_dir=$2
tdengine_ver=$3
cpuType=$4
osType=$5
verMode=$6
verType=$7
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
pkg_dir="${top_dir}/rpmworkroom"
spec_file="${script_dir}/tdengine.spec"
@ -54,9 +58,30 @@ ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, then clean temp dir
# copy rpm package to output_dir, and modify package name, then clean temp dir
#${csudo} cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
if [ "$verMode" == "cluster" ]; then
rpmname="TDengine-server-"${tdengine_ver}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
rpmname="TDengine-server"-${tdengine_ver}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$verType" == "beta" ]; then
rpmname=${rpmname}-${verType}".rpm"
elif [ "$verType" == "stable" ]; then
rpmname=${rpmname}".rpm"
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/${rpmname}
cd ..
${csudo} rm -rf ${pkg_dir}

View File

@ -26,7 +26,7 @@ MAX_OPEN_FILES=65535
# Default program options
NAME=taosd
PROG=/usr/local/bin/taos/taosd
PROG=/usr/local/taos/bin/taosd
USER=root
GROUP=root

packaging/tools/get_os.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
#
# This file is used to get the OS name from the /etc/*-release files on linux
# systems, for use by the build and packaging scripts.
set -e
# set -x
# -----------------------Variables definition---------------------
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
len=$(echo ${#OS})
len=$((len-2))
retval=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
echo -ne $retval

packaging/tools/install.sh Executable file → Normal file
View File

@ -6,8 +6,11 @@
set -e
#set -x
verMode=edge
pagMode=full
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -m "$0"))
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
@ -27,7 +30,12 @@ install_main_dir="/usr/local/taos"
# old bin dir
bin_dir="/usr/local/taos/bin"
# v1.5 jar dir
v15_java_app_dir="/usr/local/lib/taos"
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
@ -41,6 +49,8 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
update_flag=0
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
@ -69,23 +79,24 @@ osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
echo "this is ubuntu system"
echo "This is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian" ; then
echo "this is debian system"
echo "This is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin" ; then
echo "this is Kylin system"
echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos" ; then
echo "this is centos system"
echo "This is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora" ; then
echo "this is fedora system"
echo "This is fedora system"
os_type=2
else
echo "this is other linux system"
os_type=0
echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, "
echo "please feel free to contact taosdata.com for support."
os_type=1
fi
function kill_taosd() {
@ -106,6 +117,9 @@ function install_main_path() {
${csudo} mkdir -p ${install_main_dir}/examples
${csudo} mkdir -p ${install_main_dir}/include
${csudo} mkdir -p ${install_main_dir}/init.d
if [ "$verMode" == "cluster" ]; then
${csudo} mkdir -p ${nginx_dir}
fi
}
function install_bin() {
@ -124,16 +138,30 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
if [ "$verMode" == "cluster" ]; then
${csudo} cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo} chmod 0555 ${nginx_dir}/*
${csudo} mkdir -p ${nginx_dir}/logs
${csudo} chmod 777 ${nginx_dir}/sbin/nginx
fi
}
function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -rf ${v15_java_app_dir} || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
if [ "$verMode" == "cluster" ]; then
# Compatible with version 1.5
${csudo} mkdir -p ${v15_java_app_dir}
${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar
${csudo} chmod 777 ${v15_java_app_dir} || :
fi
}
function install_header() {
@ -154,6 +182,57 @@ function install_config() {
${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
if [ "$verMode" == "cluster" ]; then
[ ! -z $1 ] && return 0 || : # only install client
if ((${update_flag}==1)); then
return 0
fi
IP_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
IP_PATTERN="\b$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\.$IP_FORMAT\b"
echo
echo -e -n "${GREEN}Enter the IP address of an existing TDengine cluster node to join${NC} OR ${GREEN}leave it blank to build one${NC} :"
read masterIp
while true; do
if [ ! -z "$masterIp" ]; then
# check the format of the masterIp
if [[ $masterIp =~ $IP_PATTERN ]]; then
# Write the first IP to configuration file
sudo sed -i -r "s/#*\s*(masterIp\s*).*/\1$masterIp/" ${cfg_dir}/taos.cfg
# Get the second IP address
echo
echo -e -n "${GREEN}Enter the IP address of another node in cluster${NC} OR ${GREEN}leave it blank to skip${NC}: "
read secondIp
while true; do
if [ ! -z "$secondIp" ]; then
if [[ $secondIp =~ $IP_PATTERN ]]; then
# Write the second IP to configuration file
sudo sed -i -r "s/#*\s*(secondIp\s*).*/\1$secondIp/" ${cfg_dir}/taos.cfg
break
else
read -p "Please enter the correct IP address: " secondIp
fi
else
break
fi
done
break
else
read -p "Please enter the correct IP address: " masterIp
fi
else
break
fi
done
fi
}
@ -175,7 +254,9 @@ function install_connector() {
}
function install_examples() {
if [ -d ${script_dir}/examples ]; then
${csudo} cp -rf ${script_dir}/examples/* ${install_main_dir}/examples
fi
}
function clean_service_on_sysvinit() {
@ -240,6 +321,18 @@ function clean_service_on_systemd() {
${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
if systemctl is-active --quiet nginxd; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
fi
}
# taos:2345:respawn:/etc/init.d/taosd start
@ -269,6 +362,36 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}"
${csudo} systemctl enable taosd
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}"
${csudo} bash -c "echo 'After=network-online.target' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Wants=network-online.target' >> ${nginx_service_config}"
${csudo} bash -c "echo >> ${nginx_service_config}"
${csudo} bash -c "echo '[Service]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Type=forking' >> ${nginx_service_config}"
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStartSec=0' >> ${nginx_service_config}"
${csudo} bash -c "echo 'StandardOutput=null' >> ${nginx_service_config}"
${csudo} bash -c "echo 'Restart=always' >> ${nginx_service_config}"
${csudo} bash -c "echo 'StartLimitBurst=3' >> ${nginx_service_config}"
${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${nginx_service_config}"
${csudo} bash -c "echo >> ${nginx_service_config}"
${csudo} bash -c "echo '[Install]' >> ${nginx_service_config}"
${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${nginx_service_config}"
if ! ${csudo} systemctl enable nginxd &> /dev/null; then
${csudo} systemctl daemon-reexec
${csudo} systemctl enable nginxd
fi
${csudo} systemctl start nginxd
fi
}
function install_service() {
@ -357,13 +480,30 @@ function update_TDengine() {
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
if [ -z $1 ]; then
install_bin
install_service
install_config
if [ "$verMode" == "cluster" ]; then
# Check if openresty is installed
openresty_work=false
# Check if nginx is installed successfully
if type curl &> /dev/null; then
if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
echo -e "\033[44;32;1mNginx for TDengine is updated successfully!${NC}"
openresty_work=true
else
echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m"
fi
fi
fi
echo
echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
echo
@ -376,7 +516,15 @@ function update_TDengine() {
echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}"
fi
if [ "$verMode" == "cluster" ]; then
if [ ${openresty_work} = 'true' ]; then
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
else
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
fi
else
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
fi
echo
echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
else
@ -409,13 +557,29 @@ function install_TDengine() {
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
if [ -z $1 ]; then # install service and client
# For installing new
install_bin
install_service
if [ "$verMode" == "cluster" ]; then
openresty_work=false
# Check if nginx is installed successfully
if type curl &> /dev/null; then
if curl -sSf http://127.0.0.1:${nginx_port} &> /dev/null; then
echo -e "\033[44;32;1mNginx for TDengine is installed successfully!${NC}"
openresty_work=true
else
echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m"
fi
fi
fi
install_config
# Ask if to start the service
@ -431,7 +595,16 @@ function install_TDengine() {
echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}"
fi
if [ "$verMode" == "cluster" ]; then
if [ ${openresty_work} = 'true' ]; then
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
else
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
fi
else
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
fi
echo
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
else # Only install client
@ -450,6 +623,7 @@ function install_TDengine() {
if [ -z $1 ]; then
# Install server and client
if [ -x ${bin_dir}/taosd ]; then
update_flag=1
update_TDengine
else
install_TDengine
@ -457,6 +631,7 @@ if [ -z $1 ]; then
else
# Only install client
if [ -x ${bin_dir}/taos ]; then
update_flag=1
update_TDengine client
else
install_TDengine client

View File

@ -7,18 +7,36 @@ set -e
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -m "$0"))
osType=Linux
pagMode=full
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
data_dir="/var/lib/taos"
log_dir="~/TDengineLog"
fi
log_link_dir="/usr/local/taos/log"
cfg_install_dir="/etc/taos"
if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi
#install main path
install_main_dir="/usr/local/taos"
@ -26,6 +44,8 @@ install_main_dir="/usr/local/taos"
# old bin dir
bin_dir="/usr/local/taos/bin"
# v1.5 jar dir
v15_java_app_dir="/usr/local/lib/taos"
# Color setting
RED='\033[0;31m'
@ -63,31 +83,40 @@ function install_main_path() {
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" == "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
}
function clean_lib() {
sudo rm -f /usr/lib/libtaos.so || :
sudo rm -f /usr/lib/libtaos.* || :
sudo rm -rf ${lib_dir} || :
}
function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -rf ${v15_java_app_dir} || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
if [ "$osType" != "Darwin" ]; then
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
else
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -s ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
}
function install_header() {
@ -113,8 +142,12 @@ function install_config() {
function install_log() {
${csudo} rm -rf ${log_dir} || :
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
fi
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
@ -148,7 +181,9 @@ function update_TDengine() {
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
install_bin
install_config
@ -173,7 +208,9 @@ function install_TDengine() {
install_log
install_header
install_lib
if [ "$pagMode" != "lite" ]; then
install_connector
fi
install_examples
install_bin
install_config

View File

@ -9,19 +9,37 @@ set -e
# -----------------------Variables definition---------------------
source_dir=$1
binary_dir=$2
script_dir=$(dirname $(readlink -m "$0"))
osType=$3
if [ "$osType" != "Darwin" ]; then
script_dir=$(dirname $(readlink -f "$0"))
else
script_dir=${source_dir}/packaging/tools
fi
# Dynamic directory
data_dir="/var/lib/taos"
if [ "$osType" != "Darwin" ]; then
log_dir="/var/log/taos"
else
log_dir="~/TDengineLog"
fi
data_link_dir="/usr/local/taos/data"
log_link_dir="/usr/local/taos/log"
cfg_install_dir="/etc/taos"
if [ "$osType" != "Darwin" ]; then
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
else
bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/local/lib"
inc_link_dir="/usr/local/include"
fi
#install main path
install_main_dir="/usr/local/taos"
@ -43,25 +61,61 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
if [ "$osType" != "Darwin" ]; then
initd_mod=0
service_mod=2
if pidof systemd &> /dev/null; then
service_mod=0
elif $(which insserv &> /dev/null); then
elif $(which service &> /dev/null); then
service_mod=1
service_config_dir="/etc/init.d"
if $(which chkconfig &> /dev/null); then
initd_mod=1
service_config_dir="/etc/init.d"
elif $(which update-rc.d &> /dev/null); then
service_mod=1
elif $(which insserv &> /dev/null); then
initd_mod=2
service_config_dir="/etc/init.d"
elif $(which update-rc.d &> /dev/null); then
initd_mod=3
else
service_mod=2
fi
else
service_mod=2
fi
# get the operating system type for using the corresponding init file
# ubuntu/debian(deb), centos/fedora(rpm), others: opensuse, redhat, ..., no verification
#osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
osinfo=$(cat /etc/os-release | grep "NAME" | cut -d '"' -f2)
#echo "osinfo: ${osinfo}"
os_type=0
if echo $osinfo | grep -qwi "ubuntu" ; then
echo "this is ubuntu system"
os_type=1
elif echo $osinfo | grep -qwi "debian" ; then
echo "this is debian system"
os_type=1
elif echo $osinfo | grep -qwi "Kylin" ; then
echo "this is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "centos" ; then
echo "this is centos system"
os_type=2
elif echo $osinfo | grep -qwi "fedora" ; then
echo "this is fedora system"
os_type=2
else
echo "${osinfo}: This is an officially unverified linux system, If there are any problems with the installation and operation, "
echo "please feel free to contact taosdata.com for support."
os_type=1
fi
fi
function kill_taosd() {
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
${csudo} kill -9 ${pid} || :
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
}
function install_main_path() {
@ -74,27 +128,46 @@ function install_main_path() {
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} mkdir -p ${install_main_dir}/examples
${csudo} mkdir -p ${install_main_dir}/include
if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${install_main_dir}/init.d
fi
}
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} cp -r ${binary_dir}/build/bin/* ${install_main_dir}/bin
if [ "$osType" != "Darwin" ]; then
${csudo} cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
else
${csudo} cp -r ${script_dir}/remove_client.sh ${install_main_dir}/bin
fi
${csudo} chmod 0555 ${install_main_dir}/bin/*
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
else
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :
fi
}
function install_lib() {
@ -102,9 +175,15 @@ function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
versioninfo=$(${script_dir}/get_version.sh ${source_dir}/src/util/src/version.c)
if [ "$osType" != "Darwin" ]; then
${csudo} cp ${binary_dir}/build/lib/libtaos.so.${versioninfo} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.so.${versioninfo} ${lib_link_dir}/libtaos.so.1
${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
else
${csudo} cp ${binary_dir}/build/lib/libtaos.${versioninfo}.dylib ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.${versioninfo}.dylib ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
}
function install_header() {
@ -130,7 +209,12 @@ function install_config() {
function install_log() {
${csudo} rm -rf ${log_dir} || :
if [ "$osType" != "Darwin" ]; then
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}
else
mkdir -p ${log_dir} && chmod 777 ${log_dir}
fi
${csudo} ln -s ${log_dir} ${install_main_dir}/log
}
@ -153,20 +237,26 @@ function install_examples() {
}
function clean_service_on_sysvinit() {
restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
if pidof taosd &> /dev/null; then
${csudo} service taosd stop || :
fi
${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || :
${csudo} rm -f ${service_config_dir}/taosd || :
if ((${initd_mod}==1)); then
${csudo} grep -q -F "taos" /etc/inittab && ${csudo} insserv -r taosd || :
${csudo} chkconfig --del taosd || :
elif ((${initd_mod}==2)); then
${csudo} grep -q -F "taos" /etc/inittab && ${csudo} update-rc.d -f taosd remove || :
${csudo} insserv -r taosd || :
elif ((${initd_mod}==3)); then
${csudo} update-rc.d -f taosd remove || :
fi
# ${csudo} update-rc.d -f taosd remove || :
${csudo} rm -f ${service_config_dir}/taosd || :
if $(which init &> /dev/null); then
${csudo} init q || :
fi
}
function install_service_on_sysvinit() {
@ -175,19 +265,26 @@ function install_service_on_sysvinit() {
sleep 1
# Install taosd service
if ((${os_type}==1)); then
${csudo} cp -f ${script_dir}/../deb/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../deb/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
elif ((${os_type}==2)); then
${csudo} cp -f ${script_dir}/../rpm/init.d/taosd ${install_main_dir}/init.d
${csudo} cp ${script_dir}/../rpm/init.d/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd
fi
#restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start"
#${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab"
# TODO: for centos, change here
if ((${initd_mod}==1)); then
${csudo} insserv taosd || :
${csudo} chkconfig --add taosd || :
${csudo} chkconfig --level 2345 taosd on || :
elif ((${initd_mod}==2)); then
${csudo} insserv taosd || :
${csudo} insserv -d taosd || :
elif ((${initd_mod}==3)); then
${csudo} update-rc.d taosd defaults || :
fi
# ${csudo} update-rc.d taosd defaults
# chkconfig mysqld on
}
function clean_service_on_systemd() {
@ -237,7 +334,7 @@ function install_service() {
elif ((${service_mod}==1)); then
install_service_on_sysvinit
else
# must manually start taosd
# must manually stop taosd
kill_taosd
fi
}
@ -245,6 +342,8 @@ function install_service() {
function update_TDengine() {
echo -e "${GREEN}Start to update TDEngine...${NC}"
# Stop the service if running
if [ "$osType" != "Darwin" ]; then
if pidof taosd &> /dev/null; then
if ((${service_mod}==0)); then
${csudo} systemctl stop taosd || :
@ -255,6 +354,7 @@ function update_TDengine() {
fi
sleep 1
fi
fi
install_main_path
@ -264,12 +364,18 @@ function update_TDengine() {
install_connector
install_examples
install_bin
if [ "$osType" != "Darwin" ]; then
install_service
fi
install_config
if [ "$osType" != "Darwin" ]; then
echo
echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
echo
echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}"
@ -282,14 +388,30 @@ function update_TDengine() {
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
echo
echo -e "\033[44;32;1mTDengine is updated successfully!${NC}"
else
echo
echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}"
echo
echo -e "${GREEN_DARK}To access TDengine Client ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
echo
echo -e "\033[44;32;1mTDengine Client is updated successfully!${NC}"
fi
}
function install_TDengine() {
# Start to install
if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN}Start to install TDEngine...${NC}"
else
echo -e "${GREEN}Start to install TDEngine Client ...${NC}"
fi
install_main_path
if [ "$osType" != "Darwin" ]; then
install_data
fi
install_log
install_header
install_lib
@ -297,8 +419,14 @@ function install_TDengine() {
install_examples
install_bin
if [ "$osType" != "Darwin" ]; then
install_service
fi
install_config
if [ "$osType" != "Darwin" ]; then
# Ask if to start the service
echo
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
@ -315,12 +443,17 @@ function install_TDengine() {
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
echo
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
else
echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}"
echo
echo -e "\033[44;32;1mTDengine Client is installed successfully!${NC}"
fi
}
## ==============================Main program starts from here============================
echo source directory: $1
echo binary directory: $2
if [ -x ${bin_dir}/taosd ]; then
if [ -x ${bin_dir}/taos ]; then
update_TDengine
else
install_TDengine

View File

@ -1,17 +1,28 @@
#!/bin/bash
#
# Generate tar.gz package for linux client
# Generate tar.gz package for the linux client on all os systems
set -e
set -x
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
armver=$4
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
if [ "$osType" != "Darwin" ]; then
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
else
script_dir=`dirname $0`
cd ${script_dir}
script_dir="$(pwd)"
top_dir=${script_dir}/../..
fi
# create compressed install file.
build_dir="${compile_dir}/build"
@ -19,13 +30,32 @@ code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
install_dir="${release_dir}/TDengine-client-${version}"
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-client"
else
install_dir="${release_dir}/TDengine-client"
fi
# Directories and files.
if [ "$osType" != "Darwin" ]; then
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
fi
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
install_files="${script_dir}/install_client.sh"
# make directories.
@ -35,20 +65,39 @@ mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cf
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/*
cd ${install_dir}
if [ "$osType" != "Darwin" ]; then
tar -zcv -f taos.tar.gz * --remove-files || :
else
tar -zcv -f taos.tar.gz * || :
mv taos.tar.gz ..
rm -rf ./*
mv ../taos.tar.gz .
fi
cd ${curr_dir}
cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install*
cp ${install_files} ${install_dir}
if [ "$osType" == "Darwin" ]; then
sed 's/osType=Linux/osType=Darwin/g' ${install_dir}/install_client.sh >> install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install_client.sh >> install_client_temp.sh
mv install_client_temp.sh ${install_dir}/install_client.sh
fi
chmod a+x ${install_dir}/install_client.sh
# Copy example code
mkdir -p ${install_dir}/examples
cp -r ${top_dir}/tests/examples/c ${install_dir}/examples
cp -r ${top_dir}/tests/examples/JDBC ${install_dir}/examples
cp -r ${top_dir}/tests/examples/matlab ${install_dir}/examples
cp -r ${top_dir}/tests/examples/python ${install_dir}/examples
cp -r ${top_dir}/tests/examples/R ${install_dir}/examples
cp -r ${top_dir}/tests/examples/go ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
@ -56,23 +105,51 @@ cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
fi
cp -r ${connector_dir}/grafana ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
if [ -z "$armver" ]; then
tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm64" ]; then
tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm32" ]; then
tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stable or beta"
exit 1
fi
if [ "$osType" != "Darwin" ]; then
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
else
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) || :
mv "$(basename ${pkg_name}).tar.gz" ..
rm -rf ./*
mv ../"$(basename ${pkg_name}).tar.gz" .
fi
cd ${curr_dir}

View File

@ -1,15 +1,22 @@
#!/bin/bash
#
# Generate deb package for other os system (no unbutu or centos)
# Generate tar.gz package for all os systems
set -e
#set -x
curr_dir=$(pwd)
compile_dir=$1
version=$2
build_time=$3
armver=$4
cpuType=$4
osType=$5
verMode=$6
verType=$7
pagMode=$8
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
top_dir="$(readlink -f ${script_dir}/../..)"
# create compressed install file.
build_dir="${compile_dir}/build"
@ -17,14 +24,26 @@ code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
#package_name='linux'
install_dir="${release_dir}/TDengine-${version}"
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-server"
else
install_dir="${release_dir}/TDengine-server"
fi
# Directories and files.
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdump ${script_dir}/remove.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
install_files="${script_dir}/install.sh"
nginx_dir="${code_dir}/../../enterprise/src/modules/web"
# Init file
#init_dir=${script_dir}/deb
@ -44,22 +63,53 @@ mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x $
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taosd.rpm
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
tar -zcv -f taos.tar.gz * --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar taos.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
cp ${install_files} ${install_dir} && chmod a+x ${install_dir}/install*
cp ${install_files} ${install_dir}
if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/install.sh >> install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
if [ "$pagMode" == "lite" ]; then
sed 's/pagMode=full/pagMode=lite/g' ${install_dir}/install.sh >> install_temp.sh
mv install_temp.sh ${install_dir}/install.sh
fi
chmod a+x ${install_dir}/install.sh
# Copy example code
mkdir -p ${install_dir}/examples
examples_dir="${top_dir}/tests/examples"
cp -r ${examples_dir}/c ${install_dir}/examples
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/JDBC ${install_dir}/examples
cp -r ${examples_dir}/matlab ${install_dir}/examples
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
@ -67,23 +117,46 @@ cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp -r ${connector_dir}/grafana ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
# exit 1
cd ${release_dir}
if [ -z "$armver" ]; then
tar -zcv -f "$(basename ${install_dir}).tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm64" ]; then
tar -zcv -f "$(basename ${install_dir})-arm64.tar.gz" $(basename ${install_dir}) --remove-files
elif [ "$armver" == "arm32" ]; then
tar -zcv -f "$(basename ${install_dir})-arm32.tar.gz" $(basename ${install_dir}) --remove-files
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1
fi
if [ "$pagMode" == "lite" ]; then
pkg_name=${pkg_name}-Lite
fi
if [ "$verType" == "beta" ]; then
pkg_name=${pkg_name}-${verType}
elif [ "$verType" == "stable" ]; then
pkg_name=${pkg_name}
else
echo "unknow verType, nor stabel or beta"
exit 1
fi
tar -zcv -f "$(basename ${pkg_name}).tar.gz" $(basename ${install_dir}) --remove-files || :
exitcode=$?
if [ "$exitcode" != "0" ]; then
echo "tar ${pkg_name}.tar.gz error !!!"
exit $exitcode
fi
cd ${curr_dir}
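# For reference, a sketch of how the final archive name resolves under the
# branches above, using hypothetical inputs (install_dir, version, osType and
# cpuType are assigned earlier in the script):
#   install_dir=TDengine-server  version=2.0.0.0  osType=Linux  cpuType=x64
#   verMode=cluster|edge  ->  pkg_name=TDengine-server-2.0.0.0-Linux-x64
#   pagMode=lite          ->  pkg_name gains a "-Lite" suffix
#   verType=beta          ->  pkg_name gains a "-beta" suffix
#   resulting archive     ->  TDengine-server-2.0.0.0-Linux-x64-Lite-beta.tar.gz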

View File

@ -4,7 +4,7 @@
# is required to use systemd to manage services at boot
#set -x
# -----------------------Variables definition---------------------
script_dir=$(dirname $(readlink -m "$0"))
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
data_dir="/var/lib/taos"
log_dir="/var/log/taos"

View File

@ -2,6 +2,11 @@
#
# Script to stop the service and uninstall TDengine, but retain the config, data and log files.
set -e
#set -x
verMode=edge
RED='\033[0;31m'
GREEN='\033[1;32m'
NC='\033[0m'
@ -14,10 +19,14 @@ cfg_link_dir="/usr/local/taos/cfg"
bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
install_nginxd_dir="/usr/local/nginxd"
# v1.5 jar dir
v15_java_app_dir="/usr/local/lib/taos"
service_config_dir="/etc/systemd/system"
taos_service_name="taosd"
nginx_service_name="nginxd"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
@ -62,6 +71,7 @@ function clean_bin() {
function clean_lib() {
# Remove link
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -rf ${v15_java_app_dir} || :
}
function clean_header() {
@ -90,6 +100,20 @@ function clean_service_on_systemd() {
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${bin_dir}/web ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for TDengine is running, stopping it..."
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${nginx_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${nginx_service_config}
fi
fi
}
function clean_service_on_sysvinit() {
@ -143,6 +167,7 @@ clean_config
${csudo} rm -rf ${data_link_dir} || :
${csudo} rm -rf ${install_main_dir}
${csudo} rm -rf ${install_nginxd_dir}
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
if echo $osinfo | grep -qwi "ubuntu" ; then

View File

@ -17,6 +17,10 @@ bin_link_dir="/usr/bin"
lib_link_dir="/usr/lib"
inc_link_dir="/usr/include"
# v1.5 jar dir
v15_java_app_dir="/usr/local/lib/taos"
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
@ -39,6 +43,7 @@ function clean_bin() {
function clean_lib() {
# Remove link
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -rf ${v15_java_app_dir} || :
}
function clean_header() {

View File

@ -17,7 +17,7 @@ done
declare -A dirHash
for linkFile in $(find -L $linkDir -xtype l); do
targetFile=$(readlink -m $linkFile)
targetFile=$(readlink -f $linkFile)
echo "targetFile: ${targetFile}"
# TODO : Extract directory part and basename part
dirName=$(dirname $(dirname ${targetFile}))

View File

@ -24,20 +24,10 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
#set version of .so
#VERSION so version
#SOVERSION api version
IF (TD_LITE)
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
OUTPUT_VARIABLE
VERSION_INFO)
MESSAGE(STATUS "build lite version ${VERSION_INFO}")
ELSE ()
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
OUTPUT_VARIABLE
VERSION_INFO)
MESSAGE(STATUS "build cluster version ${VERSION_INFO}")
ENDIF ()
MESSAGE(STATUS "build version ${VERSION_INFO}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
@ -51,10 +41,13 @@ ELSEIF (TD_WINDOWS_64)
# generate dynamic library (*.dll)
ADD_LIBRARY(taos SHARED ${SRC})
IF (NOT TD_GODLL)
SET_TARGET_PROPERTIES(taos PROPERTIES LINK_FLAGS /DEF:${TD_COMMUNITY_DIR}/src/client/src/taos.def)
ENDIF ()
TARGET_LINK_LIBRARIES(taos trpc)
ELSEIF (TD_DARWIN_64)
SET(CMAKE_MACOSX_RPATH 1)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
ADD_LIBRARY(taos_static STATIC ${SRC})
@ -65,5 +58,16 @@ ELSEIF (TD_DARWIN_64)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .so
#VERSION so version
#SOVERSION api version
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh)
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_version.sh ${TD_COMMUNITY_DIR}/src/util/src/version.c
OUTPUT_VARIABLE
VERSION_INFO)
MESSAGE(STATUS "build version ${VERSION_INFO}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${VERSION_INFO} SOVERSION 1)
ENDIF ()

View File

@ -27,7 +27,7 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql);
void tscGetQualifiedTSList(SSqlObj* pSql, SJoinSubquerySupporter* p1, SJoinSubquerySupporter* p2, int32_t* num);
void tscSetupOutputColumnIndex(SSqlObj* pSql);
int32_t tscLaunchSecondSubquery(SSqlObj* pSql);
int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql);
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code);
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index);
@ -121,7 +121,7 @@ STSBuf* tsBufCreate(bool autoDelete);
STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete);
STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_t len, int32_t tsOrder);
void tsBufDestory(STSBuf* pTSBuf);
void* tsBufDestory(STSBuf* pTSBuf);
void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData, int32_t len);
int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeIdx);

View File

@ -21,9 +21,70 @@ extern "C" {
#endif
#include "taos.h"
#include "taosmsg.h"
#include "tsqldef.h"
#include "ttypes.h"
#include "taosmsg.h"
enum _sql_cmd {
TSDB_SQL_SELECT = 1,
TSDB_SQL_FETCH,
TSDB_SQL_INSERT,
TSDB_SQL_MGMT, // the SQL below is for mgmt node
TSDB_SQL_CREATE_DB,
TSDB_SQL_CREATE_TABLE,
TSDB_SQL_DROP_DB,
TSDB_SQL_DROP_TABLE,
TSDB_SQL_CREATE_ACCT,
TSDB_SQL_CREATE_USER, //10
TSDB_SQL_DROP_ACCT,
TSDB_SQL_DROP_USER,
TSDB_SQL_ALTER_USER,
TSDB_SQL_ALTER_ACCT,
TSDB_SQL_ALTER_TABLE,
TSDB_SQL_ALTER_DB,
TSDB_SQL_CREATE_MNODE,
TSDB_SQL_DROP_MNODE,
TSDB_SQL_CREATE_DNODE,
TSDB_SQL_DROP_DNODE, // 20
TSDB_SQL_CFG_DNODE,
TSDB_SQL_CFG_MNODE,
TSDB_SQL_SHOW,
TSDB_SQL_RETRIEVE,
TSDB_SQL_KILL_QUERY,
TSDB_SQL_KILL_STREAM,
TSDB_SQL_KILL_CONNECTION,
TSDB_SQL_READ, // SQL below is for read operation
TSDB_SQL_CONNECT,
TSDB_SQL_USE_DB, // 30
TSDB_SQL_META,
TSDB_SQL_METRIC,
TSDB_SQL_MULTI_META,
TSDB_SQL_HB,
TSDB_SQL_LOCAL, // SQL below for client local
TSDB_SQL_DESCRIBE_TABLE,
TSDB_SQL_RETRIEVE_METRIC,
TSDB_SQL_METRIC_JOIN_RETRIEVE,
TSDB_SQL_RETRIEVE_TAGS,
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache
*/
TSDB_SQL_RETRIEVE_EMPTY_RESULT, //40
TSDB_SQL_RESET_CACHE,
TSDB_SQL_SERV_STATUS,
TSDB_SQL_CURRENT_DB,
TSDB_SQL_SERV_VERSION,
TSDB_SQL_CLI_VERSION,
TSDB_SQL_CURRENT_USER,
TSDB_SQL_CFG_LOCAL,
TSDB_SQL_MAX //48
};
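The ordering of this enum is load-bearing: elsewhere in this diff the client classifies a command by range comparison rather than by explicit lists (e.g. "pCmd->command < TSDB_SQL_LOCAL" and "pCmd->command > TSDB_SQL_MGMT"). A minimal sketch of that idiom follows; the helper names are hypothetical, not part of the codebase:
#include <stdbool.h>
/* Hypothetical helpers illustrating the range idiom: the enum groups mgmt
 * commands first, then read commands, then client-local commands. */
static bool isMgmtCmd(int cmd)  { return cmd >= TSDB_SQL_MGMT && cmd < TSDB_SQL_READ; }
static bool isReadCmd(int cmd)  { return cmd >= TSDB_SQL_READ && cmd < TSDB_SQL_LOCAL; }
static bool isLocalCmd(int cmd) { return cmd >= TSDB_SQL_LOCAL && cmd < TSDB_SQL_MAX; }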
#define MAX_TOKEN_LEN 30
@ -72,72 +133,12 @@ typedef struct tFieldList {
TAOS_FIELD *p;
} tFieldList;
// sql operation type
// create table operation type
enum TSQL_TYPE {
TSQL_CREATE_NORMAL_METER = 0x01,
TSQL_CREATE_NORMAL_METRIC = 0x02,
TSQL_CREATE_METER_FROM_METRIC = 0x04,
TSQL_CREATE_STREAM = 0x08,
TSQL_QUERY_METER = 0x10,
TSQL_INSERT = 0x20,
DROP_DNODE = 0x40,
DROP_DATABASE = 0x41,
DROP_TABLE = 0x42,
DROP_USER = 0x43,
DROP_ACCOUNT = 0x44,
USE_DATABASE = 0x50,
// show operation
SHOW_DATABASES = 0x60,
SHOW_TABLES = 0x61,
SHOW_STABLES = 0x62,
SHOW_MNODES = 0x63,
SHOW_DNODES = 0x64,
SHOW_ACCOUNTS = 0x65,
SHOW_USERS = 0x66,
SHOW_VGROUPS = 0x67,
SHOW_QUERIES = 0x68,
SHOW_STREAMS = 0x69,
SHOW_CONFIGS = 0x6a,
SHOW_SCORES = 0x6b,
SHOW_MODULES = 0x6c,
SHOW_CONNECTIONS = 0x6d,
SHOW_GRANTS = 0x6e,
SHOW_VNODES = 0x6f,
// create dnode
CREATE_DNODE = 0x80,
CREATE_DATABASE = 0x81,
CREATE_USER = 0x82,
CREATE_ACCOUNT = 0x83,
DESCRIBE_TABLE = 0x90,
ALTER_USER_PASSWD = 0xA0,
ALTER_USER_PRIVILEGES = 0xA1,
ALTER_DNODE = 0xA2,
ALTER_LOCAL = 0xA3,
ALTER_DATABASE = 0xA4,
ALTER_ACCT = 0xA5,
// reset operation
RESET_QUERY_CACHE = 0xB0,
// alter tags
ALTER_TABLE_TAGS_ADD = 0xC0,
ALTER_TABLE_TAGS_DROP = 0xC1,
ALTER_TABLE_TAGS_CHG = 0xC2,
ALTER_TABLE_TAGS_SET = 0xC4,
// alter table column
ALTER_TABLE_ADD_COLUMN = 0xD0,
ALTER_TABLE_DROP_COLUMN = 0xD1,
KILL_QUERY = 0xD2,
KILL_STREAM = 0xD3,
KILL_CONNECTION = 0xD4,
TSQL_CREATE_TABLE = 0x1,
TSQL_CREATE_STABLE = 0x2,
TSQL_CREATE_TABLE_FROM_STABLE = 0x3,
TSQL_CREATE_STREAM = 0x4,
};
typedef struct SQuerySQL {
@ -158,32 +159,30 @@ typedef struct SCreateTableSQL {
struct SSQLToken name; // meter name, create table [meterName] xxx
bool existCheck;
int8_t type; // create normal table/from super table/ stream
struct {
tFieldList *pTagColumns; // for normal table, pTagColumns = NULL;
tFieldList *pColumns;
} colInfo;
struct {
SSQLToken metricName; // metric name, for using clause
SSQLToken stableName; // super table name, for using clause
tVariantList *pTagVals; // create by using metric, tag value
STagData tagdata;
} usingInfo;
SQuerySQL *pSelect;
} SCreateTableSQL;
typedef struct SAlterTableSQL {
SSQLToken name;
int16_t type;
STagData tagData;
tFieldList * pAddColumns;
SSQLToken dropTagToken;
tVariantList *varList; // set t=val or: change src dst
} SAlterTableSQL;
typedef struct SInsertSQL {
SSQLToken name;
struct tSQLExprListList *pValue;
} SInsertSQL;
typedef struct SCreateDBInfo {
SSQLToken dbname;
int32_t replica;
@ -204,40 +203,67 @@ typedef struct SCreateDBInfo {
} SCreateDBInfo;
typedef struct SCreateAcctSQL {
int32_t users;
int32_t dbs;
int32_t tseries;
int32_t streams;
int32_t pps;
int64_t storage;
int64_t qtime;
int32_t conns;
int32_t maxUsers;
int32_t maxDbs;
int32_t maxTimeSeries;
int32_t maxStreams;
int32_t maxPointsPerSecond;
int64_t maxStorage;
int64_t maxQueryTime;
int32_t maxConnections;
SSQLToken stat;
} SCreateAcctSQL;
typedef struct SShowInfo {
uint8_t showType;
SSQLToken prefix;
SSQLToken pattern;
} SShowInfo;
typedef struct SUserInfo {
SSQLToken user;
SSQLToken passwd;
// bool hasPasswd;
SSQLToken privilege;
// bool hasPrivilege;
int16_t type;
} SUserInfo;
typedef struct tDCLSQL {
int32_t nTokens; /* Number of expressions on the list */
int32_t nAlloc; /* Number of entries allocated below */
SSQLToken *a; /* one entry for element */
bool existsCheck;
union {
SCreateDBInfo dbOpt;
SCreateAcctSQL acctOpt;
SShowInfo showOpt;
SSQLToken ip;
};
SUserInfo user;
} tDCLSQL;
typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause
SQuerySQL **pClause;
int32_t numOfClause;
} SSubclauseInfo;
typedef struct SSqlInfo {
int32_t sqlType;
bool validSql;
int32_t type;
bool valid;
union {
SCreateTableSQL *pCreateTableInfo;
SInsertSQL * pInsertInfo;
SAlterTableSQL * pAlterInfo;
SQuerySQL * pQueryInfo;
tDCLSQL * pDCLInfo;
};
SSubclauseInfo subclauseInfo;
char pzErrMsg[256];
} SSqlInfo;
@ -338,7 +364,7 @@ SQuerySQL *tSetQuerySQLElems(SSQLToken *pSelectToken, tSQLExprList *pSelection,
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName,
tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type);
void tSQLExprDestroy(tSQLExpr *);
void tSQLExprNodeDestroy(tSQLExpr *pExpr);
tSQLExpr *tSQLExprNodeClone(tSQLExpr *pExpr);
@ -346,23 +372,31 @@ SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tV
tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExprList);
void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList);
void destroyAllSelectClause(SSubclauseInfo *pSql);
void doDestroyQuerySql(SQuerySQL *pSql);
void destroyQuerySql(SQuerySQL *pSql);
SSqlInfo * setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type);
SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo);
void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type);
SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause);
void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists);
void SQLInfoDestroy(SSqlInfo *pInfo);
void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...);
void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck);
void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns);
tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken);
void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists);
void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo);
void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd);
void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip);
void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege);
void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo);
// prefix show db.tables;

View File

@ -68,7 +68,7 @@ typedef struct SLocalReducer {
bool hasPrevRow; // cannot be released
bool hasUnprocessedRow;
tOrderDescriptor * pDesc;
tColModel * resColModel;
SColumnModel * resColModel;
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
SInterpolationInfo interpolationInfo; // interpolation support structure
char * pFinalRes; // result data after interpo
@ -92,9 +92,9 @@ typedef struct SSubqueryState {
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
tOrderDescriptor *pOrderDescriptor;
tColModel * pFinalColModel; // colModel for final result
SColumnModel * pFinalColModel; // colModel for final result
SSubqueryState * pState;
int32_t vnodeIdx; // index of current vnode in vnode list
int32_t subqueryIndex; // index of the current subquery in the subquery list
SSqlObj * pParentSqlObj;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times
@ -102,9 +102,9 @@ typedef struct SRetrieveSupport {
} SRetrieveSupport;
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
tColModel **pFinalModel, uint32_t nBufferSize);
SColumnModel **pFinalModel, uint32_t nBufferSize);
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, tColModel *pFinalModel,
void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
int32_t numOfVnodes);
int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
@ -116,11 +116,11 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
tColModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes);
SColumnModel *finalModel, SSqlCmd *pSqlCmd, SSqlRes *pRes);
void tscDestroyLocalReducer(SSqlObj *pSql);
int32_t tscLocalDoReduce(SSqlObj *pSql);
int32_t tscDoLocalreduce(SSqlObj *pSql);
#ifdef __cplusplus
}

View File

@ -23,15 +23,15 @@ extern "C" {
/*
* @date 2018/09/30
*/
#include <limits.h>
#include <stdio.h>
#include "os.h"
#include "textbuffer.h"
#include "tscSecondaryMerge.h"
#include "tsclient.h"
#include "tsdb.h"
#include "tscSecondaryMerge.h"
#define UTIL_METER_IS_METRIC(metaInfo) (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo)))
#define UTIL_METER_IS_SUPERTABLE(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_SUPERTABLE(metaInfo)))
#define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE))
@ -52,7 +52,6 @@ typedef struct SParsedDataColInfo {
typedef struct SJoinSubquerySupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
bool hasMore; // has data from vnode to fetch
int32_t subqueryIndex; // index of sub query
int64_t interval; // interval time
SLimitVal limit; // limit info
@ -62,26 +61,28 @@ typedef struct SJoinSubquerySupporter {
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupbyExpr;
struct STSBuf* pTSBuf;
FILE* f;
char path[PATH_MAX];
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path
} SJoinSubquerySupporter;
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
STableDataBlocks* tscCreateDataBlock(int32_t size);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
SMeterMeta* pMeterMeta, STableDataBlocks** dataBlocks);
void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
uint32_t offset);
SDataBlockList* tscCreateBlockArrayList();
void* tscDestroyBlockArrayList(SDataBlockList* pList);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
void tscFreeUnusedDataBlocks(SDataBlockList* pList);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList);
STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, char* tableId);
STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name);
int32_t tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, const char* tableId, SMeterMeta* pMeterMeta,
STableDataBlocks** dataBlocks);
SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx);
SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
@ -94,29 +95,27 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
* @param pSql sql object
* @return
*/
bool tscIsPointInterpQuery(SSqlCmd* pCmd);
bool tscIsTWAQuery(SSqlCmd* pCmd);
bool tscProjectionQueryOnMetric(SSqlCmd* pCmd);
bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd);
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscProjectionQueryOnTable(SQueryInfo* pQueryInfo);
bool tscIsTwoStageMergeMetricQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryOnMetric(SSqlCmd* pCmd);
bool tscQueryMetricTags(SSqlCmd* pCmd);
bool tscQueryMetricTags(SQueryInfo* pQueryInfo);
bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd);
void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
SSchema* pColSchema, int16_t isTag);
void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex);
void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex);
//TODO refactor, remove
void SStringFree(SString* str);
void SStringCopy(SString* pDest, const SString* pSrc);
SString SStringCreate(const char* str);
int32_t SStringAlloc(SString* pStr, int32_t size);
int32_t SStringEnsureRemain(SString* pStr, int32_t size);
int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex);
void tscClearInterpInfo(SSqlCmd* pCmd);
int32_t setMeterID(SMeterMetaInfo* pMeterMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql);
void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertOrImportData(char* sqlstr);
@ -130,29 +129,33 @@ void tscFieldInfoSetValFromField(SFieldInfo* pFieldInfo, int32_t index, TAOS_FIE
void tscFieldInfoSetValue(SFieldInfo* pFieldInfo, int32_t index, int8_t type, const char* name, int16_t bytes);
void tscFieldInfoUpdateVisible(SFieldInfo* pFieldInfo, int32_t index, bool visible);
void tscFieldInfoCalOffset(SSqlCmd* pCmd);
void tscFieldInfoUpdateOffset(SSqlCmd* pCmd);
void tscFieldInfoCalOffset(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList, int32_t size);
void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst);
void tscFieldInfoCopyAll(SFieldInfo* dst, SFieldInfo* src);
TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index);
int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index);
int32_t tscGetResRowLength(SSqlCmd* pCmd);
TAOS_FIELD* tscFieldInfoGetField(SQueryInfo* pQueryInfo, int32_t index);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
int32_t tscGetResRowLength(SQueryInfo* pQueryInfo);
void tscClearFieldInfo(SFieldInfo* pFieldInfo);
int32_t tscNumOfFields(SQueryInfo* pQueryInfo);
int32_t tscFieldInfoCompare(SFieldInfo* pFieldInfo1, SFieldInfo* pFieldInfo2);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
SSqlExpr* tscSqlExprInsert(SSqlCmd* pCmd, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize);
SSqlExpr* tscSqlExprInsertEmpty(SSqlCmd* pCmd, int32_t index, int16_t functionId);
SSqlExpr* tscSqlExprInsertEmpty(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId);
SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
int16_t size);
SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index);
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid);
void* tscSqlExprDestroy(SSqlExpr* pExpr);
void tscSqlExprInfoDestroy(SSqlExprInfo* pExprInfo);
SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex);
SColumnBase* tscColumnBaseInfoInsert(SQueryInfo* pQueryInfo, SColumnIndex* colIndex);
void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src);
void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src);
@ -167,7 +170,7 @@ int32_t tscValidateName(SSQLToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId);
bool tscValidateColumnId(SMeterMetaInfo* pMeterMetaInfo, int32_t colId);
// get starter position of metric query condition (query on tags) in SSqlCmd.payload
SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex);
@ -175,32 +178,39 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str);
void tscTagCondCopy(STagCond* dest, const STagCond* src);
void tscTagCondRelease(STagCond* pCond);
void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo);
void tscSetFreeHeatBeat(STscObj* pObj);
bool tscShouldFreeHeatBeat(SSqlObj* pHb);
void tscCleanSqlCmd(SSqlCmd* pCmd);
bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql);
void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache);
SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index);
SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index);
void tscRemoveAllMeterMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache);
SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex);
SMeterMetaInfo* tscGetMeterMetaInfoFromQueryInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
int32_t tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo);
SMeterMetaInfo* tscGetMeterMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, int32_t* index);
void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache);
SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta,
SMeterMetaInfo* tscAddMeterMetaInfo(SQueryInfo* pQueryInfo, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta,
int16_t numOfTags, int16_t* tags);
SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd);
SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
void tscFreeSubqueryInfo(SSqlCmd* pCmd);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid);
int tscGetMetricMeta(SSqlObj* pSql);
int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex);
int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists);
void tscGetMetricMetaCacheKey(SQueryInfo* pQueryInfo, char* keyStr, uint64_t uid);
int tscGetMetricMeta(SSqlObj* pSql, int32_t clauseIndex);
int tscGetMeterMeta(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo);
int tscGetMeterMetaEx(SSqlObj* pSql, SMeterMetaInfo* pMeterMetaInfo, bool createIfNotExists);
void tscResetForNextRetrieve(SSqlRes* pRes);
void tscAddTimestampColumn(SSqlCmd* pCmd, int16_t functionId, int16_t tableIndex);
void tscAddTimestampColumn(SQueryInfo* pQueryInfo, int16_t functionId, int16_t tableIndex);
void tscDoQuery(SSqlObj* pSql);
/**
@ -220,18 +230,26 @@ void tscDoQuery(SSqlObj* pSql);
* @param pPrevSql
* @return
*/
SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param,
SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);
void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex);
void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid);
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid);
TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void (*fp)(void*, TAOS_RES*, int),
void* param, void** taos);
void sortRemoveDuplicates(STableDataBlocks* dataBuf);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)());
#ifdef __cplusplus
}
#endif

View File

@ -20,14 +20,6 @@
extern "C" {
#endif
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "os.h"
#include "taos.h"
#include "taosmsg.h"
@ -39,79 +31,16 @@ extern "C" {
#include "tsqlfunction.h"
#include "tutil.h"
#define TSC_GET_RESPTR_BASE(res, cmd, col, ord) \
((res->data + tscFieldInfoGetOffset(cmd, col) * res->numOfRows) + \
(1 - ord.order) * (res->numOfRows - 1) * tscFieldInfoGetField(cmd, col)->bytes)
enum _sql_cmd {
TSDB_SQL_SELECT,
TSDB_SQL_FETCH,
TSDB_SQL_INSERT,
TSDB_SQL_MGMT, // the SQL below is for mgmt node
TSDB_SQL_CREATE_DB,
TSDB_SQL_CREATE_TABLE,
TSDB_SQL_DROP_DB,
TSDB_SQL_DROP_TABLE,
TSDB_SQL_CREATE_ACCT,
TSDB_SQL_CREATE_USER,
TSDB_SQL_DROP_ACCT, // 10
TSDB_SQL_DROP_USER,
TSDB_SQL_ALTER_USER,
TSDB_SQL_ALTER_ACCT,
TSDB_SQL_ALTER_TABLE,
TSDB_SQL_ALTER_DB,
TSDB_SQL_CREATE_MNODE,
TSDB_SQL_DROP_MNODE,
TSDB_SQL_CREATE_DNODE,
TSDB_SQL_DROP_DNODE,
TSDB_SQL_CFG_DNODE, // 20
TSDB_SQL_CFG_MNODE,
TSDB_SQL_SHOW,
TSDB_SQL_RETRIEVE,
TSDB_SQL_KILL_QUERY,
TSDB_SQL_KILL_STREAM,
TSDB_SQL_KILL_CONNECTION,
TSDB_SQL_READ, // SQL below is for read operation
TSDB_SQL_CONNECT,
TSDB_SQL_USE_DB,
TSDB_SQL_META, // 30
TSDB_SQL_METRIC,
TSDB_SQL_MULTI_META,
TSDB_SQL_HB,
TSDB_SQL_LOCAL, // SQL below for client local
TSDB_SQL_DESCRIBE_TABLE,
TSDB_SQL_RETRIEVE_METRIC,
TSDB_SQL_METRIC_JOIN_RETRIEVE,
TSDB_SQL_RETRIEVE_TAGS,
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache
*/
TSDB_SQL_RETRIEVE_EMPTY_RESULT,
TSDB_SQL_RESET_CACHE, // 40
TSDB_SQL_SERV_STATUS,
TSDB_SQL_CURRENT_DB,
TSDB_SQL_SERV_VERSION,
TSDB_SQL_CLI_VERSION,
TSDB_SQL_CURRENT_USER,
TSDB_SQL_CFG_LOCAL,
TSDB_SQL_MAX
};
#define TSC_GET_RESPTR_BASE(res, _queryinfo, col, ord) \
(res->data + tscFieldInfoGetOffset(_queryinfo, col) * res->numOfRows)
// forward declaration
struct SSqlInfo;
typedef struct SSqlGroupbyExpr {
int16_t tableIndex;
int16_t numOfGroupCols;
SColIndexEx columnInfo[TSDB_MAX_TAGS]; // group by columns information
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SSqlGroupbyExpr;
@ -120,7 +49,12 @@ typedef struct SMeterMetaInfo {
SMeterMeta * pMeterMeta; // metermeta
SMetricMeta *pMetricMeta; // metricmeta
char name[TSDB_METER_ID_LEN + 1];
/*
* 1. keep the vnode index during the multi-vnode super table projection query
* 2. keep the vnode index for multi-vnode insertion
*/
int32_t vnodeIndex;
char name[TSDB_METER_ID_LEN + 1]; // table(super table) name
int16_t numOfTags; // total required tags in query, including groupby tags
int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection
} SMeterMetaInfo;
@ -179,16 +113,9 @@ typedef struct SColumnBaseInfo {
struct SLocalReducer;
// todo move to utility
typedef struct SString {
int32_t alloc;
int32_t n;
char * z;
} SString;
typedef struct SCond {
uint64_t uid;
SString cond;
char * cond;
} SCond;
typedef struct SJoinNode {
@ -228,17 +155,23 @@ typedef struct SParamInfo {
typedef struct STableDataBlocks {
char meterId[TSDB_METER_ID_LEN];
int8_t tsSource;
bool ordered;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgid; // virtual group id
int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending
int32_t numOfMeters; // number of tables in current submit block
int64_t vgid;
int64_t prevTS;
int32_t numOfMeters;
int32_t rowSize;
int32_t rowSize; // row size for current table
uint32_t nAllocSize;
uint32_t headerSize; // header for metadata (submit metadata)
uint32_t size;
/*
* the metermeta for the current table; it is used during the submit stage, so keep a reference
* to prevent it from being removed from the cache
*/
SMeterMeta *pMeterMeta;
union {
char *filename;
char *pData;
@ -252,60 +185,76 @@ typedef struct STableDataBlocks {
typedef struct SDataBlockList {
int32_t idx;
int32_t nSize;
int32_t nAlloc;
uint32_t nSize;
uint32_t nAlloc;
char * userParam; /* user assigned parameters for async query */
void * udfp; /* user defined function pointer, used in async model */
STableDataBlocks **pData;
} SDataBlockList;
typedef struct {
SOrderVal order;
int command;
int count;// TODO refactor
union {
bool existsCheck; // check if the table exists
int8_t showType; // show command type
};
int8_t isInsertFromFile; // load data from file or not
bool import; // import/insert type
char msgType;
uint16_t type; // query type
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint16_t type; // query/insert/import type
char intervalTimeUnit;
int64_t etime, stime;
int64_t nAggTimeInterval; // aggregation time interval
int64_t nSlidingTime; // sliding window in milliseconds
SSqlGroupbyExpr groupbyExpr; // group by tags info
/*
* used to keep short request and error messages; in that case, SSqlCmd->payload == SSqlCmd->ext.
* Create table/query/insert operations may exceed TSDB_SQLCMD_SIZE.
*
* In such cases, allocate the memory dynamically, and remember to free it afterwards
*/
uint32_t allocSize;
char * payload;
int payloadLen;
short numOfCols;
SColumnBaseInfo colList;
SFieldInfo fieldsInfo;
SSqlExprInfo exprsInfo;
SLimitVal limit;
SLimitVal slimit;
int64_t globalLimit;
STagCond tagCond;
int16_t vnodeIdx; // vnode index in pMetricMeta for metric query
SOrderVal order;
int16_t interpoType; // interpolate type
int16_t numOfTables;
SMeterMetaInfo **pMeterInfo;
struct STSBuf * tsBuf;
int64_t * defaultVal; // default value for interpolation
char * msg; // pointer to the pCmd->payload to keep error message temporarily
int64_t clauseLimit; // limit for current sub clause
// offset value in the original sql expression, NOT sent to virtual node, only applied at client side
int64_t prjOffset;
} SQueryInfo;
// data source from sql string or from file
enum {
DATA_FROM_SQL_STRING = 1,
DATA_FROM_DATA_FILE = 2,
};
typedef struct {
int command;
uint8_t msgType;
union {
bool existsCheck; // check if the table exists or not
bool inStream; // denote if current sql is executed in stream or not
bool createOnDemand; // if the table is missing, create it on the fly during getMeterMeta
int8_t dataSourceType; // load data from file or not
};
union {
int32_t count;
int32_t numOfTablesInSubmit;
};
int32_t clauseIndex; // index of multiple subclause query
int8_t isParseFinish;
short numOfCols;
uint32_t allocSize;
char * payload;
int payloadLen;
SQueryInfo **pQueryInfo;
int32_t numOfClause;
// submit data blocks branched according to vnode
SDataBlockList *pDataBlocks;
SMeterMetaInfo **pMeterInfo;
struct STSBuf * tsBuf;
// todo use dynamically allocated memory for defaultVal
int64_t defaultVal[TSDB_MAX_COLUMNS]; // default value for interpolation
// for parameter ('?') binding and batch processing
int32_t batchSize;
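The net effect of this refactor: SSqlCmd now owns an array of per-subclause SQueryInfo objects (pQueryInfo / numOfClause) indexed by clauseIndex, which is how tscGetQueryInfoDetail is used later in this diff. A minimal sketch of the access pattern; the bounds handling here is an assumption, not the library's actual implementation:
/* Sketch only: the real tscGetQueryInfoDetail lives in the client code. */
static SQueryInfo *getQueryInfoSketch(SSqlCmd *pCmd, int32_t subClauseIndex) {
  if (subClauseIndex < 0 || subClauseIndex >= pCmd->numOfClause) {
    return NULL;  // assumed out-of-range behavior
  }
  return pCmd->pQueryInfo[subClauseIndex];
}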
@ -321,12 +270,15 @@ struct STSBuf;
typedef struct {
uint8_t code;
int numOfRows; // num of results in current retrieved
int numOfTotal; // num of total results
int64_t numOfRows; // num of results in current retrieved
int64_t numOfTotal; // num of total results
int64_t numOfTotalInCurrentClause; // num of total result in current subclause
char * pRsp;
int rspType;
int rspLen;
uint64_t qhandle;
int64_t uid;
int64_t useconds;
int64_t offset; // offset value from vnode during projection query of stable
int row;
@ -373,19 +325,18 @@ typedef struct _sql_obj {
uint32_t queryId;
void * thandle;
void * pStream;
void * pSubscription;
char * sqlstr;
char retry;
char maxRetry;
char index;
uint8_t index;
char freed : 4;
char listed : 4;
tsem_t rspSem;
tsem_t emptyRspSem;
SSqlCmd cmd;
SSqlRes res;
char numOfSubs;
uint8_t numOfSubs;
char * asyncTblPos;
void * pTableHashList;
struct _sql_obj **pSubs;
@ -427,9 +378,11 @@ typedef struct {
} SIpStrList;
// tscSql API
int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion);
int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion);
void tscInitMsgs();
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle);
int tscProcessSql(SSqlObj *pSql);
@ -448,15 +401,22 @@ int taos_retrieve(TAOS_RES *res);
* transfer function for metric query in stream computing, the function need to be change
* before send query message to vnode
*/
int32_t tscTansformSQLFunctionForMetricQuery(SSqlCmd *pCmd);
void tscRestoreSQLFunctionForMetricQuery(SSqlCmd *pCmd);
int32_t tscTansformSQLFunctionForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFunctionForMetricQuery(SQueryInfo *pQueryInfo);
void tscClearSqlMetaInfoForce(SSqlCmd *pCmd);
int32_t tscCreateResPointerInfo(SSqlCmd *pCmd, SSqlRes *pRes);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscDestroyResPointerInfo(SSqlRes *pRes);
void tscFreeSqlCmdData(SSqlCmd *pCmd);
void tscFreeResData(SSqlObj* pSql);
/**
* free query result of the sql object
* @param pObj
*/
void tscFreeSqlResult(SSqlObj* pSql);
/**
* only free part of resources allocated during query.
@ -475,10 +435,14 @@ void tscFreeSqlObj(SSqlObj *pObj);
void tscCloseTscObj(STscObj *pObj);
void tscProcessMultiVnodesInsert(SSqlObj *pSql);
void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql);
void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql);
void tscKillMetricQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(STscObj *pObj);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
// transfer SSqlInfo to SqlCmd struct
@ -499,6 +463,8 @@ extern int tsInsertHeadSize;
extern int tscNumOfThreads;
extern SIpStrList tscMgmtIpList;
typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int numOfRows);
#ifdef __cplusplus
}
#endif

View File

@ -135,7 +135,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
* Signature: (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;JI)J
*/
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp
(JNIEnv *, jobject, jstring, jstring, jstring, jstring, jstring, jlong, jint);
(JNIEnv *, jobject, jlong, jboolean, jstring, jstring, jint);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
@ -143,7 +143,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp
* Signature: (J)Lcom/taosdata/jdbc/TSDBResultSetRowData;
*/
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp
(JNIEnv *, jobject, jlong);
(JNIEnv *, jobject, jlong, jint);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
@ -151,7 +151,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp
(JNIEnv *, jobject, jlong);
(JNIEnv *, jobject, jlong, jboolean);
/*
* Class: com_taosdata_jdbc_TSDBJNIConnector

View File

@ -20,6 +20,7 @@
#include "tscJoinProcess.h"
#include "tsclient.h"
#include "tscUtil.h"
#include "ttime.h"
int __init = 0;
@ -239,7 +240,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
jniError("jobj:%p, connection is already closed", jobj);
return JNI_CONNECTION_NULL;
}
@ -252,6 +253,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
if (dst == NULL) {
jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon);
return JNI_OUT_OF_MEMORY;
}
@ -260,9 +262,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
//todo handle error
}
jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst);
int code = taos_query(tscon, dst);
if (code != 0) {
jniError("jobj:%p, conn:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst);
jniError("jobj:%p, conn:%p, code:%d, msg:%s", jobj, tscon, code, taos_errstr(tscon));
free(dst);
return JNI_TDENGINE_ERROR;
} else {
@ -271,9 +275,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
if (pSql->cmd.command == TSDB_SQL_INSERT) {
affectRows = taos_affected_rows(tscon);
jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst);
jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows);
} else {
jniTrace("jobj:%p, conn:%p, code:%d, sql:%s", jobj, tscon, code, dst);
jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code);
}
free(dst);
@ -307,7 +311,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(
if (tscIsUpdateQuery(tscon)) {
ret = 0; // for update query, no result pointer
jniTrace("jobj:%p, conn:%p, no result", jobj, tscon);
jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon);
} else {
ret = (jlong) taos_use_result(tscon);
jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret);
@ -463,11 +467,17 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i]));
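/* GET_FLOAT_VAL / GET_DOUBLE_VAL copy the value into a local variable instead
 * of dereferencing row[i] directly; presumably this avoids unaligned
 * float/double loads on strict-alignment targets such as ARM. */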
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i]));
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
}
break;
case TSDB_DATA_TYPE_BINARY: {
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case where the terminating null byte is absent
@ -496,7 +506,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
jniError("jobj:%p, connection is already closed", jobj);
return JNI_CONNECTION_NULL;
} else {
jniTrace("jobj:%p, conn:%p, close connection success", jobj, tscon);
@ -505,92 +515,42 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
}
}
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jstring jhost,
jstring juser, jstring jpass, jstring jdb,
jstring jtable, jlong jtime,
jint jperiod) {
TAOS_SUB *tsub;
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNIEnv *env, jobject jobj, jlong con,
jboolean restart, jstring jtopic, jstring jsql, jint jinterval) {
jlong sub = 0;
char * host = NULL;
char * user = NULL;
char * pass = NULL;
char * db = NULL;
char * table = NULL;
int64_t time = 0;
int period = 0;
TAOS *taos = (TAOS *)con;
char *topic = NULL;
char *sql = NULL;
jniGetGlobalMethod(env);
jniTrace("jobj:%p, in TSDBJNIConnector_subscribeImp", jobj);
if (jhost != NULL) {
host = (char *)(*env)->GetStringUTFChars(env, jhost, NULL);
if (jtopic != NULL) {
topic = (char *)(*env)->GetStringUTFChars(env, jtopic, NULL);
}
if (juser != NULL) {
user = (char *)(*env)->GetStringUTFChars(env, juser, NULL);
}
if (jpass != NULL) {
pass = (char *)(*env)->GetStringUTFChars(env, jpass, NULL);
}
if (jdb != NULL) {
db = (char *)(*env)->GetStringUTFChars(env, jdb, NULL);
}
if (jtable != NULL) {
table = (char *)(*env)->GetStringUTFChars(env, jtable, NULL);
}
time = (int64_t)jtime;
period = (int)jperiod;
if (user == NULL) {
jniTrace("jobj:%p, user is null, use tsDefaultUser", jobj);
user = tsDefaultUser;
}
if (pass == NULL) {
jniTrace("jobj:%p, pass is null, use tsDefaultPass", jobj);
pass = tsDefaultPass;
if (jsql != NULL) {
sql = (char *)(*env)->GetStringUTFChars(env, jsql, NULL);
}
jniTrace("jobj:%p, host:%s, user:%s, pass:%s, db:%s, table:%s, time:%d, period:%d", jobj, host, user, pass, db, table,
time, period);
tsub = taos_subscribe(host, user, pass, db, table, time, period);
TAOS_SUB *tsub = taos_subscribe(taos, (int)restart, topic, sql, NULL, NULL, jinterval);
sub = (jlong)tsub;
if (sub == 0) {
jniTrace("jobj:%p, failed to subscribe to db:%s, table:%s", jobj, db, table);
jniTrace("jobj:%p, failed to subscribe: topic:%s", jobj, jtopic);
} else {
jniTrace("jobj:%p, successfully subscribe to db:%s, table:%s, sub:%ld, tsub:%p", jobj, db, table, sub, tsub);
jniTrace("jobj:%p, successfully subscribe: topic: %s", jobj, jtopic);
}
if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host);
if (user != NULL && user != tsDefaultUser) (*env)->ReleaseStringUTFChars(env, juser, user);
if (pass != NULL && pass != tsDefaultPass) (*env)->ReleaseStringUTFChars(env, jpass, pass);
if (db != NULL) (*env)->ReleaseStringUTFChars(env, jdb, db);
if (table != NULL) (*env)->ReleaseStringUTFChars(env, jtable, table);
if (topic != NULL) (*env)->ReleaseStringUTFChars(env, jtopic, topic);
if (sql != NULL) (*env)->ReleaseStringUTFChars(env, jsql, sql);
return sub;
}
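For orientation, a sketch of the C-side subscription flow these JNI changes wrap, based on the calls visible in this diff (connection setup and error handling omitted; the topic and SQL strings are placeholders):
/* Sketch only: mirrors the taos_subscribe / taos_consume / taos_fetch_row /
 * taos_unsubscribe sequence used by the JNI layer in this diff. */
TAOS_SUB *tsub = taos_subscribe(taos, 1 /* restart */, "topic_1", "select * from t1", NULL, NULL, 1000);
TAOS_RES *res = taos_consume(tsub);           // blocks until a new result batch is ready
TAOS_ROW row;
while ((row = taos_fetch_row(res)) != NULL) { // drain the current batch row by row
  /* inspect columns via taos_fetch_fields(res) / taos_num_fields(res) */
}
taos_unsubscribe(tsub, 1 /* keepProgress */);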
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) {
jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub);
TAOS_SUB * tsub = (TAOS_SUB *)sub;
TAOS_ROW row = taos_consume(tsub);
TAOS_FIELD *fields = taos_fetch_subfields(tsub);
int num_fields = taos_subfields_count(tsub);
jniGetGlobalMethod(env);
jniTrace("jobj:%p, check fields:%p, num_fields=%d", jobj, fields, num_fields);
static jobject convert_one_row(JNIEnv *env, TAOS_ROW row, TAOS_FIELD* fields, int num_fields) {
jobject rowobj = (*env)->NewObject(env, g_rowdataClass, g_rowdataConstructor, num_fields);
jniTrace("created a rowdata object, rowobj:%p", rowobj);
if (row == NULL) {
jniTrace("jobj:%p, tsub:%p, fields size is %d, fetch row to the end", jobj, tsub, num_fields);
return NULL;
}
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
for (int i = 0; i < num_fields; i++) {
if (row[i] == NULL) {
continue;
@ -612,13 +572,20 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat) * ((float *)row[i]));
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
}
break;
case TSDB_DATA_TYPE_DOUBLE:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble) * ((double *)row[i]));
case TSDB_DATA_TYPE_DOUBLE:{
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
}
break;
case TSDB_DATA_TYPE_BINARY: {
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case where the terminating null byte is absent
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));
@ -636,13 +603,56 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNI
break;
}
}
jniTrace("jobj:%p, rowdata retrieved, rowobj:%p", jobj, rowobj);
return rowobj;
}
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub) {
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub, jint timeout) {
jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub);
jniGetGlobalMethod(env);
TAOS_SUB *tsub = (TAOS_SUB *)sub;
taos_unsubscribe(tsub);
jobject rows = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp);
int64_t start = taosGetTimestampMs();
int count = 0;
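// Poll semantics: block until at least one row has been consumed, or until
// `timeout` milliseconds have elapsed; a timeout of -1 means wait indefinitely.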
while (true) {
TAOS_RES * res = taos_consume(tsub);
if (res == NULL) {
jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub);
return NULL;
}
TAOS_FIELD *fields = taos_fetch_fields(res);
int num_fields = taos_num_fields(res);
while (true) {
TAOS_ROW row = taos_fetch_row(res);
if (row == NULL) {
break;
}
jobject rowobj = convert_one_row(env, row, fields, num_fields);
(*env)->CallBooleanMethod(env, rows, g_arrayListAddFp, rowobj);
count++;
}
if (count > 0) {
break;
}
if (timeout == -1) {
continue;
}
if (((int)(taosGetTimestampMs() - start)) >= timeout) {
jniTrace("jobj:%p, sub:%ld, timeout", jobj, sub);
break;
}
}
return rows;
}
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, jboolean keepProgress) {
TAOS_SUB *tsub = (TAOS_SUB *)sub;
taos_unsubscribe(tsub, keepProgress);
}
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTableSqlImp(JNIEnv *env, jobject jobj,

File diff suppressed because it is too large

View File

@ -24,8 +24,6 @@ taos_fetch_row_a
taos_subscribe
taos_consume
taos_unsubscribe
taos_subfields_count
taos_fetch_subfields
taos_open_stream
taos_close_stream
taos_fetch_block

View File

@ -17,6 +17,7 @@
#include "taosmsg.h"
#include "tast.h"
#include "tlog.h"
#include "tscSQLParser.h"
#include "tscSyntaxtreefunction.h"
#include "tschemautil.h"
#include "tsdb.h"
@ -26,7 +27,6 @@
#include "tstoken.h"
#include "ttypes.h"
#include "tutil.h"
#include "tscSQLParser.h"
/*
*
@ -108,13 +108,16 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols,
return NULL;
}
int32_t i = 0;
size_t nodeSize = sizeof(tSQLSyntaxNode);
tSQLSyntaxNode *pNode = NULL;
if (pToken->type == TK_ID || pToken->type == TK_TBNAME) {
int32_t i = 0;
if (pToken->type == TK_ID) {
do {
SSQLToken tableToken = {0};
extractTableNameFromToken(pToken, &tableToken);
size_t len = strlen(pSchema[i].name);
if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break;
} while (++i < numOfCols);
@ -269,7 +272,7 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
// get the operator of expr
uint8_t optr = getBinaryExprOptr(&t0);
if (optr <= 0) {
if (optr == 0) {
pError("not support binary operator:%d", t0.type);
tSQLSyntaxNodeDestroy(pLeft, NULL);
return NULL;
@ -324,8 +327,9 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
return pn;
} else {
uint8_t localOptr = getBinaryExprOptr(&t0);
if (localOptr <= 0) {
if (localOptr == 0) {
pError("not support binary operator:%d", t0.type);
free(pBinExpr);
return NULL;
}
@ -418,6 +422,7 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) {
if (pExpr == NULL) {
*dst = 0;
*len = 0;
return;
}
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType);
@ -490,12 +495,12 @@ static void setInitialValueForRangeQueryCondition(tSKipListQueryCond *q, int8_t
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY: {
q->upperBnd.nType = type;
q->upperBnd.pz = "\0";
q->upperBnd.pz = NULL;
q->upperBnd.nLen = -1;
q->lowerBnd.nType = type;
q->lowerBnd.pz = "\0";
q->lowerBnd.nLen = 0;
q->lowerBnd.pz = NULL;
q->lowerBnd.nLen = -1;
}
}
}
@ -641,16 +646,15 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults
}
/*
*
* traverse the result and apply the function to each item to check if the item is qualified or not
*/
void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipListNode *, void *),
tQueryResultset * pResult) {
static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) {
assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE);
// brutal force search
// brute force: scan the result list and check each item in the list
int64_t num = pResult->num;
for (int32_t i = 0, j = 0; i < pResult->num; ++i) {
if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], pExpr->info) == true)) {
if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) {
pResult->pRes[j++] = pResult->pRes[i];
} else {
num--;
@ -832,7 +836,7 @@ void tSQLBinaryExprCalcTraverse(tSQLBinaryExpr *pExprs, int32_t numOfRows, char
tSQLSyntaxNode *pRight = pExprs->pRight;
/* the left output has result from the left child syntax tree */
char *pLeftOutput = malloc(sizeof(int64_t) * numOfRows);
char *pLeftOutput = (char*)malloc(sizeof(int64_t) * numOfRows);
if (pLeft->nodeType == TSQL_NODE_EXPR) {
tSQLBinaryExprCalcTraverse(pLeft->pExpr, numOfRows, pLeftOutput, param, order, getSourceDataBlock);
}

View File

@ -26,19 +26,18 @@
#include "tutil.h"
#include "tnote.h"
void tscProcessFetchRow(SSchedMsg *pMsg);
void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessFetchRow(SSchedMsg *pMsg);
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRows, void (*fp)());
/*
* proxy function to perform sequentially query&retrieve operation.
* If sql queries upon metric and two-stage merge procedure is not needed,
* it will sequentially query&retrieve data for all vnodes in pCmd->pMetricMeta
* Proxy function to perform the query & retrieve operations sequentially.
* If the SQL statement queries a super table and no two-stage merge procedure is involved (i.e. a
* projection query), it sequentially queries & retrieves data from all vnodes
*/
static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
// TODO return the correct error code to client in tscQueueAsyncError
void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) {
@ -51,7 +50,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
}
int32_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string too long");
tscQueueAsyncError(fp, param);
return;
@ -81,7 +80,6 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
return;
}
pSql->sqlstr = malloc(sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
@ -95,9 +93,9 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sqlstr);
tscTrace("%p Async SQL: %s, pObj:%p", pSql, pSql->sqlstr, pObj);
tscDump("%p pObj:%p, Async SQL: %s", pSql, pObj, pSql->sqlstr);
int32_t code = tsParseSql(pSql, pObj->acctId, pObj->db, true);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
if (code != TSDB_CODE_SUCCESS) {
@ -109,7 +107,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
tscDoQuery(pSql);
}
static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
if (tres == NULL) {
return;
}
@ -118,35 +116,32 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
// sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx
if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) {
// vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx
assert(pCmd->vnodeIdx >= 0);
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
(*pSql->fetchFp)(param, tres, 0);
if (numOfRows == 0) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
} else {
/*
* all available virtual nodes have been checked already; now we need to check
* the queries of the next subclause
*/
if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
return;
}
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
/*
* 1. the limitation has been reached
* 2. there are no remaining virtual nodes to retrieve from
*/
(*pSql->fetchFp)(param, pSql, 0);
}
if ((++(pCmd->vnodeIdx)) < tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) {
tscTrace("%p retrieve data from next vnode:%d", pSql, pCmd->vnodeIdx);
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
tscResetForNextRetrieve(pRes);
pSql->fp = tscProcessAsyncRetrieveNextVnode;
tscProcessSql(pSql);
return;
}
} else { // local reducer has handled this situation
// the local reducer has handled this situation during a super table non-projection query.
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotal += pRes->numOfRows;
}
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
(*pSql->fetchFp)(param, tres, numOfRows);
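The remaining-limit arithmetic above (pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal) in a worked instance with hypothetical numbers:

#include <stdio.h>

int main(void) {
    long globalLimit = 100;   /* LIMIT clause of the whole query      */
    long numOfTotal  = 40;    /* rows already delivered to the client */
    long remaining   = globalLimit - numOfTotal;
    printf("query the next vnode with limit %ld\n", remaining);  /* 60 */
    return 0;
}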
@ -157,14 +152,13 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL) { // error
tscError("sql object is NULL");
tscQueueAsyncError(pSql->fetchFp, param);
return;
}
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pRes->qhandle == 0 || numOfRows != 0) {
if ((pRes->qhandle == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) {
if (pRes->qhandle == 0) {
tscError("qhandle is NULL");
} else {
@ -183,14 +177,18 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
}
/*
* retrieve callback for fetch rows proxy. It serves as the callback function of querying vnode
* retrieve callback for fetch rows proxy.
* The two functions below both serve as the callback function of querying a virtual node:
* the query callback runs first, followed by the retrieve callback.
*/
static void tscProcessAsyncRetrieveNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncFetchRowsProxy);
static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
// query completed, continue to retrieve
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
}
static void tscProcessAsyncContinueRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscProcessAsyncRetrieve);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows) {
// query completed, continue to retrieve
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchSingleRowProxy);
}
void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), void *param) {
@ -213,7 +211,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
// user-defined callback function is stored in fetchFp
pSql->fetchFp = fp;
pSql->fp = tscProcessAsyncFetchRowsProxy;
pSql->fp = tscAsyncFetchRowsProxy;
pSql->param = param;
tscResetForNextRetrieve(pRes);
@ -248,8 +246,12 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
if (pRes->row >= pRes->numOfRows) {
tscResetForNextRetrieve(pRes);
pSql->fp = tscProcessAsyncRetrieve;
pSql->fp = tscAsyncFetchSingleRowProxy;
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
} else {
SSchedMsg schedMsg;
@ -261,57 +263,45 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
}
}
void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows) {
SSqlObj *pSql = (SSqlObj *)tres;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (numOfRows == 0) {
// sequentially retrieve data from the remaining vnodes.
if (tscProjectionQueryOnMetric(pCmd)) {
if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from the remaining vnodes.
tscTryQueryNextVnode(pSql, tscAsyncQuerySingleRowForNextVnode);
} else {
/*
* vnode is denoted by vnodeIdx; continue to query the vnode specified by vnodeIdx till all vnodes have been retrieved
* 1. has reached the limitation
* 2. no remaining virtual nodes to be retrieved
*/
assert(pCmd->vnodeIdx >= 1);
/* reached the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
(*pSql->fetchFp)(pSql->param, pSql, NULL);
}
return;
}
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
if ((++pCmd->vnodeIdx) <= tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
tscResetForNextRetrieve(pRes);
pSql->fp = tscProcessAsyncContinueRetrieve;
tscProcessSql(pSql);
return;
}
} else {
(*pSql->fetchFp)(pSql->param, pSql, NULL);
}
} else {
for (int i = 0; i < pCmd->numOfCols; ++i)
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row;
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row;
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pSql->res.tsrow);
}
}
void tscProcessFetchRow(SSchedMsg *pMsg) {
SSqlObj *pSql = (SSqlObj *)pMsg->ahandle;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
for (int i = 0; i < pCmd->numOfCols; ++i)
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row;
pRes->row++;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < pCmd->numOfCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row;
}
pRes->row++;
(*pSql->fetchFp)(pSql->param, pSql, pRes->tsrow);
}
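The TSC_GET_RESPTR_BASE + bytes[i] * row arithmetic above implies a column-major result block: each column is stored contiguously, so a cell is addressed as column base plus row times column width. A minimal sketch of that layout, with hypothetical types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Column-major result block: each column stored contiguously. */
typedef struct {
    char   *data;      /* all columns back to back           */
    int     numOfRows;
    int     bytes[2];  /* width of each column               */
    int     offset[2]; /* start of each column within 'data' */
} ResBlock;

/* Address of cell (row, col): column base plus row * column width. */
static char *cell(ResBlock *r, int row, int col) {
    return r->data + r->offset[col] + (size_t)r->bytes[col] * row;
}

int main(void) {
    /* two columns: int64 ts and int32 value, 3 rows */
    ResBlock r = {.numOfRows = 3, .bytes = {8, 4}, .offset = {0, 8 * 3}};
    char buf[8 * 3 + 4 * 3] = {0};
    r.data = buf;

    int64_t ts = 1700000000000; int32_t v = 42;
    memcpy(cell(&r, 1, 0), &ts, sizeof ts);
    memcpy(cell(&r, 1, 1), &v, sizeof v);

    int64_t ts_out; int32_t v_out;
    memcpy(&ts_out, cell(&r, 1, 0), sizeof ts_out);
    memcpy(&v_out, cell(&r, 1, 1), sizeof v_out);
    printf("row 1: ts=%lld value=%d\n", (long long)ts_out, v_out);
    return 0;
}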
@ -370,7 +360,7 @@ void tscQueueAsyncRes(SSqlObj *pSql) {
tscTrace("%p SqlObj is freed, not add into queue async res", pSql);
return;
} else {
tscTrace("%p add into queued async res, code:%d", pSql, pSql->res.code);
tscError("%p add into queued async res, code:%d", pSql, pSql->res.code);
}
SSchedMsg schedMsg;
@ -403,10 +393,16 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
SSqlCmd *pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
assert(!pCmd->isInsertFromFile && pSql->signature == pSql);
assert(pCmd->dataSourceType != 0 && pSql->signature == pSql);
int32_t index = 0;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, index);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1 || pQueryInfo->numOfTables == 2);
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) {
if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) {
// restore user defined fp
pSql->fp = pSql->fetchFp;
tscTrace("%p Async insertion completed, destroy data block list", pSql);
@ -418,17 +414,17 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
(*pSql->fp)(pSql->param, tres, numOfRows);
} else {
do {
code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]);
code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pMeterMetaInfo->vnodeIndex++]);
if (code != TSDB_CODE_SUCCESS) {
tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d",
pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code);
pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize, code);
}
} while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize);
} while (code != TSDB_CODE_SUCCESS && pMeterMetaInfo->vnodeIndex < pDataBlocks->nSize);
// build submit msg may fail
if (code == TSDB_CODE_SUCCESS) {
tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize);
tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize);
tscProcessSql(pSql);
}
}
@ -440,7 +436,6 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj *pSql = (SSqlObj *)param;
if (pSql == NULL || pSql->signature != pSql) return;
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
@ -461,9 +456,10 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
tscTrace("%p renew meterMeta successfully, command:%d, code:%d, thandle:%p, retry:%d",
pSql, pSql->cmd.command, pSql->res.code, pSql->thandle, pSql->retry);
assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta == NULL);
tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
assert(pMeterMetaInfo->pMeterMeta == NULL);
tscGetMeterMeta(pSql, pMeterMetaInfo);
code = tscSendMsgToServer(pSql);
if (code != 0) {
pRes->code = code;
@ -481,49 +477,65 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
}
if (pSql->pStream == NULL) {
// check if it is a sub-query of a metric query first; if true, enter another routine
if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx >= 0 && pSql->param != NULL);
// check if it is a sub-query of a super table query first; if true, enter another routine
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL);
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSqlObj;
assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx &&
assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex &&
pMeterMetaInfo->pMeterMeta->numOfTags != 0);
tscTrace("%p get metricMeta during metric query successfully", pSql);
tscTrace("%p get metricMeta during super table query successfully", pSql);
code = tscGetMeterMeta(pSql, tscGetMeterMetaInfo(&pSql->cmd, 0)->name, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
code = tscGetMetricMeta(pSql);
code = tscGetMetricMeta(pSql, 0);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
} else { // normal async query continues
code = tsParseSql(pSql, pObj->acctId, pObj->db, false);
if (pCmd->isParseFinish) {
tscTrace("%p resend data to vnode in metermeta callback since sql has been parsed completed", pSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
assert(code == TSDB_CODE_SUCCESS);
if (pMeterMetaInfo->pMeterMeta) {
code = tscSendMsgToServer(pSql);
if (code == TSDB_CODE_SUCCESS) return;
}
} else {
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
}
} else { // stream computing
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql);
if (code == TSDB_CODE_SUCCESS && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql, pCmd->clauseIndex);
pRes->code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
}
if (code != 0) {
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscQueueAsyncRes(pSql);
return;
}
@ -532,10 +544,12 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
tscTrace("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command);
/*
* NOTE:
* transform the sql functions for the metric query before getting the meter/metric meta,
* transform the sql functions for the super table query before getting the meter/metric meta,
* since in the callback functions, only tscProcessSql(pStream->pSql) is executed!
*/
tscTansformSQLFunctionForMetricQuery(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tscTansformSQLFunctionForSTableQuery(pQueryInfo);
tscIncStreamExecutionCount(pSql->pStream);
} else {
tscTrace("%p get meterMeta/metricMeta successfully", pSql);


@ -96,11 +96,7 @@ void *taosAddConnIntoCache(void *handle, void *data, uint32_t ip, uint16_t port,
pObj = (SConnCache *)handle;
if (pObj == NULL || pObj->maxSessions == 0) return NULL;
#ifdef CLUSTER
if (data == NULL || ip == 0) {
#else
if (data == NULL) {
#endif
tscTrace("data:%p ip:%p:%d not valid, not added in cache", data, ip, port);
return NULL;
}

File diff suppressed because it is too large


@ -14,28 +14,15 @@
*/
#include "os.h"
#include "tcache.h"
#include "tscJoinProcess.h"
#include "tcache.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tscompression.h"
#include "ttime.h"
#include "tutil.h"
static UNUSED_FUNC bool isSubqueryCompleted(SSqlObj* pSql) {
bool hasData = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes* pRes = &pSql->pSubs[i]->res;
// in case of inner join, if any subquery is exhausted, the query is completed
if (pRes->numOfRows == 0) {
hasData = false;
break;
}
}
return hasData;
}
static void freeSubqueryObj(SSqlObj* pSql);
static bool doCompare(int32_t order, int64_t left, int64_t right) {
if (order == TSQL_SO_ASC) {
@ -45,19 +32,24 @@ static bool doCompare(int32_t order, int64_t left, int64_t right) {
}
}
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, SJoinSubquerySupporter* pSupporter2,
TSKEY* st, TSKEY* et) {
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1,
SJoinSubquerySupporter* pSupporter2, TSKEY* st, TSKEY* et) {
STSBuf* output1 = tsBufCreate(true);
STSBuf* output2 = tsBufCreate(true);
*st = INT64_MAX;
*et = INT64_MIN;
SLimitVal* pLimit = &pSql->cmd.limit;
int32_t order = pSql->cmd.order.order;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pSql->pSubs[0]->cmd.tsBuf = output1;
pSql->pSubs[1]->cmd.tsBuf = output2;
SLimitVal* pLimit = &pQueryInfo->limit;
int32_t order = pQueryInfo->order.order;
SQueryInfo* pSubQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[0]->cmd, 0);
SQueryInfo* pSubQueryInfo2 = tscGetQueryInfoDetail(&pSql->pSubs[1]->cmd, 0);
pSubQueryInfo1->tsBuf = output1;
pSubQueryInfo2->tsBuf = output2;
tsBufResetPos(pSupporter1->pTSBuf);
tsBufResetPos(pSupporter2->pTSBuf);
@ -88,7 +80,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
#ifdef _DEBUG_VIEW
// for debug purpose
tscPrint("%lld, tags:%d \t %lld, tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
tscPrint("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
#endif
if (elem1.tag < elem2.tag || (elem1.tag == elem2.tag && doCompare(order, elem1.ts, elem2.ts))) {
@ -104,6 +96,11 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
numOfInput2++;
} else {
/*
* in case of a stable query, limit/offset is not applied here; the limit/offset is applied to the
* final results, which are acquired after the secondary merge in the client.
*/
if (pLimit->offset == 0 || pQueryInfo->nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (*st > elem1.ts) {
*st = elem1.ts;
}
@ -112,8 +109,6 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
*et = elem1.ts;
}
// in case of stable query, limit/offset is not applied here
if (pLimit->offset == 0 || pSql->cmd.nAggTimeInterval > 0 || QUERY_IS_STABLE_QUERY(pSql->cmd.type)) {
tsBufAppend(output1, elem1.vnode, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts));
tsBufAppend(output2, elem2.vnode, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts));
} else {
@ -150,31 +145,34 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor
tsBufDestory(pSupporter1->pTSBuf);
tsBufDestory(pSupporter2->pTSBuf);
tscTrace("%p input1:%lld, input2:%lld, %lld for secondary query after ts blocks intersecting",
pSql, numOfInput1, numOfInput2, output1->numOfTotal);
tscTrace("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " for secondary query after ts blocks "
"intersecting, skey:%" PRId64 ", ekey:%" PRId64, pSql,
numOfInput1, numOfInput2, output1->numOfTotal, *st, *et);
return output1->numOfTotal;
}
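What doTSBlockIntersect performs is essentially a merge-intersection of two streams sorted on (tag, ts): advance whichever side compares lower, and emit a qualified pair on equality. A stripped-down sketch of that core loop, ascending order only and without the vnode/limit handling:

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t tag, ts; } Elem;

/* Returns 1 if a orders before b on (tag, ts) ascending. */
static int before(Elem a, Elem b) {
    return a.tag < b.tag || (a.tag == b.tag && a.ts < b.ts);
}

/* Merge-intersect two (tag, ts)-sorted arrays; print common elements. */
static void intersect(const Elem *a, int na, const Elem *b, int nb) {
    int i = 0, j = 0;
    while (i < na && j < nb) {
        if (before(a[i], b[j]))      i++;   /* a behind, advance it  */
        else if (before(b[j], a[i])) j++;   /* b behind, advance it  */
        else {                              /* equal: qualified pair */
            printf("tag=%lld ts=%lld\n", (long long)a[i].tag, (long long)a[i].ts);
            i++; j++;
        }
    }
}

int main(void) {
    Elem a[] = {{1, 10}, {1, 20}, {2, 5}};
    Elem b[] = {{1, 20}, {2, 5}, {2, 9}};
    intersect(a, 3, b, 3);   /* prints (1,20) and (2,5) */
    return 0;
}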
// TODO: handle failure to create the subquery
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) {
SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, int32_t index) {
SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter));
if (pSupporter == NULL) {
return NULL;
}
pSupporter->pObj = pSql;
pSupporter->hasMore = true;
pSupporter->pState = pState;
pSupporter->subqueryIndex = index;
pSupporter->interval = pSql->cmd.nAggTimeInterval;
pSupporter->limit = pSql->cmd.limit;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, index);
pSupporter->interval = pQueryInfo->nAggTimeInterval;
pSupporter->limit = pQueryInfo->limit;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, pSql->cmd.clauseIndex, index);
pSupporter->uid = pMeterMetaInfo->pMeterMeta->uid;
assert (pSupporter->uid != 0);
getTmpfilePath("join-", pSupporter->path);
pSupporter->f = fopen(pSupporter->path, "w");
@ -190,7 +188,7 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) {
return;
}
tfree(pSupporter->exprsInfo.pExprs);
tscSqlExprInfoDestroy(&pSupporter->exprsInfo);
tscColumnBaseInfoDestroy(&pSupporter->colList);
tscClearFieldInfo(&pSupporter->fieldsInfo);
@ -210,10 +208,9 @@ void tscDestroyJoinSupporter(SJoinSubquerySupporter* pSupporter) {
* primary timestamp column, the secondary query is not necessary
*
*/
bool needSecondaryQuery(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
for (int32_t i = 0; i < pCmd->colList.numOfCols; ++i) {
SColumnBase* pBase = tscColumnBaseInfoGet(&pCmd->colList, i);
bool needSecondaryQuery(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->colList.numOfCols; ++i) {
SColumnBase* pBase = tscColumnBaseInfoGet(&pQueryInfo->colList, i);
if (pBase->colIndex.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return true;
}
@ -225,102 +222,147 @@ bool needSecondaryQuery(SSqlObj* pSql) {
/*
* launch secondary stage query to fetch the result that contains timestamp in set
*/
int32_t tscLaunchSecondSubquery(SSqlObj* pSql) {
// TODO: skip launching the secondary stage query when it is not needed
// if (!needSecondaryQuery(pSql)) {
// return;
// }
// sub query may not be necessary
int32_t tscLaunchSecondPhaseSubqueries(SSqlObj* pSql) {
int32_t numOfSub = 0;
SJoinSubquerySupporter* pSupporter = NULL;
/*
* If the columns are not involved in the final select clause, the secondary query will not be launched
* for the subquery.
*/
SSubqueryState* pState = NULL;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
pSupporter = pSql->pSubs[i]->param;
pSupporter->pState->numOfCompleted = 0;
if (pSupporter->exprsInfo.numOfExprs > 0) {
++numOfSub;
}
}
assert(numOfSub > 0);
// scan all subqueries; if a subquery retrieves only the ts column, ignore it
int32_t j = 0;
tscTrace("%p start to launch secondary subqueries: %d", pSql, pSql->numOfSubs);
tscTrace("%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in "
"select clause", pSql, pSql->numOfSubs, numOfSub);
/*
* the subqueries that do not actually launch the secondary query to a virtual node are set as completed.
*/
pState = pSupporter->pState;
pState->numOfTotal = pSql->numOfSubs;
pState->numOfCompleted = (pSql->numOfSubs - numOfSub);
bool success = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
pSupporter = pSub->param;
pSupporter->pState->numOfTotal = numOfSub;
SSqlObj *pPrevSub = pSql->pSubs[i];
pSql->pSubs[i] = NULL;
pSupporter = pPrevSub->param;
if (pSupporter->exprsInfo.numOfExprs == 0) {
tscTrace("%p subIndex: %d, not need to launch query, ignore it", pSql, i);
tscDestroyJoinSupporter(pSupporter);
taos_free_result(pSub);
tscFreeSqlObj(pPrevSub);
pSql->pSubs[i] = NULL;
continue;
}
SSqlObj* pNew = createSubqueryObj(pSql, 0, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL);
SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0);
STSBuf *pTSBuf = pSubQueryInfo->tsBuf;
pSubQueryInfo->tsBuf = NULL;
// free result for async object will also free sqlObj
taos_free_result(pPrevSub);
SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, NULL);
if (pNew == NULL) {
pSql->numOfSubs = i; // revise the number of subqueries
pSupporter->pState->numOfTotal = i;
pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscDestroyJoinSupporter(pSupporter);
return NULL;
success = false;
break;
}
tscFreeSqlCmdData(&pNew->cmd);
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
pSql->pSubs[j++] = pNew;
pNew->cmd.tsBuf = pSub->cmd.tsBuf;
pSub->cmd.tsBuf = NULL;
taos_free_result(pSub);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of the timestamp comp-z data to the newly created object
// set the second stage sub query for join process
pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE;
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE;
pNew->cmd.nAggTimeInterval = pSupporter->interval;
pNew->cmd.limit = pSupporter->limit;
pNew->cmd.groupbyExpr = pSupporter->groupbyExpr;
pQueryInfo->nAggTimeInterval = pSupporter->interval;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0);
tscTagCondCopy(&pNew->cmd.tagCond, &pSupporter->tagCond);
tscColumnBaseInfoCopy(&pQueryInfo->colList, &pSupporter->colList, 0);
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
tscSqlExprCopy(&pNew->cmd.exprsInfo, &pSupporter->exprsInfo, pSupporter->uid);
tscFieldInfoCopyAll(&pSupporter->fieldsInfo, &pNew->cmd.fieldsInfo);
tscSqlExprCopy(&pQueryInfo->exprsInfo, &pSupporter->exprsInfo, pSupporter->uid);
tscFieldInfoCopyAll(&pQueryInfo->fieldsInfo, &pSupporter->fieldsInfo);
// add the ts function for interval query if it is missing
if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS && pNew->cmd.nAggTimeInterval > 0) {
tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0);
/*
* if the first column of the secondary query is not the ts function, add it,
* because this column is required to filter rows by timestamp after intersecting.
*/
if (pSupporter->exprsInfo.pExprs[0].functionId != TSDB_FUNC_TS) {
tscAddTimestampColumn(pQueryInfo, TSDB_FUNC_TS, 0);
}
// todo refactor function name
tscAddTimestampColumn(&pNew->cmd, TSDB_FUNC_TS, 0);
tscFieldInfoCalOffset(&pNew->cmd);
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
assert(pNew->numOfSubs == 0 && pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0);
tscFieldInfoCalOffset(pNewQueryInfo);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pNewQueryInfo, 0);
/*
* When handling a projection query, the offset value of a table-table join will be modified,
* since it is changed during the timestamp intersection.
*/
pSupporter->limit = pQueryInfo->limit;
pNewQueryInfo->limit = pSupporter->limit;
// fetch the join tag column
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0);
assert(pNew->cmd.tagCond.joinInfo.hasJoin);
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
SSqlExpr *pExpr = tscSqlExprGet(pNewQueryInfo, 0);
assert(pQueryInfo->tagCond.joinInfo.hasJoin);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd, pMeterMetaInfo->pMeterMeta->uid);
int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pQueryInfo->tagCond, pMeterMetaInfo->pMeterMeta->uid);
pExpr->param[0].i64Key = tagColIndex;
pExpr->numOfParams = 1;
addRequiredTagColumn(&pNew->cmd, tagColIndex, 0);
}
tscProcessSql(pNew);
tscPrintSelectClause(pNew, 0);
tscTrace("%p subquery:%p tableIndex:%d, vnodeIdx:%d, type:%d, exprInfo:%d, colList:%d, fieldsInfo:%d, name:%s",
pSql, pNew, 0, pMeterMetaInfo->vnodeIndex, pNewQueryInfo->type,
pNewQueryInfo->exprsInfo.numOfExprs, pNewQueryInfo->colList.numOfCols,
pNewQueryInfo->fieldsInfo.numOfOutputCols, pNewQueryInfo->pMeterInfo[0]->name);
}
// revise the number of subs
pSql->numOfSubs = j;
// preparing the subquery objects failed, abort
if (!success) {
pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("%p failed to prepare subqueries objs for secondary phase query, numOfSub:%d, code:%d", pSql,
pSql->numOfSubs, pSql->res.code);
freeSubqueryObj(pSql);
return 0;
return pSql->res.code;
}
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if (pSub == NULL) {
continue;
}
tscProcessSql(pSub);
}
return TSDB_CODE_SUCCESS;
}
static void freeSubqueryObj(SSqlObj* pSql) {
@ -353,7 +395,10 @@ static void doQuitSubquery(SSqlObj* pParentSql) {
}
static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter) {
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
pSqlObj->res.code = abs(pSupporter->pState->code);
tscError("%p all subquery return and query failed, global code:%d", pSqlObj, pSqlObj->res.code);
@ -362,11 +407,11 @@ static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSubquerySupporter* pSupporter
}
// update the query time range according to the join results on timestamp
static void updateQueryTimeRange(SSqlObj* pSql, int64_t st, int64_t et) {
assert(pSql->cmd.stime <= st && pSql->cmd.etime >= et);
static void updateQueryTimeRange(SQueryInfo* pQueryInfo, int64_t st, int64_t et) {
assert(pQueryInfo->stime <= st && pQueryInfo->etime >= et);
pSql->cmd.stime = st;
pSql->cmd.etime = et;
pQueryInfo->stime = st;
pQueryInfo->etime = et;
}
static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
@ -374,8 +419,12 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
SSqlObj* pParentSql = pSupporter->pObj;
SSqlObj* pSql = (SSqlObj*)tres;
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) == 0) {
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows,
pSupporter->pState->code);
@ -386,7 +435,7 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
if (numOfRows > 0) { // write the data into disk
fwrite(pSql->res.data, pSql->res.numOfRows, 1, pSupporter->f);
fflush(pSupporter->f);
fclose(pSupporter->f);
STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true);
if (pBuf == NULL) {
@ -401,7 +450,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path);
pSupporter->pTSBuf = pBuf;
} else {
tsBufMerge(pSupporter->pTSBuf, pBuf, pSql->cmd.vnodeIdx);
assert(pQueryInfo->numOfTables == 1); // for a subquery, there is only one meterMetaInfo
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex);
tsBufDestory(pBuf);
}
@ -412,7 +464,32 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
taos_fetch_rows_a(tres, joinRetrieveCallback, param);
} else if (numOfRows == 0) { // no data from this vnode anymore
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
SQueryInfo* pParentQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, pParentSql->cmd.clauseIndex);
//todo refactor
if (tscNonOrderedProjectionQueryOnSTable(pParentQueryInfo, 0)) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1);
// for projection query, need to try next vnode
int32_t totalVnode = pMeterMetaInfo->pMetricMeta->numOfVnodes;
if ((++pMeterMetaInfo->vnodeIndex) < totalVnode) {
tscTrace("%p current vnode:%d exhausted, try next:%d. total vnode:%d. current numOfRes:%d", pSql,
pMeterMetaInfo->vnodeIndex - 1, pMeterMetaInfo->vnodeIndex, totalVnode, pRes->numOfTotal);
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->fp = tscJoinQueryCallback;
tscProcessSql(pSql);
return;
}
}
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
assert(finished == numOfTotal);
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres,
@ -433,8 +510,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscTrace("%p free all sub SqlObj and quit", pParentSql);
doQuitSubquery(pParentSql);
} else {
updateQueryTimeRange(pParentSql, st, et);
tscLaunchSecondSubquery(pParentSql);
updateQueryTimeRange(pParentQueryInfo, st, et);
tscLaunchSecondPhaseSubqueries(pParentSql);
}
}
} else { // failure of sub query
@ -451,39 +528,107 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) {
tscError("%p retrieve failed, code:%d, index:%d", pSql, numOfRows, pSupporter->subqueryIndex);
}
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
tscTrace("%p secondary retrieve completed, global code:%d", tres, pParentSql->res.code);
if (numOfRows >= 0) {
pSql->res.numOfTotal += pSql->res.numOfRows;
}
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && numOfRows == 0) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
assert(pQueryInfo->numOfTables == 1);
// for projection query, need to try next vnode if current vnode is exhausted
if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSupporter->pState->numOfCompleted = 0;
pSupporter->pState->numOfTotal = 1;
pSql->cmd.command = TSDB_SQL_SELECT;
pSql->fp = tscJoinQueryCallback;
tscProcessSql(pSql);
return;
}
}
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
assert(finished == numOfTotal);
tscTrace("%p all %d secondary subquery retrieves completed, global code:%d", tres, numOfTotal,
pParentSql->res.code);
if (pSupporter->pState->code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = abs(pSupporter->pState->code);
freeSubqueryObj(pParentSql);
}
tsem_post(&pParentSql->rspSem);
} else {
tscTrace("%p sub:%p completed, completed:%d, total:%d", pParentSql, tres, finished, numOfTotal);
}
}
}
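The numOfCompleted/numOfTotal handshake used throughout these callbacks — every finishing subquery increments the counter exactly once, and only the one that observes finished == numOfTotal wakes the parent — sketched with C11 atomics and a POSIX semaphore (names are illustrative, not the TDengine structures):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
    atomic_int completed;
    int        total;
    sem_t      done;
} SubState;

static void *subquery(void *arg) {
    SubState *st = arg;
    /* ... per-subquery work would happen here ... */
    int finished = atomic_fetch_add(&st->completed, 1) + 1;
    if (finished >= st->total) {      /* last finisher wakes the parent */
        sem_post(&st->done);
    }
    return NULL;
}

int main(void) {
    enum { N = 4 };
    SubState st = {.total = N};
    atomic_init(&st.completed, 0);
    sem_init(&st.done, 0, 0);

    pthread_t tid[N];
    for (int i = 0; i < N; i++) pthread_create(&tid[i], NULL, subquery, &st);

    sem_wait(&st.done);               /* parent blocks until all N complete */
    printf("all %d subqueries completed\n", st.total);

    for (int i = 0; i < N; i++) pthread_join(tid[i], NULL);
    sem_destroy(&st.done);
    return 0;
}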
static SJoinSubquerySupporter* tscUpdateSubqueryStatus(SSqlObj* pSql, int32_t numOfFetch) {
int32_t notInvolved = 0;
SJoinSubquerySupporter* pSupporter = NULL;
SSubqueryState* pState = NULL;
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) {
notInvolved++;
} else {
pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param;
pState = pSupporter->pState;
}
}
pState->numOfTotal = pSql->numOfSubs;
pState->numOfCompleted = pSql->numOfSubs - numOfFetch;
return pSupporter;
}
void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
int32_t numOfFetch = 0;
assert(pSql->numOfSubs >= 1);
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param;
if (pSql->pSubs[i] == NULL) { // this subquery does not need to be involved in the secondary query
continue;
}
SSqlRes *pRes = &pSql->pSubs[i]->res;
if (pRes->row >= pRes->numOfRows && pSupporter->hasMore) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes &&
(!tscHasReachLimitation(pQueryInfo, pRes))) {
numOfFetch++;
}
} else {
if (pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pQueryInfo, pRes))) {
numOfFetch++;
}
}
}
if (numOfFetch > 0) {
if (numOfFetch <= 0) {
return;
}
// TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limitation is not handled
tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch);
SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param;
pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed
pSupporter->pState->numOfCompleted = 0;
SJoinSubquerySupporter* pSupporter = tscUpdateSubqueryStatus(pSql, numOfFetch);
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
if (pSql1 == NULL) {
continue;
}
SSqlRes* pRes1 = &pSql1->res;
SSqlCmd* pCmd1 = &pSql1->cmd;
@ -491,12 +636,16 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
pSupporter = (SJoinSubquerySupporter*)pSql1->param;
// wait for all subqueries completed
pSupporter->pState->numOfTotal = numOfFetch;
if (pRes1->row >= pRes1->numOfRows && pSupporter->hasMore) {
tscTrace("%p subquery:%p retrieve data from vnode, index:%d", pSql, pSql1, pSupporter->subqueryIndex);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd1, 0);
assert(pRes1->numOfRows >= 0 && pQueryInfo->numOfTables == 1);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
if (pRes1->row >= pRes1->numOfRows) {
tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1,
pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex);
tscResetForNextRetrieve(pRes1);
pSql1->fp = joinRetrieveCallback;
if (pCmd1->command < TSDB_SQL_LOCAL) {
@ -509,6 +658,15 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) {
// wait for all subquery completed
tsem_wait(&pSql->rspSem);
// update the records for each subquery
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) {
continue;
}
SSqlRes* pRes1 = &pSql->pSubs[i]->res;
pRes1->numOfTotalInCurrentClause += pRes1->numOfRows;
}
}
@ -519,26 +677,32 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
tscTrace("%p all subquery response, retrieve data", pSql);
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols);
if (pRes->pColumnIndex != NULL) {
return; // the column transfer support struct has been built
}
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pCmd, i);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pQueryInfo->fieldsInfo.numOfOutputCols);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int32_t tableIndexOfSub = -1;
for (int32_t j = 0; j < pCmd->numOfTables; ++j) {
SSqlObj* pSub = pSql->pSubs[j];
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSub->cmd, 0);
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, j);
if (pMeterMetaInfo->pMeterMeta->uid == pExpr->uid) {
tableIndexOfSub = j;
break;
}
}
SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd;
assert(tableIndexOfSub >= 0 && tableIndexOfSub < pQueryInfo->numOfTables);
for (int32_t k = 0; k < pSubCmd->exprsInfo.numOfExprs; ++k) {
SSqlExpr* pSubExpr = tscSqlExprGet(pSubCmd, k);
SSqlCmd* pSubCmd = &pSql->pSubs[tableIndexOfSub]->cmd;
SQueryInfo* pSubQueryInfo = tscGetQueryInfoDetail(pSubCmd, 0);
for (int32_t k = 0; k < pSubQueryInfo->exprsInfo.numOfExprs; ++k) {
SSqlExpr* pSubExpr = tscSqlExprGet(pSubQueryInfo, k);
if (pExpr->functionId == pSubExpr->functionId && pExpr->colInfo.colId == pSubExpr->colInfo.colId) {
pRes->pColumnIndex[i] = (SColumnIndex){.tableIndex = tableIndexOfSub, .columnIndex = k};
break;
@ -549,7 +713,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
SSqlObj* pSql = (SSqlObj*)tres;
// SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
// SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
// int32_t idx = pSql->cmd.vnodeIdx;
@ -573,12 +737,13 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// // no qualified result
// }
//
// tscLaunchSecondSubquery(pSql, ts, num);
// tscLaunchSecondPhaseSubqueries(pSql, ts, num);
// } else {
// }
// } else {
if ((pSql->cmd.type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if ((pQueryInfo->type & TSDB_QUERY_TYPE_JOIN_SEC_STAGE) != TSDB_QUERY_TYPE_JOIN_SEC_STAGE) {
if (code != TSDB_CODE_SUCCESS) { // direct call joinRetrieveCallback and set the error code
joinRetrieveCallback(param, pSql, code);
} else { // first stage query, continue to retrieve data
@ -605,9 +770,27 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
quitAllSubquery(pParentSql, pSupporter);
} else {
if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) {
tscSetupOutputColumnIndex(pParentSql);
int32_t numOfTotal = pSupporter->pState->numOfTotal;
int32_t finished = atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1);
if (finished >= numOfTotal) {
assert(finished == numOfTotal);
tscSetupOutputColumnIndex(pParentSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
/**
* if the query is a continued query (vnodeIndex > 0 for a projection query) for the next vnode, do the retrieval
* of data instead of returning to its invoker
*/
if (pMeterMetaInfo->vnodeIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes);
pSupporter->pState->numOfCompleted = 0; // reset the record value
pSql->fp = joinRetrieveCallback; // continue retrieve data
pSql->cmd.command = TSDB_SQL_FETCH;
tscProcessSql(pSql);
} else { // first retrieve from vnode during the secondary stage sub-query
if (pParentSql->fp == NULL) {
tsem_wait(&pParentSql->emptyRspSem);
tsem_wait(&pParentSql->emptyRspSem);
@ -627,6 +810,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
}
}
}
}
static int32_t getDataStartOffset() {
return sizeof(STSBufFileHeader) + TS_COMP_FILE_VNODE_MAX * sizeof(STSVnodeBlockInfo);
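getDataStartOffset implies the on-disk layout of the ts-comp file: a fixed header, then a fixed-capacity vnode-info table, then the compressed timestamp blocks. An illustrative sketch of that arithmetic (field names and the capacity are assumptions, not TDengine's actual definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout mirroring the offset computation above:
 * [ Header | VnodeInfo[0 .. VNODE_MAX-1] | compressed ts blocks ... ]
 */
enum { VNODE_MAX = 512 };                 /* hypothetical capacity */

typedef struct { uint32_t magic, numOfVnode, tsOrder; } Header;
typedef struct { int32_t vnode, numOfBlocks, compLen; } VnodeInfo;

static long data_start_offset(void) {
    return (long)(sizeof(Header) + VNODE_MAX * sizeof(VnodeInfo));
}

int main(void) {
    printf("data region starts at byte %ld\n", data_start_offset());
    return 0;
}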
@ -708,7 +892,9 @@ STSBuf* tsBufCreate(bool autoDelete) {
return NULL;
}
allocResForTSBuf(pTSBuf);
if (NULL == allocResForTSBuf(pTSBuf)) {
return NULL;
}
// update the header info
STSBufFileHeader header = {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = TSQL_SO_ASC};
@ -731,8 +917,9 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
strncpy(pTSBuf->path, path, PATH_MAX);
pTSBuf->f = fopen(pTSBuf->path, "r");
pTSBuf->f = fopen(pTSBuf->path, "r+");
if (pTSBuf->f == NULL) {
free(pTSBuf);
return NULL;
}
@ -774,7 +961,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
size_t infoSize = sizeof(STSVnodeBlockInfo) * pTSBuf->numOfVnodes;
STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize);
int64_t pos = ftell(pTSBuf->f);
//int64_t pos = ftell(pTSBuf->f); //pos not used
fread(buf, infoSize, 1, pTSBuf->f);
// the length value for each vnode is not kept in the file, so the length value is not set here
@ -797,12 +985,16 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) {
pTSBuf->cur.order = TSQL_SO_ASC;
pTSBuf->autoDelete = autoDelete;
tscTrace("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f),
pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete);
return pTSBuf;
}
void tsBufDestory(STSBuf* pTSBuf) {
void* tsBufDestory(STSBuf* pTSBuf) {
if (pTSBuf == NULL) {
return;
return NULL;
}
tfree(pTSBuf->assistBuf);
@ -814,10 +1006,21 @@ void tsBufDestory(STSBuf* pTSBuf) {
fclose(pTSBuf->f);
if (pTSBuf->autoDelete) {
tscTrace("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path);
unlink(pTSBuf->path);
} else {
tscTrace("tsBuf %p destroyed, tmp file:%s, remains", pTSBuf, pTSBuf->path);
}
free(pTSBuf);
return NULL;
}
static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) {
int32_t last = pTSBuf->numOfVnodes - 1;
assert(last >= 0);
return &pTSBuf->pData[last];
}
static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) {
@ -836,10 +1039,10 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) {
}
if (pTSBuf->numOfVnodes > 0) {
STSVnodeBlockInfo* pPrevBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info;
STSVnodeBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf);
// update prev vnode length info in file
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pPrevBlockInfo);
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pPrevBlockInfoEx->info);
}
// set initial value for vnode block
@ -857,9 +1060,9 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) {
// update the header info
STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder};
STSBufUpdateHeader(pTSBuf, &header);
return &pTSBuf->pData[pTSBuf->numOfVnodes - 1];
STSBufUpdateHeader(pTSBuf, &header);
return tsBufGetLastVnodeInfo(pTSBuf);
}
static void shrinkBuffer(STSList* ptsData) {
@ -906,8 +1109,10 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
pTSBuf->tsData.len = 0;
pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.compLen += blockSize;
pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.numOfBlocks += 1;
STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf);
pVnodeBlockInfoEx->info.compLen += blockSize;
pVnodeBlockInfoEx->info.numOfBlocks += 1;
shrinkBuffer(&pTSBuf->tsData);
}
@ -1008,13 +1213,13 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData
STSVnodeBlockInfoEx* pBlockInfo = NULL;
STSList* ptsData = &pTSBuf->tsData;
if (pTSBuf->numOfVnodes == 0 || pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.vnode != vnodeId) {
if (pTSBuf->numOfVnodes == 0 || tsBufGetLastVnodeInfo(pTSBuf)->info.vnode != vnodeId) {
writeDataToDisk(pTSBuf);
shrinkBuffer(ptsData);
pBlockInfo = addOneVnodeInfo(pTSBuf, vnodeId);
} else {
pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1];
pBlockInfo = tsBufGetLastVnodeInfo(pTSBuf);
}
assert(pBlockInfo->info.vnode == vnodeId);
@ -1037,6 +1242,8 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData
pTSBuf->numOfTotal += len / TSDB_KEYSIZE;
// the size of the raw data may exceed the size of the default prepared buffer, so
// during getBufBlock, the output buffer needs to be large enough.
if (ptsData->len >= ptsData->threshold) {
writeDataToDisk(pTSBuf);
shrinkBuffer(ptsData);
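The append path above accumulates timestamps in memory and flushes a block to disk once tsData.len crosses the threshold. The same accumulate-then-flush shape in a tiny sketch (no compression, hypothetical sizes):

#include <stdio.h>
#include <string.h>

/* Buffered appender: accumulate in memory, flush when a threshold is hit. */
typedef struct {
    char   buf[64];
    size_t len;
    size_t threshold;
    FILE  *f;
} Appender;

static void flush_block(Appender *a) {
    if (a->len == 0) return;
    fwrite(a->buf, 1, a->len, a->f);   /* the real code compresses first */
    a->len = 0;
}

static void append(Appender *a, const void *p, size_t n) {
    memcpy(a->buf + a->len, p, n);
    a->len += n;
    if (a->len >= a->threshold) flush_block(a);
}

int main(void) {
    Appender a = {.threshold = 16, .f = tmpfile()};
    if (!a.f) return 1;
    for (int i = 0; i < 10; i++) append(&a, &i, sizeof i);  /* flushes twice */
    flush_block(&a);                    /* final partial block */
    fclose(a.f);
    return 0;
}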
@ -1053,10 +1260,10 @@ void tsBufFlush(STSBuf* pTSBuf) {
writeDataToDisk(pTSBuf);
shrinkBuffer(&pTSBuf->tsData);
STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info;
STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf);
// update prev vnode length info in file
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo);
TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pBlockInfoEx->info);
// save the ts order into header
STSBufFileHeader header = {
@ -1157,11 +1364,22 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex
}
STSBlock* pBlock = &pTSBuf->block;
size_t s = pBlock->numOfElem * TSDB_KEYSIZE;
/*
* In order to accommodate all qualified data, the actual buffer size for one block with an identical tag value
* may exceed the maximum size allowed during the tsBufAppend function, so expandBuffer is invoked here
*/
if (s > pTSBuf->tsData.allocSize) {
expandBuffer(&pTSBuf->tsData, s);
}
pTSBuf->tsData.len =
tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf,
pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize);
assert(pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem);
assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len));
pCur->vnodeIndex = vnodeIndex;
pCur->blockIndex = blockIndex;
@ -1251,6 +1469,10 @@ bool tsBufNextPos(STSBuf* pTSBuf) {
return false;
}
if (pBlockInfo == NULL) {
return false;
}
int32_t blockIndex = pCur->order == TSQL_SO_ASC ? 0 : pBlockInfo->numOfBlocks - 1;
tsBufGetBlock(pTSBuf, pCur->vnodeIndex + step, blockIndex);
break;
@ -1318,7 +1540,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) {
tsBufFlush(pDestBuf);
// compare with the last vnode id
if (vnodeId != pDestBuf->pData[pDestBuf->numOfVnodes - 1].info.vnode) {
if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) {
int32_t oldSize = pDestBuf->numOfVnodes;
int32_t newSize = oldSize + pSrcBuf->numOfVnodes;
@ -1345,36 +1567,49 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) {
pDestBuf->numOfVnodes = newSize;
} else {
STSVnodeBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[pDestBuf->numOfVnodes - 1];
STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf);
pBlockInfoEx->len += pSrcBuf->pData[0].len;
pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks;
pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen;
pBlockInfoEx->info.vnode = vnodeId;
}
int64_t r = fseek(pDestBuf->f, 0, SEEK_END);
int32_t r = fseek(pDestBuf->f, 0, SEEK_END);
assert(r == 0);
int64_t offset = getDataStartOffset();
int32_t size = pSrcBuf->fileSize - offset;
#ifdef LINUX
ssize_t rc = sendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size);
ssize_t rc = tsendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size);
#else
ssize_t rc = fsendfile(pDestBuf->f, pSrcBuf->f, &offset, size);
#endif
if (rc == -1) {
printf("%s\n", strerror(errno));
tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno));
return -1;
}
if (rc != size) {
printf("%s\n", strerror(errno));
tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno));
return -1;
}
pDestBuf->numOfTotal += pSrcBuf->numOfTotal;
int32_t oldSize = pDestBuf->fileSize;
struct stat fileStat;
fstat(fileno(pDestBuf->f), &fileStat);
pDestBuf->fileSize = (uint32_t)fileStat.st_size;
assert(pDestBuf->fileSize == oldSize + size);
tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf,
pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete);
return 0;
}
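The merge above splices the source file's data region onto the end of the destination with sendfile(2) (tsendfile presumably being a portability wrapper around it). A standalone sketch of that append, Linux-only and with hypothetical file handles; error handling is trimmed to the essentials:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/sendfile.h>
#include <sys/types.h>
#include <unistd.h>

/* Append 'size' bytes of 'src', starting at 'offset', to the end of 'dst'. */
static int append_region(FILE *dst, FILE *src, off_t offset, size_t size) {
    if (fseek(dst, 0, SEEK_END) != 0) return -1;
    fflush(dst);                              /* sync stdio before raw fd I/O */
    while (size > 0) {
        ssize_t n = sendfile(fileno(dst), fileno(src), &offset, size);
        if (n <= 0) {
            fprintf(stderr, "sendfile: %s\n", strerror(errno));
            return -1;
        }
        size -= (size_t)n;                    /* sendfile may copy partially */
    }
    return 0;
}

int main(void) {
    FILE *src = tmpfile(), *dst = tmpfile();
    if (!src || !dst) return 1;
    fputs("0123456789", src);
    fflush(src);
    if (append_region(dst, src, 2, 5) == 0) puts("appended bytes 2..6 of src");
    fclose(src); fclose(dst);
    return 0;
}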
@ -1485,7 +1720,7 @@ void tsBufDisplay(STSBuf* pTSBuf) {
while (tsBufNextPos(pTSBuf)) {
STSElem elem = tsBufGetElem(pTSBuf);
printf("%d-%lld-%lld\n", elem.vnode, elem.tag, elem.ts);
printf("%d-%" PRId64 "-%" PRId64 "\n", elem.vnode, *(int64_t*) elem.tag, elem.ts);
}
pTSBuf->cur.order = old;
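The %lld to PRId64 changes in this file are a portability fix: int64_t is long on LP64 platforms but long long on 32-bit ones, so a hard-coded %lld is wrong for one of them, while the <inttypes.h> macros expand to the correct specifier. A minimal illustration:

#include <inttypes.h>
#include <stdio.h>

int main(void) {
    int64_t ts = 1500000000000;
    /* "%lld" assumes int64_t is long long, which is false on LP64 Linux
     * (where it is long); PRId64 expands to the correct specifier. */
    printf("ts=%" PRId64 "\n", ts);
    return 0;
}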


@ -39,32 +39,24 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
case TSDB_DATA_TYPE_NCHAR:
return length;
case TSDB_DATA_TYPE_DOUBLE: {
#ifdef _TD_ARM_32_
double dv = 0;
*(int64_t *)(&dv) = *(int64_t *)pData;
len = sprintf(buf, "%f", dv);
#else
len = sprintf(buf, "%lf", *(double *)pData);
#endif
dv = GET_DOUBLE_VAL(pData);
len = sprintf(buf, "%lf", dv);
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_FLOAT: {
#ifdef _TD_ARM_32_
float fv = 0;
*(int32_t *)(&fv) = *(int32_t *)pData;
fv = GET_FLOAT_VAL(pData);
len = sprintf(buf, "%f", fv);
#else
len = sprintf(buf, "%f", *(float *)pData);
#endif
if (strncasecmp("nan", buf, 3) == 0) {
len = 4;
}
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%lld", *(int64_t *)pData);
len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
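The GET_FLOAT_VAL/GET_DOUBLE_VAL macros above replace direct pointer casts such as *(double *)pData, which are undefined behavior for misaligned pointers and can fault on 32-bit ARM. Their exact definitions are not shown in this diff; a memcpy-based load is the usual portable idiom, sketched here:

#include <stdio.h>
#include <string.h>

/* Portable unaligned load: memcpy compiles to a plain load where legal. */
static double get_double_val(const void *p) {
    double v;
    memcpy(&v, p, sizeof v);
    return v;
}

int main(void) {
    char buf[sizeof(double) + 1];
    double d = 3.14;
    memcpy(buf + 1, &d, sizeof d);          /* deliberately misaligned */
    /* *(double *)(buf + 1) would be UB and may fault on ARM32 */
    printf("%f\n", get_double_val(buf + 1));
    return 0;
}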
@ -85,7 +77,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
* length((uint64_t) 123456789011) > 12, greater than sizeof(uint64_t)
*/
static int32_t tscMaxLengthOfTagsFields(SSqlObj *pSql) {
SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta;
SMeterMeta *pMeta = tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->pMeterMeta;
if (pMeta->meterType == TSDB_METER_METRIC || pMeta->meterType == TSDB_METER_OTABLE ||
pMeta->meterType == TSDB_METER_STABLE) {
@ -114,8 +106,9 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSqlRes *pRes = &pSql->res;
// one column for each row
SSqlCmd * pCmd = &pSql->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
SMeterMeta * pMeta = pMeterMetaInfo->pMeterMeta;
/*
@ -127,7 +120,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
int32_t numOfRows = pMeta->numOfColumns;
int32_t totalNumOfRows = numOfRows + pMeta->numOfTags;
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
numOfRows = pMeta->numOfColumns + pMeta->numOfTags;
}
@ -135,31 +128,31 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
SSchema *pSchema = tsGetSchema(pMeta);
for (int32_t i = 0; i < numOfRows; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TSDB_COL_NAME_LEN);
char *type = tDataTypeDesc[pSchema[i].type].aName;
pField = tscFieldInfoGetField(pCmd, 1);
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
pField = tscFieldInfoGetField(pQueryInfo, 1);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
int32_t bytes = pSchema[i].bytes;
if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes / TSDB_NCHAR_SIZE;
}
pField = tscFieldInfoGetField(pCmd, 2);
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes;
pField = tscFieldInfoGetField(pQueryInfo, 2);
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes;
pField = tscFieldInfoGetField(pCmd, 3);
pField = tscFieldInfoGetField(pQueryInfo, 3);
if (i >= pMeta->numOfColumns && pMeta->numOfTags != 0) {
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i, "tag",
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i, "tag",
strlen("tag") + 1);
}
}
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
return 0;
}
@ -167,27 +160,27 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
char *pTagValue = tsGetTagsValue(pMeta);
for (int32_t i = numOfRows; i < totalNumOfRows; ++i) {
// field name
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * totalNumOfRows + pField->bytes * i, pSchema[i].name,
TSDB_COL_NAME_LEN);
// type name
pField = tscFieldInfoGetField(pCmd, 1);
pField = tscFieldInfoGetField(pQueryInfo, 1);
char *type = tDataTypeDesc[pSchema[i].type].aName;
strncpy(pRes->data + tscFieldInfoGetOffset(pCmd, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
strncpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * totalNumOfRows + pField->bytes * i, type, pField->bytes);
// type length
int32_t bytes = pSchema[i].bytes;
pField = tscFieldInfoGetField(pCmd, 2);
pField = tscFieldInfoGetField(pQueryInfo, 2);
if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
bytes = bytes / TSDB_NCHAR_SIZE;
}
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pCmd, 2) * totalNumOfRows + pField->bytes * i) = bytes;
*(int32_t *)(pRes->data + tscFieldInfoGetOffset(pQueryInfo, 2) * totalNumOfRows + pField->bytes * i) = bytes;
// tag value
pField = tscFieldInfoGetField(pCmd, 3);
char *target = pRes->data + tscFieldInfoGetOffset(pCmd, 3) * totalNumOfRows + pField->bytes * i;
pField = tscFieldInfoGetField(pQueryInfo, 3);
char *target = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 3) * totalNumOfRows + pField->bytes * i;
if (isNull(pTagValue, pSchema[i].type)) {
sprintf(target, "%s", TSDB_DATA_NULL_STR);
@ -201,22 +194,14 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
taosUcs4ToMbs(pTagValue, pSchema[i].bytes, target);
break;
case TSDB_DATA_TYPE_FLOAT: {
#ifdef _TD_ARM_32_
float fv = 0;
*(int32_t *)(&fv) = *(int32_t *)pTagValue;
fv = GET_FLOAT_VAL(pTagValue);
sprintf(target, "%f", fv);
#else
sprintf(target, "%f", *(float *)pTagValue);
#endif
} break;
case TSDB_DATA_TYPE_DOUBLE: {
#ifdef _TD_ARM_32_
double dv = 0;
*(int64_t *)(&dv) = *(int64_t *)pTagValue;
dv = GET_DOUBLE_VAL(pTagValue);
sprintf(target, "%lf", dv);
#else
sprintf(target, "%lf", *(double *)pTagValue);
#endif
} break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(target, "%d", *(int8_t *)pTagValue);
@ -228,7 +213,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
sprintf(target, "%d", *(int32_t *)pTagValue);
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(target, "%lld", *(int64_t *)pTagValue);
sprintf(target, "%" PRId64 "", *(int64_t *)pTagValue);
break;
case TSDB_DATA_TYPE_BOOL: {
char *val = (*((int8_t *)pTagValue) == 0) ? "false" : "true";
@ -252,25 +237,28 @@ static int32_t tscBuildMeterSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
SSqlCmd *pCmd = &pSql->cmd;
pCmd->numOfCols = numOfCols;
pCmd->order.order = TSQL_SO_ASC;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
pQueryInfo->order.order = TSQL_SO_ASC;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, "Field", TSDB_COL_NAME_LEN);
rowLen += TSDB_COL_NAME_LEN;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 1, TSDB_DATA_TYPE_BINARY, "Type", typeColLength);
rowLen += typeColLength;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t));
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 2, TSDB_DATA_TYPE_INT, "Length", sizeof(int32_t));
rowLen += sizeof(int32_t);
tscFieldInfoSetValue(&pCmd->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 3, TSDB_DATA_TYPE_BINARY, "Note", noteColLength);
rowLen += noteColLength;
return rowLen;
}
static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
assert(tscGetMeterMetaInfo(&pSql->cmd, 0)->pMeterMeta != NULL);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
assert(tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta != NULL);
const int32_t NUM_OF_DESCRIBE_TABLE_COLUMNS = 4;
const int32_t TYPE_COLUMN_LENGTH = 16;
@ -283,7 +271,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
int32_t rowLen =
tscBuildMeterSchemaResultFields(pSql, NUM_OF_DESCRIBE_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, note_field_length);
tscFieldInfoCalOffset(&pSql->cmd);
tscFieldInfoCalOffset(pQueryInfo);
return tscSetValueToResObj(pSql, rowLen);
}
@ -293,7 +281,9 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) {
// only need to reorganize the results in the column format
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta;
SSchema * pSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);
@ -310,7 +300,7 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) {
}
int32_t totalNumOfResults = pMetricMeta->numOfMeters;
int32_t rowLen = tscGetResRowLength(pCmd);
int32_t rowLen = tscGetResRowLength(pQueryInfo);
tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen);
@ -321,16 +311,16 @@ static int tscBuildMetricTagProjectionResult(SSqlObj *pSql) {
for (int32_t j = 0; j < pSidList->numOfSids; ++j) {
SMeterSidExtInfo *pSidExt = tscGetMeterSidInfo(pSidList, j);
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SColIndexEx *pColIndex = &tscSqlExprGet(pCmd, k)->colInfo;
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SColIndexEx *pColIndex = &tscSqlExprGet(pQueryInfo, k)->colInfo;
int16_t offsetId = pColIndex->colIdx;
assert((pColIndex->flag & TSDB_COL_TAG) != 0);
char * val = pSidExt->tags + vOffset[offsetId];
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k);
memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, k) * totalNumOfResults + pField->bytes * rowIdx, val,
memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, k) * totalNumOfResults + pField->bytes * rowIdx, val,
(size_t)pField->bytes);
}
rowIdx++;
@ -344,21 +334,23 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SMetricMeta *pMetricMeta = tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMetricMeta *pMetricMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMetricMeta;
int32_t totalNumOfResults = 1; // count function only produce one result
int32_t rowLen = tscGetResRowLength(pCmd);
int32_t rowLen = tscGetResRowLength(pQueryInfo);
tscInitResObjForLocalQuery(pSql, totalNumOfResults, rowLen);
int32_t rowIdx = 0;
for (int32_t i = 0; i < totalNumOfResults; ++i) {
for (int32_t k = 0; k < pCmd->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
for (int32_t k = 0; k < pQueryInfo->fieldsInfo.numOfOutputCols; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->colInfo.colIdx == -1 && pExpr->functionId == TSDB_FUNC_COUNT) {
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, k);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, k);
memcpy(pRes->data + tscFieldInfoGetOffset(pCmd, i) * totalNumOfResults + pField->bytes * rowIdx,
memcpy(pRes->data + tscFieldInfoGetOffset(pQueryInfo, i) * totalNumOfResults + pField->bytes * rowIdx,
&pMetricMeta->numOfMeters, sizeof(pMetricMeta->numOfMeters));
} else {
tscError("not support operations");
@ -374,14 +366,16 @@ static int tscBuildMetricTagSqlFunctionResult(SSqlObj *pSql) {
static int tscProcessQueryTags(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMeta *pMeterMeta = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0)->pMeterMeta;
if (pMeterMeta == NULL || pMeterMeta->numOfTags == 0 || pMeterMeta->numOfColumns == 0) {
strcpy(pCmd->payload, "invalid table");
pSql->res.code = TSDB_CODE_INVALID_TABLE;
return pSql->res.code;
}
SSqlExpr *pExpr = tscSqlExprGet(pCmd, 0);
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, 0);
if (pExpr->functionId == TSDB_FUNC_COUNT) {
return tscBuildMetricTagSqlFunctionResult(pSql);
} else {
@ -390,7 +384,9 @@ static int tscProcessQueryTags(SSqlObj *pSql) {
}
static void tscProcessCurrentUser(SSqlObj *pSql) {
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, pSql->pTscObj->user, pExpr->aliasName, TSDB_USER_LEN);
}
@ -403,19 +399,24 @@ static void tscProcessCurrentDB(SSqlObj *pSql) {
setNull(db, TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
}
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, db, pExpr->aliasName, TSDB_DB_NAME_LEN);
}
static void tscProcessServerVer(SSqlObj *pSql) {
const char* v = pSql->pTscObj->sversion;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, v, pExpr->aliasName, tListLen(pSql->pTscObj->sversion));
}
static void tscProcessClientVer(SSqlObj *pSql) {
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, version, pExpr->aliasName, strlen(version));
}
@ -433,7 +434,9 @@ static void tscProcessServStatus(SSqlObj *pSql) {
}
}
SSqlExpr* pExpr = tscSqlExprGet(&pSql->cmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
tscSetLocalQueryResult(pSql, "1", pExpr->aliasName, 2);
}
@ -442,12 +445,16 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
SSqlRes *pRes = &pSql->res;
pCmd->numOfCols = 1;
pCmd->order.order = TSQL_SO_ASC;
tscFieldInfoSetValue(&pCmd->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->order.order = TSQL_SO_ASC;
tscClearFieldInfo(&pQueryInfo->fieldsInfo);
tscFieldInfoSetValue(&pQueryInfo->fieldsInfo, 0, TSDB_DATA_TYPE_BINARY, columnName, valueLength);
tscInitResObjForLocalQuery(pSql, 1, valueLength);
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, 0);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, 0);
strncpy(pRes->data, val, pField->bytes);
}
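tscSetLocalQueryResult above materializes a one-column, one-row result entirely on the client, which is how SELECT server_version() and similar pseudo-queries answer without touching a vnode. A self-contained sketch of that shape, with made-up types and an illustrative version string:

#include <stdio.h>
#include <string.h>

/* made-up stand-ins for TAOS_FIELD and the local result object */
typedef struct {
  char name[64];
  int  bytes;
} Field;

typedef struct {
  Field field;      /* exactly one output column */
  char  data[256];  /* the whole result body is one cell */
} LocalResult;

static void setLocalResult(LocalResult *res, const char *val,
                           const char *columnName, int valueLength) {
  snprintf(res->field.name, sizeof(res->field.name), "%s", columnName);
  res->field.bytes = valueLength;
  strncpy(res->data, val, valueLength);  /* fill the single cell */
}

int main(void) {
  LocalResult res = {0};
  setLocalResult(&res, "2.0.0.0", "server_version()", 8);
  printf("%s = %s\n", res.field.name, res.data);
  return 0;
}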


@ -18,11 +18,8 @@
#define _XOPEN_SOURCE
#pragma GCC diagnostic ignored "-Woverflow"
#pragma GCC diagnostic ignored "-Wunused-variable"
#include "os.h"
#include "ihash.h"
#include "hash.h"
#include "tscSecondaryMerge.h"
#include "tscUtil.h"
#include "tschemautil.h"
@ -39,7 +36,7 @@ enum {
TSDB_USE_CLI_TS = 1,
};
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize);
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
int32_t numType = isValidNumber(pToken);
@ -74,8 +71,6 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
}
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
char * token;
int tokenlen;
int32_t index = 0;
SSQLToken sToken;
int64_t interval;
@ -120,7 +115,6 @@ int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int1
pTokenEnd += index;
if (sToken.type == TK_MINUS || sToken.type == TK_PLUS) {
index = 0;
valueToken = tStrGetToken(pTokenEnd, &index, false, 0, NULL);
pTokenEnd += index;
@ -315,6 +309,10 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
}
strncpy(payload, pToken->z, pToken->n);
if (pToken->n < pSchema->bytes) {
payload[pToken->n] = 0; // add the null-terminated char if the length of the string is shorter than the available space
}
}
break;
@ -323,7 +321,7 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SSQLToken *pToken, char *payload,
if (pToken->type == TK_NULL) {
*(uint32_t *)payload = TSDB_DATA_NCHAR_NULL;
} else {
// if the converted output length exceeds pSchema->bytes, return error: 'Argument list too long'
// if the converted output length exceeds pColumnModel->bytes, return error: 'Argument list too long'
if (!taosMbsToUcs4(pToken->z, pToken->n, payload, pSchema->bytes)) {
char buf[512] = {0};
snprintf(buf, 512, "%s", strerror(errno));
@ -392,9 +390,9 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
}
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
int16_t timePrec) {
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
bool isPrevOptr;
// bool isPrevOptr; //fang, never used
SSQLToken sToken = {0};
char * payload = pDataBlocks->pData + pDataBlocks->size;
@ -418,6 +416,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
strcpy(error, "client out of memory");
*code = TSDB_CODE_CLI_OUT_OF_MEMORY;
return -1;
}
@ -425,23 +424,45 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
(sToken.type != TK_FLOAT) && (sToken.type != TK_BOOL) && (sToken.type != TK_NULL)) ||
(sToken.n == 0) || (sToken.type == TK_RP)) {
tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z);
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
// Remove quotation marks
if (TK_STRING == sToken.type) {
sToken.z++;
sToken.n -= 2;
// delete escape character: \\, \', \"
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
for (int32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
if (sToken.z[k + 1] == delim) {
cnt++;
tmpTokenBuf[j] = sToken.z[k + 1];
j++;
k++;
continue;
}
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
*code = TSDB_CODE_INVALID_SQL;
return -1; // NOTE: a return value <= 0 means an error!
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z);
*code = TSDB_CODE_INVALID_TIME_STAMP;
return -1;
}
}
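The block added above copies a quoted token into tmpTokenBuf while collapsing doubled quotes and backslash escapes, which is why sToken.n shrinks by 2 + cnt. A standalone sketch of that loop, with a hypothetical helper name and a worked example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical standalone version of the loop above: src points at the opening
 * quote of a token of length n (quotes included); doubled quotes ('' or "")
 * and backslash-escaped quotes collapse into a single character. */
static int32_t unescapeQuoted(const char *src, int32_t n, char *dst) {
  char    delim = src[0];  // the quote character actually used: ' or "
  int32_t j = 0;
  for (int32_t k = 1; k < n - 1; ++k) {
    if ((src[k] == delim || src[k] == '\\') && src[k + 1] == delim) {
      dst[j++] = src[k + 1];  // keep a single copy of the escaped quote
      ++k;                    // and skip over the escape character
      continue;
    }
    dst[j++] = src[k];
  }
  dst[j] = 0;
  return j;  // unescaped length, i.e. n - 2 - cnt in the hunk above
}

int main(void) {
  const char *tok = "'it''s \\'ok\\''";
  char        out[64];
  unescapeQuoted(tok, (int32_t)strlen(tok), out);
  printf("%s\n", out);  // prints: it's 'ok'
  return 0;
}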
@ -476,7 +497,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows,
SParsedDataColInfo *spd, char *error) {
SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SSQLToken sToken;
@ -487,6 +508,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
if (spd->hasVal[0] == false) {
strcpy(error, "primary timestamp column can not be null");
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
@ -497,15 +519,18 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
*str += index;
if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) {
int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize);
if (0 == tSize) {
int32_t tSize;
int32_t retcode = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize, &tSize);
if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client
strcpy(error, "client out of memory");
*code = retcode;
return -1;
}
maxRows += tSize;
ASSERT(tSize > maxRows);
maxRows = tSize;
}
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision);
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
}
@ -517,6 +542,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscInvalidSQLErrMsg(error, ") expected", *str);
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
@ -525,6 +551,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
if (numOfRows <= 0) {
strcpy(error, "no any data points");
*code = TSDB_CODE_INVALID_SQL;
return -1;
} else {
return numOfRows;
@ -545,10 +572,11 @@ static void tscSetAssignedColumnInfo(SParsedDataColInfo *spd, SSchema *pSchema,
}
}
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) {
int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) {
size_t remain = pDataBlock->nAllocSize - pDataBlock->size;
const int factor = 5;
uint32_t nAllocSizeOld = pDataBlock->nAllocSize;
assert(pDataBlock->headerSize >= 0);
// expand the allocated size
if (remain < rowSize * factor) {
@ -562,14 +590,15 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize) {
pDataBlock->pData = tmp;
memset(pDataBlock->pData + pDataBlock->size, 0, pDataBlock->nAllocSize - pDataBlock->size);
} else {
//assert(false);
// do nothing
// do nothing more; allocating additional memory failed, keep the old buffer
pDataBlock->nAllocSize = nAllocSizeOld;
return 0;
*numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize;
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
}
return (int32_t)(pDataBlock->nAllocSize - pDataBlock->size) / rowSize;
*numOfRows = (int32_t)(pDataBlock->nAllocSize - pDataBlock->headerSize) / rowSize;
return TSDB_CODE_SUCCESS;
}
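The reworked tscAllocateMemIfNeed now returns a status code and reports the row capacity through an out-parameter; on a failed realloc it restores the old allocation size so the caller still sees a usable capacity. A self-contained sketch of that contract, under an assumed 1.5x growth ratio (the exact expansion sits in lines the hunk elides):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char    *pData;
  uint32_t size;        /* bytes used, header included */
  uint32_t nAllocSize;  /* bytes allocated */
  uint32_t headerSize;  /* leading SShellSubmitBlock-style header */
} Block;

static int32_t allocIfNeeded(Block *b, int32_t rowSize, int32_t *numOfRows) {
  const int factor = 5;  /* same threshold as the hunk above */
  if (b->nAllocSize - b->size < (uint32_t)(rowSize * factor)) {
    uint32_t oldSize = b->nAllocSize;
    b->nAllocSize = (uint32_t)(b->nAllocSize * 1.5);  /* assumed growth ratio */
    char *tmp = realloc(b->pData, b->nAllocSize);
    if (tmp == NULL) {
      b->nAllocSize = oldSize;  /* keep the old buffer usable on failure */
      *numOfRows = (int32_t)(b->nAllocSize - b->headerSize) / rowSize;
      return -1;  /* stands in for TSDB_CODE_CLI_OUT_OF_MEMORY */
    }
    b->pData = tmp;
    memset(b->pData + b->size, 0, b->nAllocSize - b->size);
  }
  *numOfRows = (int32_t)(b->nAllocSize - b->headerSize) / rowSize;
  return 0;  /* TSDB_CODE_SUCCESS */
}

int main(void) {
  Block b = {.headerSize = 16, .nAllocSize = 256, .size = 16};
  b.pData = calloc(1, b.nAllocSize);
  int32_t rows = 0;
  printf("code=%d capacity=%d rows\n", allocIfNeeded(&b, 40, &rows), rows);  /* code=0 capacity=6 rows */
  free(b.pData);
  return 0;
}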
static void tsSetBlockInfo(SShellSubmitBlock *pBlocks, const SMeterMeta *pMeterMeta, int32_t numOfRows) {
@ -625,21 +654,33 @@ void sortRemoveDuplicates(STableDataBlocks *dataBuf) {
static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char **str, SParsedDataColInfo *spd,
int32_t *totalNum) {
SSqlCmd * pCmd = &pSql->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta;
STableDataBlocks *dataBuf =
tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name);
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pTableHashList, pCmd->pDataBlocks, pMeterMeta->uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SShellSubmitBlock), pMeterMeta->rowSize, pMeterMetaInfo->name,
pMeterMeta, &dataBuf);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
int32_t maxNumOfRows = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize);
if (0 == maxNumOfRows) {
int32_t maxNumOfRows;
ret = tscAllocateMemIfNeed(dataBuf, pMeterMeta->rowSize, &maxNumOfRows);
if (TSDB_CODE_SUCCESS != ret) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload);
int32_t code = TSDB_CODE_INVALID_SQL;
char * tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf);
free(tmpTokenBuf);
if (numOfRows <= 0) {
return TSDB_CODE_INVALID_SQL;
return code;
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
@ -664,16 +705,20 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
return TSDB_CODE_SUCCESS;
}
static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
int32_t index = 0;
SSQLToken sToken;
SSQLToken tableToken;
SSQLToken sToken = {0};
SSQLToken tableToken = {0};
int32_t code = TSDB_CODE_SUCCESS;
const int32_t TABLE_INDEX = 0;
const int32_t STABLE_INDEX = 1;
SSqlCmd * pCmd = &pSql->cmd;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
char *sql = *sqlstr;
// get the token of specified table
index = 0;
tableToken = tStrGetToken(sql, &index, false, 0, NULL);
@ -711,39 +756,116 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
return TSDB_CODE_INVALID_SQL;
}
if (sToken.type == TK_USING) { // create table if not exists
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, TABLE_INDEX);
if (sToken.type == TK_USING) { // create table if not exists according to the super table
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
STagData *pTag = (STagData *)pCmd->payload;
memset(pTag, 0, sizeof(STagData));
setMeterID(pSql, &sToken, 0);
strncpy(pTag->name, pMeterMetaInfo->name, TSDB_METER_ID_LEN);
code = tscGetMeterMeta(pSql, pTag->name, 0);
/*
* the source super table is moved to the secondary position of the pMeterMetaInfo list
*/
if (pQueryInfo->numOfTables < 2) {
tscAddEmptyMeterMetaInfo(pQueryInfo);
}
SMeterMetaInfo *pSTableMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, STABLE_INDEX);
setMeterID(pSTableMeterMetaInfo, &sToken, pSql);
strncpy(pTag->name, pSTableMeterMetaInfo->name, TSDB_METER_ID_LEN);
code = tscGetMeterMeta(pSql, pSTableMeterMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
if (!UTIL_METER_IS_SUPERTABLE(pSTableMeterMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
char * tagVal = pTag->data;
SSchema *pTagSchema = tsGetTagSchema(pMeterMetaInfo->pMeterMeta);
SSchema *pTagSchema = tsGetTagSchema(pSTableMeterMetaInfo->pMeterMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.type != TK_TAGS) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sql);
SParsedDataColInfo spd = {0};
uint8_t numOfTags = pSTableMeterMetaInfo->pMeterMeta->numOfTags;
spd.numOfCols = numOfTags;
// if some tag columns are specified
if (sToken.type != TK_LP) {
tscSetAssignedColumnInfo(&spd, pTagSchema, numOfTags);
} else {
/* insert into tablename (col1, col2,..., coln) using superTableName (tagName1, tagName2, ..., tagNamen)
* tags(tagVal1, tagVal2, ..., tagValn) values(v1, v2,... vn); */
int16_t offset[TSDB_MAX_COLUMNS] = {0};
for (int32_t t = 1; t < numOfTags; ++t) {
offset[t] = offset[t - 1] + pTagSchema[t - 1].bytes;
}
while (1) {
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (TK_STRING == sToken.type) {
sToken.n = strdequote(sToken.z);
strtrim(sToken.z);
sToken.n = (uint32_t)strlen(sToken.z);
}
if (sToken.type == TK_RP) {
break;
}
bool findColumnIndex = false;
// TODO: speed up by using a hash list
for (int32_t t = 0; t < numOfTags; ++t) {
if (strncmp(sToken.z, pTagSchema[t].name, sToken.n) == 0 && strlen(pTagSchema[t].name) == sToken.n) {
SParsedColElem *pElem = &spd.elems[spd.numOfAssignedCols++];
pElem->offset = offset[t];
pElem->colIndex = t;
if (spd.hasVal[t] == true) {
return tscInvalidSQLErrMsg(pCmd->payload, "duplicated tag name", sToken.z);
}
spd.hasVal[t] = true;
findColumnIndex = true;
break;
}
}
if (!findColumnIndex) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid tag name", sToken.z);
}
}
if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > numOfTags) {
return tscInvalidSQLErrMsg(pCmd->payload, "tag name expected", sToken.z);
}
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
}
if (sToken.type != TK_TAGS) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
int32_t numOfTagValues = 0;
uint32_t ignoreTokenTypes = TK_LP;
uint32_t numOfIgnoreToken = 1;
while (1) {
for (int i = 0; i < spd.numOfAssignedCols; ++i) {
char * tagVal = pTag->data + spd.elems[i].offset;
int16_t colIndex = spd.elems[i].colIndex;
index = 0;
sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
sql += index;
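Earlier in this hunk the offset table is built as offset[t] = offset[t-1] + bytes[t-1]: each tag's position inside the packed STagData payload is the prefix sum of the preceding tags' byte widths. A tiny self-contained check with made-up widths:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* assumed byte widths of three tag columns, e.g. BIGINT, BINARY(16), INT */
  int32_t bytes[3] = {8, 16, 4};
  int16_t offset[3] = {0};
  for (int32_t t = 1; t < 3; ++t) {
    offset[t] = offset[t - 1] + bytes[t - 1];  /* prefix sum of tag widths */
  }
  printf("%d %d %d\n", offset[0], offset[1], offset[2]);  /* 0 8 24 */
  return 0;
}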
@ -759,42 +881,60 @@ static int32_t tscParseSqlForCreateTableOnDemand(char **sqlstr, SSqlObj *pSql) {
sToken.n -= 2;
}
code = tsParseOneColumnData(&pTagSchema[numOfTagValues], &sToken, tagVal, pCmd->payload, &sql, false,
pMeterMetaInfo->pMeterMeta->precision);
code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false,
pSTableMeterMetaInfo->pMeterMeta->precision);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if ((pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_BINARY ||
pTagSchema[numOfTagValues].type == TSDB_DATA_TYPE_NCHAR) && sToken.n > pTagSchema[numOfTagValues].bytes) {
if ((pTagSchema[colIndex].type == TSDB_DATA_TYPE_BINARY || pTagSchema[colIndex].type == TSDB_DATA_TYPE_NCHAR) &&
sToken.n > pTagSchema[colIndex].bytes) {
return tscInvalidSQLErrMsg(pCmd->payload, "string too long", sToken.z);
}
tagVal += pTagSchema[numOfTagValues++].bytes;
}
if (numOfTagValues != pMeterMetaInfo->pMeterMeta->numOfTags) {
return tscInvalidSQLErrMsg(pCmd->payload, "number of tags mismatch", sql);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z);
}
// 2. set null for the tag columns that were not assigned a value
if (spd.numOfAssignedCols < spd.numOfCols) {
char *ptr = pTag->data;
for (int32_t i = 0; i < spd.numOfCols; ++i) {
if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null
setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
}
ptr += pTagSchema[i].bytes;
}
}
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", sql);
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
int32_t ret = setMeterID(pSql, &tableToken, 0);
int32_t ret = setMeterID(pMeterMetaInfo, &tableToken, pSql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
createTable = true;
code = tscGetMeterMetaEx(pSql, pMeterMetaInfo->name, true);
code = tscGetMeterMetaEx(pSql, pMeterMetaInfo, true);
if (TSDB_CODE_ACTION_IN_PROGRESS == code) {
return code;
}
} else {
if (cstart != NULL) {
sql = cstart;
} else {
sql = sToken.z;
}
code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
code = tscGetMeterMeta(pSql, pMeterMetaInfo);
}
int32_t len = cend - cstart + 1;
@ -819,6 +959,15 @@ int validateTableName(char *tblName, int len) {
return tscValidateName(&token);
}
static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
if (pCmd->dataSourceType != 0 && pCmd->dataSourceType != type) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sql);
}
pCmd->dataSourceType = type;
return TSDB_CODE_SUCCESS;
}
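The new validateDataSource helper latches the first data source seen into pCmd->dataSourceType and rejects any later switch, replacing the scattered isInsertFromFile checks below. A standalone sketch of the latch, with assumed constant values:

#include <stdio.h>

/* assumed values; only "distinct and non-zero" matters for the latch */
enum { DATA_FROM_SQL_STRING = 1, DATA_FROM_DATA_FILE = 2 };

static int validateDataSourceSketch(int *latched, int type) {
  if (*latched != 0 && *latched != type) {
    return -1;  /* "keyword VALUES and FILE are not allowed to mix up" */
  }
  *latched = type;
  return 0;
}

int main(void) {
  int src = 0;  /* 0: no source decided yet for this INSERT statement */
  printf("%d\n", validateDataSourceSketch(&src, DATA_FROM_SQL_STRING));  /* 0: latches VALUES */
  printf("%d\n", validateDataSourceSketch(&src, DATA_FROM_SQL_STRING));  /* 0: same source ok */
  printf("%d\n", validateDataSourceSketch(&src, DATA_FROM_DATA_FILE));   /* -1: mixing rejected */
  return 0;
}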
/**
* usage: insert into table1 values() () table2 values()()
*
@ -828,44 +977,68 @@ int validateTableName(char *tblName, int len) {
* @param pSql
* @return
*/
int doParserInsertSql(SSqlObj *pSql, char *str) {
int doParseInsertSql(SSqlObj *pSql, char *str) {
SSqlCmd *pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_INVALID_SQL;
int32_t totalNum = 0;
int32_t code = TSDB_CODE_SUCCESS;
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd);
SMeterMetaInfo *pMeterMetaInfo = NULL;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
assert(pQueryInfo != NULL);
if (pQueryInfo->numOfTables == 0) {
pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo);
} else {
pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
}
if ((code = tscAllocPayload(pCmd, TSDB_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
return code;
}
assert(((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList))
|| ((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList)));
if ((NULL == pSql->asyncTblPos) && (NULL == pSql->pTableHashList)) {
pSql->pTableHashList = taosInitIntHash(128, POINTER_BYTES, taosHashInt);
pSql->pTableHashList = taosInitHashTable(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
pSql->cmd.pDataBlocks = tscCreateBlockArrayList();
if (NULL == pSql->pTableHashList || NULL == pSql->cmd.pDataBlocks) {
code = TSDB_CODE_CLI_OUT_OF_MEMORY;
goto _error_clean;
}
} else {
assert((NULL != pSql->asyncTblPos) && (NULL != pSql->pTableHashList));
str = pSql->asyncTblPos;
}
tscTrace("%p create data block list for submit data, %p", pSql, pSql->cmd.pDataBlocks);
tscTrace("%p create data block list for submit data:%p, asyncTblPos:%p, pTableHashList:%p", pSql, pSql->cmd.pDataBlocks, pSql->asyncTblPos, pSql->pTableHashList);
while (1) {
int32_t index = 0;
SSQLToken sToken = tStrGetToken(str, &index, false, 0, NULL);
if (sToken.n == 0) { // parse file, do not release the STableDataBlock
if (pCmd->isInsertFromFile == 1) {
// no data in the sql string anymore.
if (sToken.n == 0) {
/*
* if the data comes from a data file, no data has been generated yet, so there is nothing to
* merge or submit; save the file path and parse the file in a separate routine.
*/
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
goto _clean;
}
if (totalNum > 0) {
break;
} else { // no data in current sql string, error
/*
* if no data was generated while parsing the sql string, an error msg is returned.
* Otherwise, create the first submit block and submit it to the virtual node.
*/
if (totalNum == 0) {
code = TSDB_CODE_INVALID_SQL;
goto _error_clean;
} else {
break;
}
}
@ -877,27 +1050,35 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
//TODO refactor
if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) {
if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
void *fp = pSql->fp;
if ((code = tscParseSqlForCreateTableOnDemand(&str, pSql)) != TSDB_CODE_SUCCESS) {
if (fp != NULL) {
//goto _clean;
return code;
} else {
ptrdiff_t pos = pSql->asyncTblPos - pSql->sqlstr;
if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) {
/*
* for async insert, the data block cleanup, i.e. tscDestroyBlockArrayList,
* must be executed before launching other threads to fetch the metermeta, since the
* later ops may manipulate SSqlObj through another thread in the getMeterMetaCallback function.
* For async insert, once the metermeta arrives from the server, the sql string is not
* re-parsed from scratch with the new metermeta, to avoid the overhead of fetching it again;
* instead, getMeterMetaCallback resumes parsing from the interrupted position.
*/
goto _error_clean;
}
if (fp != NULL) {
if (TSDB_CODE_ACTION_IN_PROGRESS == code) {
tscTrace("async insert and waiting to get meter meta, then continue parse sql from offset: %" PRId64, pos);
return code;
}
if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
// todo add to return
tscError("async insert parse error, code:%d, %s", code, tsError[code]);
pSql->asyncTblPos = NULL;
}
goto _error_clean; // TODO: should the async flow jump to _clean or _error_clean?
}
if (UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL);
goto _error_clean;
}
@ -905,8 +1086,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
str += index;
if (sToken.n == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z);
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z);
goto _error_clean;
}
@ -916,14 +1098,9 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns);
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 0;
} else {
if (pCmd->isInsertFromFile == 1) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z);
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
}
/*
* app here insert data in different vnodes, so we need to set the following
@ -933,16 +1110,10 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
if (code != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
} else if (sToken.type == TK_FILE) {
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 1;
} else {
if (pCmd->isInsertFromFile == 0) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z);
if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
}
index = 0;
sToken = tStrGetToken(str, &index, false, 0, NULL);
@ -964,20 +1135,23 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
strcpy(fname, full_path.we_wordv[0]);
wordfree(&full_path);
STableDataBlocks *pDataBlock = tscCreateDataBlockEx(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize,
sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
STableDataBlocks *pDataBlock = NULL;
SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta;
int32_t ret = tscCreateDataBlock(PATH_MAX, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name,
pMeterMeta, &pDataBlock);
if (ret != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock);
strcpy(pDataBlock->filename, fname);
} else if (sToken.type == TK_LP) {
/* insert into tablename(col1, col2,..., coln) values(v1, v2,... vn); */
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta;
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta;
SSchema * pSchema = tsGetSchema(pMeterMeta);
if (pCmd->isInsertFromFile == -1) {
pCmd->isInsertFromFile = 0;
} else if (pCmd->isInsertFromFile == 1) {
code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES and FILE are not allowed to mix up", sToken.z);
if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}
@ -1071,8 +1245,10 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
// set the next sent data vnode index in data block arraylist
pCmd->vnodeIdx = 1;
pMeterMetaInfo->vnodeIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}
@ -1084,13 +1260,16 @@ _error_clean:
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
_clean:
taosCleanUpIntHash(pSql->pTableHashList);
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
pSql->asyncTblPos = NULL;
pCmd->isParseFinish = 1;
return code;
}
int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) {
int tsParseInsertSql(SSqlObj *pSql) {
if (!pSql->pTscObj->writeAuth) {
return TSDB_CODE_NO_RIGHTS;
}
@ -1098,33 +1277,34 @@ int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db) {
int32_t index = 0;
SSqlCmd *pCmd = &pSql->cmd;
SSQLToken sToken = tStrGetToken(sql, &index, false, 0, NULL);
SSQLToken sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
assert(sToken.type == TK_INSERT || sToken.type == TK_IMPORT);
pCmd->import = (sToken.type == TK_IMPORT);
sToken = tStrGetToken(sql, &index, false, 0, NULL);
pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT;
SQueryInfo *pQueryInfo = NULL;
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
uint16_t type = (sToken.type == TK_INSERT)? TSDB_QUERY_TYPE_INSERT:TSDB_QUERY_TYPE_IMPORT;
TSDB_QUERY_SET_TYPE(pQueryInfo->type, type);
sToken = tStrGetToken(pSql->sqlstr, &index, false, 0, NULL);
if (sToken.type != TK_INTO) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword INTO is expected", sToken.z);
}
pCmd->count = 0;
pCmd->command = TSDB_SQL_INSERT;
pCmd->isInsertFromFile = -1;
pSql->res.numOfRows = 0;
return doParserInsertSql(pSql, sql + index);
return doParseInsertSql(pSql, pSql->sqlstr + index);
}
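The added lines above tag pQueryInfo->type with TSDB_QUERY_TYPE_INSERT or TSDB_QUERY_TYPE_IMPORT through TSDB_QUERY_SET_TYPE. A sketch of that idiom, assuming the macro ORs the flag into a bitmask so several attributes can coexist (macro shape and flag values here are made up):

#include <stdint.h>
#include <stdio.h>

#define QUERY_SET_TYPE(t, f) ((t) |= (f))
#define QUERY_HAS_TYPE(t, f) (((t) & (f)) != 0)

enum { QUERY_TYPE_INSERT = 0x01, QUERY_TYPE_IMPORT = 0x02 };

int main(void) {
  uint16_t type = 0;
  QUERY_SET_TYPE(type, QUERY_TYPE_INSERT);
  printf("insert=%d import=%d\n",
         QUERY_HAS_TYPE(type, QUERY_TYPE_INSERT),
         QUERY_HAS_TYPE(type, QUERY_TYPE_IMPORT));  /* insert=1 import=0 */
  return 0;
}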
int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) {
int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion) {
int32_t ret = TSDB_CODE_SUCCESS;
// must before clean the sqlcmd object
tscRemoveAllMeterMetaInfo(&pSql->cmd, false);
if (NULL == pSql->asyncTblPos) {
tscTrace("continue parse sql: %s", pSql->asyncTblPos);
tscCleanSqlCmd(&pSql->cmd);
} else {
tscTrace("continue parse sql: %s", pSql->asyncTblPos);
}
if (tscIsInsertOrImportData(pSql->sqlstr)) {
@ -1141,7 +1321,7 @@ int tsParseSql(SSqlObj *pSql, char *acct, char *db, bool multiVnodeInsertion) {
pSql->fp = tscAsyncInsertMultiVnodesProxy;
}
ret = tsParseInsertSql(pSql, pSql->sqlstr, acct, db);
ret = tsParseInsertSql(pSql);
} else {
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (TSDB_CODE_SUCCESS != ret) return ret;
@ -1168,7 +1348,8 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd *pCmd = &pSql->cmd;
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, 0)->pMeterMeta;
assert(pCmd->numOfClause == 1);
SMeterMeta *pMeterMeta = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0)->pMeterMeta;
SShellSubmitBlock *pBlocks = (SShellSubmitBlock *)(pTableDataBlocks->pData);
tsSetBlockInfo(pBlocks, pMeterMeta, numOfRows);
@ -1190,53 +1371,54 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
return TSDB_CODE_SUCCESS;
}
static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
size_t readLen = 0;
char * line = NULL;
size_t n = 0;
int len = 0;
uint32_t maxRows = 0;
int32_t maxRows = 0;
SSqlCmd * pCmd = &pSql->cmd;
int numOfRows = 0;
int32_t code = 0;
int nrows = 0;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
SMeterMeta * pMeterMeta = pMeterMetaInfo->pMeterMeta;
assert(pCmd->numOfClause == 1);
int32_t rowSize = pMeterMeta->rowSize;
pCmd->pDataBlocks = tscCreateBlockArrayList();
STableDataBlocks *pTableDataBlock =
tscCreateDataBlockEx(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, rowSize, sizeof(SShellSubmitBlock),
pMeterMetaInfo->name, pMeterMeta, &pTableDataBlock);
if (ret != TSDB_CODE_SUCCESS) {
return -1;
}
tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock);
maxRows = tscAllocateMemIfNeed(pTableDataBlock, rowSize);
if (maxRows < 1) return -1;
code = tscAllocateMemIfNeed(pTableDataBlock, rowSize, &maxRows);
if (TSDB_CODE_SUCCESS != code) return -1;
int count = 0;
SParsedDataColInfo spd = {.numOfCols = pMeterMetaInfo->pMeterMeta->numOfColumns};
SSchema * pSchema = tsGetSchema(pMeterMetaInfo->pMeterMeta);
SParsedDataColInfo spd = {.numOfCols = pMeterMeta->numOfColumns};
SSchema * pSchema = tsGetSchema(pMeterMeta);
tscSetAssignedColumnInfo(&spd, pSchema, pMeterMetaInfo->pMeterMeta->numOfColumns);
tscSetAssignedColumnInfo(&spd, pSchema, pMeterMeta->numOfColumns);
while ((readLen = getline(&line, &n, fp)) != -1) {
// line[--readLen] = '\0';
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0;
if (readLen <= 0) continue;
if (readLen == 0) continue; // fang, <= to ==
char *lineptr = line;
strtolower(line, line);
if (numOfRows >= maxRows || pTableDataBlock->size + pMeterMeta->rowSize >= pTableDataBlock->nAllocSize) {
uint32_t tSize = tscAllocateMemIfNeed(pTableDataBlock, pMeterMeta->rowSize);
if (0 == tSize) return (-TSDB_CODE_CLI_OUT_OF_MEMORY);
maxRows += tSize;
}
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision);
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
pSql->res.code = TSDB_CODE_INVALID_SQL;
return -1;
pSql->res.code = code;
return (-code);
}
pTableDataBlock->size += len;
@ -1292,23 +1474,26 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
}
STableDataBlocks *pDataBlock = NULL;
SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SMeterMetaInfo * pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
assert(pCmd->numOfClause == 1);
int32_t code = TSDB_CODE_SUCCESS;
/* the first block has been sent to server in processSQL function */
assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL);
assert(pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL);
if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) {
if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) {
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
for (int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) {
for (int32_t i = pMeterMetaInfo->vnodeIndex; i < pDataBlocks->nSize; ++i) {
pDataBlock = pDataBlocks->pData[i];
if (pDataBlock == NULL) {
continue;
}
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize);
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex,
pDataBlocks->nSize);
continue;
}
@ -1321,17 +1506,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
}
// multi-vnodes insertion in sync query model
void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
if (pCmd->command != TSDB_SQL_INSERT) {
return;
}
SMeterMetaInfo * pInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
STableDataBlocks *pDataBlock = NULL;
int32_t affected_rows = 0;
assert(pCmd->isInsertFromFile == 1 && pCmd->pDataBlocks != NULL);
assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && pCmd->pDataBlocks != NULL);
SDataBlockList *pDataBlockList = pCmd->pDataBlocks;
pCmd->pDataBlocks = NULL;
@ -1357,16 +1544,24 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
continue;
}
strncpy(pInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN);
strncpy(pMeterMetaInfo->name, pDataBlock->meterId, TSDB_METER_ID_LEN);
memset(pDataBlock->pData, 0, pDataBlock->nAllocSize);
int32_t ret = tscGetMeterMeta(pSql, pInfo->name, 0);
int32_t ret = tscGetMeterMeta(pSql, pMeterMetaInfo);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p get meter meta failed, abort", pSql);
continue;
}
int nrows = tscInsertDataFromFile(pSql, fp);
char *tmpTokenBuf = calloc(1, 4096); // used for deleting Escape character: \\, \', \"
if (NULL == tmpTokenBuf) {
tscError("%p calloc failed", pSql);
continue;
}
int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf);
free(tmpTokenBuf);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (nrows < 0) {


@ -22,7 +22,7 @@
#include "tstrbuild.h"
int tsParseInsertSql(SSqlObj *pSql, char *sql, char *acct, char *db);
int tsParseInsertSql(SSqlObj *pSql);
int taos_query_imp(STscObj* pObj, SSqlObj* pSql);
////////////////////////////////////////////////////////////////////////////////
@ -75,7 +75,6 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_
if (isParam) {
++stmt->numParams;
}
return TSDB_CODE_SUCCESS;
}
@ -122,11 +121,11 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
break;
case TSDB_DATA_TYPE_FLOAT:
var->dKey = *(float*)tb->buffer;
var->dKey = GET_FLOAT_VAL(tb->buffer);
break;
case TSDB_DATA_TYPE_DOUBLE:
var->dKey = *(double*)tb->buffer;
var->dKey = GET_DOUBLE_VAL(tb->buffer);
break;
case TSDB_DATA_TYPE_BINARY:
@ -386,12 +385,11 @@ static int insertStmtAddBatch(STscStmt* stmt) {
}
static int insertStmtPrepare(STscStmt* stmt) {
STscObj* taos = stmt->taos;
SSqlObj *pSql = stmt->pSql;
pSql->cmd.numOfParams = 0;
pSql->cmd.batchSize = 0;
return tsParseInsertSql(pSql, pSql->sqlstr, taos->acctId, taos->db);
return tsParseInsertSql(pSql);
}
static int insertStmtReset(STscStmt* pStmt) {
@ -409,7 +407,9 @@ static int insertStmtReset(STscStmt* pStmt) {
}
}
pCmd->batchSize = 0;
pCmd->vnodeIdx = 0;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
pMeterMetaInfo->vnodeIndex = 0;
return TSDB_CODE_SUCCESS;
}
@ -422,6 +422,9 @@ static int insertStmtExecute(STscStmt* stmt) {
++pCmd->batchSize;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, pCmd->clauseIndex, 0);
assert(pCmd->numOfClause == 1);
if (pCmd->pDataBlocks->nSize > 0) {
// merge according to vgid
int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks);
@ -436,7 +439,7 @@ static int insertStmtExecute(STscStmt* stmt) {
}
// set the next sent data vnode index in data block arraylist
pCmd->vnodeIdx = 1;
pMeterMetaInfo->vnodeIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}
@ -445,6 +448,8 @@ static int insertStmtExecute(STscStmt* stmt) {
SSqlRes *pRes = &pSql->res;
pRes->numOfRows = 0;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
pRes->qhandle = 0;
pSql->thandle = NULL;


@ -93,10 +93,10 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L;
if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return;
tscTrace("%p query time:%lld sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
char *sql = malloc(200);
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %lld, %lld, '", tsMonitorDbName,
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName,
pSql->pTscObj->user, pSql->stime, pSql->res.useconds);
int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr);
if (sqlLen > TSDB_SHOW_SQL_LEN - 1) {
@ -198,12 +198,14 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
pthread_mutex_unlock(&pObj->mutex);
if (pStream) {
tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
}
taos_close_stream(pStream);
if (pStream->callback) {
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
}
char *tscBuildQueryStreamDesc(char *pMsg, STscObj *pObj) {
@ -283,8 +285,9 @@ void tscKillConnection(STscObj *pObj) {
SSqlStream *pStream = pObj->streamList;
while (pStream) {
SSqlStream *tmp = pStream->next;
taos_close_stream(pStream);
pStream = pStream->next;
pStream = tmp;
}
pthread_mutex_unlock(&pObj->mutex);
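Two fixes above: tscKillStream now runs the stream's callback before taos_close_stream, and tscKillConnection saves the successor pointer before closing a stream, since reading pStream->next after the node is torn down is a use-after-free. A minimal sketch of the safe traversal pattern:

#include <stdlib.h>

typedef struct Node {
  struct Node *next;
} Node;

static void destroyAll(Node *head) {
  Node *p = head;
  while (p) {
    Node *tmp = p->next;  /* save first: p->next is invalid after the close */
    free(p);              /* stands in for taos_close_stream(pStream) */
    p = tmp;              /* advance from the saved copy */
  }
}

int main(void) {
  Node *a = calloc(1, sizeof(Node));
  a->next = calloc(1, sizeof(Node));
  destroyAll(a);
  return 0;
}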

File diff suppressed because it is too large


@ -24,7 +24,7 @@
int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
void *pParser = ParseAlloc(malloc);
pSQLInfo->validSql = true;
pSQLInfo->valid = true;
int32_t i = 0;
while (1) {
@ -50,12 +50,12 @@ int32_t tSQLParse(SSqlInfo *pSQLInfo, const char *pStr) {
}
case TK_ILLEGAL: {
snprintf(pSQLInfo->pzErrMsg, tListLen(pSQLInfo->pzErrMsg), "unrecognized token: \"%s\"", t0.z);
pSQLInfo->validSql = false;
pSQLInfo->valid = false;
goto abort_parse;
}
default:
Parse(pParser, t0.type, t0, pSQLInfo);
if (pSQLInfo->validSql == false) {
if (pSQLInfo->valid == false) {
goto abort_parse;
}
}
@ -554,58 +554,64 @@ tSQLExprListList *tSQLListListAppend(tSQLExprListList *pList, tSQLExprList *pExp
return pList;
}
void tSetInsertSQLElems(SSqlInfo *pInfo, SSQLToken *pName, tSQLExprListList *pList) {
SInsertSQL *pInsert = calloc(1, sizeof(SInsertSQL));
pInsert->name = *pName;
pInsert->pValue = pList;
pInfo->pInsertInfo = pInsert;
pInfo->sqlType = TSQL_INSERT;
void doDestroyQuerySql(SQuerySQL *pQuerySql) {
if (pQuerySql == NULL) {
return;
}
void destroyQuerySql(SQuerySQL *pSql) {
if (pSql == NULL) return;
tSQLExprListDestroy(pQuerySql->pSelection);
tSQLExprListDestroy(pSql->pSelection);
pSql->pSelection = NULL;
pQuerySql->pSelection = NULL;
tSQLExprDestroy(pSql->pWhere);
pSql->pWhere = NULL;
tSQLExprDestroy(pQuerySql->pWhere);
pQuerySql->pWhere = NULL;
tVariantListDestroy(pSql->pSortOrder);
pSql->pSortOrder = NULL;
tVariantListDestroy(pQuerySql->pSortOrder);
pQuerySql->pSortOrder = NULL;
tVariantListDestroy(pSql->pGroupby);
pSql->pGroupby = NULL;
tVariantListDestroy(pQuerySql->pGroupby);
pQuerySql->pGroupby = NULL;
tVariantListDestroy(pSql->from);
pSql->from = NULL;
tVariantListDestroy(pQuerySql->from);
pQuerySql->from = NULL;
tVariantListDestroy(pSql->fillType);
tVariantListDestroy(pQuerySql->fillType);
free(pSql);
free(pQuerySql);
}
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pMetricName,
void destroyAllSelectClause(SSubclauseInfo *pClause) {
if (pClause == NULL || pClause->numOfClause == 0) {
return;
}
for(int32_t i = 0; i < pClause->numOfClause; ++i) {
SQuerySQL *pQuerySql = pClause->pClause[i];
doDestroyQuerySql(pQuerySql);
}
tfree(pClause->pClause);
}
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLToken *pStableName,
tVariantList *pTagVals, SQuerySQL *pSelect, int32_t type) {
SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL));
switch (type) {
case TSQL_CREATE_NORMAL_METER: {
case TSQL_CREATE_TABLE: {
pCreate->colInfo.pColumns = pCols;
assert(pTagVals == NULL && pTags == NULL);
break;
}
case TSQL_CREATE_NORMAL_METRIC: {
case TSQL_CREATE_STABLE: {
pCreate->colInfo.pColumns = pCols;
pCreate->colInfo.pTagColumns = pTags;
assert(pTagVals == NULL && pTags != NULL && pCols != NULL);
break;
}
case TSQL_CREATE_METER_FROM_METRIC: {
case TSQL_CREATE_TABLE_FROM_STABLE: {
pCreate->usingInfo.pTagVals = pTagVals;
pCreate->usingInfo.metricName = *pMetricName;
pCreate->usingInfo.stableName = *pStableName;
break;
}
case TSQL_CREATE_STREAM: {
@ -616,19 +622,24 @@ SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SSQLTo
assert(false);
}
pCreate->type = type;
return pCreate;
}
SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tVariantList *pVals, int32_t type) {
SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL));
pAlterTable->name = *pMeterName;
if (type == ALTER_TABLE_ADD_COLUMN || type == ALTER_TABLE_TAGS_ADD) {
pAlterTable->name = *pMeterName;
pAlterTable->type = type;
if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
pAlterTable->pAddColumns = pCols;
assert(pVals == NULL);
} else {
/* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP,
* ALTER_TABLE_DROP_COLUMN */
/*
* ALTER_TABLE_TAGS_CHG, ALTER_TABLE_TAGS_SET, ALTER_TABLE_TAGS_DROP,
* ALTER_TABLE_DROP_COLUMN
*/
pAlterTable->varList = pVals;
assert(pCols == NULL);
}
@ -639,27 +650,28 @@ SAlterTableSQL *tAlterTableSQLElems(SSQLToken *pMeterName, tFieldList *pCols, tV
void SQLInfoDestroy(SSqlInfo *pInfo) {
if (pInfo == NULL) return;
if (pInfo->sqlType == TSQL_QUERY_METER) {
destroyQuerySql(pInfo->pQueryInfo);
} else if (pInfo->sqlType >= TSQL_CREATE_NORMAL_METER && pInfo->sqlType <= TSQL_CREATE_STREAM) {
if (pInfo->type == TSDB_SQL_SELECT) {
destroyAllSelectClause(&pInfo->subclauseInfo);
} else if (pInfo->type == TSDB_SQL_CREATE_TABLE) {
SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo;
destroyQuerySql(pCreateTableInfo->pSelect);
doDestroyQuerySql(pCreateTableInfo->pSelect);
tFieldListDestroy(pCreateTableInfo->colInfo.pColumns);
tFieldListDestroy(pCreateTableInfo->colInfo.pTagColumns);
tVariantListDestroy(pCreateTableInfo->usingInfo.pTagVals);
tfree(pInfo->pCreateTableInfo);
} else if (pInfo->sqlType >= ALTER_TABLE_TAGS_ADD && pInfo->sqlType <= ALTER_TABLE_DROP_COLUMN) {
} else if (pInfo->type == TSDB_SQL_ALTER_TABLE) {
tVariantListDestroy(pInfo->pAlterInfo->varList);
tFieldListDestroy(pInfo->pAlterInfo->pAddColumns);
tfree(pInfo->pAlterInfo);
} else {
if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) {
free(pInfo->pDCLInfo->a);
}
if (pInfo->sqlType == CREATE_DATABASE) {
if (pInfo->type == TSDB_SQL_CREATE_DB) {
tVariantListDestroy(pInfo->pDCLInfo->dbOpt.keep);
}
@ -667,13 +679,52 @@ void SQLInfoDestroy(SSqlInfo *pInfo) {
}
}
void setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) {
pInfo->sqlType = type;
SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) {
if (pSubclause == NULL) {
pSubclause = calloc(1, sizeof(SSubclauseInfo));
}
int32_t newSize = pSubclause->numOfClause + 1;
char* tmp = realloc(pSubclause->pClause, newSize * POINTER_BYTES);
if (tmp == NULL) {
return pSubclause;
}
pSubclause->pClause = (SQuerySQL**) tmp;
pSubclause->pClause[newSize - 1] = pSqlExprInfo;
pSubclause->numOfClause++;
return pSubclause;
}
SSqlInfo* setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SSQLToken *pMeterName, int32_t type) {
pInfo->type = type;
if (type == TSDB_SQL_SELECT) {
pInfo->subclauseInfo = *(SSubclauseInfo*) pSqlExprInfo;
free(pSqlExprInfo);
} else {
pInfo->pCreateTableInfo = pSqlExprInfo;
}
if (pMeterName != NULL) {
pInfo->pCreateTableInfo->name = *pMeterName;
}
return pInfo;
}
SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void *pSubclause) {
char* tmp = realloc(pQueryInfo->pClause, (pQueryInfo->numOfClause + 1) * POINTER_BYTES);
if (tmp == NULL) { // out of memory
return pQueryInfo;
}
pQueryInfo->pClause = (SQuerySQL**) tmp;
pQueryInfo->pClause[pQueryInfo->numOfClause++] = pSubclause;
return pQueryInfo;
}
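setSubclause and appendSelectClause above grow the clause list as a realloc'd array of pointers, and a failed realloc leaves the original list intact because the old pointer is only overwritten on success. A self-contained sketch of the same pattern:

#include <stdio.h>
#include <stdlib.h>

#define POINTER_BYTES sizeof(void *)

typedef struct {
  void **pClause;
  int    numOfClause;
} Subclauses;

static int append(Subclauses *s, void *clause) {
  void *tmp = realloc(s->pClause, (s->numOfClause + 1) * POINTER_BYTES);
  if (tmp == NULL) {
    return -1;  /* out of memory; the old array is still valid */
  }
  s->pClause = tmp;
  s->pClause[s->numOfClause++] = clause;
  return 0;
}

int main(void) {
  Subclauses s = {0};
  int a = 1, b = 2;
  append(&s, &a);
  append(&s, &b);
  printf("%d\n", s.numOfClause);  /* 2 */
  free(s.pClause);
  return 0;
}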
void setCreatedMeterName(SSqlInfo *pInfo, SSQLToken *pMeterName, SSQLToken *pIfNotExists) {
@ -703,23 +754,57 @@ tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SSQLToken *pToken) {
}
void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
pInfo->sqlType = type;
pInfo->type = type;
if (nParam == 0) return;
if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL));
va_list va;
va_start(va, nParam);
while (nParam-- > 0) {
SSQLToken *pToken = va_arg(va, SSQLToken *);
tTokenListAppend(pInfo->pDCLInfo, pToken);
(void)tTokenListAppend(pInfo->pDCLInfo, pToken);
}
va_end(va);
}
void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SSQLToken* pToken, SSQLToken* existsCheck) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
tTokenListAppend(pInfo->pDCLInfo, pToken);
pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1);
}
void setShowOptions(SSqlInfo *pInfo, int32_t type, SSQLToken* prefix, SSQLToken* pPatterns) {
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
pInfo->type = TSDB_SQL_SHOW;
SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
pShowInfo->showType = type;
if (prefix != NULL && prefix->type != 0) {
pShowInfo->prefix = *prefix;
} else {
pShowInfo->prefix.type = 0;
}
if (pPatterns != NULL && pPatterns->type != 0) {
pShowInfo->pattern = *pPatterns;
} else {
pShowInfo->pattern.type = 0;
}
}
void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBInfo *pDB, SSQLToken *pIgExists) {
pInfo->sqlType = type;
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
@ -731,20 +816,69 @@ void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pToken, SCreateDBI
}
void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *pName, SSQLToken *pPwd, SCreateAcctSQL *pAcctInfo) {
pInfo->sqlType = type;
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
pInfo->pDCLInfo->acctOpt = *pAcctInfo;
tTokenListAppend(pInfo->pDCLInfo, pName);
assert(pName != NULL);
pInfo->pDCLInfo->user.user = *pName;
if (pPwd->n > 0) {
tTokenListAppend(pInfo->pDCLInfo, pPwd);
if (pPwd != NULL) {
pInfo->pDCLInfo->user.passwd = *pPwd;
}
}
void setCreateUserSQL(SSqlInfo *pInfo, SSQLToken *pName, SSQLToken *pPasswd) {
pInfo->type = TSDB_SQL_CREATE_USER;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
assert(pName != NULL && pPasswd != NULL);
pInfo->pDCLInfo->user.user = *pName;
pInfo->pDCLInfo->user.passwd = *pPasswd;
}
void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SSQLToken *pName, SSQLToken* pPwd, SSQLToken *pPrivilege) {
pInfo->type = TSDB_SQL_ALTER_USER;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
assert(pName != NULL);
SUserInfo* pUser = &pInfo->pDCLInfo->user;
pUser->type = type;
pUser->user = *pName;
if (pPwd != NULL) {
pUser->passwd = *pPwd;
} else {
pUser->passwd.type = TSDB_DATA_TYPE_NULL;
}
if (pPrivilege != NULL) {
pUser->privilege = *pPrivilege;
} else {
pUser->privilege.type = TSDB_DATA_TYPE_NULL;
}
}
void setKillSQL(SSqlInfo *pInfo, int32_t type, SSQLToken *ip) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
}
assert(ip != NULL);
pInfo->pDCLInfo->ip = *ip;
}
void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
pDBInfo->numOfBlocksPerTable = 50;
pDBInfo->compressionLevel = -1;


@ -83,6 +83,13 @@ struct SSchema* tsGetColumnSchema(SMeterMeta* pMeta, int32_t startCol) {
return (SSchema*)(((char*)pMeta + sizeof(SMeterMeta)) + startCol * sizeof(SSchema));
}
struct SSchema tsGetTbnameColumnSchema() {
struct SSchema s = {.colId = TSDB_TBNAME_COLUMN_INDEX, .type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
/**
* the MeterMeta data format in memory is as follows:
*
@ -124,35 +131,39 @@ bool tsMeterMetaIdentical(SMeterMeta* p1, SMeterMeta* p2) {
}
// todo refactor
static FORCE_INLINE char* skipSegments(char* input, char delimiter, int32_t num) {
static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delimiter) {
while (*input != 0 && *input++ != delim) {
};
}
return input;
}
static FORCE_INLINE void copySegment(char* dst, char* src, char delimiter) {
static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
size_t len = 0;
while (*src != delimiter && *src != 0) {
*dst++ = *src++;
len++;
}
return len;
}
/**
* extract meter name from meterid, which has the format userid.dbname.metername
* extract table name from meterid, which has the format userid.dbname.metername
* @param meterId
* @return
*/
void extractMeterName(char* meterId, char* name) {
void extractTableName(char* meterId, char* name) {
char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 2);
copySegment(name, r, TS_PATH_DELIMITER[0]);
copy(name, r, TS_PATH_DELIMITER[0]);
}
SSQLToken extractDBName(char* meterId, char* name) {
char* r = skipSegments(meterId, TS_PATH_DELIMITER[0], 1);
copySegment(name, r, TS_PATH_DELIMITER[0]);
size_t len = copy(name, r, TS_PATH_DELIMITER[0]);
SSQLToken token = {.z = name, .n = strlen(name), .type = TK_STRING};
SSQLToken token = {.z = name, .n = len, .type = TK_STRING};
return token;
}
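The rewritten copy helper returns the copied length, so extractDBName's token no longer calls strlen on a buffer the helper never null-terminated. A standalone sketch of the two helpers with a worked example (helper names and the terminating write are illustrative):

#include <stdio.h>
#include <string.h>

/* skip `num` dot-delimited segments, mirroring skipSegments above */
static char *skipSegs(char *input, char delim, int num) {
  for (int i = 0; i < num; ++i) {
    while (*input != 0 && *input++ != delim) {
    }
  }
  return input;
}

/* copy the next segment and return its length, like the rewritten copy() */
static size_t copySeg(char *dst, const char *src, char delim) {
  size_t len = 0;
  while (*src != delim && *src != 0) {
    *dst++ = *src++;
    len++;
  }
  *dst = 0;  /* illustrative; the helper above leaves termination to the caller */
  return len;
}

int main(void) {
  char meterId[] = "root.testdb.weather";  /* userid.dbname.metername */
  char name[32], db[32];
  copySeg(name, skipSegs(meterId, '.', 2), '.');
  size_t n = copySeg(db, skipSegs(meterId, '.', 1), '.');
  printf("table=%s db=%s dblen=%zu\n", name, db, n);  /* table=weather db=testdb dblen=6 */
  return 0;
}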

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -14,23 +14,24 @@
*/
#include "os.h"
#include "hash.h"
#include "tcache.h"
#include "tlog.h"
#include "tnote.h"
#include "trpc.h"
#include "tscJoinProcess.h"
#include "tscProfile.h"
#include "tscSQLParser.h"
#include "tscSecondaryMerge.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tscompression.h"
#include "tsocket.h"
#include "tscSQLParser.h"
#include "ttimer.h"
#include "tutil.h"
#include "tnote.h"
TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos) {
TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) {
STscObj *pObj;
taos_init();
@ -62,19 +63,17 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
}
}
#ifdef CLUSTER
if (ip && ip[0]) {
tscMgmtIpList.numOfIps = 4;
strcpy(tscMgmtIpList.ipstr[0], ip);
tscMgmtIpList.ip[0] = inet_addr(ip);
strcpy(tscMgmtIpList.ipstr[1], ip);
tscMgmtIpList.ip[1] = inet_addr(ip);
strcpy(tscMgmtIpList.ipstr[2], tsMasterIp);
tscMgmtIpList.ip[2] = inet_addr(tsMasterIp);
strcpy(tscMgmtIpList.ipstr[3], tsSecondIp);
tscMgmtIpList.ip[3] = inet_addr(tsSecondIp);
}
#else
if (ip && ip[0]) {
if (ip != tsServerIpStr) {
strcpy(tsServerIpStr, ip);
}
tsServerIp = inet_addr(ip);
}
#endif
pObj = (STscObj *)malloc(sizeof(STscObj));
if (NULL == pObj) {
@ -127,8 +126,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
}
pSql->cmd.command = TSDB_SQL_CONNECT;
int ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
if (TSDB_CODE_SUCCESS != ret) {
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY;
free(pSql);
free(pObj);
@ -152,11 +150,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
if (ip == NULL || (ip != NULL && (strcmp("127.0.0.1", ip) == 0 || strcasecmp("localhost", ip) == 0))) {
#ifdef CLUSTER
ip = tsMasterIp;
#else
ip = tsServerIpStr;
#endif
}
tscTrace("try to create a connection to %s", ip);
@ -165,38 +159,12 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
STscObj *pObj = (STscObj *)taos;
// version compare only requires the first 3 segments of the version string
int32_t comparedSegments = 3;
char client_version[64] = {0};
char server_version[64] = {0};
int clientVersionNumber[4] = {0};
int serverVersionNumber[4] = {0};
strcpy(client_version, version);
strcpy(server_version, taos_get_server_info(taos));
if (!taosGetVersionNumber(client_version, clientVersionNumber)) {
tscError("taos:%p, invalid client version:%s", taos, client_version);
pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION;
int code = taosCheckVersion(version, taos_get_server_info(taos), 3);
if (code != 0) {
pObj->pSql->res.code = code;
taos_close(taos);
return NULL;
}
if (!taosGetVersionNumber(server_version, serverVersionNumber)) {
tscError("taos:%p, invalid server version:%s", taos, server_version);
pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION;
taos_close(taos);
return NULL;
}
for(int32_t i = 0; i < comparedSegments; ++i) {
if (clientVersionNumber[i] != serverVersionNumber[i]) {
tscError("taos:%p, the %d-th number of server version:%s not matched with client version:%s, close connection",
taos, i, server_version, version);
pObj->pSql->res.code = TSDB_CODE_INVALID_CLIENT_VERSION;
taos_close(taos);
return NULL;
}
}
}
return taos;
@ -204,11 +172,6 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos) {
#ifndef CLUSTER
if (ip == NULL) {
ip = tsServerIpStr;
}
#endif
return taos_connect_imp(ip, user, pass, db, port, fp, param, taos);
}
@ -230,15 +193,17 @@ int taos_query_imp(STscObj* pObj, SSqlObj* pSql) {
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
pSql->asyncTblPos = NULL;
if (NULL != pSql->pTableHashList) {
taosCleanUpIntHash(pSql->pTableHashList);
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
}
tscTrace("%p SQL: %s pObj:%p", pSql, pSql->sqlstr, pObj);
tscDump("%p pObj:%p, SQL: %s", pSql, pObj, pSql->sqlstr);
pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false);
pRes->code = (uint8_t)tsParseSql(pSql, false);
/*
* set the qhandle to 0 before returning, in order to erase the qhandle value assigned in the previous successful query.
@ -276,8 +241,9 @@ int taos_query(TAOS *taos, const char *sqlstr) {
SSqlRes *pRes = &pSql->res;
size_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
pRes->code = tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql
if (sqlLen > tsMaxSQLStringLen) {
pRes->code =
tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql
tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
return pRes->code;
@ -322,8 +288,12 @@ int taos_num_fields(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) return 0;
SFieldInfo *pFieldsInfo = &pSql->cmd.fieldsInfo;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
return 0;
}
SFieldInfo *pFieldsInfo = &pQueryInfo->fieldsInfo;
return (pFieldsInfo->numOfOutputCols - pFieldsInfo->numOfHiddenCols);
}
@ -345,7 +315,8 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
if (pSql == NULL || pSql->signature != pSql) return 0;
return pSql->cmd.fieldsInfo.pFields;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
return pQueryInfo->fieldsInfo.pFields;
}
int taos_retrieve(TAOS_RES *res) {
@ -391,47 +362,54 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
// the secondary merge has handled this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotal += pRes->numOfRows;
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) +
pRes->bytes[i] * (1 - pCmd->order.order) * (pRes->numOfRows - 1);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
// pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) +
// pRes->bytes[i] * (1 - pQueryInfo->order.order) * (pRes->numOfRows - 1);
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order);
}
*rows = pRes->tsrow;
return (pCmd->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows;
return (pQueryInfo->order.order == TSQL_SO_DESC) ? pRes->numOfRows : -pRes->numOfRows;
}
static void **doSetResultRowData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
int32_t num = 0;
assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows);
for (int i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pCmd, i, pCmd->order) + pRes->bytes[i] * pRes->row;
if (pRes->row >= pRes->numOfRows) { // all the results have been returned to the invoker
tfree(pRes->tsrow);
return pRes->tsrow;
}
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
int32_t num = 0;
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
pRes->tsrow[i] = TSC_GET_RESPTR_BASE(pRes, pQueryInfo, i, pQueryInfo->order) + pRes->bytes[i] * pRes->row;
// primary key column cannot be null in interval query, no need to check
if (i == 0 && pCmd->nAggTimeInterval > 0) {
if (i == 0 && pQueryInfo->nAggTimeInterval > 0) {
continue;
}
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
if (isNull(pRes->tsrow[i], pField->type)) {
pRes->tsrow[i] = NULL;
} else if (pField->type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to the native encoding in a temporary buffer; reserve extra bytes for the terminating symbol
if (pRes->buffer[num] == NULL) {
pRes->buffer[num] = malloc(pField->bytes + 1);
} else {
pRes->buffer[num] = realloc(pRes->buffer[num], pField->bytes + 1);
pRes->buffer[num] = malloc(pField->bytes + TSDB_NCHAR_SIZE);
}
/* string terminated */
memset(pRes->buffer[num], 0, pField->bytes + 1);
/* terminating char for binary data */
memset(pRes->buffer[num], 0, pField->bytes + TSDB_NCHAR_SIZE);
if (taosUcs4ToMbs(pRes->tsrow[i], pField->bytes, pRes->buffer[num])) {
pRes->tsrow[i] = pRes->buffer[num];
@ -439,48 +417,129 @@ static void **doSetResultRowData(SSqlObj *pSql) {
tscError("%p charset:%s to %s. val:%ls convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, pRes->tsrow);
pRes->tsrow[i] = NULL;
}
num++;
}
}
assert(num <= pCmd->fieldsInfo.numOfOutputCols);
assert(num <= pQueryInfo->fieldsInfo.numOfOutputCols);
pRes->row++; // index increase one-step
return pRes->tsrow;
}
static void **getOneRowFromBuf(SSqlObj *pSql) {
doSetResultRowData(pSql);
SSqlRes *pRes = &pSql->res;
pRes->row++;
return pRes->tsrow;
}
static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
while (1) {
static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) {
bool hasData = true;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
bool allSubqueryExhausted = true;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
if (pSql->pSubs[i] == NULL) {
continue;
}
// in the case of an inner join, if any subquery is exhausted, the query is completed
if (pRes1->numOfRows == 0) {
hasData = false;
SSqlRes *pRes1 = &pSql->pSubs[i]->res;
SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd;
SQueryInfo * pQueryInfo1 = tscGetQueryInfoDetail(pCmd1, pCmd1->clauseIndex);
SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo1, 0);
assert(pQueryInfo1->numOfTables == 1);
/*
* if the global limitation has not been reached, and the current result set is not exhausted, or more vnodes
* are still available, keep going
*/
if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows &&
(!tscHasReachLimitation(pQueryInfo1, pRes1))) {
allSubqueryExhausted = false;
break;
}
}
if (!hasData) { // free all sub sqlobj
tscTrace("%p one subquery exhausted, free other %d subquery", pSql, pSql->numOfSubs - 1);
hasData = !allSubqueryExhausted;
} else { // otherwise, in the case of an inner join, if any subquery is exhausted, the query is completed.
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == 0) {
continue;
}
SSqlRes * pRes1 = &pSql->pSubs[i]->res;
SQueryInfo *pQueryInfo1 = tscGetQueryInfoDetail(&pSql->pSubs[i]->cmd, 0);
if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pQueryInfo1, pRes1) &&
tscProjectionQueryOnTable(pQueryInfo1)) ||
(pRes1->numOfRows == 0)) {
hasData = false;
break;
}
}
}
return hasData;
}
static void **tscBuildResFromSubqueries(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
while (1) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
if (pRes->tsrow == NULL) {
pRes->tsrow = calloc(pQueryInfo->exprsInfo.numOfExprs, POINTER_BYTES);
}
bool success = false;
int32_t numOfTableHasRes = 0;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] != 0) {
numOfTableHasRes++;
}
}
if (numOfTableHasRes >= 2) { // do merge result
success = (doSetResultRowData(pSql->pSubs[0]) != NULL) &&
(doSetResultRowData(pSql->pSubs[1]) != NULL);
// TSKEY key1 = *(TSKEY *)pRes1->tsrow[0];
// TSKEY key2 = *(TSKEY *)pRes2->tsrow[0];
// printf("first:%" PRId64 ", second:%" PRId64 "\n", key1, key2);
} else { // only one subquery
SSqlObj *pSub = pSql->pSubs[0];
if (pSub == NULL) {
pSub = pSql->pSubs[1];
}
success = (doSetResultRowData(pSub) != NULL);
}
if (success) { // current row of final output has been built, return to app
for (int32_t i = 0; i < pQueryInfo->exprsInfo.numOfExprs; ++i) {
int32_t tableIndex = pRes->pColumnIndex[i].tableIndex;
int32_t columnIndex = pRes->pColumnIndex[i].columnIndex;
SSqlRes *pRes1 = &pSql->pSubs[tableIndex]->res;
pRes->tsrow[i] = pRes1->tsrow[columnIndex];
}
pRes->numOfTotalInCurrentClause++;
break;
} else { // continue retrieve data from vnode
if (!tscHashRemainDataInSubqueryResultSet(pSql)) {
tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
SSubqueryState *pState = NULL;
// free all sub sqlobj
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj *pChildObj = pSql->pSubs[i];
if (pChildObj == NULL) {
continue;
}
SJoinSubquerySupporter *pSupporter = (SJoinSubquerySupporter *)pChildObj->param;
pState = pSupporter->pState;
@ -492,52 +551,6 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) {
return NULL;
}
if (pRes->tsrow == NULL) {
pRes->tsrow = malloc(sizeof(void *) * pCmd->exprsInfo.numOfExprs);
}
bool success = false;
if (pSql->numOfSubs >= 2) {
// do merge result
SSqlRes *pRes1 = &pSql->pSubs[0]->res;
SSqlRes *pRes2 = &pSql->pSubs[1]->res;
while (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) {
doSetResultRowData(pSql->pSubs[0]);
doSetResultRowData(pSql->pSubs[1]);
TSKEY key1 = *(TSKEY *)pRes1->tsrow[0];
TSKEY key2 = *(TSKEY *)pRes2->tsrow[0];
if (key1 == key2) {
success = true;
pRes1->row++;
pRes2->row++;
break;
} else if (key1 < key2) {
pRes1->row++;
} else if (key1 > key2) {
pRes2->row++;
}
}
} else {
SSqlRes *pRes1 = &pSql->pSubs[0]->res;
doSetResultRowData(pSql->pSubs[0]);
success = (pRes1->row++ < pRes1->numOfRows);
}
if (success) {
for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) {
int32_t tableIndex = pRes->pColumnIndex[i].tableIndex;
int32_t columnIndex = pRes->pColumnIndex[i].columnIndex;
SSqlRes *pRes1 = &pSql->pSubs[tableIndex]->res;
pRes->tsrow[i] = pRes1->tsrow[columnIndex];
}
break;
} else {
tscFetchDatablockFromSubquery(pSql);
if (pRes->code != TSDB_CODE_SUCCESS) {
return NULL;
@ -559,75 +572,77 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) {
if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
if (pRes->code == TSDB_CODE_SUCCESS) {
return tscJoinResultsetFromBuf(pSql);
tscTrace("%p data from all subqueries have been retrieved to client", pSql);
return tscBuildResFromSubqueries(pSql);
} else {
tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code);
return NULL;
}
} else if (pRes->row >= pRes->numOfRows) {
/**
* NOT a join query
*
* If the data blocks of the current result set have already been consumed, try to fetch the next
* data block from the virtual node.
*/
tscResetForNextRetrieve(pRes);
if (pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}
tscProcessSql(pSql);
tscProcessSql(pSql); // retrieve data from virtual node
// if retrieving data from the current virtual node failed, try the next one if it exists
if (hasMoreVnodesToTry(pSql)) {
tscTryQueryNextVnode(pSql, NULL);
}
/*
* the local reducer has handled this case,
* so there is no need to add pRes->numOfRows for a super table query
*/
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotalInCurrentClause += pRes->numOfRows;
}
if (pRes->numOfRows == 0) {
return NULL;
}
// the local reducer has handled this situation
if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
pRes->numOfTotal += pRes->numOfRows;
}
}
return getOneRowFromBuf(pSql);
return doSetResultRowData(pSql);
}
TAOS_ROW taos_fetch_row(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pSql == NULL || pSql->signature != pSql) {
globalCode = TSDB_CODE_DISCONNECTED;
return NULL;
}
// projection query on a metric: retrieve data from the vnode list in a pipeline, instead of a two-stage merge
TAOS_ROW rows = taos_fetch_row_impl(res);
while (rows == NULL && tscProjectionQueryOnMetric(pCmd)) {
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// reach the maximum number of output rows, abort
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
return NULL;
}
/*
* update the limit and offset value according to current retrieval results
* Note: if pRes->offset > 0, pRes->numOfRows = 0, pRes->numOfTotal = 0;
* projection query on a super table: access each virtual node sequentially to retrieve data from the vnode
* list, instead of a two-stage merge
*/
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
TAOS_ROW rows = taos_fetch_row_impl(res);
if (rows != NULL) {
return rows;
}
assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0));
// current subclause is completed, try the next subclause
while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) {
tscTryQueryNextClause(pSql, NULL);
if ((++pCmd->vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pCmd->command = TSDB_SQL_SELECT;
assert(pSql->fp == NULL);
tscProcessSql(pSql);
// if the rows is not NULL, return immediately
rows = taos_fetch_row_impl(res);
}
// check!!!
if (rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
break;
}
}
return rows;
}
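/*
 * A self-contained sketch of the multi-clause fetch contract above
 * (hypothetical structure, not the tsc one): a NULL row from the current
 * clause moves the iterator to the next sub-clause until none remain.
 */
typedef struct {
  int clauseIndex, numOfClause;
  int rowsLeftInClause;
} ClauseIterSketch;
static int fetchRowSketch(ClauseIterSketch *it) {
  while (it->rowsLeftInClause == 0 &&
         it->clauseIndex < it->numOfClause - 1) {
    it->clauseIndex++;          /* current clause exhausted: try the next sub-clause */
    it->rowsLeftInClause = 2;   /* pretend the new clause returned some rows */
  }
  if (it->rowsLeftInClause == 0) return 0; /* all clauses exhausted */
  it->rowsLeftInClause--;
  return 1;                     /* a row is available */
}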
@ -647,37 +662,34 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) {
// projection query on a metric: retrieve data from the vnode list in a pipeline,
// instead of a two-stage merge
nRows = taos_fetch_block_impl(res, rows);
while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) {
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
return 0;
}
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// current subclause is completed, try the next subclause
while (rows == NULL && pCmd->clauseIndex < pCmd->numOfClause - 1) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
pSql->cmd.command = pQueryInfo->command;
pCmd->clauseIndex++;
pRes->numOfTotal += pRes->numOfTotalInCurrentClause;
pRes->numOfTotalInCurrentClause = 0;
pRes->rspType = 0;
pSql->numOfSubs = 0;
tfree(pSql->pSubs);
if ((++pSql->cmd.vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT;
assert(pSql->fp == NULL);
tscProcessSql(pSql);
nRows = taos_fetch_block_impl(res, rows);
}
// check!!!
if (*rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
break;
}
tscTrace("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause);
tscProcessSql(pSql);
nRows = taos_fetch_block_impl(res, rows);
}
return nRows;
}
int taos_select_db(TAOS *taos, const char *db) {
char sql[64];
char sql[256] = {0};
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
@ -685,12 +697,11 @@ int taos_select_db(TAOS *taos, const char *db) {
return TSDB_CODE_DISCONNECTED;
}
sprintf(sql, "use %s", db);
snprintf(sql, tListLen(sql), "use %s", db);
return taos_query(taos, sql);
}
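/*
 * A minimal sketch of the bounded-formatting pattern above, assuming
 * tListLen is a sizeof-based element count (an assumption, not verified
 * here): the command string can no longer overflow the stack buffer, it is
 * truncated instead.
 */
#include <stdio.h>
static int selectDbSketch(char *out, size_t cap, const char *db) {
  int n = snprintf(out, cap, "use %s", db);
  return (n >= 0 && (size_t)n < cap) ? 0 : -1; /* -1: the db name was truncated */
}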
void taos_free_result(TAOS_RES *res) {
void taos_free_result_imp(TAOS_RES* res, int keepCmd) {
if (res == NULL) return;
SSqlObj *pSql = (SSqlObj *)res;
@ -708,6 +719,8 @@ void taos_free_result(TAOS_RES *res) {
pSql->thandle = NULL;
tscFreeSqlObj(pSql);
tscTrace("%p Async SqlObj is freed by app", pSql);
} else if (keepCmd) {
tscFreeSqlResult(pSql);
} else {
tscFreeSqlObjPartial(pSql);
}
@ -715,9 +728,15 @@ void taos_free_result(TAOS_RES *res) {
}
// set freeFlag to 1 in retrieve message if there are un-retrieved results
pCmd->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
tscFreeSqlObjPartial(pSql);
return;
}
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
/*
* case 1. Partial data have been retrieved from vnodes, but not all data has been retrieved yet.
@ -735,6 +754,8 @@ void taos_free_result(TAOS_RES *res) {
pSql->pStream == NULL && pMeterMetaInfo->pMeterMeta != NULL))) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscTrace("%p code:%d, numOfRows:%d, command:%d", pSql, pRes->code, pRes->numOfRows, pCmd->command);
void *fp = pSql->fp;
if (fp != NULL) {
pSql->freed = 1;
@ -757,9 +778,14 @@ void taos_free_result(TAOS_RES *res) {
* Then this object will be reused and no free operation is required.
*/
pSql->thandle = NULL;
if (keepCmd) {
tscFreeSqlResult(pSql);
tscTrace("%p sql result is freed by app while sql command is kept", pSql);
} else {
tscFreeSqlObjPartial(pSql);
tscTrace("%p sql result is freed by app", pSql);
}
}
} else {
// if no free resource msg is sent to vnode, we free this object immediately.
pSql->thandle = NULL;
@ -768,6 +794,9 @@ void taos_free_result(TAOS_RES *res) {
assert(pRes->numOfRows == 0 || (pCmd->command > TSDB_SQL_LOCAL));
tscFreeSqlObj(pSql);
tscTrace("%p Async sql result is freed by app", pSql);
} else if (keepCmd) {
tscFreeSqlResult(pSql);
tscTrace("%p sql result is freed while sql command is kept", pSql);
} else {
tscFreeSqlObjPartial(pSql);
tscTrace("%p sql result is freed", pSql);
@ -775,6 +804,10 @@ void taos_free_result(TAOS_RES *res) {
}
}
void taos_free_result(TAOS_RES *res) {
taos_free_result_imp(res, 0);
}
int taos_errno(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
int code;
@ -789,23 +822,45 @@ int taos_errno(TAOS *taos) {
return code;
}
static bool validErrorCode(int32_t code) {
return code >= TSDB_CODE_SUCCESS && code < TSDB_CODE_MAX_ERROR_CODE;
}
/*
* In case of invalid sql error, additional information is attached to explain
* why the sql is invalid
*/
static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd* pCmd) {
if (code != TSDB_CODE_INVALID_SQL) {
return false;
}
size_t len = strlen(pCmd->payload);
char* z = NULL;
if (len > 0) {
z = strstr(pCmd->payload, "invalid SQL");
}
return z != NULL;
}
char *taos_errstr(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
uint8_t code;
// char temp[256] = {0};
if (pObj == NULL || pObj->signature != pObj) return tsError[globalCode];
if ((int8_t)(pObj->pSql->res.code) == -1)
code = TSDB_CODE_OTHERS;
else
code = pObj->pSql->res.code;
SSqlObj* pSql = pObj->pSql;
// for invalid sql, additional information is attached to explain why the sql is invalid
if (code == TSDB_CODE_INVALID_SQL) {
// snprintf(temp, tListLen(temp), "invalid SQL: %s", pObj->pSql->cmd.payload);
// strcpy(pObj->pSql->cmd.payload, temp);
return pObj->pSql->cmd.payload;
if (validErrorCode(pSql->res.code)) {
code = pSql->res.code;
} else {
code = TSDB_CODE_OTHERS; //unknown error
}
if (hasAdditionalErrorInfo(code, &pSql->cmd)) {
return pSql->cmd.payload;
} else {
return tsError[code];
}
@ -830,12 +885,15 @@ void taos_stop_query(TAOS_RES *res) {
if (res == NULL) return;
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
if (pSql->signature != pSql) return;
tscTrace("%p start to cancel query", res);
pSql->res.code = TSDB_CODE_QUERY_CANCELLED;
if (tscIsTwoStageMergeMetricQuery(&pSql->cmd)) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (tscIsTwoStageMergeMetricQuery(pQueryInfo, 0)) {
tscKillMetricQuery(pSql);
return;
}
@ -856,6 +914,10 @@ void taos_stop_query(TAOS_RES *res) {
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int len = 0;
for (int i = 0; i < num_fields; ++i) {
if (i > 0) {
str[len++] = ' ';
}
if (row[i] == NULL) {
len += sprintf(str + len, "%s", TSDB_DATA_NULL_STR);
continue;
@ -875,33 +937,34 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
break;
case TSDB_DATA_TYPE_BIGINT:
len += sprintf(str + len, "%lld ", *((int64_t *)row[i]));
len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
len += sprintf(str + len, "%f ", *((float *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
len += sprintf(str + len, "%f", fv);
} break;
case TSDB_DATA_TYPE_DOUBLE:
len += sprintf(str + len, "%lf ", *((double *)row[i]));
break;
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
len += sprintf(str + len, "%lf", dv);
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR: {
/* limit the length of the string to no more than the maximum field length,
* in case the string is not null-terminated */
size_t xlen = strlen(row[i]);
size_t trueLen = MIN(xlen, fields[i].bytes);
memcpy(str + len, (char*) row[i], trueLen);
str[len + trueLen] = ' ';
len += (trueLen + 1);
size_t xlen = 0;
for (xlen = 0; xlen <= fields[i].bytes; xlen++) {
char c = ((char*)row[i])[xlen];
if (c == 0) break;
str[len++] = c;
}
break;
str[len] = 0;
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
len += sprintf(str + len, "%lld ", *((int64_t *)row[i]));
len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_BOOL:
@ -926,11 +989,12 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->numOfTotalInCurrentClause = 0;
tscTrace("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
int32_t sqlLen = strlen(sql);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("%p sql too long", pSql);
pRes->code = TSDB_CODE_INVALID_SQL;
return pRes->code;
@ -948,11 +1012,11 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
pSql->asyncTblPos = NULL;
if (NULL != pSql->pTableHashList) {
taosCleanUpIntHash(pSql->pTableHashList);
taosCleanUpHashTable(pSql->pTableHashList);
pSql->pTableHashList = NULL;
}
pRes->code = (uint8_t)tsParseSql(pSql, pObj->acctId, pObj->db, false);
pRes->code = (uint8_t)tsParseSql(pSql, false);
int code = pRes->code;
tscTrace("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
@ -963,7 +1027,6 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must run before cleaning the sqlcmd object
tscRemoveAllMeterMetaInfo(&pSql->cmd, false);
tscCleanSqlCmd(&pSql->cmd);
SSqlCmd *pCmd = &pSql->cmd;
@ -974,7 +1037,10 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t
int code = TSDB_CODE_INVALID_METER_ID;
char *str = (char *)tblNameList;
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd);
SQueryInfo *pQueryInfo = NULL;
tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex, &pQueryInfo);
SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pQueryInfo);
if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) {
return code;
@ -1009,7 +1075,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t
return code;
}
if ((code = setMeterID(pSql, &sToken, 0)) != TSDB_CODE_SUCCESS) {
if ((code = setMeterID(pMeterMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
return code;
}
@ -1054,6 +1120,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
SSqlRes *pRes = &pSql->res;
pRes->numOfTotal = 0; // the number of getting table meta from server
pRes->numOfTotalInCurrentClause = 0;
pRes->code = 0;
assert(pSql->fp == NULL);

View File

@ -31,9 +31,13 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql);
static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer);
static bool isProjectStream(SSqlCmd *pCmd) {
for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pCmd, i);
static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t launchDelay) {
return taosGetTimestamp(pStream->precision) + launchDelay - pStream->stime - 1;
}
static bool isProjectStream(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId != TSDB_FUNC_PRJ) {
return false;
}
@ -66,26 +70,28 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
pSql->fp = tscProcessStreamQueryCallback;
pSql->param = pStream;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
int code = tscGetMeterMeta(pSql, pMeterMetaInfo->name, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
int code = tscGetMeterMeta(pSql, pMeterMetaInfo);
pSql->res.code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
if (code == 0 && UTIL_METER_IS_METRIC(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql);
if (code == 0 && UTIL_METER_IS_SUPERTABLE(pMeterMetaInfo)) {
code = tscGetMetricMeta(pSql, 0);
pSql->res.code = code;
if (code == TSDB_CODE_ACTION_IN_PROGRESS) return;
}
tscTansformSQLFunctionForMetricQuery(&pSql->cmd);
tscTansformSQLFunctionForSTableQuery(pQueryInfo);
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p,get metermeta failed, retry in %lldms", pStream->pSql, pStream, retryDelayTime);
tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
return;
@ -105,22 +111,23 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pStream->numOfRes = 0; // reset the numOfRes.
SSqlObj *pSql = pStream->pSql;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
tscTrace("%p add into timer", pSql);
if (isProjectStream(&pSql->cmd)) {
if (isProjectStream(pQueryInfo)) {
/*
* pSql->cmd.etime, which is the start time, does not change in case of
* pQueryInfo->etime, which is the start time, does not change in case of
* a repeated first execution, after the first execution has failed.
*/
pSql->cmd.stime = pStream->stime; // start time
pQueryInfo->stime = pStream->stime; // start time
pSql->cmd.etime = taosGetTimestamp(pStream->precision); // end time
if (pSql->cmd.etime > pStream->etime) {
pSql->cmd.etime = pStream->etime;
pQueryInfo->etime = taosGetTimestamp(pStream->precision); // end time
if (pQueryInfo->etime > pStream->etime) {
pQueryInfo->etime = pStream->etime;
}
} else {
pSql->cmd.stime = pStream->stime - pStream->interval;
pSql->cmd.etime = pStream->stime - 1;
pQueryInfo->stime = pStream->stime - pStream->interval;
pQueryInfo->etime = pStream->stime - 1;
}
// launch stream computing in a new thread
@ -136,10 +143,10 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, query data failed, code:%d, retry in %lldms", pStream->pSql, pStream, numOfRows,
tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0, 0);
tscClearMeterMetaInfo(pMeterMetaInfo, true);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@ -158,18 +165,18 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
if (timestamp != actualTimestamp) {
// reset the timestamp of each agg point by using start time of each interval
*((int64_t *)pRes->data) = actualTimestamp;
tscWarn("%p stream:%p, timestamp of points is:%lld, reset to %lld", pSql, pStream, timestamp, actualTimestamp);
tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp);
}
}
static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows) {
SSqlStream * pStream = (SSqlStream *)param;
SSqlObj * pSql = (SSqlObj *)res;
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0);
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0, 0);
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %lldms", pSql, pStream, numOfRows, retryDelayTime);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscClearMeterMetaInfo(pMeterMetaInfo, true);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@ -178,11 +185,12 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
pStream->numOfRes += numOfRows;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
for(int32_t i = 0; i < numOfRows; ++i) {
TAOS_ROW row = taos_fetch_row(res);
tscTrace("%p stream:%p fetch result", pSql, pStream);
if (isProjectStream(&pSql->cmd)) {
if (isProjectStream(pQueryInfo)) {
pStream->stime = *(TSKEY *)row[0];
} else {
tscSetTimestampForRes(pStream, pSql);
@ -197,9 +205,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
} else { // numOfRows == 0, all data has been retrieved
pStream->useconds += pSql->res.useconds;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pStream->numOfRes == 0) {
if (pSql->cmd.interpoType == TSDB_INTERPO_SET_VALUE || pSql->cmd.interpoType == TSDB_INTERPO_NULL) {
SSqlCmd *pCmd = &pSql->cmd;
if (pQueryInfo->interpoType == TSDB_INTERPO_SET_VALUE || pQueryInfo->interpoType == TSDB_INTERPO_NULL) {
SSqlRes *pRes = &pSql->res;
/* failed to retrieve any result in this retrieve */
@ -210,11 +219,11 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
void *oldPtr = pSql->res.data;
pSql->res.data = tmpRes;
for (int32_t i = 1; i < pSql->cmd.fieldsInfo.numOfOutputCols; ++i) {
int16_t offset = tscFieldInfoGetOffset(pCmd, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pCmd, i);
for (int32_t i = 1; i < pQueryInfo->fieldsInfo.numOfOutputCols; ++i) {
int16_t offset = tscFieldInfoGetOffset(pQueryInfo, i);
TAOS_FIELD *pField = tscFieldInfoGetField(pQueryInfo, i);
assignVal(pSql->res.data + offset, (char *)(&pCmd->defaultVal[i]), pField->bytes, pField->type);
assignVal(pSql->res.data + offset, (char *)(&pQueryInfo->defaultVal[i]), pField->bytes, pField->type);
row[i] = pSql->res.data + offset;
}
@ -222,7 +231,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
row[0] = pRes->data;
// char result[512] = {0};
// taos_print_row(result, row, pSql->cmd.fieldsInfo.pFields, pSql->cmd.fieldsInfo.numOfOutputCols);
// taos_print_row(result, row, pQueryInfo->fieldsInfo.pFields, pQueryInfo->fieldsInfo.numOfOutputCols);
// tscPrint("%p stream:%p query result: %s", pSql, pStream, result);
tscTrace("%p stream:%p fetch result", pSql, pStream);
@ -231,18 +240,19 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pRes->numOfRows = 0;
pRes->data = oldPtr;
} else if (isProjectStream(&pSql->cmd)) {
} else if (isProjectStream(pQueryInfo)) {
/* no results in the query range, retry */
// TODO: set the retry time dynamically
int32_t retry = tsProjectExecInterval;
tscError("%p stream:%p, retrieve no data, code:%d, retry in %lldms", pSql, pStream, numOfRows, retry);
tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry);
tscClearSqlMetaInfoForce(&(pStream->pSql->cmd));
tscSetRetryTimer(pStream, pStream->pSql, retry);
return;
}
} else {
if (isProjectStream(&pSql->cmd)) {
if (isProjectStream(pQueryInfo)) {
pStream->stime += 1;
}
}
@ -257,7 +267,10 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer) {
if (isProjectStream(&pSql->cmd)) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
int64_t delay = getDelayValueAfterTimewindowClosed(pStream, timer);
if (isProjectStream(pQueryInfo)) {
int64_t now = taosGetTimestamp(pStream->precision);
int64_t etime = now > pStream->etime ? pStream->etime : now;
@ -265,22 +278,22 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
/*
* the current time window will be closed, since it is too early to exceed the maxRetentWindow value
*/
tscTrace("%p stream:%p, etime:%lld is too old, exceeds the max retention time window:%lld, stop the stream",
tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream",
pStream->pSql, pStream, pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
if (pStream->callback) {
// Callback function from upper level
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
return;
}
tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream,
now + timer, timer, pStream->stime, etime);
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
now + timer, timer, delay, pStream->stime, etime);
} else {
tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream,
pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1);
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
@ -289,37 +302,61 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
taosTmrReset(tscProcessStreamTimer, timer, pStream, tscTmr, &pStream->pTimer);
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (remainTimeWindow / 1.5);
}
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
assert(currentDelay < pStream->slidingTime);
return currentDelay;
}
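/*
 * A standalone sketch of the clamped random delay above; slidingMs,
 * maxDelayMs and ratio are hypothetical parameters standing in for
 * pStream->slidingTime, tsMaxStreamComputDelay and
 * tsStreamComputDelayRatio. The lower-bound guard is an addition not
 * present in the original code.
 */
#include <stdint.h>
#include <stdlib.h>
static int64_t launchDelaySketch(int64_t slidingMs, int64_t maxDelayMs, double ratio) {
  int64_t delta = (int64_t)(slidingMs * ratio);
  if (delta > maxDelayMs) delta = maxDelayMs;
  int64_t remain = slidingMs - delta;
  if (maxDelayMs > remain) maxDelayMs = (int64_t)(remain / 1.5);
  if (maxDelayMs < 1) maxDelayMs = 1;      /* guard against a zero modulus */
  return delta + (rand() % maxDelayMs);    /* always below slidingMs */
}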
static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
int64_t timer = 0;
if (isProjectStream(&pSql->cmd)) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (isProjectStream(pQueryInfo)) {
/*
* for a projection query, no matter whether data is fetched successfully or not, the next launch is
* issued after more than one sliding time window
*/
timer = pStream->slidingTime;
if (pStream->stime > pStream->etime) {
tscTrace("%p stream:%p, stime:%lld is larger than end time: %lld, stop the stream", pStream->pSql, pStream,
tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
if (pStream->callback) {
// Callback function from upper level
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
return;
}
} else {
pStream->stime += pStream->slidingTime;
if ((pStream->stime - pStream->interval) >= pStream->etime) {
tscTrace("%p stream:%p, stime:%ld is larger than end time: %ld, stop the stream", pStream->pSql, pStream,
tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
if (pStream->callback) {
// Callback function from upper level
pStream->callback(pStream->param);
}
taos_close_stream(pStream);
return;
}
@ -329,17 +366,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
}
}
int64_t delayDelta = (int64_t)(pStream->slidingTime * 0.1);
delayDelta = (rand() % delayDelta);
timer += getLaunchTimeDelay(pStream);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
timer += delayDelta; // a random number
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer = timer / 1000L;
}
@ -348,64 +376,70 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
}
static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SSqlCmd *pCmd = &pSql->cmd;
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
if (pCmd->nAggTimeInterval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%lld", pSql, pStream,
pCmd->nAggTimeInterval, minIntervalTime);
pCmd->nAggTimeInterval = minIntervalTime;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->nAggTimeInterval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream,
pQueryInfo->nAggTimeInterval, minIntervalTime);
pQueryInfo->nAggTimeInterval = minIntervalTime;
}
pStream->interval = pCmd->nAggTimeInterval; // it shall be derived from sql string
pStream->interval = pQueryInfo->nAggTimeInterval; // it shall be derived from sql string
if (pCmd->nSlidingTime == 0) {
pCmd->nSlidingTime = pCmd->nAggTimeInterval;
if (pQueryInfo->nSlidingTime == 0) {
pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pCmd->nSlidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%lld too small, reset to:%lld", pSql, pStream, pCmd->nSlidingTime,
minSlidingTime);
if (pQueryInfo->nSlidingTime == -1) {
pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval;
} else if (pQueryInfo->nSlidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream,
pQueryInfo->nSlidingTime, minSlidingTime);
pCmd->nSlidingTime = minSlidingTime;
pQueryInfo->nSlidingTime = minSlidingTime;
}
if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) {
tscWarn("%p stream:%p, sliding value:%lld can not be larger than interval range, reset to:%lld", pSql, pStream,
pCmd->nSlidingTime, pCmd->nAggTimeInterval);
if (pQueryInfo->nSlidingTime > pQueryInfo->nAggTimeInterval) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream,
pQueryInfo->nSlidingTime, pQueryInfo->nAggTimeInterval);
pCmd->nSlidingTime = pCmd->nAggTimeInterval;
pQueryInfo->nSlidingTime = pQueryInfo->nAggTimeInterval;
}
pStream->slidingTime = pCmd->nSlidingTime;
pStream->slidingTime = pQueryInfo->nSlidingTime;
pQueryInfo->nAggTimeInterval = 0; // clear the interval value to avoid forced time-window splitting by the query processor
pQueryInfo->nSlidingTime = 0;
}
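/*
 * A minimal standalone sketch of the clamping rules applied above;
 * minInterval/minSliding are hypothetical stand-ins for tsMinIntervalTime
 * and tsMinSlidingTime, already scaled to the stream's precision.
 */
#include <stdint.h>
typedef struct { int64_t interval; int64_t sliding; } WindowSketch;
static void clampWindowSketch(WindowSketch *w, int64_t minInterval, int64_t minSliding) {
  if (w->interval < minInterval) w->interval = minInterval;          /* interval too small: raise it */
  if (w->sliding == 0 || w->sliding == -1) w->sliding = w->interval; /* default: tumbling window */
  if (w->sliding < minSliding) w->sliding = minSliding;              /* sliding too small: raise it */
  if (w->sliding > w->interval) w->sliding = w->interval;            /* sliding capped at the interval */
}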
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (isProjectStream(pCmd)) {
if (isProjectStream(pQueryInfo)) {
// no data in the table; flush all data up to now to the destination meter, with a 10-second delay
pStream->interval = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
assert(stime >= pCmd->stime);
assert(stime >= pQueryInfo->stime);
stime += 1; // exclude the last record from the table
} else {
stime = pCmd->stime;
stime = pQueryInfo->stime;
}
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
tscWarn("%p stream:%p, last timestamp:0, reset to:%lld", pSql, pStream, stime);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime);
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%lld, reset to:%lld", pSql, pStream, stime, newStime);
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime);
stime = newStime;
}
}
@ -418,23 +452,11 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
int64_t timer = pStream->stime - taosGetTimestamp(pStream->precision);
if (timer < 0) timer = 0;
int64_t delayDelta = (int64_t)(pStream->interval * 0.1);
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t startDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;
srand(time(NULL));
timer += (rand() % delayDelta); // a random number
if (timer < startDelay || timer > maxDelay) {
timer = (timer % startDelay) + startDelay;
}
timer += getLaunchTimeDelay(pStream);
timer += startDelay;
return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
}
@ -447,8 +469,11 @@ static void setErrorInfo(STscObj* pObj, int32_t code, char* info) {
SSqlCmd* pCmd = &pObj->pSql->cmd;
pObj->pSql->res.code = code;
if (info != NULL) {
strncpy(pCmd->payload, info, pCmd->payloadLen);
}
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *)) {
@ -495,8 +520,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
return NULL;
}
// TODO: refactor later to use an enum
pSql->cmd.count = 1; // 1 means the sql is in a stream, which allows the sliding clause.
pSql->cmd.inStream = 1; // 1 means the sql is in a stream, which allows the sliding clause.
pRes->code = tscToSQLCmd(pSql, &SQLInfo);
SQLInfoDestroy(&SQLInfo);
@ -517,7 +541,8 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
return NULL;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfoFromQueryInfo(pQueryInfo, 0);
pStream->fp = fp;
pStream->callback = callback;
@ -526,7 +551,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
pStream->precision = pMeterMetaInfo->pMeterMeta->precision;
pStream->ctime = taosGetTimestamp(pStream->precision);
pStream->etime = pCmd->etime;
pStream->etime = pQueryInfo->etime;
pSql->pStream = pStream;
tscAddIntoStreamList(pStream);
@ -537,7 +562,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
int64_t starttime = tscGetLaunchTimestamp(pStream);
taosTmrReset(tscProcessStreamTimer, starttime, pStream, tscTmr, &pStream->pTimer);
tscTrace("%p stream:%p is opened, query on:%s, interval:%lld, sliding:%lld, first launched in:%lld, sql:%s", pSql,
tscTrace("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pMeterMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, sqlstr);
return pStream;

View File

@ -22,125 +22,410 @@
#include "tsclient.h"
#include "tsocket.h"
#include "ttime.h"
#include "ttimer.h"
#include "tutil.h"
#include "tscUtil.h"
#include "tcache.h"
#include "tscProfile.h"
typedef struct {
typedef struct SSubscriptionProgress {
int64_t uid;
TSKEY key;
} SSubscriptionProgress;
typedef struct SSub {
void * signature;
char name[TSDB_METER_ID_LEN];
int mseconds;
TSKEY lastKey;
uint64_t stime;
TAOS_FIELD fields[TSDB_MAX_COLUMNS];
int numOfFields;
char topic[32];
int64_t lastSyncTime;
int64_t lastConsumeTime;
TAOS * taos;
TAOS_RES * result;
void * pTimer;
SSqlObj * pSql;
int interval;
TAOS_SUBSCRIBE_CALLBACK fp;
void * param;
int numOfMeters;
SSubscriptionProgress * progress;
} SSub;
TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, const char *db, const char *name, int64_t time, int mseconds) {
SSub *pSub;
pSub = (SSub *)malloc(sizeof(SSub));
if (pSub == NULL) return NULL;
memset(pSub, 0, sizeof(SSub));
pSub->signature = pSub;
strcpy(pSub->name, name);
pSub->mseconds = mseconds;
pSub->lastKey = time;
if (pSub->lastKey == 0) {
pSub->lastKey = taosGetTimestampMs();
static int tscCompareSubscriptionProgress(const void* a, const void* b) {
const SSubscriptionProgress* x = (const SSubscriptionProgress*)a;
const SSubscriptionProgress* y = (const SSubscriptionProgress*)b;
if (x->uid > y->uid) return 1;
if (x->uid < y->uid) return -1;
return 0;
}
taos_init();
pSub->taos = taos_connect(host, user, pass, NULL, 0);
if (pSub->taos == NULL) {
tfree(pSub);
} else {
char qstr[128];
sprintf(qstr, "use %s", db);
int res = taos_query(pSub->taos, qstr);
if (res != 0) {
tscError("failed to open DB:%s", db);
taos_close(pSub->taos);
tfree(pSub);
} else {
sprintf(qstr, "select * from %s where _c0 > now+1000d", pSub->name);
if (taos_query(pSub->taos, qstr)) {
tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos));
taos_close(pSub->taos);
tfree(pSub);
TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid) {
if (sub == NULL)
return 0;
SSub* pSub = (SSub*)sub;
for (int s = 0, e = pSub->numOfMeters; s < e;) {
int m = (s + e) / 2;
SSubscriptionProgress* p = pSub->progress + m;
if (p->uid > uid)
e = m;
else if (p->uid < uid)
s = m + 1;
else
return p->key;
}
return 0;
}
void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) {
if( sub == NULL)
return;
SSub* pSub = (SSub*)sub;
for (int s = 0, e = pSub->numOfMeters; s < e;) {
int m = (s + e) / 2;
SSubscriptionProgress* p = pSub->progress + m;
if (p->uid > uid)
e = m;
else if (p->uid < uid)
s = m + 1;
else {
if (ts >= p->key) p->key = ts;
break;
}
}
}
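/*
 * A self-contained demonstration of the sorted-progress invariant the two
 * helpers above rely on (sketch only; same binary-search shape, hypothetical
 * struct). tscUpdateSubscription and tscLoadSubscriptionProgress establish
 * the ordering via qsort with tscCompareSubscriptionProgress.
 */
#include <stdint.h>
#include <stdio.h>
typedef struct { int64_t uid; int64_t key; } ProgressSketch;
static int64_t lookupSketch(const ProgressSketch *p, int n, int64_t uid) {
  int s = 0, e = n;
  while (s < e) {
    int m = (s + e) / 2;
    if (p[m].uid > uid)      e = m;
    else if (p[m].uid < uid) s = m + 1;
    else return p[m].key;
  }
  return 0; /* unknown table: start from the very beginning, as above */
}
int main() {
  ProgressSketch prog[] = {{7, 100}, {42, 200}, {99, 300}}; /* sorted by uid */
  printf("%lld\n", (long long)lookupSketch(prog, 3, 42));   /* prints 200 */
  return 0;
}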
static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* sql) {
SSub* pSub = calloc(1, sizeof(SSub));
if (pSub == NULL) {
globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("failed to allocate memory for subscription");
return NULL;
}
pSub->result = taos_use_result(pSub->taos);
pSub->numOfFields = taos_num_fields(pSub->result);
memcpy(pSub->fields, taos_fetch_fields(pSub->result), sizeof(TAOS_FIELD) * pSub->numOfFields);
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY;
tscError("failed to allocate SSqlObj for subscription");
goto failed;
}
pSql->signature = pSql;
pSql->pTscObj = pObj;
char* sqlstr = (char*)malloc(strlen(sql) + 1);
if (sqlstr == NULL) {
tscError("failed to allocate sql string for subscription");
goto failed;
}
strcpy(sqlstr, sql);
strtolower(sqlstr, sqlstr);
pSql->sqlstr = sqlstr;
tsem_init(&pSql->rspSem, 0, 0);
tsem_init(&pSql->emptyRspSem, 0, 1);
SSqlRes *pRes = &pSql->res;
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pSql->pSubscription = pSub;
pSub->pSql = pSql;
pSub->signature = pSub;
strncpy(pSub->topic, topic, sizeof(pSub->topic));
pSub->topic[sizeof(pSub->topic) - 1] = 0;
return pSub;
failed:
if (sqlstr != NULL) {
free(sqlstr);
}
if (pSql != NULL) {
free(pSql);
}
free(pSub);
return NULL;
}
static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
SSub *pSub = (SSub *)handle;
if (pSub == NULL || pSub->pTimer != tmrId) return;
TAOS_RES* res = taos_consume(pSub);
if (res != NULL) {
pSub->fp(pSub, res, pSub->param, 0);
}
taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer);
}
int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
int code = (uint8_t)tsParseSql(pSub->pSql, false);
if (code != TSDB_CODE_SUCCESS) {
tscError("failed to parse sql statement: %s", pSub->topic);
return 0;
}
SSqlCmd* pCmd = &pSub->pSql->cmd;
if (pCmd->command != TSDB_SQL_SELECT) {
tscError("only 'select' statement is allowed in subscription: %s", pSub->topic);
return 0;
}
SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0, 0);
int numOfMeters = 0;
if (!UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta;
for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) {
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i);
numOfMeters += pVnodeSidList->numOfSids;
}
}
SSubscriptionProgress* progress = (SSubscriptionProgress*)calloc(numOfMeters, sizeof(SSubscriptionProgress));
if (progress == NULL) {
tscError("failed to allocate memory for progress: %s", pSub->topic);
return 0;
}
if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) {
numOfMeters = 1;
int64_t uid = pMeterMetaInfo->pMeterMeta->uid;
progress[0].uid = uid;
progress[0].key = tscGetSubscriptionProgress(pSub, uid);
} else {
SMetricMeta* pMetricMeta = pMeterMetaInfo->pMetricMeta;
numOfMeters = 0;
for (int32_t i = 0; i < pMetricMeta->numOfVnodes; i++) {
SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, i);
for (int32_t j = 0; j < pVnodeSidList->numOfSids; j++) {
SMeterSidExtInfo *pMeterInfo = tscGetMeterSidInfo(pVnodeSidList, j);
int64_t uid = pMeterInfo->uid;
progress[numOfMeters].uid = uid;
progress[numOfMeters++].key = tscGetSubscriptionProgress(pSub, uid);
}
}
qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress);
}
free(pSub->progress);
pSub->numOfMeters = numOfMeters;
pSub->progress = progress;
pSub->lastSyncTime = taosGetTimestampMs();
return 1;
}
static int tscLoadSubscriptionProgress(SSub* pSub) {
char buf[TSDB_MAX_SQL_LEN];
sprintf(buf, "%s/subscribe/%s", dataDir, pSub->topic);
FILE* fp = fopen(buf, "r");
if (fp == NULL) {
tscTrace("subscription progress file does not exist: %s", pSub->topic);
return 1;
}
if (fgets(buf, sizeof(buf), fp) == NULL) {
tscTrace("invalid subscription progress file: %s", pSub->topic);
fclose(fp);
return 0;
}
for (int i = 0; i < sizeof(buf); i++) {
if (buf[i] == 0)
break;
if (buf[i] == '\r' || buf[i] == '\n') {
buf[i] = 0;
break;
}
}
if (strcmp(buf, pSub->pSql->sqlstr) != 0) {
tscTrace("subscription sql statement mismatch: %s", pSub->topic);
fclose(fp);
return 0;
}
if (fgets(buf, sizeof(buf), fp) == NULL || atoi(buf) < 0) {
tscTrace("invalid subscription progress file: %s", pSub->topic);
fclose(fp);
return 0;
}
int numOfMeters = atoi(buf);
SSubscriptionProgress* progress = calloc(numOfMeters, sizeof(SSubscriptionProgress));
for (int i = 0; i < numOfMeters; i++) {
if (fgets(buf, sizeof(buf), fp) == NULL) {
fclose(fp);
free(progress);
return 0;
}
int64_t uid, key;
sscanf(buf, "%" SCNd64 ":%" SCNd64, &uid, &key);
progress[i].uid = uid;
progress[i].key = key;
}
fclose(fp);
qsort(progress, numOfMeters, sizeof(SSubscriptionProgress), tscCompareSubscriptionProgress);
pSub->numOfMeters = numOfMeters;
pSub->progress = progress;
tscTrace("subscription progress loaded, %d tables: %s", numOfMeters, pSub->topic);
return 1;
}
void tscSaveSubscriptionProgress(void* sub) {
SSub* pSub = (SSub*)sub;
char path[256];
sprintf(path, "%s/subscribe", dataDir);
if (access(path, 0) != 0) {
mkdir(path, 0777);
}
sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic);
FILE* fp = fopen(path, "w+");
if (fp == NULL) {
tscError("failed to create progress file for subscription: %s", pSub->topic);
return;
}
fputs(pSub->pSql->sqlstr, fp);
fprintf(fp, "\n%d\n", pSub->numOfMeters);
for (int i = 0; i < pSub->numOfMeters; i++) {
int64_t uid = pSub->progress[i].uid;
TSKEY key = pSub->progress[i].key;
fprintf(fp, "%" PRId64 ":%" PRId64 "\n", uid, key);
}
fclose(fp);
}
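/*
 * Layout of the progress file written above, with illustrative values
 * (the uids and timestamps are made up):
 *
 *   select * from meters
 *   2
 *   4951234567890:1582693200000
 *   4951234567891:1582693205000
 *
 * Line 1 is the subscription's sql statement, line 2 the table count, then
 * one "uid:lastKey" pair per table -- exactly the shape that
 * tscLoadSubscriptionProgress parses back with fgets/sscanf.
 */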
TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) {
STscObj* pObj = (STscObj*)taos;
if (pObj == NULL || pObj->signature != pObj) {
globalCode = TSDB_CODE_DISCONNECTED;
tscError("connection disconnected");
return NULL;
}
SSub* pSub = tscCreateSubscription(pObj, topic, sql);
if (pSub == NULL) {
return NULL;
}
pSub->taos = taos;
if (restart) {
tscTrace("restart subscription: %s", topic);
} else {
tscLoadSubscriptionProgress(pSub);
}
if (!tscUpdateSubscription(pObj, pSub)) {
taos_unsubscribe(pSub, 1);
return NULL;
}
pSub->interval = interval;
if (fp != NULL) {
tscTrace("asynchronize subscription, create new timer", topic);
pSub->fp = fp;
pSub->param = param;
taosTmrReset(tscProcessSubscriptionTimer, interval, pSub, tscTmr, &pSub->pTimer);
}
return pSub;
}
TAOS_ROW taos_consume(TAOS_SUB *tsub) {
SSub * pSub = (SSub *)tsub;
TAOS_ROW row;
char qstr[256];
void taos_free_result_imp(SSqlObj* pSql, int keepCmd);
TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SSub *pSub = (SSub *)tsub;
if (pSub == NULL) return NULL;
if (pSub->signature != pSub) return NULL;
while (1) {
if (pSub->result != NULL) {
row = taos_fetch_row(pSub->result);
if (row != NULL) {
pSub->lastKey = *((uint64_t *)row[0]);
return row;
tscSaveSubscriptionProgress(pSub);
SSqlObj* pSql = pSub->pSql;
SSqlRes *pRes = &pSql->res;
if (pSub->pTimer == NULL) {
int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
if (duration < (int64_t)(pSub->interval)) {
tscTrace("subscription consume too frequently, blocking...");
taosMsleep(pSub->interval - (int32_t)duration);
}
}
taos_free_result(pSub->result);
pSub->result = NULL;
uint64_t etime = taosGetTimestampMs();
int64_t mseconds = pSub->mseconds - etime + pSub->stime;
if (mseconds < 0) mseconds = 0;
taosMsleep((int)mseconds);
for (int retry = 0; retry < 3; retry++) {
tscRemoveFromSqlList(pSql);
if (taosGetTimestampMs() - pSub->lastSyncTime > 10 * 60 * 1000) {
tscTrace("begin meter synchronization");
char* sqlstr = pSql->sqlstr;
pSql->sqlstr = NULL;
taos_free_result_imp(pSql, 0);
pSql->sqlstr = sqlstr;
taosClearDataCache(tscCacheHandle);
if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL;
tscTrace("meter synchronization completed");
} else {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
uint16_t type = pQueryInfo->type;
taos_free_result_imp(pSql, 1);
pRes->numOfRows = 1;
pRes->numOfTotal = 0;
pRes->qhandle = 0;
pSql->thandle = NULL;
pSql->cmd.command = TSDB_SQL_SELECT;
pQueryInfo->type = type;
tscGetMeterMetaInfo(&pSql->cmd, 0, 0)->vnodeIndex = 0;
}
pSub->stime = taosGetTimestampMs();
tscDoQuery(pSql);
if (pRes->code != TSDB_CODE_NOT_ACTIVE_TABLE) {
break;
}
// meter was removed, make sync time zero, so that next retry will
// do synchronization first
pSub->lastSyncTime = 0;
}
sprintf(qstr, "select * from %s where _c0 > %lld order by _c0 asc", pSub->name, pSub->lastKey);
if (taos_query(pSub->taos, qstr)) {
tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos));
if (pRes->code != TSDB_CODE_SUCCESS) {
tscError("failed to query data, error code=%d", pRes->code);
tscRemoveFromSqlList(pSql);
return NULL;
}
pSub->result = taos_use_result(pSub->taos);
if (pSub->result == NULL) {
tscTrace("failed to get result, reason:%s", taos_errstr(pSub->taos));
return NULL;
}
pSub->lastConsumeTime = taosGetTimestampMs();
return pSql;
}
return NULL;
}
void taos_unsubscribe(TAOS_SUB *tsub) {
void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
SSub *pSub = (SSub *)tsub;
if (pSub == NULL || pSub->signature != pSub) return;
if (pSub == NULL) return;
if (pSub->signature != pSub) return;
if (pSub->pTimer != NULL) {
taosTmrStop(pSub->pTimer);
}
taos_close(pSub->taos);
if (keepProgress) {
tscSaveSubscriptionProgress(pSub);
} else {
char path[256];
sprintf(path, "%s/subscribe/%s", dataDir, pSub->topic);
remove(path);
}
tscFreeSqlObj(pSub->pSql);
free(pSub->progress);
memset(pSub, 0, sizeof(*pSub));
free(pSub);
}
int taos_subfields_count(TAOS_SUB *tsub) {
SSub *pSub = (SSub *)tsub;
return pSub->numOfFields;
}
TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub) {
SSub *pSub = (SSub *)tsub;
return pSub->fields;
}

View File

@ -26,7 +26,7 @@
int32_t step = ((_ord) == TSQL_SO_ASC) ? 1 : -1; \
\
if ((len1) == (len2)) { \
for (; i < (len2) && i >= 0; i += step, (out) += step) { \
for (; i < (len2) && i >= 0; i += step, (out) += 1) { \
if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \
continue; \
@ -34,7 +34,7 @@
*(out) = (double)(left)[i] op(right)[i]; \
} \
} else if ((len1) == 1) { \
for (; i >= 0 && i < (len2); i += step, (out) += step) { \
for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \
setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \
continue; \
@ -42,7 +42,7 @@
*(out) = (double)(left)[0] op(right)[i]; \
} \
} else if ((len2) == 1) { \
for (; i >= 0 && i < (len1); i += step, (out) += step) { \
for (; i >= 0 && i < (len1); i += step, (out) += 1) { \
if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \
setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \
continue; \
@ -58,7 +58,7 @@
int32_t step = (_ord == TSQL_SO_ASC) ? 1 : -1; \
\
if (len1 == (len2)) { \
for (; i >= 0 && i < (len2); i += step, (out) += step) { \
for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
if (isNull((char *)&(left[i]), _left_type) || isNull((char *)&(right[i]), _right_type)) { \
setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \
continue; \
@ -66,7 +66,7 @@
*(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \
} \
} else if (len1 == 1) { \
for (; i >= 0 && i < (len2); i += step, (out) += step) { \
for (; i >= 0 && i < (len2); i += step, (out) += 1) { \
if (isNull((char *)(left), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \
setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \
continue; \
@ -74,7 +74,7 @@
*(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \
} \
} else if ((len2) == 1) { \
for (; i >= 0 && i < len1; i += step, (out) += step) { \
for (; i >= 0 && i < len1; i += step, (out) += 1) { \
if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)(right), _right_type)) { \
setNull((char *)(out), _res_type, tDataTypeDesc[_res_type].nSize); \
continue; \
@ -112,7 +112,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi
int32_t step = (order == TSQL_SO_ASC) ? 1 : -1;
if (numLeft == numRight) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -121,7 +121,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[i] + pRight[i];
}
} else if (numLeft == 1) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -130,7 +130,7 @@ void calc_fn_i32_i32_add(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[0] + pRight[i];
}
} else if (numRight == 1) {
for (; i >= 0 && i < numLeft; i += step, pOutput += step) {
for (; i >= 0 && i < numLeft; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -310,7 +310,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi
int32_t step = (order == TSQL_SO_ASC) ? 1 : -1;
if (numLeft == numRight) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)&(pOutput[i]), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -318,7 +318,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[i] - pRight[i];
}
} else if (numLeft == 1) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -326,7 +326,7 @@ void calc_fn_i32_i32_sub(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[0] - pRight[i];
}
} else if (numRight == 1) {
for (; i >= 0 && i < numLeft; i += step, pOutput += step) {
for (; i >= 0 && i < numLeft; i += step, pOutput += 1) {
if (isNull((char *)&pLeft[i], TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -521,7 +521,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num
int32_t step = (order == TSQL_SO_ASC) ? 1 : -1;
if (numLeft == numRight) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -530,7 +530,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num
*pOutput = (double)pLeft[i] * pRight[i];
}
} else if (numLeft == 1) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -539,7 +539,7 @@ void calc_fn_i32_i32_multi(void *left, void *right, int32_t numLeft, int32_t num
*pOutput = (double)pLeft[0] * pRight[i];
}
} else if (numRight == 1) {
for (; i >= 0 && i < numLeft; i += step, pOutput += step) {
for (; i >= 0 && i < numLeft; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -719,7 +719,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi
int32_t step = (order == TSQL_SO_ASC) ? 1 : -1;
if (numLeft == numRight) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -728,7 +728,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[i] / pRight[i];
}
} else if (numLeft == 1) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -737,7 +737,7 @@ void calc_fn_i32_i32_div(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[0] / pRight[i];
}
} else if (numRight == 1) {
for (; i >= 0 && i < numLeft; i += step, pOutput += step) {
for (; i >= 0 && i < numLeft; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -933,7 +933,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi
int32_t step = (order == TSQL_SO_ASC) ? 1 : -1;
if (numLeft == numRight) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -942,7 +942,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i];
}
} else if (numLeft == 1) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -951,7 +951,7 @@ void calc_fn_i32_i32_rem(void *left, void *right, int32_t numLeft, int32_t numRi
*pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i];
}
} else if (numRight == 1) {
for (; i >= 0 && i < numLeft; i += step, pOutput += step) {
for (; i >= 0 && i < numLeft; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -991,7 +991,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh
int32_t step = (order == TSQL_SO_ASC) ? 1 : -1;
if (numLeft == numRight) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)(pOutput), TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -1000,7 +1000,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh
*pOutput = (double)pLeft[i] - ((int64_t)(((double)pLeft[i]) / pRight[i])) * pRight[i];
}
} else if (numLeft == 1) {
for (; i >= 0 && i < numRight; i += step, pOutput += step) {
for (; i >= 0 && i < numRight; i += step, pOutput += 1) {
if (isNull((char *)(pLeft), TSDB_DATA_TYPE_INT) || isNull((char *)&(pRight[i]), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
@ -1009,7 +1009,7 @@ void calc_fn_i32_d_rem(void *left, void *right, int32_t numLeft, int32_t numRigh
*pOutput = (double)pLeft[0] - ((int64_t)(((double)pLeft[0]) / pRight[i])) * pRight[i];
}
} else if (numRight == 1) {
for (; i >= 0 && i < numLeft; i += step, pOutput += step) {
for (; i >= 0 && i < numLeft; i += step, pOutput += 1) {
if (isNull((char *)&(pLeft[i]), TSDB_DATA_TYPE_INT) || isNull((char *)(pRight), TSDB_DATA_TYPE_INT)) {
setNull((char *)pOutput, TSDB_DATA_TYPE_DOUBLE, tDataTypeDesc[TSDB_DATA_TYPE_DOUBLE].nSize);
continue;
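
The one change repeated through all of these hunks is easy to miss: the input index `i` still walks forward or backward with the query order (`step`), but the output pointer now always advances by one slot (`+= 1` instead of `+= step`), so results are packed front-to-back regardless of traversal direction. A small self-contained illustration of the corrected loop shape (a toy function, not taken from the source):

```c
#include <stdio.h>

/* Toy version of the fixed pattern: i follows the traversal order, while
 * the output pointer advances by exactly one slot per produced value. */
static void add_i32(const int *left, const int *right, int n,
                    double *out, int ascending) {
  int step = ascending ? 1 : -1;
  int i    = ascending ? 0 : n - 1;
  for (; i >= 0 && i < n; i += step, out += 1) {
    *out = (double)left[i] + right[i];
  }
}

int main(void) {
  int    l[3] = {1, 2, 3}, r[3] = {10, 20, 30};
  double out[3];
  add_i32(l, r, 3, out, 0);                              /* descending order */
  for (int k = 0; k < 3; k++) printf("%.0f ", out[k]);   /* prints: 33 22 11 */
  printf("\n");
  return 0;
}
```

Under the old `(out) += step`, a descending scan stepped the output pointer backwards from its starting slot, writing before the beginning of the buffer.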


@ -48,6 +48,7 @@ static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
extern int tsTscEnableRecordSql;
extern int tsNumOfLogLines;
void taosInitNote(int numOfNoteLines, int maxNotes, char* lable);
void deltaToUtcInitOnce();
void tscCheckDiskUsage(void *para, void *unused) {
taosGetDisk();
@ -60,6 +61,7 @@ void taos_init_imp() {
SRpcInit rpcInit;
srand(taosGetTimestampSec());
deltaToUtcInitOnce();
if (tscEmbedded == 0) {
/*
@ -93,7 +95,6 @@ void taos_init_imp() {
taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note");
}
#ifdef CLUSTER
tscMgmtIpList.numOfIps = 2;
strcpy(tscMgmtIpList.ipstr[0], tsMasterIp);
tscMgmtIpList.ip[0] = inet_addr(tsMasterIp);
@ -106,7 +107,6 @@ void taos_init_imp() {
strcpy(tscMgmtIpList.ipstr[2], tsSecondIp);
tscMgmtIpList.ip[2] = inet_addr(tsSecondIp);
}
#endif
tscInitMsgs();
slaveIndex = rand();
@ -198,7 +198,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
switch (option) {
case TSDB_OPTION_CONFIGDIR:
cfg = tsGetConfigOption("configDir");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
strncpy(configDir, pStr, TSDB_FILENAME_LEN);
cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION;
tscPrint("set config file directory:%s", pStr);
@ -210,7 +212,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_SHELL_ACTIVITY_TIMER:
cfg = tsGetConfigOption("shellActivityTimer");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
tsShellActivityTimer = atoi(pStr);
if (tsShellActivityTimer < 1) tsShellActivityTimer = 1;
if (tsShellActivityTimer > 3600) tsShellActivityTimer = 3600;
@ -224,13 +228,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_LOCALE: { // set locale
cfg = tsGetConfigOption("locale");
assert(cfg != NULL);
size_t len = strlen(pStr);
if (len == 0 || len > TSDB_LOCALE_LEN) {
tscPrint("Invalid locale:%s, use default", pStr);
return -1;
}
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
char sep = '.';
if (strlen(tsLocale) == 0) { // locale does not set yet
@ -285,13 +291,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_CHARSET: {
/* set charset will override the value of charset, assigned during system locale changed */
cfg = tsGetConfigOption("charset");
assert(cfg != NULL);
size_t len = strlen(pStr);
if (len == 0 || len > TSDB_LOCALE_LEN) {
tscPrint("failed to set charset:%s", pStr);
return -1;
}
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (taosValidateEncodec(pStr)) {
if (strlen(tsCharset) == 0) {
tscPrint("charset is set:%s", pStr);
@ -314,7 +322,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_TIMEZONE:
cfg = tsGetConfigOption("timezone");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
strcpy(tsTimezone, pStr);
tsSetTimeZone();
cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION;
@ -327,7 +337,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_SOCKET_TYPE:
cfg = tsGetConfigOption("sockettype");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_UDP) != 0 && strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_TCP) != 0) {
tscError("only 'tcp' or 'udp' allowed for configuring the socket type");
return -1;
@ -340,6 +352,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
break;
default:
// TODO return the correct error code to client in the format for taos_errstr()
tscError("Invalid option %d", option);
return -1;
}
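
The pattern these hunks converge on (look the config item up, `assert` it exists, then honor the new value only while `cfgStatus <= TSDB_CFG_CSTATUS_OPTION`) means client options must be applied before anything advances the status further. A hedged sketch of the intended call order through the public `taos_options()` wrapper, using option names that appear in the switch above:

```c
#include <taos.h>

int main(void) {
  /* Apply client-side options before initialization/connection; once a
   * config item's status passes TSDB_CFG_CSTATUS_OPTION, these calls
   * leave the existing value in place. */
  taos_options(TSDB_OPTION_CONFIGDIR, "/etc/taos");
  taos_options(TSDB_OPTION_LOCALE,    "en_US.UTF-8");
  taos_options(TSDB_OPTION_CHARSET,   "UTF-8");
  taos_options(TSDB_OPTION_TIMEZONE,  "UTC");

  taos_init();
  /* ... taos_connect() and queries go here ... */
  return 0;
}
```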

File diff suppressed because it is too large


@ -27,11 +27,11 @@ import "C"
import (
"database/sql/driver"
"errors"
"strconv"
"unsafe"
"fmt"
"io"
"strconv"
"time"
"unsafe"
)
/******************************************************************************
@ -91,6 +91,7 @@ func (rows *taosSqlRows) readRow(dest []driver.Value) error {
row := C.taos_fetch_row(result)
if row == nil {
rows.rs.done = true
C.taos_free_result(result)
rows.mc = nil
return io.EOF
}

src/connector/grafana/tdengine/.gitignore (vendored executable file, 37 lines)

@ -0,0 +1,37 @@
node_modules
npm-debug.log
coverage/
.aws-config.json
awsconfig
/emails/dist
/public_gen
/tmp
vendor/phantomjs/phantomjs
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
docs/GITCOMMIT
docs/changed-files
docs/changed-files
# locally required config files
public/css/*.min.css
# Editor junk
*.sublime-workspace
*.swp
.idea/
*.iml
/data/*
/bin/*
conf/custom.ini
fig.yml
profile.cov
grafana
.notouch
# Test artifacts
/dist/test/


@ -0,0 +1,14 @@
{
"esnext": true,
"disallowImplicitTypeConversion": ["string"],
"disallowKeywords": ["with"],
"disallowMultipleLineBreaks": true,
"disallowMixedSpacesAndTabs": true,
"disallowTrailingWhitespace": true,
"requireSpacesInFunctionExpression": {
"beforeOpeningCurlyBrace": true
},
"disallowSpacesInsideArrayBrackets": true,
"disallowSpacesInsideParentheses": true,
"validateIndentation": 2
}


@ -0,0 +1,85 @@
module.exports = function(grunt) {
require('load-grunt-tasks')(grunt);
grunt.loadNpmTasks('grunt-execute');
grunt.loadNpmTasks('grunt-contrib-clean');
grunt.initConfig({
clean: ["dist"],
copy: {
src_to_dist: {
cwd: 'src',
expand: true,
src: ['**/*', '!**/*.js', '!**/*.scss'],
dest: 'dist'
},
dashboard_to_dist: {
expand: true,
src: ['dashboard/*'],
dest: 'dist'
},
pluginDef: {
expand: true,
src: ['README.md'],
dest: 'dist'
}
},
watch: {
rebuild_all: {
files: ['src/**/*'],
tasks: ['default'],
options: {spawn: false}
}
},
babel: {
options: {
sourceMap: true,
presets: ['env'],
plugins: ['transform-object-rest-spread']
},
dist: {
files: [{
cwd: 'src',
expand: true,
src: ['**/*.js'],
dest: 'dist',
ext:'.js'
}]
},
distTestNoSystemJs: {
files: [{
cwd: 'src',
expand: true,
src: ['**/*.js'],
dest: 'dist/test',
ext:'.js'
}]
},
distTestsSpecsNoSystemJs: {
files: [{
expand: true,
cwd: 'spec',
src: ['**/*.js'],
dest: 'dist/test/spec',
ext:'.js'
}]
}
},
mochaTest: {
test: {
options: {
reporter: 'spec'
},
src: ['dist/test/spec/test-main.js', 'dist/test/spec/*_spec.js']
}
}
});
grunt.registerTask('default', ['clean', 'copy:src_to_dist', 'copy:dashboard_to_dist', 'copy:pluginDef', 'babel', 'mochaTest']);
};


@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.


@ -61,7 +61,7 @@ Example response
Example request
``` javascript
<null> get request
Get request /heartbeat
```
Example response
@ -70,3 +70,27 @@ Example response
"message": "Grafana server receive a quest from you!"
}
```
### Dev setup
This plugin requires Node.js 6.10.0.
``` javascript
npm install -g yarn
yarn install
npm run build
```
### Import Dashboard
After logging in at `http://localhost:3000`, you can import the TDengine demo dashboard to monitor the system metrics.
Import `dashboard/tdengine-grafana.json`:
![import_dashboard](dashboard/import_dashboard.png)
After the import finishes:
![import_dashboard](dashboard/tdengine_dashboard.png)

Binary image file added (98 KiB); not shown.

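Each panel in the dashboard JSON below drives its data through a plain TDengine SQL statement (the `sql` field of its target) against the `log.dn` monitoring table. As a sanity check, the same statement can be run through the C client directly; this sketch reuses the `taos_query()`/`taos_use_result()` pattern visible in the client code earlier in this diff, with placeholder connection parameters:

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "log", 0);
  if (taos == NULL) return 1;

  /* Same statement the "req select" panel issues. */
  if (taos_query(taos, "select sum(req_select) from log.dn "
                       "where ts >= now-1h and ts < now interval(1m)") != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(taos));
    taos_close(taos);
    return 1;
  }

  TAOS_RES *res = taos_use_result(taos);
  TAOS_ROW  row;
  while (res != NULL && (row = taos_fetch_row(res)) != NULL) {
    /* first column: window timestamp, second: sum(req_select) */
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```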

@ -0,0 +1,588 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 3,
"links": [],
"panels": [
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "TDengine",
"description": "total select request per minute last hour",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 6,
"w": 12,
"x": 0,
"y": 0
},
"id": 8,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "次数/min",
"postfixFontSize": "20%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": true,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "req_select",
"refId": "A",
"sql": "select sum(req_select) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "120,240",
"timeFrom": null,
"timeShift": null,
"title": "req select",
"type": "singlestat",
"valueFontSize": "150%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": true,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": "TDengine",
"description": "total insert request per minute for last hour",
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 6,
"w": 12,
"x": 12,
"y": 0
},
"id": 6,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"options": {},
"postfix": "次数/min",
"postfixFontSize": "20%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": true,
"ymax": null,
"ymin": null
},
"tableColumn": "",
"targets": [
{
"alias": "req_insert",
"refId": "A",
"sql": "select sum(req_insert) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": "110,240",
"timeFrom": null,
"timeShift": null,
"title": "req insert",
"type": "singlestat",
"valueFontSize": "150%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "total"
},
{
"datasource": "TDengine",
"description": "taosd max memery last 10 minutes",
"gridPos": {
"h": 6,
"w": 8,
"x": 0,
"y": 6
},
"id": 12,
"options": {
"fieldOptions": {
"calcs": [
"mean"
],
"defaults": {
"mappings": [],
"max": 4096,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
},
{
"color": "#EAB839",
"value": 2048
}
],
"unit": "decmbytes"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "mem_taosd",
"refId": "A",
"sql": "select max(mem_taosd) from log.dn where ts >= now -10m and ts < now",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "taosd memery",
"type": "gauge"
},
{
"datasource": "TDengine",
"description": "max System Memory last 1 hour",
"gridPos": {
"h": 6,
"w": 8,
"x": 8,
"y": 6
},
"id": 10,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"mappings": [],
"max": 4,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "semi-dark-orange",
"value": 60
},
{
"color": "dark-red",
"value": 80
}
],
"title": "",
"unit": "decmbytes"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "mem_system",
"refId": "A",
"sql": "select max(mem_system) from log.dn where ts >= now -10h and ts < now",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "system memory",
"type": "gauge"
},
{
"datasource": "TDengine",
"description": "avg band speed last one minute",
"gridPos": {
"h": 6,
"w": 8,
"x": 16,
"y": 6
},
"id": 14,
"options": {
"fieldOptions": {
"calcs": [
"last"
],
"defaults": {
"mappings": [],
"max": 8192,
"min": 0,
"thresholds": [
{
"color": "green",
"value": null
},
{
"color": "#EAB839",
"value": 4916
},
{
"color": "red",
"value": 6554
}
],
"unit": "Kbits"
},
"override": {},
"values": false
},
"orientation": "auto",
"showThresholdLabels": true,
"showThresholdMarkers": true
},
"pluginVersion": "6.4.3",
"targets": [
{
"alias": "band_speed",
"refId": "A",
"sql": "select avg(band_speed) from log.dn where ts >= now-1h and ts < now interval(1m)",
"target": "select metric",
"type": "timeserie"
}
],
"timeFrom": null,
"timeShift": null,
"title": "band speed",
"type": "gauge"
},
{
"aliasColors": {},
"bars": false,
"cacheTimeout": null,
"dashLength": 10,
"dashes": false,
"datasource": "TDengine",
"description": "monitor system cpu",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 12
},
"hideTimeOverride": true,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pluginVersion": "6.4.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "cpu_system11",
"hide": false,
"refId": "A",
"sql": "select avg(cpu_system) from log.dn where ts >= now-1h and ts < now interval(1s)",
"target": "select metric",
"type": "timeserie"
},
{
"alias": "cpu_taosd",
"hide": false,
"refId": "B",
"sql": "select avg(cpu_taosd) from log.dn where ts >= now-1h and ts < now interval(1s)",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": "1h",
"timeRegions": [],
"timeShift": "30s",
"title": "cpu_system",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"decimals": null,
"format": "percent",
"label": "使用占比",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": false
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "TDengine",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 11,
"w": 12,
"x": 12,
"y": 12
},
"id": 18,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"alias": "",
"refId": "A",
"sql": "select avg(disk_used) disk_used from log.dn where ts >= $from and ts < $to interval(1s) group by ipaddr",
"target": "select metric",
"type": "timeserie"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "avg_disk_used",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "decgbytes",
"label": "",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 20,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "TDengine",
"uid": "FE-vpe0Wk",
"version": 1
}
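Every panel above reads its data through the plain TDengine SQL in each target's `sql` field (note the `$from`/`$to` placeholders in the last panel, which the datasource substitutes with the selected time range at query time). A small sketch to enumerate those queries, assuming the dashboard is saved as `dashboard/tdengine-grafana.json`:

``` javascript
// List every panel query in the dashboard JSON above.
// Assumes the file is saved as dashboard/tdengine-grafana.json.
const fs = require('fs');

const dashboard = JSON.parse(fs.readFileSync('dashboard/tdengine-grafana.json', 'utf8'));

for (const panel of dashboard.panels) {
  for (const target of panel.targets || []) {
    console.log(`${panel.title} [${target.refId}]: ${target.sql}`);
  }
}
```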

Binary file not shown (new image, 173 KiB).

View File

@ -1,170 +0,0 @@
'use strict';
System.register(['lodash'], function (_export, _context) {
"use strict";
var _, _createClass, GenericDatasource;
function strTrim(str) {
return str.replace(/^\s+|\s+$/gm,'');
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
return {
setters: [function (_lodash) {
_ = _lodash.default;
}],
execute: function () {
_createClass = function () {
function defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);
if (staticProps) defineProperties(Constructor, staticProps);
return Constructor;
};
}();
_export('GenericDatasource', GenericDatasource = function () {
function GenericDatasource(instanceSettings, $q, backendSrv, templateSrv) {
_classCallCheck(this, GenericDatasource);
this.type = instanceSettings.type;
this.url = instanceSettings.url;
this.name = instanceSettings.name;
this.q = $q;
this.backendSrv = backendSrv;
this.templateSrv = templateSrv;
//this.withCredentials = instanceSettings.withCredentials;
this.headers = { 'Content-Type': 'application/json' };
var taosuser = instanceSettings.jsonData.user;
var taospwd = instanceSettings.jsonData.password;
if (taosuser == null || taosuser === "") {
taosuser = "root";
}
if (taospwd == null || taospwd === "") {
taospwd = "taosdata";
}
this.headers.Authorization = "Basic " + this.encode(taosuser + ":" + taospwd);
}
_createClass(GenericDatasource, [{
key: 'encode',
value: function encode(input) {
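// Hand-rolled Base64 encoder; its output builds the HTTP Basic
// Authorization header assembled in the constructor above.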
var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var output = "";
var chr1, chr2, chr3, enc1, enc2, enc3, enc4;
var i = 0;
while (i < input.length) {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + _keyStr.charAt(enc3) + _keyStr.charAt(enc4);
}
return output;
}
}, {
key: 'generateSql',
value: function generateSql(sql, queryStart, queryEnd, intervalMs) {
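// Substitute Grafana's time-range placeholders ($from/$begin, $to/$end)
// and $interval into the panel SQL; the 'a' appended below is TDengine's
// millisecond duration unit.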
if (queryStart == null) {
queryStart = "now-1h";
}
if (queryEnd == null) {
queryEnd = "now";
}
if (intervalMs == null) {
intervalMs = "20000";
}
intervalMs += "a";
sql = sql.replace(/^\s+|\s+$/gm, '');
sql = sql.replace("$from", "'" + queryStart + "'");
sql = sql.replace("$begin", "'" + queryStart + "'");
sql = sql.replace("$to", "'" + queryEnd + "'");
sql = sql.replace("$end", "'" + queryEnd + "'");
sql = sql.replace("$interval", intervalMs);
return sql;
}
}, {
key: 'query',
value: function query(options) {
var querys = [];
for (var i = 0; i < options.targets.length; ++i) {
var query = {};
query.refId = options.targets[i].refId;
query.alias = options.targets[i].alias;
if (query.alias == null) {
query.alias = "";
}
//query.sql = this.generateSql(options.targets[i].sql, options.range.raw.from, options.range.raw.to, options.intervalMs);
query.sql = this.generateSql(options.targets[i].sql, options.range.from.toISOString(), options.range.to.toISOString(), options.intervalMs);
console.log(query.sql);
querys.push(query);
}
if (querys.length <= 0) {
return this.q.when({ data: [] });
}
return this.doRequest({
url: this.url + '/grafana/query',
data: querys,
method: 'POST'
});
}
}, {
key: 'testDatasource',
value: function testDatasource() {
return this.doRequest({
url: this.url + '/grafana/heartbeat',
method: 'GET'
}).then(function (response) {
if (response.status === 200) {
return { status: "success", message: "TDengine Data source is working", title: "Success" };
}
return { status: "error", message: "TDengine Data source is not working", title: "Error" };
});
}
}, {
key: 'doRequest',
value: function doRequest(options) {
options.headers = this.headers;
//console.log(options);
return this.backendSrv.datasourceRequest(options);
}
}]);
return GenericDatasource;
}());
_export('GenericDatasource', GenericDatasource);
}
};
});
//# sourceMappingURL=datasource.js.map
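For reference, a standalone replay of the substitution `generateSql` performs, with an illustrative input and output (a sketch; the real timestamps come from Grafana's selected time range):

``` javascript
// Standalone replay of generateSql's placeholder substitution (illustration only).
function generateSql(sql, queryStart, queryEnd, intervalMs) {
  if (queryStart == null) queryStart = "now-1h";
  if (queryEnd == null) queryEnd = "now";
  if (intervalMs == null) intervalMs = "20000";
  intervalMs += "a"; // 'a' = milliseconds in TDengine's interval() syntax
  sql = sql.replace(/^\s+|\s+$/gm, "");
  sql = sql.replace("$from", "'" + queryStart + "'").replace("$begin", "'" + queryStart + "'");
  sql = sql.replace("$to", "'" + queryEnd + "'").replace("$end", "'" + queryEnd + "'");
  return sql.replace("$interval", intervalMs);
}

console.log(generateSql(
  "select avg(disk_used) from log.dn where ts >= $from and ts < $to interval($interval)",
  "2020-02-26T03:00:00.000Z", "2020-02-26T04:00:00.000Z", 20000));
// -> select avg(disk_used) from log.dn where ts >= '2020-02-26T03:00:00.000Z'
//    and ts < '2020-02-26T04:00:00.000Z' interval(20000a)
```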

View File

@ -0,0 +1,96 @@
TDengine Datasource - built by Taosdata Inc. www.taosdata.com
The TDengine backend server implements two URLs:
* `/heartbeat` returns 200 OK. Used for "Test connection" on the datasource config page.
* `/query` returns data for the input SQL statements.
## Installation
To install this plugin, copy the data source directory to /var/lib/grafana/plugins/ and restart grafana-server. The new data source will then be available in the data source type dropdown of the Add Data Source view.
```
cp -r <tdengine-extrach-dir>/connector/grafana/tdengine /var/lib/grafana/plugins/
sudo service grafana-server restart
```
### Query API
Example request
``` javascript
[{
"refId": "A",
"alias": "taosd-memory",
"sql": "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)"
},
{
"refId": "B",
"alias": "system-memory",
"sql": "select avg(mem_system) from sys.dn where ts > now-5m and ts < now interval(500a)"
}]
```
Example response
``` javascript
[{
"datapoints": [
[206.488281, 1538137825000],
[206.488281, 1538137855000],
[206.488281, 1538137885500],
[210.609375, 1538137915500],
[210.867188, 1538137945500]
],
"refId": "A",
"target": "taosd-memory"
},
{
"datapoints": [
[2910.218750, 1538137825000],
[2912.265625, 1538137855000],
[2912.437500, 1538137885500],
[2916.644531, 1538137915500],
[2917.066406, 1538137945500]
],
"refId": "B",
"target": "system-memory"
}]
```
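To exercise the query endpoint outside Grafana, a request like the following works (a sketch: host, port, and the default `root:taosdata` credentials are assumptions; adjust them to your deployment):

``` javascript
// POST the example panel query to the backend's /grafana/query endpoint.
// localhost:6041 and root:taosdata are assumptions -- adjust for your setup.
const http = require('http');

const payload = JSON.stringify([
  { refId: 'A', alias: 'taosd-memory',
    sql: "select avg(mem_taosd) from sys.dn where ts > now-5m and ts < now interval(500a)" },
]);

const req = http.request({
  host: 'localhost', port: 6041, path: '/grafana/query', method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: 'Basic ' + Buffer.from('root:taosdata').toString('base64'),
  },
}, (res) => {
  let body = '';
  res.on('data', (chunk) => { body += chunk; });
  res.on('end', () => console.log(body)); // array of {refId, target, datapoints}
});

req.write(payload);
req.end();
```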
### Heartbeat API
Example request
```
GET /heartbeat
```
Example response
``` javascript
{
"message": "Grafana server receive a quest from you!"
}
```
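The heartbeat check is the same idea with a plain GET (same host/port assumptions as above):

``` javascript
// Probe the backend; a 200 response means it is reachable.
const http = require('http');

http.get('http://localhost:6041/grafana/heartbeat', (res) => {
  console.log(res.statusCode === 200 ? 'backend is up' : 'unexpected status ' + res.statusCode);
});
```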
### Dev setup
This plugin requires Node.js 6.10.0.
```
npm install -g yarn
yarn install
npm run build
```
### Import Dashboard
After logging in at `http://localhost:3000`, you can import the TDengine demo dashboard to monitor system metrics.
Import `dashboard/tdengine-grafana.json`:
![import_dashboard](dashboard/import_dashboard.png)
After the import finishes:
![import_dashboard](dashboard/tdengine_dashboard.png)

Binary file not shown (new image, 98 KiB).

Some files were not shown because too many files have changed in this diff.