[TD-3178]<feature> merge develop and TD-4556

@ -131,10 +131,10 @@ cmake .. -DCPUTYPE=mips64 && cmake --build .

### On Windows platform

If you use Visual Studio 2013, please open a command window by executing "cmd.exe".

Please specify "amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.

```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

@ -122,10 +122,14 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_TD_NINGSI_60)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()

IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-O3 -Wno-error")

IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")

@ -144,7 +148,11 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)

@ -162,7 +170,14 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()

IF (TD_MEMORY_SANITIZER)
MESSAGE("memory sanitizer detected as true")
SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
ELSE ()
MESSAGE("memory sanitizer detected as false")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
ENDIF ()
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()

@ -171,7 +186,7 @@ IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/wepoll/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MsvcLibX/include)
ENDIF ()
ENDIF ()

IF (TD_WINDOWS_64)
ADD_DEFINITIONS(-D_M_X64)

@ -83,3 +83,8 @@ SET(TD_BUILD_JDBC TRUE)
IF (${BUILD_JDBC} MATCHES "false")
SET(TD_BUILD_JDBC FALSE)
ENDIF ()

SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
ENDIF ()

@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.30.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

@ -15,6 +15,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data, specially
* [Command-line Tool TAOS](/getting-started#console): an easy way to access TDengine
* [Experience Lightning Speed](/getting-started#demo): run a sample program to quickly experience efficient data insertion and query
* [List of Supported Platforms](/getting-started#platforms): the list of platforms supported by the TDengine server and client
* [Kubernetes Deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): detailed instructions for deploying TDengine in a Kubernetes environment

## [Overall Architecture](/architecture)

@ -176,7 +176,7 @@ The logical structure diagram of the TDengine distributed architecture is as follows:

**Communication mode:** Communication among the data nodes of the TDengine system, and between the application driver and the data nodes, is carried out over TCP/UDP. Considering IoT scenarios, where data-writing packets are generally small, TDengine uses UDP in addition to TCP, because UDP is more efficient and not limited by the number of connections. TDengine implements its own timeout, retransmission, and acknowledgement mechanisms to ensure reliable transmission over UDP. For packets with a data volume below 15K, UDP is used for transmission; packets above 15K, or query-type operations, automatically use TCP. At the same time, TDengine automatically compresses/decompresses and digitally signs/authenticates the data according to the configuration and the data packet. For data replication among data nodes, only TCP is used.

**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system automatically uses the computer's hostname as its FQDN. If the node has no FQDN configured, you can set its configuration parameter fqdn directly to its IP address. However, using an IP is not recommended, because the IP address can change, and once it does, the cluster stops working properly. The EP (End Point) of a data node consists of FQDN + Port. When using FQDNs, you must ensure that the DNS service works properly, or configure the hosts file on the nodes and on the nodes where applications are located. In addition, the length of this parameter value must be kept within 96 characters.

**Port configuration:** The external port of a data node is determined by the TDengine system configuration parameter serverPort; the port for internal cluster communication is serverPort+5. Data replication among data nodes in the cluster also occupies a TCP port, serverPort+10. To support efficient multi-threaded processing of UDP data, each internal and external UDP connection occupies 5 consecutive ports. Therefore, the total port range of a data node is serverPort to serverPort+10, 11 TCP/UDP ports in total. (There may also be ports used by RESTful and Arbitrator, in which case there are 13 in total.) When deploying, make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort. (For details on ports, see [TDengine 2.0 Port Description](https://www.taosdata.com/cn/documentation/faq#port).)

@ -325,10 +325,12 @@ for (int i = 0; i < numOfRows; i++){
}
s.setString(2, s2, 10);

// After AddBatch, the cache is not cleared. To avoid confusion, it is not recommended to bind a new batch of data before ExecuteBatch:
s.columnDataAddBatch();
// Execute the statement with the bound data:
s.columnDataExecuteBatch();
// Clear the cache after execution. Once cleared, the current object can be reused to bind a new batch of data (with a new table name, new TAGS values, and new VALUES):
s.columnDataClearBatch();
// When everything is done, release the resources:
s.columnDataCloseBatch();
```

@ -361,6 +363,7 @@ public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
Both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition.

### <a class="anchor" id="subscribe"></a>Subscription

@ -301,12 +301,14 @@ TDengine's asynchronous APIs all use a non-blocking calling mode. Applications can use multiple threads
2. Call `taos_stmt_prepare` to parse the INSERT statement;
3. If the INSERT statement reserves a placeholder for the table name but not for the TAGS, call `taos_stmt_set_tbname` to set the table name;
4. If the INSERT statement reserves placeholders for both the table name and the TAGS (for example, when the INSERT statement uses the auto-create-table mode), call `taos_stmt_set_tbname_tags` to set the table name and the TAGS values;
5. Call `taos_stmt_bind_param_batch` to set the VALUES in a multi-column manner, or call `taos_stmt_bind_param` to set the VALUES one row at a time;
6. Call `taos_stmt_add_batch` to add the currently bound parameters to the batch;
7. Steps 3-6 can be repeated to add more data rows to the batch;
8. Call `taos_stmt_execute` to execute the prepared batch command;
9. When finished, call `taos_stmt_close` to release all resources.

Note: if `taos_stmt_execute` succeeds and the SQL statement does not need to change, the parsing result of `taos_stmt_prepare` can be reused, going directly to steps 3-6 to bind new data. If execution fails, however, it is not recommended to continue working in the current context; instead, release the resources and start over from the `taos_stmt_init` step.
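
To make steps 1-9 concrete, below is a minimal C sketch of the multi-column binding flow. It is a sketch under stated assumptions: the sub-table d1001 with schema (ts TIMESTAMP, current FLOAT) is assumed to already exist, and the TAOS_MULTI_BIND fields follow the definition shown later in this section.

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "taos.h"

// Sketch of the batch-binding workflow described above. The table name and
// column layout (d1001: ts TIMESTAMP, current FLOAT) are illustrative assumptions.
void insert_batch(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);                      // step 1
  const char *sql = "INSERT INTO ? VALUES (?, ?)";
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));    // step 2
  taos_stmt_set_tbname(stmt, "d1001");                         // step 3: bind the table name

  int64_t ts[2]      = {1626861392589LL, 1626861392590LL};
  float   current[2] = {10.3f, 10.5f};
  char    is_null[2] = {0, 0};
  int32_t ts_len[2]  = {sizeof(int64_t), sizeof(int64_t)};
  int32_t cur_len[2] = {sizeof(float), sizeof(float)};

  TAOS_MULTI_BIND params[2];
  memset(params, 0, sizeof(params));
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = ts;
  params[0].buffer_length = sizeof(int64_t);
  params[0].length        = ts_len;
  params[0].is_null       = is_null;
  params[0].num           = 2;                                 // two rows in this batch
  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = current;
  params[1].buffer_length = sizeof(float);
  params[1].length        = cur_len;
  params[1].is_null       = is_null;
  params[1].num           = 2;

  taos_stmt_bind_param_batch(stmt, params);                    // step 5: bind both columns at once
  taos_stmt_add_batch(stmt);                                   // step 6: add to the batch
  if (taos_stmt_execute(stmt) != 0) {                          // step 8
    printf("execute failed: %s\n", taos_stmt_errstr(stmt));
  }
  taos_stmt_close(stmt);                                       // step 9
}
```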

In addition to C/C++, the Java JNI Connector of TDengine also supports the parameter binding interface; for details, see [Java usage of the parameter binding interface](https://www.taosdata.com/cn/documentation/connector/java#stmt-java).

The specific functions related to this interface are as follows (you can also refer to [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c) for examples of how they are used):

@ -338,17 +340,17 @@ typedef struct TAOS_BIND {

- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`

  (Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
  When the table name in the SQL statement uses a `?` placeholder, this function can be used to bind a concrete table name.

- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`

  (Added in version 2.1.2.0; only supported for replacing parameter values in INSERT statements)
  When both the table name and the TAGS in the SQL statement use `?` placeholders, this function can be used to bind a concrete table name and concrete TAGS values. The most typical use case is an INSERT statement that relies on the auto-create-table feature (the current version does not support specifying individual TAGS columns). The number of columns in the tags parameter must exactly match the number of TAGS required by the SQL statement.

- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)`

  (Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
  Passes the data to be bound in a multi-column manner; the order and number of the data columns passed here must exactly match the VALUES parameters in the SQL statement. The specific definition of TAOS_MULTI_BIND is as follows:

```c

@ -378,6 +380,11 @@ typedef struct TAOS_MULTI_BIND {

  Execution finished; releases all resources.

- `char * taos_stmt_errstr(TAOS_STMT *stmt)`

  (Added in version 2.1.3.0)
  Used to obtain the error message when another stmt API returns an error (an error code or a null pointer).
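
A hedged sketch of the recommended error-handling pattern (the wrapper function is ours, not part of the API):

```c
#include <stdio.h>
#include "taos.h"

// Execute a prepared statement and surface the error text on failure.
// Per the note earlier in this section, on error the context is released
// so the caller can restart from taos_stmt_init instead of reusing it.
static int execute_or_report(TAOS_STMT *stmt) {
  int code = taos_stmt_execute(stmt);
  if (code != 0) {
    fprintf(stderr, "taos_stmt_execute failed: %s\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
  }
  return code;
}
```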

### Continuous Query Interface

TDengine provides time-driven, real-time stream computing APIs. At specified time intervals, various real-time aggregation operations can be performed on one or more tables (data streams) in a database. The operation is simple: there are only APIs for opening and closing a stream. The specifics are as follows:
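
The function reference itself follows in the full documentation; as an orientation, here is a hedged C sketch of the open/close pattern. The SQL text and table name are illustrative assumptions, and the taos_open_stream/taos_close_stream usage follows the C connector reference (stime = 0 is taken to mean "start computing from now").

```c
#include <stdio.h>
#include "taos.h"

// Callback invoked for each result row produced by the continuous query.
static void stream_callback(void *param, TAOS_RES *res, TAOS_ROW row) {
  if (row != NULL) {
    printf("continuous query produced a new result row\n");
  }
}

// A minimal sketch under stated assumptions: table d1001 and the SQL text are illustrative.
void demo_stream(TAOS *taos) {
  void *stream = taos_open_stream(taos,
                                  "select avg(current) from d1001 interval(10s)",
                                  stream_callback,
                                  0,      // stime: start time of the computation
                                  NULL,   // user parameter handed back to the callback
                                  NULL);  // optional callback fired when the stream closes
  if (stream == NULL) {
    printf("failed to open stream\n");
    return;
  }
  // ... keep the process alive while results are delivered; then:
  taos_close_stream(stream);
}
```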

@ -99,7 +99,7 @@ taosd -C

Only some important configuration parameters are listed below; for more parameters, please see the notes in the configuration file. For a detailed introduction to each parameter and its function, please see the earlier chapters. The default values of these parameters work, and in general they do not need to be set. **Note: after the configuration is modified, the *taosd* service must be restarted for it to take effect.**

- firstEp: the end point of the first dnode in the cluster that taosd actively connects to at startup; the default value is localhost:6030.
- fqdn: the FQDN of the data node, which defaults to the first hostname configured by the operating system. If you prefer to access the node by IP address, you can set this to the node's IP address. The value of this parameter must be kept within 96 characters.
- serverPort: the port number used by taosd for external service after startup; the default value is 6030. (The port number used by the RESTful service is this value plus 11, i.e. 6041 by default.)
- dataDir: the data file directory to which all data files are written. Default: /var/lib/taos.
- logDir: the log file directory to which the client and server runtime log files are written. Default: /var/log/taos.

@ -1306,7 +1306,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P

- The maximum length of a database name is 32
- The maximum length of a table name is 192; the maximum length of each data row is 16k characters (note: each BINARY/NCHAR column in a data row additionally occupies 2 bytes of storage)
- The maximum length of a column name is 64; at most 1024 columns are allowed, with a minimum of 2 columns, the first of which must be a timestamp
- The maximum length of a tag name is 64; at most 128 tags are allowed (1 is acceptable), and the total length of the tag values in a table must not exceed 16k characters
- The maximum length of a SQL statement is 65480 characters, but it can be modified via the system configuration parameter maxSQLLength, up to 1M
- A SELECT query result may return at most 1024 columns (function calls in the statement may also occupy some column space); when the limit is exceeded, explicitly specify fewer return columns to avoid execution errors
- The number of databases, super tables, and tables is not limited by the system, only by system resources

@ -0,0 +1,141 @@

# TDengine Documentation

TDengine is a highly efficient platform to store, query, and analyze time-series data. It is specially designed and optimized for IoT, Internet of Vehicles, Industrial IoT, and IT Infrastructure and Application Monitoring. It works like a relational database, such as MySQL, but you are strongly encouraged to read through the following documentation before you experience it, especially the Data Model and Data Modeling sections. In addition to this document, you should also download and read our technology white paper. For the older TDengine version 1.6 documentation, please click here.

## [TDengine Introduction](/evaluation)

* [TDengine Introduction and Features](/evaluation#intro)
* [TDengine Use Scenarios](/evaluation#scenes)
* [TDengine Performance Metrics and Verification](/evaluation#)

## [Getting Started](/getting-started)

- [Quick Install](/getting-started#install): install via source code / package / Docker within seconds
- [Easy to Launch](/getting-started#start): start / stop TDengine with systemctl
- [Command-line](/getting-started#console): an easy way to access the TDengine server
- [Experience Lightning Speed](/getting-started#demo): run a demo and insert/query data to experience the speed
- [List of Supported Platforms](/getting-started#platforms): a list of platforms supported by the TDengine server and client

## [Overall Architecture](/architecture)

- [Data Model](/architecture#model): relational database model, but one table for one device with static tags
- [Cluster and Primary Logical Unit](/architecture#cluster): takes advantage of NoSQL, supports scale-out and high reliability
- [Storage Model and Data Partitioning/Sharding](/architecture#sharding): tag data is separated from time-series data, segmented by vnode and time
- [Data Writing and Replication Process](/architecture#replication): received records are written to WAL and cached, an acknowledgement is sent back to the client, and multiple replicas are supported
- [Caching and Persistence](/architecture#persistence): the latest records are cached in memory, but are written in columnar format with an ultra-high compression ratio
- [Data Query](/architecture#query): supports various functions, time-axis aggregation, interpolation, and multi-table aggregation

## [Data Modeling](/model)

- [Create a Database](/model#create-db): create a database for all data collection points with similar features
- [Create a Super Table (STable)](/model#create-stable): create a STable for all data collection points of the same type
- [Create a Table](/model#create-table): use the STable as a template to create a table for each data collection point

## [TAOS SQL](/taos-sql)

- [Data Types](/taos-sql#data-type): support timestamp, int, float, nchar, bool, and other types
- [Database Management](/taos-sql#management): add, drop, check databases
- [Table Management](/taos-sql#table): add, drop, check, alter tables
- [STable Management](/taos-sql#super-table): add, drop, check, alter STables
- [Tag Management](/taos-sql#tags): add, drop, alter tags
- [Inserting Records](/taos-sql#insert): write single/multiple records per table and multiple records across tables, including historical data
- [Data Query](/taos-sql#select): support time ranges, value filtering, sorting, manual paging of query results, etc.
- [SQL Functions](/taos-sql#functions): support various aggregation functions, selection functions, and calculation functions, such as avg, min, diff, etc.
- [Time Dimension Aggregation](/taos-sql#aggregation): aggregate and reduce dimensions after cutting table data by time segments
- [Boundary Restrictions](/taos-sql#limitation): restrictions on databases, tables, SQL, and others
- [Error Codes](/taos-sql/error-code): TDengine 2.0 error codes and their corresponding decimal codes

## [Efficient Data Ingestion](/insert)

- [SQL Ingestion](/insert#sql): write one or multiple records into one or multiple tables via SQL INSERT commands
- [Prometheus Ingestion](/insert#prometheus): configure Prometheus to write data directly without any code
- [Telegraf Ingestion](/insert#telegraf): configure Telegraf to write collected data directly without any code
- [EMQ X Broker](/insert#emq): configure EMQ X to write MQTT data directly without any code
- [HiveMQ Broker](/insert#hivemq): configure HiveMQ to write MQTT data directly without any code

## [Efficient Data Querying](/queries)

- [Main Query Features](/queries#queries): support various standard functions, filter conditions, and queries per time segment
- [Multi-table Aggregation Query](/queries#aggregation): use STables and tag filter conditions to perform efficient aggregation queries
- [Downsampling Query](/queries#sampling): aggregate data in successive time windows, with interpolation support

## [Advanced Features](/advanced-features)

- [Continuous Query](/advanced-features#continuous-query): based on sliding windows, the data stream is automatically queried and calculated at regular intervals
- [Data Publisher/Subscriber](/advanced-features#subscribe): subscribe to newly arrived data like in a typical messaging system
- [Cache](/advanced-features#cache): the newly arrived data of each device/table is always cached
- [Alarm Monitoring](/advanced-features#alert): automatically monitor out-of-threshold data and actively push it based on configured rules

## [Connector](/connector)

- [C/C++ Connector](/connector#c-cpp): the primary method to connect to the TDengine server, through the libtaos client library
- [Java Connector (JDBC)](/connector/java): driver for connecting to the server from Java applications using the JDBC API
- [Python Connector](/connector#python): driver for connecting to the TDengine server from Python applications
- [RESTful Connector](/connector#restful): a simple way to interact with TDengine via HTTP
- [Go Connector](/connector#go): driver for connecting to the TDengine server from Go applications
- [Node.js Connector](/connector#nodejs): driver for connecting to the TDengine server from Node.js applications
- [C# Connector](/connector#csharp): driver for connecting to the TDengine server from C# applications
- [Windows Client](https://www.taosdata.com/blog/2019/07/26/514.html): compile your own Windows client, which is required by various connectors on the Windows environment

## [Connections with Other Tools](/connections)

- [Grafana](/connections#grafana): query data saved in TDengine and visualize it
- [Matlab](/connections#matlab): access data stored in the TDengine server via JDBC configured within Matlab
- [R](/connections#r): access data stored in the TDengine server via JDBC configured within R
- [IDEA Database](https://www.taosdata.com/blog/2020/08/27/1767.html): use TDengine visually through the IDEA database management tool

## [Installation and Management of TDengine Cluster](/cluster)

- [Preparation](/cluster#prepare): important considerations before deploying TDengine for production usage
- [Create Your First Node](/cluster#node-one): a simple-to-follow quick setup
- [Create Subsequent Nodes](/cluster#node-other): configure taos.cfg for new nodes to add more to the existing cluster
- [Node Management](/cluster#management): add, delete, and check nodes in the cluster
- [High-availability of Vnode](/cluster#high-availability): implement high availability of vnodes through multi-replicas
- [Mnode Management](/cluster#mnode): created automatically by the system without any manual intervention
- [Load Balancing](/cluster#load-balancing): performed automatically once the number of nodes or the load changes
- [Offline Node Processing](/cluster#offline): any node offline for more than a certain period will be removed from the cluster
- [Arbitrator](/cluster#arbitrator): used in the case of an even number of replicas to prevent split-brain

## [TDengine Operation and Maintenance](/administrator)

- [Capacity Planning](/administrator#planning): estimate hardware resources based on scenarios
- [Fault Tolerance and Disaster Recovery](/administrator#tolerance): set the correct WAL and number of data replicas
- [System Configuration](/administrator#config): port, cache size, file block size, and other system configurations
- [User Management](/administrator#user): add/delete TDengine users, modify user passwords
- [Import Data](/administrator#import): import data into TDengine from either a script or a CSV file
- [Export Data](/administrator#export): export data either from the TDengine shell or with the taosdump tool
- [System Monitor](/administrator#status): monitor the system connections, queries, streaming calculations, logs, and events
- [File Directory Structure](/administrator#directories): directories where TDengine data files and configuration files are located
- [Parameter Restrictions and Reserved Keywords](/administrator#keywords): TDengine's list of parameter restrictions and reserved keywords

## TDengine Technical Design

- [System Module](/architecture/taosd): taosd functions and module partitioning
- [Data Replication](/architecture/replica): real-time synchronous/asynchronous replication to ensure high availability of the system
- [Technical Blog](https://www.taosdata.com/cn/blog/?categories=3): more technical analysis and architecture design articles

## Common Tools

- [TDengine sample import tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [TDengine performance comparison test tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
- [Use TDengine visually through the IDEA database management tool](https://www.taosdata.com/blog/2020/08/27/1767.html)

## Performance: TDengine vs Others

- [Performance: TDengine vs InfluxDB with InfluxDB's open-source performance testing tool](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [Performance: TDengine vs OpenTSDB](https://www.taosdata.com/blog/2019/08/21/621.html)
- [Performance: TDengine vs Cassandra](https://www.taosdata.com/blog/2019/08/14/573.html)
- [Performance: TDengine vs InfluxDB](https://www.taosdata.com/blog/2019/07/19/419.html)
- [Performance Test Reports of TDengine vs InfluxDB/OpenTSDB/Cassandra/MySQL/ClickHouse](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)

## More on IoT Big Data

- [Characteristics of IoT and Industry Internet Big Data](https://www.taosdata.com/blog/2019/07/09/characteristics-of-iot-big-data/)
- [Features and Functions of IoT Big Data platforms](https://www.taosdata.com/blog/2019/07/29/542.html)
- [Why don't General Big Data Platforms Fit IoT Scenarios?](https://www.taosdata.com/blog/2019/07/09/why-does-the-general-big-data-platform-not-fit-iot-data-processing/)
- [Why TDengine is the best choice for IoT, Internet of Vehicles, and Industry Internet Big Data platforms](https://www.taosdata.com/blog/2019/07/09/why-tdengine-is-the-best-choice-for-iot-big-data-processing/)

## FAQ

- [FAQ: Common questions and answers](/faq)

@ -0,0 +1,68 @@

# TDengine Introduction

## <a class="anchor" id="intro"></a>About TDengine

TDengine is an innovative Big Data processing product launched by Taos Data in the face of the fast-growing Internet of Things (IoT) Big Data market and its technical challenges. It does not rely on any third-party software, nor does it optimize or package any open-source database or stream computing product. Instead, it is a product independently developed after absorbing the advantages of many traditional relational databases, NoSQL databases, stream computing engines, message queues, and other software. TDengine has its own unique Big Data processing advantages in the time-series space.

One of the modules of TDengine is the time-series database. In addition, to reduce the complexity of research and development and the difficulty of system operation, TDengine also provides functions such as caching, message queuing, subscription, stream computing, etc. TDengine provides a full-stack technical solution for the processing of IoT and Industrial Internet Big Data. It is an efficient and easy-to-use IoT Big Data platform. Compared with typical Big Data platforms such as Hadoop, TDengine has the following distinct characteristics:

- **Performance improvement of over 10 times**: An innovative data storage structure is defined, with which each single core can process at least 20,000 requests per second, insert millions of data points, and read more than 10 million data points, more than 10 times faster than existing general-purpose databases.
- **Reduces the cost of hardware or cloud services to 1/5**: Due to its ultra-high performance, TDengine's computing resource consumption is less than 1/5 that of other common Big Data solutions; through columnar storage and advanced compression algorithms, its storage consumption is less than 1/10 that of other general databases.
- **Full-stack time-series data processing engine**: It integrates database, message queue, cache, stream computing, and other functions, so applications do not need to integrate software such as Kafka/Redis/HBase/Spark/HDFS, greatly reducing the complexity and cost of application development and maintenance.
- **Powerful analysis functions**: Data from ten years ago or one second ago can all be queried over a specified time range. Data can be aggregated over a timeline or across multiple devices. Ad-hoc queries can be made at any time through Shell, Python, R, and Matlab.
- **Seamless connection with third-party tools**: Integration with Telegraf, Grafana, EMQ, HiveMQ, Prometheus, Matlab, R, etc., without a single line of code. OPC, Hadoop, Spark, etc. will be supported in the future, and more BI tools will be seamlessly connected as well.
- **Zero operation cost & zero learning cost**: Installing a cluster is simple and quick, real-time backup is built in, and there is no need to split libraries or tables. Similar to standard SQL, TDengine supports RESTful and Python/Java/C/C++/C#/Go/Node.js, and is similar to MySQL with zero learning cost.

With TDengine, the total cost of ownership of typical IoT, Internet of Vehicles, and Industrial Internet Big Data platforms can be greatly reduced. However, it should be pointed out that, precisely because it makes full use of the characteristics of IoT time-series data, TDengine cannot be used to process general data from web crawlers, microblogs, WeChat, e-commerce, ERP, CRM, and other sources.



<center>Figure 1. TDengine Technology Ecosystem</center>

## <a class="anchor" id="scenes"></a>Overall Scenarios of TDengine

As an IoT Big Data platform, the typical application scenarios of TDengine are mainly in the IoT category, for users having a certain amount of data. The following sections of this document are mainly aimed at IoT-relevant systems. Other systems, such as CRM, ERP, etc., are beyond the scope of this article.

### Characteristics and Requirements of Data Sources

From the perspective of data sources, designers can analyze the applicability of TDengine in target application systems as follows.

| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
| A huge amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with a matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is occasionally or continuously huge | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process a large amount of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
| A huge amount of data sources | | | √ | TDengine includes optimizations specifically for a huge amount of data sources, such as for data writing and querying, and is especially suitable for efficiently processing massive (tens of millions or more) data sources. |

### System Architecture Requirements

| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring, and other functions; there is no need to integrate any additional third-party products. |
| Require fault tolerance and high reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability features such as fault tolerance and disaster recovery. |
| Standardization specifications | | | √ | TDengine uses the standard SQL language to provide its main functions and follows standardization specifications. |

### System Function Requirements

| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ----------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require complete data processing algorithms built in | | √ | | TDengine implements various general data processing algorithms, but it cannot cover all the requirements of different industries, so special types of processing shall be handled at the application level. |
| Require a huge amount of crosstab queries | | √ | | This type of processing should be handled more by relational database systems, or TDengine and a relational database system should work together to implement the system functions. |

### System Performance Requirements

| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a larger total processing capacity | | | √ | TDengine's cluster functions can easily improve processing capacity through multi-server cooperation. |
| Require high-speed data processing | | | √ | TDengine's storage and data processing are designed and optimized for IoT, and are generally several times faster than other similar products. |
| Require fast processing of fine-grained data | | | √ | TDengine achieves the same level of performance as relational and NoSQL data processing systems. |

### System Maintenance Requirements

| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ---------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
| Require a system with high reliability | | | √ | TDengine has a very robust and reliable system architecture that makes daily operation simple and convenient, with streamlined experiences for operators, so that human errors and accidents are eliminated to the greatest extent. |
| Require a controllable operation learning cost | | | √ | As above. |
| Require abundant talent supply | √ | | | As a new-generation product, it is still difficult to find talents with TDengine experience on the market. However, the learning cost is low. As the vendor, we also provide extensive operation training and counselling services. |

@ -0,0 +1,277 @@

# Quick Start

## <a class="anchor" id="install"></a>Quick Install

TDengine software consists of 3 parts: server, client, and alarm module. At the moment, the TDengine server only runs on Linux (support for Windows, macOS, and more operating systems will come soon), while the client can be installed and run on either Windows or Linux. Applications based on any OS can connect to the server taosd via a RESTful interface. As for CPUs, TDengine supports X64/ARM64/MIPS64/Alpha64, as well as ARM32 and RISC-V; more CPU architectures will be supported soon. You can set up and install the TDengine server either from the [source code](https://www.taosdata.com/en/getting-started/#Install-from-Source) or from the [packages](https://www.taosdata.com/en/getting-started/#Install-from-Package).

### <a class="anchor" id="source-install"></a>Install from Source

Please visit our [TDengine github page](https://github.com/taosdata/TDengine) for instructions on installation from the source code.

### Install from Docker Container

Please visit our [TDengine Official Docker Image: Distribution, Downloading, and Usage](https://www.taosdata.com/blog/2020/05/13/1509.html).

### <a class="anchor" id="package-install"></a>Install from Package

TDengine is extremely easy to install: it takes only a few seconds from download to successful installation. The server installation package includes the clients and connectors. We provide 3 installation packages, which you can choose from according to your actual needs.

Click [here](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) to download the install package.

For more about the installation process, please refer to [TDengine Installation Packages: Install and Uninstall](https://www.taosdata.com/blog/2019/08/09/566.html) and the [Video Tutorials](https://www.taosdata.com/blog/2020/11/11/1941.html).

## <a class="anchor" id="start"></a>Quick Launch

After installation, you can start the TDengine service with the `systemctl` command.

```bash
$ systemctl start taosd
```

Then check whether the service is working.

```bash
$ systemctl status taosd
```

If the service is running successfully, you can play around with the TDengine shell `taos`.

**Note:**

- The `systemctl` command needs **root** privileges. Use **sudo** if you are not the **root** user.
- To get better product feedback and improve our solution, TDengine collects basic usage information, but you can modify the configuration parameter **telemetryReporting** in the system configuration file taos.cfg and set it to 0 to turn this off.
- TDengine uses the FQDN (usually the hostname) as the node ID. To ensure normal operation, you need to set the hostname for the server running taosd, and configure a DNS service or hosts file for the machine running the client application, so that the FQDN can be resolved.
- TDengine supports installation on Linux systems with [systemd](https://en.wikipedia.org/wiki/Systemd) as the process service manager, and uses the `which systemctl` command to detect whether `systemd` packages exist in the system:

  ```bash
  $ which systemctl
  ```

  If `systemd` is not supported in the system, the TDengine service can also be launched manually via `/usr/local/taos/bin/taosd`.
## <a class="anchor" id="console"></a>TDengine Shell Command Line
|
||||
|
||||
To launch TDengine shell, the command line interface, in a Linux terminal, type:
|
||||
|
||||
```bash
|
||||
```
|
||||
|
||||
$ taos
|
||||
|
||||
```
|
||||
```
|
||||
|
||||
The welcome message is printed if the shell connects to TDengine server successfully, otherwise, an error message will be printed (refer to our [FAQ](https://www.taosdata.com/en/faq) page for troubleshooting the connection error). The TDengine shell prompt is:
|
||||
|
||||
```cmd
|
||||
```
|
||||
|
||||
taos>
|
||||
|
||||
```
|
||||
```
|
||||
|
||||
In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:
|
||||
|
||||
```mysql
|
||||
```
|
||||
|
||||
create database demo;
|
||||
|
||||
use demo;
|
||||
|
||||
create table t (ts timestamp, speed int);
|
||||
|
||||
insert into t values ('2019-07-15 00:00:00', 10);
|
||||
|
||||
insert into t values ('2019-07-15 01:00:00', 20);
|
||||
|
||||
select * from t;
|
||||
|
||||
ts | speed |
|
||||
|
||||
===================================
|
||||
|
||||
19-07-15 00:00:00.000| 10|
|
||||
|
||||
19-07-15 01:00:00.000| 20|
|
||||
|
||||
Query OK, 2 row(s) in set (0.001700s)
|
||||
|
||||
```
|
||||
```
|
||||
|
||||
Besides the SQL commands, the system administrator can check system status, add or delete accounts, and manage the servers.
|
||||
|
||||
### Shell Command Line Parameters
|
||||
|
||||
You can configure command parameters to change how TDengine shell executes. Some frequently used options are listed below:
|
||||
|
||||
- -c, --config-dir: set the configuration directory. It is */etc/taos* by default.
|
||||
- -h, --host: set the IP address of the server it will connect to. Default is localhost.
|
||||
- -s, --commands: set the command to run without entering the shell.
|
||||
- -u, -- user: user name to connect to server. Default is root.
|
||||
- -p, --password: password. Default is 'taosdata'.
|
||||
- -?, --help: get a full list of supported options.
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
```
|
||||
|
||||
$ taos -h 192.168.0.1 -s "use db; show tables;"
|
||||
|
||||
```
|
||||
```
|
||||
|
||||

### Run SQL Command Scripts

Inside the TDengine shell, you can run SQL scripts in a file with the source command.

```mysql
taos> source <filename>;
```

### Shell Tips

- Use the up/down arrow keys to browse the command history
- To change the default password, use the "alter user" command
- Use ctrl+c to interrupt any queries
- To clean the schema of locally cached tables, execute the command `RESET QUERY CACHE`

## <a class="anchor" id="demo"></a>Experience TDengine's Lightning Speed

After starting the TDengine server, you can execute the command `taosdemo` in a Linux terminal.

```bash
$ taosdemo
```

Using this command, a STable named `meters` will be created in the database `test`. There are 10k tables under this STable, named from `t0` to `t9999`. Each table has 100k rows of records, each row with columns (`f1`, `f2`, and `f3`). The timestamps range from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table also has the tags `areaid` and `loc`: `areaid` is set from 1 to 10, and `loc` is set to "beijing" or "shanghai".

It takes about 10 minutes to execute this command. Once finished, 1 billion rows of records will have been inserted.

In the TDengine client, enter SQL query commands and experience the lightning query speed.

- query the total number of rows:

```mysql
taos> select count(*) from test.meters;
```

- query the average, max, and min of the total 1 billion records:

```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters;
```

- query the number of records where loc="beijing":

```mysql
taos> select count(*) from test.meters where loc="beijing";
```

- query the average, max, and min of the records where areaid=10:

```mysql
taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10;
```

- query the average, max, and min from table t10, aggregated over every 10s:

```mysql
taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
```

**Note**: you can run the command `taosdemo` with many options, such as the number of tables and the rows of records. To learn more about these options, execute `taosdemo --help` and then try different options.

## Client and Alarm Module

If the client and server are running on different machines, please install the client separately. Linux and Windows packages are provided:

- TDengine-client-2.0.10.0-Linux-x64.tar.gz (3.0M)
- TDengine-client-2.0.10.0-Windows-x64.exe (2.8M)
- TDengine-client-2.0.10.0-Windows-x86.exe (2.8M)

The Linux package of the Alarm Module is as follows (please refer to [How to Use Alarm Module](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)):

- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M)

## <a class="anchor" id="platforms"></a>List of Supported Platforms

List of platforms supported by the TDengine server:

| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **UnionTech UOS** | **NeoKylin** | **LINX V60/V80** |
| ------------------ | ---------------- | ------------------- | --------------- | ----------------- | ------------ | ---------------- |
| X64 | ● | ● | | ○ | ● | ● |
| Raspberry ARM32 | | ● | ● | | | |
| Loongson MIPS64 | | | ● | | | |
| Kunpeng ARM64 | | ○ | ○ | | ● | |
| SWCPU Alpha64 | | | ○ | ● | | |
| FT ARM64 | | ○ Ubuntu Kylin | | | | |
| Hygon X64 | ● | ● | ● | ○ | ● | ● |
| Rockchip ARM64/32 | | | ○ | | | |
| Allwinner ARM64/32 | | | ○ | | | |
| Actions ARM64/32 | | | ○ | | | |
| TI ARM32 | | | ○ | | | |

Note: ● means verified by official tests; ○ means verified by unofficial tests.

List of platforms supported by TDengine clients and connectors:

At the moment, TDengine connectors support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/Alpha, and development environments such as Linux/Win64/Win32.

The comparison matrix is as follows:

| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS Godson** | **Alpha Shenwei** | **X64 TimecomTech** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------------- | ----------------- | ------------------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- |
| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● |

Note: ● means verified by official tests; ○ means verified by unofficial tests.

Please visit the [Connectors](https://www.taosdata.com/cn/documentation/connector) section for more detailed information.

@ -0,0 +1,370 @@

# Data Model and Architecture

## <a class="anchor" id="model"></a>Data Model

### A Typical IoT Scenario

In typical IoT, Internet of Vehicles, and Operation Monitoring scenarios, there are often many different types of data collection devices that collect one or more different physical metrics. For collection devices of the same type, there are often many specific devices distributed in the field. Big Data processing systems aim to collect all kinds of data, then compute and analyze them. For the same kind of device, the collected data is very regular. Taking smart meters as an example, and assuming that each smart meter collects the three metrics current, voltage, and phase, the collected data is similar to the following table:

| **Device ID** | **Time Stamp** | **current** | **voltage** | **phase** | **location** | **groupId** |
| ------------- | -------------- | ----------- | ----------- | --------- | ---------------- | ----------- |
| d1001 | 1538548685000 | 10.3 | 219 | 0.31 | Beijing.Chaoyang | 2 |
| d1002 | 1538548684000 | 10.2 | 220 | 0.23 | Beijing.Chaoyang | 3 |
| d1003 | 1538548686500 | 11.5 | 221 | 0.35 | Beijing.Haidian | 3 |
| d1004 | 1538548685500 | 13.4 | 223 | 0.29 | Beijing.Haidian | 2 |
| d1001 | 1538548695000 | 12.6 | 218 | 0.33 | Beijing.Chaoyang | 2 |
| d1004 | 1538548696600 | 11.8 | 221 | 0.28 | Beijing.Haidian | 2 |
| d1002 | 1538548696650 | 10.3 | 218 | 0.25 | Beijing.Chaoyang | 3 |
| d1001 | 1538548696800 | 12.3 | 221 | 0.31 | Beijing.Chaoyang | 2 |

Table 1: Smart meter example data (current, voltage, and phase are collected metrics; location and groupId are static tags)

Each data record contains the device ID, a timestamp, the collected metrics (current, voltage, and phase as above), and the static tags (location and groupId in Table 1) associated with the device. Each device generates a data record on a pre-defined timer or triggered by an external event. It is a sequence of data points, like a stream.

### Data Characteristics

As the data points are a series of values over time, the data points generated by IoT, Internet of Vehicles, and Operation Monitoring share some strong common characteristics:

1. Metrics are always structured data;
2. There are rarely delete/update operations on collected data;
3. There is no need for transactions as in traditional databases;
4. The read ratio is lower, and the write ratio higher, than in typical Internet applications;
5. The data flow is uniform and can be predicted from the number of devices and the collection frequency;
6. Users pay attention to the trend of the data, not a specific value at a specific time;
7. There is always a data retention policy;
8. Data queries are always executed within a given time range and a subset of space;
9. In addition to storage and query operations, various statistical and real-time computing operations are also required;
10. The data volume is huge; a system may generate over 10 billion data points in a day.

By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.

### Relational Database Model

Since time-series data is most likely to be structured, TDengine adopts the traditional relational database model, which has a shallow learning curve. You create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, instead of NoSQL's key-value storage.

### One Table for One Collection Point

To utilize the time-series and other data features, TDengine requires the user to create a table for each collection point to store the collected time-series data. For example, if there are over 10 million smart meters, 10 million tables shall be created. For the table above, 4 tables shall be created for devices d1001, d1002, d1003, and d1004 to store the collected data. This design has several advantages:

1. It guarantees that all data from a collection point is saved in a continuous memory/disk space, block by block. If queries are applied to only one point over a time range, this design reduces the random read latency significantly, thus increasing read and query speed by orders of magnitude.
2. Since the data generation process of each collection device is completely independent, meaning each device has its own unique data source, writes can be carried out in a lock-free manner to greatly improve speed.
3. Write latency is significantly reduced too, as the data points generated by the same device arrive in time order, so a new data point is simply appended to a block.

If the data of multiple devices is written into one table in the traditional way, then because of uncontrollable network delay, the order in which data from different devices arrives at the server cannot be guaranteed, write operations must be protected by locks, and the data of one device cannot be guaranteed to be stored together continuously. **Using one table for each data collection point ensures the optimal performance of insertion and query of a single data collection point to the greatest possible extent.**

TDengine suggests using the collection point ID as the table name (like d1001 in the table above). Each point may collect one or more metrics (like current, voltage, and phase above). Each metric has a column in the table, and the data type of a column can be int, float, string, and so on. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index and does not build an index on any metric stored. All data is stored in columns.

### STable: A Collection of Data Points of the Same Type

The method of one table for each point leads to a huge number of tables, which is difficult to manage. Moreover, applications often need aggregation operations across collection points, and those aggregations become complicated. To support efficient aggregation over multiple tables, TDengine introduces the [STable (Super Table)](https://www.taosdata.com/en/documentation/super-table) concept.

A STable is an abstract collection for one type of data point. It contains a set of points (tables) that have the same schema or data structure, but different static attributes (tags). To describe a STable (a combination of data collection points of a specific type), in addition to defining the table structure of the collected metrics, it is also necessary to define the schema of its tags. The data type of a tag can be int, float, or string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.

In the design of TDengine, **a table is used to represent a specific data collection point, and a STable is used to represent a set of data collection points of the same type**. When creating a table for a specific data collection point, the user takes the definition of the STable as a template and specifies the tag values of the specific collection point (table). Compared with the traditional relational database, a table (a data collection point) has static tags, and these tags can be added, deleted, and modified afterward. **A STable contains multiple tables with the same time-series data schema but different tag values.**

When aggregating multiple data collection points of the same type, TDengine first finds, from the STable, the tables that satisfy the tag filters, and then scans the time-series data of these tables to perform the aggregation. This greatly reduces the data sets to be scanned and thus greatly improves aggregation performance.
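
To illustrate the model, here is a minimal hedged C sketch that creates one STable for the smart-meter type of Table 1 and one sub-table for collection point d1001; the connection parameters and the database name power are assumptions made for this example:

```c
#include <stdio.h>
#include "taos.h"

// Run one SQL statement, report failures, and free the result set.
static void exec_sql(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    printf("\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main(void) {
  // Host, user, password, and database name are illustrative assumptions.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) {
    printf("failed to connect\n");
    return 1;
  }
  exec_sql(taos, "CREATE DATABASE IF NOT EXISTS power");
  exec_sql(taos, "USE power");
  // The STable defines the metric columns and the tag schema for the device type.
  exec_sql(taos, "CREATE TABLE IF NOT EXISTS meters"
                 " (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)"
                 " TAGS (location BINARY(64), groupId INT)");
  // One sub-table per collection point, created from the STable template
  // with this point's concrete tag values (see Table 1).
  exec_sql(taos, "CREATE TABLE IF NOT EXISTS d1001 USING meters"
                 " TAGS ('Beijing.Chaoyang', 2)");
  taos_close(taos);
  return 0;
}
```

An aggregation such as `SELECT AVG(current) FROM meters WHERE groupId = 2` then scans only the sub-tables whose tag values match the filter.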
## <a class="anchor" id="cluster"></a> Cluster and Primary Logic Unit
|
||||
|
||||
The design of TDengine is based on the assumption that one single hardware or software system is unreliable and that no single computer can provide sufficient computing and storage resources to process massive data. Therefore, TDengine has been designed according to a distributed and high-reliability architecture since Day One of R&D, which supports scale-out, so that hardware failure or software failure of any single or multiple servers will not affect the availability and reliability of the system. At the same time, through node virtualization and automatic load-balancing technology, TDengine can make the most efficient use of computing and storage resources in heterogeneous clusters to reduce hardware investment.
|
||||
|
||||
### Primary Logic Unit
|
||||
|
||||
Logical structure diagram of TDengine distributed architecture as following:
|
||||
|
||||
Figure 1: TDengine architecture diagram
|
||||
|
||||
A complete TDengine system runs on one or more physical nodes. Logically, it consists of data nodes (dnode), the TDengine application driver (taosc) and applications (app). There are one or more data nodes in the system, and they form a cluster. The application interacts with the TDengine cluster through taosc's API. The following is a brief introduction to each logical unit.

**Physical node (pnode)**: A pnode is a computer that runs independently and has its own computing, storage and network capabilities. It can be a physical machine, a virtual machine, or a Docker container with an OS installed. A physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDN for network communication. If you are not familiar with FQDN, please read the blog post "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".

**Data node (dnode):** A dnode is a running instance of the TDengine server-side executable taosd on a physical node. A working system must have at least one data node. A dnode contains zero to multiple logical virtual nodes (vnode) and at most one logical management node (mnode). A dnode is uniquely identified in the system by its End Point (EP), the combination of the FQDN of the physical node where the dnode is located and the network port number it is configured with. By configuring different ports, a physical node (a physical machine, virtual machine or container) can run multiple instances, i.e. host multiple data nodes.

**Virtual node (vnode)**: To better support data sharding and load balancing, and to prevent data hotspots and skew, data nodes are virtualized into multiple virtual nodes (vnode; V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit and the basic unit of time-series data storage, with its own running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the hardware capacity of the physical node where it is located. A vnode belongs to exactly one DB, but a DB can have multiple vnodes. Besides the stored time-series data, a vnode also stores the schemas and tag values of its tables. A vnode is uniquely identified in the system by the EP of its data node and the VGroup ID it belongs to, and it is created and managed by the management node.

**Management node (mnode)**: A virtual logical unit (M in the figure) responsible for monitoring and maintaining the running status of all data nodes and for load balancing among nodes. The management node is also responsible for storing and managing metadata (including users, databases, tables, static tags, etc.), so it is also called the Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they automatically form a virtual management node group (M0, M1, M2 in the figure). A master/slave mechanism is used among mnodes, and data synchronization between them is strongly consistent: any data update operation can only be performed on the master. The mnode cluster is created automatically by the system without manual intervention. There is at most one mnode on each dnode, uniquely identified by the EP of the dnode it belongs to. Each dnode automatically learns the EPs of all dnodes hosting mnodes in the cluster through internal message exchange.

**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure high reliability of the system. The virtual node group is managed in a master/slave structure. Write operations can only be performed on the master vnode, and the system synchronizes data to the slave vnodes via replication, thus keeping copies of the data on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas; if a DB has N replicas, the system must have at least N data nodes. The number of replicas can be specified by the parameter replica when creating a DB, and the default is 1. Using TDengine's multi-replica feature, high data reliability can be achieved without expensive storage devices such as disk arrays. A virtual node group is created and managed by the management node, which assigns it a cluster-unique ID, the VGroup ID. Two virtual nodes with the same VGroup ID belong to the same group and replicate each other's data. The number of virtual nodes in a virtual node group can be changed dynamically; one is allowed, meaning no data replication. A VGroup ID never changes; even if a virtual node group is deleted, its ID is not reused.
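
The replica count is set per database; a minimal sketch (the database name is illustrative):

```mysql
-- Each vgroup of this database will contain 3 vnodes on 3 different dnodes.
CREATE DATABASE power REPLICA 3;
```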

**TAOSC**: taosc is the driver provided by TDengine to applications. It handles the interaction between applications and the cluster and provides the native C/C++ interface, which is embedded in the JDBC, C#, Python, Go and Node.js connection libraries. Applications interact with the whole cluster through taosc instead of connecting directly to data nodes. This module obtains and caches metadata; forwards requests such as insertion and query to the correct data node; and, before returning results to the application, performs the final-stage aggregation, sorting and filtering. For the JDBC, C/C++, C#, Python, Go and Node.js interfaces, this module runs on the physical node where the application is located. To support the fully distributed RESTful interface, taosc also has a running instance on each dnode of the TDengine cluster.

### Node Communication

**Communication mode**: Communication among the data nodes of a TDengine system, and between the application driver and the data nodes, is carried out through TCP/UDP. In IoT scenarios, data writing packets are generally small, so besides TCP, TDengine also uses UDP, which is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission and confirmation mechanisms to ensure reliable UDP transmission. Packets smaller than 15 KB are transmitted via UDP; packets larger than 15 KB, as well as query operations, automatically use TCP. TDengine also compresses/decompresses and digitally signs/authenticates data according to the configuration and the packet. Data replication among data nodes uses TCP only.

**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg with the parameter "fqdn". If it is not specified, the system automatically uses the hostname of the computer as its FQDN. If the node is not configured with an FQDN, you can set the fqdn parameter to its IP address instead. However, this is not recommended, because IP addresses are variable and, once one changes, the cluster stops working properly. The End Point (EP) of a data node consists of FQDN + Port. When using FQDNs, make sure the DNS service works, or configure hosts files on the nodes and on the nodes where the applications are located.

**Port configuration**: The external port of a data node is determined by the system configuration parameter serverPort, and the port for internal cluster communication is serverPort+5. Data replication among data nodes in the cluster occupies one more TCP port, serverPort+10. To support multithreading and efficient processing of UDP data, each internal and external UDP connection occupies 5 consecutive ports. Therefore, the total port range of a data node is serverPort to serverPort+10, i.e. 11 TCP/UDP ports. Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.

**Cluster external connection**: A TDengine cluster can accommodate a single data node, multiple data nodes, or even thousands of them. An application only needs to initiate a connection to any one data node in the cluster. The network parameter required for the connection is a data node's End Point (FQDN plus configured port number). When starting the CLI application taos, the FQDN of the data node can be specified with the option -h, and the configured port number with -p. If the port is not specified, the system configuration parameter serverPort is used.

**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it obtains the EPs of the dnodes where the mnodes are located, then establishes connections with the mnodes to exchange information. There are three steps to obtain the mnode EP information: 1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to the second step. 2. Check the system configuration file taos.cfg for the configuration parameters firstEp and secondEp (the nodes specified by these two parameters can be normal nodes without an mnode; in that case, the node tries to redirect to the mnode when connected). If these two parameters are missing from taos.cfg or invalid, go to the third step. 3. Set its own EP as an mnode EP and run independently. After obtaining the mnode EP list, the data node initiates connections; it joins the working cluster after connecting successfully. If not successful, it tries the next item in the mnode EP list. If every attempt fails, it sleeps for a few seconds before trying again.

**The choice of mnode**: TDengine logically has a management node, but there is no separate executable; the server side only has one executable, taosd. So which data node becomes the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it checks its own End Point against the obtained mnode EP List. If its EP is in the list, the data node starts the mnode module and becomes an mnode; otherwise it does not. During system operation, due to load balancing, downtime and other reasons, the mnode may migrate to a new dnode, completely transparently and without manual intervention. Such changes are decided by the mnode itself according to resource usage.

**Add new data nodes:** Once the system has one data node, it is already a working system. Two steps add a new node into the cluster. Step 1: connect to an existing working data node using the TDengine CLI, then add the End Point of the new data node with the command "create dnode". Step 2: in the system configuration file taos.cfg of the new data node, set the firstEp and secondEp parameters to the EPs of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster is built up step by step.
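
For illustration, assuming a new node with the hypothetical End Point h2.taos.com:6030, Step 1 can be issued from the TDengine CLI as:

```mysql
-- Run on any existing node; registers the new dnode's End Point with the cluster.
CREATE DNODE "h2.taos.com:6030";
```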

**Redirection**: Whether it is a dnode or taosc, it first needs to connect to an mnode, but mnodes are created and maintained automatically by the system, so the user does not know which dnode is running an mnode. TDengine only requires a connection to any working dnode in the system. Since every running dnode maintains the current mnode EP List, when it receives a connection request from a newly started dnode or taosc and is not an mnode itself, it replies with the mnode EP List. After receiving the list, taosc or the newly started dnode tries to establish the connection again. When the mnode EP List changes, each data node quickly obtains the latest list through inter-node messaging and notifies taosc.

### A Typical Messaging Process

To explain the relationship between vnodes, mnodes, taosc and applications, and their respective roles, the following analyzes a typical data writing process.

Figure 2: A typical process of TDengine

1. The application initiates a request to insert data through JDBC, ODBC, or other APIs.
2. taosc checks its cache for the table's metadata. If it is cached, go straight to Step 4; if not, taosc sends a get-metadata request to an mnode.
3. The mnode returns the table's metadata to taosc. The metadata contains the table's schema and the vgroup information the table belongs to (the vnode ID and the End Point of the dnode where each vnode is located; if the number of replicas is N, there are N groups of End Points). If taosc does not receive a response from the mnode for a long time and there are multiple mnodes, taosc sends the request to the next mnode.
4. taosc initiates an insert request to the master vnode.
5. After the vnode inserts the data, it replies to taosc to indicate that the insertion succeeded. If taosc gets no response from the vnode for a long time, it judges the node offline; in this case, if the inserted database has multiple replicas, taosc issues the insert request to the next vnode in the vgroup.
6. taosc notifies the application that the write succeeded.

For Steps 2 and 3, when taosc starts, it does not know the EP of any mnode, so it directly sends a request to the configured externally serving End Point of the cluster. If the dnode that receives the request does not run an mnode, it replies with the mnode EP list, and taosc re-issues the get-metadata request to the EP of an mnode.

For Steps 4 and 5, without cached information, taosc cannot know which vnode in the virtual node group is the master, so it assumes the first one is and sends the request to it. If that vnode is not the master, it replies with the actual master, and taosc sends the request to that vnode instead. Once a reply of successful insertion is received, taosc caches the master's information.

The above is the process of inserting data; querying and computing follow exactly the same process. taosc encapsulates and hides all these complicated processes, so applications perceive none of it and need no special handling.

Through taosc's caching mechanism, the mnode needs to be accessed only when a table is operated on for the first time, so the mnode does not become a system bottleneck. However, because schemas and vgroups may change (e.g. due to load balancing), taosc interacts with the mnode regularly to update the cache automatically.

## <a class="anchor" id="sharding"></a> Storage Model and Data Partitioning/Sharding

The data stored by TDengine includes collected time-series data, metadata related to databases and tables, tag data, etc. These are divided into three parts:

- Time-series data: stored in vnodes and composed of data, head and last files. The amount of data is large, and the query load depends on the application scenario. Out-of-order writing is allowed, but deletion is not supported for the time being, and updates are only allowed when the database parameter update is set to 1. By adopting a one-table-per-collection-point model, the data of a given time period is stored continuously, and writing to a single table is a simple append operation; multiple records can be read in one go. This ensures the best possible insert and query performance for a single data collection point.
- Tag data: stored in meta files in vnodes, supporting add, delete, modify and query operations. The amount of data is not large: with N tables, there are N records, so it can all be kept in memory. If tag filtering occurs frequently, queries will be frequent too, and TDengine supports multi-core, multi-threaded concurrent queries. With sufficient computing resources, filtering results over millions of tables return in milliseconds.
- Metadata: stored in mnodes, including system nodes, users, DBs, table schemas and other information. It supports add, delete, modify and query operations. The amount of data is not large and can be kept in memory, and the query load is small thanks to client-side caching. Therefore, although TDengine manages it with centralized storage, there is no performance bottleneck.

Compared with typical NoSQL storage models, TDengine stores tag data and time-series data completely separately, which has two major advantages:

- It greatly reduces the redundancy of tag data storage: typical NoSQL or time-series databases adopt K-V storage, in which the key includes a timestamp, a device ID and various tags. Every record carries these duplicates, wasting storage space. Moreover, if the application needs to add, modify or delete tags on historical data, it has to traverse and rewrite the data, which is extremely expensive.
- It enables extremely efficient aggregation queries across multiple tables: the system first finds the tables that satisfy the tag filters, then locates the corresponding data blocks of those tables. This greatly reduces the data set to be scanned and improves query efficiency. Moreover, tag data is managed and maintained in a full-memory structure, so tag queries over tens of millions of tables can return in milliseconds.

### Data Sharding

For large-scale data management, to achieve scale-out, it is generally necessary to adopt a partitioning (sharding) strategy. TDengine implements data sharding via vnodes, and partitions time-series data by time range, with one data file per time range.

VNode (Virtual Data Node) is responsible for writing, querying and computing over the collected time-series data. To facilitate load balancing, data recovery, and heterogeneous environments, TDengine splits a data node into multiple vnodes according to its computing and storage resources. The management of these vnodes is done automatically by TDengine and is completely transparent to applications.

For a single data collection point, regardless of the amount of data, a vnode (or vnode group, if the number of replicas is greater than 1) has enough computing and storage resources to handle it (if one 16-byte record is generated per second, the raw data generated in one year is still less than 0.5 GB), so TDengine stores all the data of a table (a data collection point) in one vnode instead of distributing it across multiple dnodes. Moreover, a vnode can store the data of multiple data collection points (tables), with an upper limit of one million tables per vnode. By design, all tables in a vnode belong to the same DB. On a data node, unless specially configured, the number of vnodes owned by a DB will not exceed the number of CPU cores.

When creating a DB, the system does not allocate resources immediately. When a table is created, the system checks whether there is an allocated vnode with free table space. If so, the table is created in that vnode immediately. If not, the system creates a new vnode on a dnode chosen from the cluster according to the current workload, and then creates the table there. If a DB has multiple replicas, the system creates not a single vnode but a vgroup (virtual data node group). The system imposes no limit on the number of vnodes; it is only limited by the computing and storage resources of the physical nodes.

The metadata of each table (including schema, tags, etc.) is also stored in the vnode instead of centrally in the mnode. This is effectively sharding of metadata, which enables efficient, parallel tag filtering.

### Data Partitioning

In addition to vnode sharding, TDengine partitions time-series data by time range. Each data file contains the time-series data of only one time range, whose length is determined by the DB configuration parameter "days". Partitioning by time range also makes it easy to implement data retention policies: as soon as a data file exceeds the configured number of days (system configuration parameter "keep"), it is automatically deleted. Moreover, different time ranges can be stored on different paths and storage media, facilitating hot/cold management of big data and tiered storage.
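
Both parameters are set per database at creation time; a minimal sketch (values are illustrative):

```mysql
-- Each data file covers 10 days; files older than 3650 days are purged automatically.
CREATE DATABASE power DAYS 10 KEEP 3650;
```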

In general, **TDengine splits big data along two dimensions, vnode and time**, enabling parallel, efficient management and scale-out.

### Load Balancing

Each dnode regularly reports its status (including hard disk space, memory size, CPU, network, number of virtual nodes, etc.) to the mnode (virtual management node), so the mnode knows the status of the entire cluster. Based on this overall state, when the mnode finds an overloaded dnode, it migrates one or more vnodes to other dnodes. During the process, external services keep running, and data insertion, query and computation are not affected.

If the mnode has not received a dnode's status for a period of time, the dnode is judged offline. If it stays offline beyond a certain duration (determined by the configuration parameter "offlineThreshold"), the dnode is forcibly removed from the cluster by the mnode. If the vnodes on this dnode have more than one replica, the system automatically creates new replicas on other dnodes to maintain the replica count. If the dnode hosts an mnode and the mnode replica count is greater than one, the system also automatically creates a new mnode on another dnode to maintain the replica count.

When new data nodes are added to the cluster, bringing new computing and storage resources, the system automatically starts the load balancing process.

The load balancing process requires no manual intervention and no application restart; it is automatic and completely transparent to applications. **Note: load balancing is controlled by the parameter "balance", which turns automatic load balancing on or off.**

## <a class="anchor" id="replication"></a> Data Writing and Replication Process

If a database has N replicas, a virtual node group has N virtual nodes, of which only one is the master and the others are slaves. When an application writes a new record, only the master vnode can accept the write request. If a slave vnode receives the request, the system notifies taosc to redirect.

### Master vnode Writing Process

The master vnode follows the writing process below:

Figure 3: TDengine Master writing process

1. The master vnode receives the application's data insertion request, verifies it, and proceeds to the next step;
2. If the system configuration parameter "walLevel" is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes the WAL data to disk immediately, so that even if the system crashes, all data can be recovered from the log file;
3. If there are multiple replicas, the vnode forwards the data packet to the slave vnodes in the same virtual node group; the forwarded packet carries a version number along with the data;
4. The data is written into memory and the record is added to the skip list;
5. The master vnode returns a confirmation message to the application, indicating a successful write;
6. If any of Steps 2, 3 or 4 fails, the error is returned directly to the application.

### Slave vnode Writing Process

For a slave vnode, the writing process is as follows:

Figure 4: TDengine Slave writing process

1. The slave vnode receives a data insertion request forwarded by the master vnode;
2. If the system configuration parameter "walLevel" is greater than 0, the vnode writes the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes the WAL data to disk immediately, so that even if the system crashes, all data can be recovered from the log file;
3. The data is written into memory and the record is added to the skip list.

Compared with the master vnode, the slave vnode has neither the forwarding step nor the reply-confirmation step, i.e. two steps fewer; the memory writing and the WAL are exactly the same.

### Remote Disaster Recovery and IDC Migration

As discussed in the master and slave processes above, TDengine adopts asynchronous replication for data synchronization. This greatly improves write performance and is hardly affected by network latency. By configuring an IDC and a rack number for each physical node, it can be ensured that the virtual nodes of a virtual node group sit on physical nodes in different IDCs and different racks, implementing remote disaster recovery without any other tools.

On the other hand, TDengine supports dynamically changing the number of replicas. Once the number of replicas increases, the newly added virtual nodes immediately enter the data synchronization process; after synchronization completes, they can serve requests. During synchronization, the master and the other already-synchronized virtual nodes keep serving. With this feature, TDengine can carry out an IDC migration without service interruption: simply add the new physical nodes to the existing cluster, and remove the old physical nodes after data synchronization completes.

However, this asynchronous replication method has a tiny time window in which written data can be lost. The specific scenario is as follows:

1. The master vnode completed its five-step operations, confirmed the write success to the application, and then crashed;
2. The slave vnode received the write request but failed before writing the log in Step 2;
3. The slave vnode becomes the new master, thus losing one record.

In theory, with asynchronous replication, such loss cannot be completely ruled out. However, the window is extremely small: it requires the master and a slave to fail at almost the same time, right after the write success has been confirmed to the application.

Note: Remote disaster recovery and no-downtime IDC migration are only supported by the Enterprise Edition. **Hint: This function is not available yet.**

### Master/slave Selection

Each vnode maintains a version number, which is persisted whenever in-memory data is persisted. Every data update operation, whether on time-series data or metadata, increases the version number by one.

When a vnode starts, its role (master or slave) is uncertain and its data is unsynchronized. It establishes TCP connections with the other vnodes in its virtual node group and exchanges status, including version numbers and roles. Through this exchange, the system carries out master selection with the following rules:

1. If there is only one replica, it is always the master;
2. When all replicas are online, the one with the latest version becomes the master;
3. If more than half of the virtual nodes are online and one of them is a slave, that virtual node automatically becomes the master;
4. For rules 2 and 3, if multiple virtual nodes qualify, the first vnode in the virtual node group list is selected as the master.

See [TDengine 2.0 Data Replication Module Design](https://www.taosdata.com/cn/documentation/architecture/replica/) for more information on the data replication process.

### Synchronous Replication

For scenarios with higher data consistency requirements, asynchronous replication is not sufficient, because there is a small probability of data loss. Therefore, TDengine provides a synchronous replication mechanism. When creating a database, in addition to the number of replicas, the user specifies a parameter "quorum". If quorum is greater than one, then every time the master forwards a message to the replicas, it must wait for quorum-1 reply confirmations before informing the application that the write succeeded. If quorum-1 reply confirmations are not received within a certain period of time, the master vnode returns an error to the application.
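
A minimal sketch of such a database (name and values illustrative):

```mysql
-- 3 replicas; the master waits for quorum-1 = 1 slave confirmation per write.
CREATE DATABASE power REPLICA 3 QUORUM 2;
```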

With synchronous replication, system performance decreases and latency increases. Because metadata must be strongly consistent, data synchronization among mnodes defaults to synchronous replication.

Note: synchronous replication between vnodes is only supported in the Enterprise Edition.

## <a class="anchor" id="persistence"></a> Caching and Persistence
### Caching
TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. It differs from the read-driven data caching mode (Least-Recently-Used, LRU): the most recently written data is placed directly in the system buffer, and when the buffer reaches a threshold, the earliest data is written to disk in batches. Generally speaking, for IoT data, users care most about the newly generated data, i.e. the current state. TDengine takes full advantage of this by keeping the most recently arrived (current-state) data in the buffer.

TDengine provides millisecond-level data retrieval capability to users through its query functions. Keeping the recently arrived data directly in the buffer allows the system to respond much faster to queries on the latest record, or the latest batch of records, improving overall query responsiveness. In this sense, **with appropriate configuration, TDengine can be used as a data cache without deploying Redis or other additional caching systems**, which effectively simplifies the system architecture and reduces operating costs. Note, however, that after TDengine restarts, the buffer is emptied: the previously cached data is flushed to disk in batches and is not reloaded into the buffer, unlike in a dedicated key-value caching system.

Each vnode has its own independent memory, composed of multiple fixed-size memory blocks; different vnodes are completely isolated. When writing, much like writing a log, data is appended sequentially to memory, while each vnode maintains its own skip list for fast lookup. When more than one third of the memory blocks are in use, flushing to disk starts, and subsequent writes go into a new memory block. This way, one third of a vnode's memory blocks always hold the latest data, achieving both caching and fast lookup. The number of memory blocks per vnode is determined by the configuration parameter "blocks", and their size by the parameter "cache".
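
Both cache parameters are set per database; a minimal sketch (values illustrative):

```mysql
-- 6 memory blocks of 16 MB each per vnode of this database.
CREATE DATABASE power CACHE 16 BLOCKS 6;
```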
### Persistent Storage

TDengine writes data from the buffer to hard disk for persistent storage in a data-driven manner. When the cached data in a vnode reaches a certain volume, TDengine starts a disk-writing thread to flush the cached data to persistent storage, so that subsequent writes are not blocked. TDengine opens a new database log file when flushing begins and deletes the old log file after a successful flush, to avoid unlimited log growth.

To exploit the characteristics of time-series data, TDengine splits the data a vnode persists into multiple files, each covering a fixed number of days, determined by the system configuration parameter "days". With files split by time range, for a query's given start and end dates the system can locate the data files to open immediately, without any index, which greatly speeds up reading.

Collected data generally has a retention period, determined by the system configuration parameter "keep". Data files older than the configured number of days are automatically deleted by the system to free up storage space.

Given the "days" and "keep" parameters, the total number of data files in a vnode is keep/days. It should be neither too large nor too small; 10 to 100 is appropriate. Based on this principle, a reasonable value of days can be chosen. In the current version, the parameter "keep" can be modified, but "days" cannot be changed once set.
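
For example, "keep" can be adjusted later on a live database; a sketch (value illustrative):

```mysql
-- Shorten the retention period; "days" cannot be altered this way.
ALTER DATABASE power KEEP 730;
```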

Within each data file, the data of a table is stored in blocks. A table can have one or more file blocks. Within a file block, data is stored column-wise, occupying contiguous storage space, which greatly improves read speed. The file block size is determined by the system parameter "maxRows" (maximum number of records per block), with a default of 4096. This value should be neither too large nor too small: too large, and locating a specific piece of data takes longer; too small, and the data block indices grow too large, compression efficiency drops, and read speed suffers.

Each data file (.data postfix) has a corresponding index file (.head postfix). The index file holds, for each table, summary information about its data blocks, recording each block's offset in the data file, the start and end time of the data, and so on, letting the system quickly locate the data it needs. Each data file also has a corresponding last file (.last postfix), designed to prevent data block fragmentation on disk. If the number of records to flush for a table does not reach the system configuration parameter "minRows" (minimum number of records per block), they are stored in the last file first; at the next flush, the newly written records are merged with the records in the last file and then written into the data file.

When data is flushed to disk, whether it is compressed depends on the system configuration parameter "comp". TDengine provides three options: no compression, one-stage compression, and two-stage compression, corresponding to comp values of 0, 1 and 2. One-stage compression is applied according to the data type, using algorithms such as delta-of-delta encoding, Simple8B, zig-zag encoding and LZ4. Two-stage compression applies a general-purpose compression algorithm on top of one-stage compression, achieving a higher compression ratio.
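
The compression option is also set per database; a sketch (values illustrative):

```mysql
-- 0 = no compression, 1 = one-stage, 2 = two-stage compression.
CREATE DATABASE power COMP 2;
```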
### Tiered Storage

By default, TDengine saves all data under the directory /var/lib/taos, with the data files of each vnode in a distinct subdirectory. To expand the storage space, reduce file-reading bottlenecks and improve data throughput, TDengine can be configured, via the system parameter "dataDir", to use multiple mounted hard disks at the same time. In addition, TDengine provides tiered data storage: data is placed on different storage media according to the timestamps of the data files. For example, the latest data can be stored on SSD, data older than one week on local hard disks, and data older than four weeks on network storage, reducing storage costs while keeping data access efficient. The movement of data between storage media is done automatically by the system and is completely transparent to applications. Tiered storage is also configured through the system parameter "dataDir".

The dataDir format is as follows:

    dataDir data_path [tier_level]

Where data_path is the mount point path and tier_level is the storage tier of the medium. The higher the storage tier, the older the data files it holds. Multiple hard disks can be mounted at the same storage tier, and data files of one tier are distributed across all its hard disks. TDengine supports up to 3 tiers of storage, so tier_level can be 0, 1 or 2. When configuring dataDir, exactly one mount path must be given without a tier_level; it is called the special mount disk (path). That mount path defaults to tier-0 storage and contains special file links, which must not be removed, otherwise the written data will be irrecoverably damaged.

Suppose a physical node has six mountable hard disks /mnt/disk1, /mnt/disk2, ..., /mnt/disk6, where disk1 and disk2 are to be designated as tier-0 storage media, disk3 and disk4 as tier 1, and disk5 and disk6 as tier 2. Disk1 is the special mount disk. This can be configured in /etc/taos/taos.cfg as follows:

    dataDir /mnt/disk1/taos
    dataDir /mnt/disk2/taos 0
    dataDir /mnt/disk3/taos 1
    dataDir /mnt/disk4/taos 1
    dataDir /mnt/disk5/taos 2
    dataDir /mnt/disk6/taos 2

A mounted disk can also be a non-local network disk, as long as the system can access it.

Note: Tiered Storage is only supported in the Enterprise Edition.

## <a class="anchor" id="query"></a>Data Query

TDengine provides a variety of query processing functions for tables and STables. Besides common aggregation queries, it also provides window queries and statistical aggregation functions for time-series data. Query processing in TDengine requires the collaboration of the client, vnodes and mnodes.

### Single Table Query

SQL statements are parsed and validated entirely on the client side: the SQL statement is parsed into an Abstract Syntax Tree (AST), which is then validated. The client then requests the metadata (table metadata) of the tables referenced in the query from the management node (mnode).

According to the End Point information in the metadata, the query request is serialized and sent to the data node (dnode) where the table is located. On receiving the query, the dnode identifies the targeted virtual node (vnode) and forwards the message to the vnode's query execution queue. The vnode's query execution thread establishes the basic query execution environment, immediately acknowledges the query request, and starts executing the query at the same time.

When the client fetches the query result, the worker thread in the dnode's query execution queue waits for the vnode's execution thread to finish before returning the result to the client.

### Aggregation by Time Axis, Downsampling, Interpolation

What distinguishes time-series data from ordinary data is that every record has a timestamp, so aggregating data along the time axis is an important function not found in common databases; in this respect it resembles window queries in stream computing engines.

TDengine introduces the keyword interval to split the time axis into fixed-length time windows and aggregate the data within each window as needed. For example:

    select count(*) from d1001 interval(1h);

For the data collected by device d1001, this returns the number of records stored per hour, using 1-hour time windows.

In scenarios where query results must be obtained continuously, if a time window contains no data, its result would simply be missing. TDengine therefore provides an interpolation strategy for time-axis aggregation results via the keyword fill. For example:

    select count(*) from d1001 interval(1h) fill(prev);

For the data collected by device d1001, the number of records per hour is counted; if an hour has no data, the statistics of the previous hour are returned instead. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL filling (NULL), and specific value filling (value).

### Multi-table Aggregation Query

TDengine creates a separate table for each data collection point, but in practice it is often necessary to aggregate data from different collection points. To do this efficiently, TDengine introduces the concept of STable (super table). A STable represents a specific type of data collection point: it is a set of tables whose schemas are exactly the same but which each carry their own static tags. There can be multiple tags, and they can be added, deleted and modified at any time. By specifying tag filters, applications can aggregate or run statistics over all or a subset of the tables under a STable, which greatly simplifies application development. The overall process is shown in the figure below.
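
A sketch of the modeling (names follow the d1001 smart-meter example used earlier and are illustrative):

```mysql
-- A STable for smart meters: one shared schema, per-table static tags.
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);
-- d1001 is one concrete data collection point under that STable.
CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
```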
Figure 5: Diagram of multi-table aggregation query

1. The application sends a query with tag filter conditions to the system;
2. taosc sends the STable name to the mnode (management node);
3. The mnode sends the list of vnodes owned by the STable back to taosc;
4. taosc sends the computing request, together with the tag filters, to the data nodes hosting these vnodes;
5. Each vnode first finds, in memory, the set of its tables that satisfy the tag filters, then scans the stored time-series data, completes the corresponding aggregation, and returns its result to taosc;
6. taosc finally aggregates the results returned by the data nodes and sends the final result back to the application.

Since TDengine stores tag data and time-series data separately in the vnode, filtering the in-memory tag data first determines the set of tables that participate in the aggregation, which greatly reduces the volume of data to scan and speeds up aggregation. Meanwhile, because the data is spread over multiple vnodes/dnodes, the aggregation runs concurrently in multiple vnodes, further improving speed. Aggregation functions and most operations on ordinary tables also apply to STables, with exactly the same syntax; see TAOS SQL for details.
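
For example, a tag-filtered aggregation over the STable sketched above (filter value illustrative):

```mysql
-- Aggregate across all meters whose location tag starts with "Beijing".
SELECT COUNT(*), MAX(current) FROM meters WHERE location LIKE 'Beijing%';
```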
### Precomputation

To effectively improve query performance, and based on the fact that IoT data is rarely modified, TDengine records statistical information about the data stored in each data block in the block's head, including the maximum value, minimum value and sum. We call this a precomputing unit. If a query involves all the data of an entire data block, the precomputed results are used directly and the block's contents are not read at all. Since the precomputed data is much smaller than the data blocks on disk, for queries bottlenecked on disk IO, using precomputed results greatly reduces read IO pressure and speeds up the query. The precomputation mechanism is similar to PostgreSQL's BRIN (Block Range Index).

# Efficient Data Writing

TDengine supports multiple ways to write data, including SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker and CSV files; Kafka, OPC and other interfaces will be provided in the future. Data can be inserted one record at a time or in batches, for a single data collection point or for multiple ones at the same time. TDengine supports multi-threaded insertion, out-of-order data insertion, and historical data insertion.

## <a class="anchor" id="sql"></a> SQL Writing

Applications insert data by executing SQL insert statements through the C/C++, JDBC, Go or Python connectors, and users can manually enter SQL insert statements through the TAOS Shell. For example, the following insert writes one record to table d1001:

```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```

TDengine supports writing multiple records at a time. For example, the following command writes two records to table d1001:

```mysql
INSERT INTO d1001 VALUES (1538548684000, 10.2, 220, 0.23) (1538548696650, 10.3, 218, 0.25);
```

TDengine also supports writing data to multiple tables at a time. For example, the following command writes two records to d1001 and one record to d1002:

```mysql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```

For the SQL INSERT grammar, please refer to [Taos SQL insert](https://www.taosdata.com/en/documentation/taos-sql#insert).

**Tips:**

- To improve writing efficiency, batch writing is necessary: the more records written in one batch, the higher the insertion efficiency. However, a single record cannot exceed 16 KB, and the total length of an SQL statement cannot exceed 64 KB (configurable via the parameter maxSQLLength, up to 1 MB).
- TDengine supports multi-threaded parallel writing. To further improve the write speed, a client can open 20 or more threads and write in parallel. Beyond a certain threshold, however, adding threads no longer helps and may even hurt, because overly frequent thread switching brings extra overhead.
- For the same table, if the timestamp of a newly inserted record already exists and the database was not created with UPDATE 1, the new record is discarded by default; that is, timestamps must be unique within a table. If an application generates records automatically, generated timestamps may well collide, so the number of records successfully inserted can be smaller than the number the application tried to insert. If the database was created with the UPDATE 1 option, inserting a record with an existing timestamp overwrites the original record (see the sketch after this list).
- The timestamp of written data must be greater than the current time minus the keep configuration value: if keep is 3650 days, data older than 3650 days cannot be written. The timestamp also cannot be greater than the current time plus the days configuration value: if days is 2, data more than 2 days in the future cannot be written.
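
A sketch of the UPDATE option mentioned above (database name illustrative):

```mysql
-- With UPDATE 1, an INSERT that reuses an existing timestamp overwrites that record.
CREATE DATABASE power UPDATE 1;
```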
## <a class="anchor" id="prometheus"></a> Direct Writing of Prometheus

As a graduated project of the Cloud Native Computing Foundation, [Prometheus](https://www.prometheus.io/) is widely used for performance monitoring and Kubernetes monitoring. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which, with only simple configuration in Prometheus and no coding, writes the data collected by Prometheus directly into TDengine and automatically creates databases and related tables according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.

### Compile blm_prometheus From Source

Download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Golang compiler. Before compiling, complete the following preparations:

- A server running Linux
- Golang version 1.10 or higher installed
- An appropriate TDengine version. Because Bailongma uses TDengine's client dynamic link library, the same TDengine version as the server side must be installed; for example, if the server runs TDengine 2.0.0, install the same version on the Linux server where Bailongma runs (it can be the same server as TDengine, or a different one)

The Bailongma project has a folder, blm_prometheus, which holds the Prometheus writing API. The compiling process is as follows:


```bash
cd blm_prometheus
go build
```
If everything goes well, an executable of blm_prometheus will be generated in the corresponding directory.
### Install Prometheus

Download and install Prometheus following the instructions on its official website. [Download Address](https://prometheus.io/download/)

### Configure Prometheus

Read the Prometheus [configuration document](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) and add the following configuration to the remote_write section of the Prometheus configuration file:

- url: the URL provided by the Bailongma API service; refer to the blm_prometheus startup example section below

After Prometheus is launched, you can check whether the data was written successfully by querying with the taos client.

### Launch blm_prometheus

blm_prometheus provides the following options, which can be configured when launching it:

```sh
--tdengine-name
If TDengine is installed on a server with a domain name, you can also access TDengine by configuring that domain name. In a Kubernetes environment, this can be set to the service name under which TDengine runs.

--batch-size
blm_prometheus assembles the received Prometheus data into TDengine write requests. This parameter controls how many data points are carried in one write request sent to TDengine.

--dbname
Name of the database created in TDengine. blm_prometheus automatically creates a database named dbname in TDengine; the default value is prometheus.

--dbuser
User name for accessing TDengine; the default value is 'root'.

--dbpassword
Password for accessing TDengine; the default value is 'taosdata'.

--port
Port number blm_prometheus uses to serve Prometheus.
```
### Example

Launch an API service for blm_prometheus with the following command:

```bash
./blm_prometheus -port 8088
```

Assuming that the IP address of the server where blm_prometheus is located is 10.1.2.3, add the URL to the Prometheus configuration file as:

    remote_write:
      - url: "http://10.1.2.3:8088/receive"

### Query Written Data of Prometheus

The format of the data generated by Prometheus is as follows:

```json
{
  Timestamp: 1576466279341,
  Value: 37.000000,
  apiserver_request_latencies_bucket {
    component="apiserver",
    instance="192.168.99.116:8443",
    job="kubernetes-apiservers",
    le="125000",
    resource="persistentvolumes",
    scope="cluster",
    verb="LIST",
    version="v1"
  }
}
```

Here apiserver_request_latencies_bucket is the name of the time series collected by Prometheus, and the tags of the time series are inside the braces. blm_prometheus automatically creates a STable in TDengine named after the time series, converts the tags inside the braces into TDengine tag values, and uses Timestamp as the timestamp and Value as the value of the time series. You can therefore check in the TDengine client whether this data was written successfully with the following statements:

```mysql
use prometheus;
select * from apiserver_request_latencies_bucket;
```
## <a class="anchor" id="telegraf"></a> Direct Writing of Telegraf

[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a popular open source tool for collecting IT operation data. TDengine provides a simple tool, [Bailongma](https://github.com/taosdata/Bailongma), which, with only simple configuration in Telegraf and no coding, writes the data collected by Telegraf directly into TDengine and automatically creates databases and related tables according to rules. The blog post [Use Docker Container to Quickly Build a Devops Monitoring Demo](https://www.taosdata.com/blog/2020/02/03/1189.html) gives an example of using Bailongma to write Prometheus and Telegraf data into TDengine.

### Compile blm_telegraf From Source Code

Download the source code of [Bailongma](https://github.com/taosdata/Bailongma) from GitHub, then compile it into an executable with the Golang compiler. Before compiling, complete the following preparations:

- A server running Linux
- Golang version 1.10 or higher installed
- An appropriate TDengine version. Because Bailongma uses TDengine's client dynamic link library, the same TDengine version as the server side must be installed; for example, if the server runs TDengine 2.0.0, install the same version on the Linux server where Bailongma runs (it can be the same server as TDengine, or a different one)

The Bailongma project has a folder, blm_telegraf, which holds the Telegraf writing API. The compiling process is as follows:


```bash
cd blm_telegraf
go build
```
If everything goes well, an executable of blm_telegraf will be generated in the corresponding directory.
### Install Telegraf

At the moment, TDengine supports Telegraf version 1.7.4 and above. Users can download the installation package from Telegraf's website according to their operating system. The download address is: https://portal.influxdata.com/downloads

### Configure Telegraf

Modify the TDengine-related configuration in the Telegraf configuration file /etc/telegraf/telegraf.conf.

In the output plugins section, add the [[outputs.http]] configuration:

- url: the URL provided by the Bailongma API service; please refer to the example section below
- data_format: "json"
- json_timestamp_units: "1ms"

In the agent section:

- hostname: the machine name that distinguishes different collection devices; it must be unique
- metric_batch_size: 100, the maximum number of records Telegraf is allowed to write per batch. Increasing it reduces how often Telegraf sends requests.

For how to use Telegraf to collect data, and more about Telegraf, please refer to the official Telegraf [documentation](https://docs.influxdata.com/telegraf/v1.11/).

### Launch blm_telegraf

blm_telegraf provides the following options, which can be configured when launching it:

```sh
--host
The IP address of the TDengine server; the default is null.

--batch-size
blm_telegraf assembles the received Telegraf data into TDengine write requests. This parameter controls how many data points are carried in one write request sent to TDengine.

--dbname
Name of the database created in TDengine. blm_telegraf automatically creates a database named dbname in TDengine; the default value is prometheus.

--dbuser
User name for accessing TDengine; the default value is 'root'.

--dbpassword
Password for accessing TDengine; the default value is 'taosdata'.

--port
Port number blm_telegraf uses to serve Telegraf.
```
### Example

Launch an API service for blm_telegraf with the following command:

```bash
./blm_telegraf -host 127.0.0.1 -port 8089
```

Assuming that the IP address of the server where blm_telegraf is located is 10.1.2.3, add the URL to the Telegraf configuration file as:

```yaml
url = "http://10.1.2.3:8089/telegraf"
```

### Query Written Data of Telegraf

The format of the data generated by Telegraf is as follows:

```json
{
  "fields": {
    "usage_guest": 0,
    "usage_guest_nice": 0,
    "usage_idle": 89.7897897897898,
    "usage_iowait": 0,
    "usage_irq": 0,
    "usage_nice": 0,
    "usage_softirq": 0,
    "usage_steal": 0,
    "usage_system": 5.405405405405405,
    "usage_user": 4.804804804804805
  },
  "name": "cpu",
  "tags": {
    "cpu": "cpu2",
    "host": "bogon"
  },
  "timestamp": 1576464360
}
```

Here the name field is the name of the time series collected by Telegraf, and the tags field holds the tags of the time series. blm_telegraf automatically creates a STable in TDengine named after the time series, converts the tags field into TDengine tag values, and uses timestamp as the timestamp and the fields values as the values of the time series. You can therefore check in the TDengine client whether this data was written successfully with the following statements:

```mysql
use telegraf;
select * from cpu;
```

MQTT is a popular data transmission protocol in the IoT. TDengine can easily access the data received by an MQTT broker and write it to TDengine.

## <a class="anchor" id="emq"></a> Direct Writing of EMQ Broker

[EMQ](https://github.com/emqx/emqx) is an open source MQTT broker. Without any coding, a simple configuration of "rules" in the EMQ Dashboard is enough to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a web service, and its Enterprise Edition also provides a native TDengine driver for direct saving. Please refer to the [EMQ official documents](https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine) for details.

## <a class="anchor" id="hivemq"></a> Direct Writing of HiveMQ Broker
|
||||
|
||||
[HiveMQ](https://www.hivemq.com/) is an MQTT agent that provides Free Personal and Enterprise Edition versions. It is mainly used for enterprises, emerging machine-to-machine(M2M) communication and internal transmission to meet scalability, easy management and security features. HiveMQ provides an open source plug-in development kit. You can store data to TDengine via HiveMQ extension-TDengine. Refer to the [HiveMQ extension-TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md) for more details.

# Efficient Data Querying

## <a class="anchor" id="queries"></a> Main Query Features

TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go and Python connectors, and users can also run ad-hoc SQL queries manually through TAOS Shell, the Command Line Interface (CLI) tool provided by TDengine. TDengine supports the following query functions:

- Single-column and multi-column data queries
- Multiple filters on tags and numeric values: >, <, =, <>, like, etc.
- Group by, Order by, Limit/Offset on aggregation results
- Arithmetic operations (+, -, *, /) on numeric columns and aggregation results
- Timestamp-aligned join queries (implicit joins)
- Multiple aggregation/calculation functions: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff, etc.

For example, in TAOS Shell, the records with voltage > 215 are queried from table d1001, sorted in descending order by timestamp, and only two records are returned:

```mysql
taos> select * from d1001 where voltage > 215 order by ts desc limit 2;
           ts            |  current   |  voltage   |   phase    |
======================================================================================
 2018-10-03 14:38:16.800 |   12.30000 |        221 |    0.31000 |
 2018-10-03 14:38:15.000 |   12.60000 |        218 |    0.33000 |
Query OK, 2 row(s) in set (0.001100s)
```

To meet the needs of IoT scenarios, TDengine supports several special functions, such as twa (time-weighted average), spread (difference between maximum and minimum), and last_row (last record). More functions related to IoT scenarios will be added. TDengine also supports continuous queries.

For the specific query syntax, please see the [Data Query section of TAOS SQL](https://www.taosdata.com/cn/documentation/taos-sql#select).

## <a class="anchor" id="aggregation"></a> Multi-table Aggregation Query

In IoT scenarios, there are often multiple data collection points of the same type. TDengine uses the concept of a STable to describe a certain type of data collection point, and ordinary tables to describe specific data collection points. At the same time, TDengine uses tags to describe the static attributes of data collection points; a given data collection point has specific tag values. By specifying tag filters, TDengine provides an efficient way to aggregate and query the sub-tables of a STable (the data collection points of a certain type). Aggregation functions and most operations on ordinary tables also apply to STables, and the syntax is exactly the same.

**Example 1:** In TAOS Shell, look up the average voltage collected by all smart meters in Beijing, grouped by location:

```mysql
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
       avg(voltage)        |            location            |
=============================================================
             222.000000000 | Beijing.Haidian                |
             219.200000000 | Beijing.Chaoyang               |
Query OK, 2 row(s) in set (0.002136s)
```

**Example 2:** In TAOS Shell, look up the number of records in the past 24 hours and the maximum current of all smart meters with groupId 2:

```mysql
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
    count(*)  |   max(current)  |
==================================
           5 |            13.4 |
Query OK, 1 row(s) in set (0.002136s)
```

TDengine only allows aggregation queries over tables belonging to the same STable; aggregation queries across different STables are not supported. The Data Query section of TAOS SQL indicates for each query operation whether STables are supported.

## <a class="anchor" id="sampling"></a> Down Sampling Query, Interpolation

In IoT scenarios, it is often necessary to aggregate the collected data by time intervals through down sampling. TDengine provides a simple keyword, interval, which makes queries over time windows extremely simple. For example, the current values collected by smart meter d1001 can be summed every 10 seconds:

```mysql
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
           ts            |       sum(current)        |
======================================================
 2018-10-03 14:38:00.000 |             10.300000191 |
 2018-10-03 14:38:10.000 |             24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
```

The down sampling operation is also applicable to STables, for example, summing the current values collected by all smart meters in Beijing every second:

```mysql
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
           ts            |       sum(current)        |
======================================================
 2018-10-03 14:38:04.000 |             10.199999809 |
 2018-10-03 14:38:05.000 |             32.900000572 |
 2018-10-03 14:38:06.000 |             11.500000000 |
 2018-10-03 14:38:15.000 |             12.600000381 |
 2018-10-03 14:38:16.000 |             36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```

The down sampling operation also supports a time offset, for example, summing the current values collected by all smart meters every second, but requiring each time window to start at a 500 millisecond offset:

```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
           ts            |       sum(current)        |
======================================================
 2018-10-03 14:38:04.500 |             11.189999809 |
 2018-10-03 14:38:05.500 |             31.900000572 |
 2018-10-03 14:38:06.500 |             11.600000000 |
 2018-10-03 14:38:15.500 |             12.300000381 |
 2018-10-03 14:38:16.500 |             35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```

In IoT scenarios, it is difficult to synchronize the timestamps of the data collected at different points, while many analysis algorithms (such as FFT) require the data to be strictly aligned at equal time intervals. In many systems, users have to write their own programs to handle this, but the down sampling operation of TDengine solves it easily. If there is no collected data in an interval, TDengine also provides an interpolation calculation function, as sketched below.
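
As an illustration, the following is a minimal sketch of down sampling combined with interpolation, assuming the meters STable from the earlier examples; FILL(PREV) fills an empty window with the previous value, and other modes such as NULL, LINEAR, or a constant VALUE can be used instead:

```mysql
SELECT AVG(voltage) FROM meters WHERE ts > now - 1h INTERVAL(1m) FILL(PREV);
```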

For details of syntax rules, please refer to the [Time-dimension Aggregation section of TAOS SQL](https://www.taosdata.com/en/documentation/taos-sql#aggregation).

# Advanced Features

## <a class="anchor" id="continuous-query"></a> Continuous Query

A continuous query is a query executed periodically by TDengine over a sliding window; it is a simplified, timer-driven form of stream computing. A continuous query can be applied to a table or a STable automatically and periodically, and the result set can be passed to the application directly via a callback function or written into a new table in TDengine. The query is always executed on a specified time window (the window size is specified by the parameter interval), and this window slides forward as time flows (the sliding period is specified by the parameter sliding).

Continuous queries in TDengine are time-driven and can be defined directly in TAOS SQL without extra operations. Using continuous queries, results can be generated conveniently and quickly according to the time window, thereby down sampling the original collected data. After the user defines a continuous query through TAOS SQL, TDengine automatically starts the query at the end of the last complete time period and pushes the calculated results to the user or writes them back to TDengine.

The continuous query provided by TDengine differs from the time-window calculation in ordinary stream computing in the following ways:

- Unlike stream computing, which feeds back results in real time, a continuous query only starts calculating after the time window closes. For example, if the time period is one day, the results of that day will only be generated after 23:59:59.
- If a historical record is written into a time interval that has already been calculated, the continuous query will not recalculate, nor will it push the results to the user again. For the write-back mode, the existing calculated results will not be updated.
- When results are pushed, the server does not cache the client's calculation status, nor does it provide an exactly-once semantic guarantee. If the user's application crashes, the restarted continuous query will only recalculate from the latest complete time window after the restart time. If the write-back mode is used, TDengine can ensure the validity and continuity of the written-back data.

### How to use continuous query

The following uses the smart meter scenario to introduce the specific use of continuous queries. Suppose we create a STable and sub-tables through the following SQL statements:

```sql
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
create table D1001 using meters tags ("Beijing.Chaoyang", 2);
create table D1002 using meters tags ("Beijing.Haidian", 2);
...
```

We already know that the average voltage of these meters can be computed over a one-minute time window sliding forward every 30 seconds with the following SQL statement:

```sql
select avg(voltage) from meters interval(1m) sliding(30s);
```

Every time this statement is executed, all data will be recalculated. If you need to execute it every 30 seconds and incrementally calculate only the data of the latest minute, you can improve the statement as follows, using a different `startTime` each time and executing it regularly:

```sql
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```

There is no problem with this, but TDengine provides a simpler method: just add `create table {tableName} as` before the initial query statement, for example:

```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```

A new table named `avg_vol` will be created automatically, and then every 30 seconds TDengine will incrementally execute the SQL statement after `as` and write the query result into this table. The user program only needs to query the data from `avg_vol`. For example:

```mysql
taos> select * from avg_vol;
           ts            |       avg_voltage_        |
===================================================
 2020-07-29 13:37:30.000 |           222.0000000 |
 2020-07-29 13:38:00.000 |           221.3500000 |
 2020-07-29 13:38:30.000 |           220.1700000 |
 2020-07-29 13:39:00.000 |           223.0800000 |
```

It should be noted that the minimum size of the query time window is 10 milliseconds, and there is no upper limit on the time window range.

In addition, TDengine also allows users to specify the start and end times of a continuous query. If the start time is not specified, the continuous query starts from the time window in which the first original record falls; if no end time is specified, the continuous query runs permanently; if an end time is specified, the continuous query stops after the system time reaches it. For example, a continuous query created with the following SQL will run for one hour and then stop automatically:

```mysql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```

Note that `now` in the above example refers to the time when the continuous query is created, not the time when it is executed; otherwise the query could never stop automatically. In addition, to minimize problems caused by delayed writes of original data, continuous queries in TDengine are calculated with a certain delay. That is, after a time window has passed, TDengine does not immediately calculate the data of that window, so it takes a while (usually no more than one minute) for the results to become available.

### Manage the Continuous Query

Users can view all continuous queries running in the system through the `show streams` command in the console, and can kill a continuous query through the `kill stream` command. Later versions will provide finer-grained and more convenient continuous query management commands.
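
For example, a management session might look like the sketch below; the stream id shown is hypothetical and should be taken from the output of `show streams` on your own system, and the exact id format may vary between versions:

```mysql
show streams;
kill stream 1:1;
```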

## <a class="anchor" id="subscribe"></a> Publisher/Subscriber

Based on the natural time-series characteristics of data, data insertion in TDengine is logically consistent with data publishing (pub) in messaging systems; it can be regarded as a new record with a timestamp inserted into the system. At the same time, TDengine stores data in strict accordance with the monotonic increase of time series. Essentially, every table in TDengine can be regarded as a standard message queue.

TDengine supports embedded lightweight message subscription and publishing services. Using the API provided by the system, users can subscribe to one or more tables in the database using ordinary query statements. The maintenance of the subscription logic and operating status is completed by the client. The client regularly polls the server for new records, and the results are fed back to the client when new records arrive.

The status of the subscription and publishing services of TDengine is maintained by the client, not by the TDengine server. Therefore, if the application restarts, it is up to the application to decide from which point in time to obtain the latest data.

In TDengine, there are three main APIs relevant to subscription:

```c
taos_subscribe
taos_consume
taos_unsubscribe
```
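
Their prototypes, as declared in the `taos.h` header shipped with the TDengine 2.x client (check your installed header, as signatures may differ slightly between versions), look like this:

```c
// callback type used by asynchronous subscriptions
typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB *tsub, TAOS_RES *res, void *param, int code);

TAOS_SUB *taos_subscribe(TAOS *taos, int restart, const char *topic, const char *sql,
                         TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval);
TAOS_RES *taos_consume(TAOS_SUB *tsub);
void      taos_unsubscribe(TAOS_SUB *tsub, int keepProgress);
```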

Please refer to the [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/) documentation for these APIs. The following uses the smart meter scenario to introduce their specific usage (please refer to the previous section "Continuous Query" for the structure of the STable and sub-tables). The complete sample code can be found [here](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c).

If we want to be notified and take action when the current of a smart meter exceeds a certain limit (e.g. 10 A), there are two methods. One is to query each sub-table separately, record the timestamp of the last piece of data after each query, and then only query all data after that timestamp:

```sql
select * from D1001 where ts > {last_timestamp1} and current > 10;
select * from D1002 where ts > {last_timestamp2} and current > 10;
...
```

This is indeed feasible, but as the number of meters increases, the number of queries also increases, and the performance of both the client and the server suffers, until the system can no longer cope.

The other method is to query the STable. In this way, no matter how many meters there are, only one query is required:

```sql
select * from meters where ts > {last_timestamp} and current > 10;
```

However, how to choose `last_timestamp` becomes a new problem. On the one hand, the time of data generation (the data timestamp) and the time of data storage are generally not the same, and sometimes the deviation is very large; on the other hand, the time at which the data of different meters arrives at TDengine also varies. Therefore, if we use the timestamp of the data from the slowest meter as `last_timestamp` in the query, we may repeatedly read the data of other meters; if we use the timestamp of the fastest meter, the data of other meters may be missed.

The subscription function of TDengine provides a thorough solution to this problem.

First, use `taos_subscribe` to create a subscription:

```c
TAOS_SUB* tsub = NULL;
if (async) {
  // create an asynchronous subscription; the callback function will be called every 1s
  tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
} else {
  // create a synchronous subscription; 'taos_consume' needs to be called manually
  tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
}
```

Subscriptions in TDengine can be either synchronous or asynchronous, and the above code decides which method to use based on the value of the parameter `async` obtained from the command line. Here, synchronous means that the user program calls `taos_consume` directly to pull data, while asynchronous means that the API calls `taos_consume` in an internal thread and then hands the pulled data to the callback function `subscribe_callback` for processing.

The parameter `taos` is an established database connection; it has no special requirements in synchronous mode. In asynchronous mode, however, it must not be used by other threads, otherwise unpredictable errors may occur, because the callback function is called in an internal thread of the API and some APIs of TDengine are not thread-safe.

The parameter `sql` is a query statement in which you can specify filters using a where clause. In our example, if you only want to subscribe to data where the current exceeds 10 A, you can write:

```sql
select * from meters where current > 10;
```

Note that the starting time is not specified here, so the data of all meters will be read. If you only want to start subscribing from the data of one day ago and do not need earlier historical data, you can add a time condition:

```sql
select * from meters where ts > now - 1d and current > 10;
```

The `topic` of the subscription is actually its name. Because the subscription function is implemented in the client API, it does not need to be globally unique, but it must be unique on a single client machine.

If the subscription named `topic` does not exist, the parameter `restart` is meaningless. However, if the user program exits after creating this subscription, then when it starts again and reuses this `topic`, `restart` decides whether to read data from scratch or from the previous position. In this example, if `restart` is **true** (a non-zero value), the user program will definitely read all the data. However, if this subscription existed before, some data has already been read, and `restart` is **false** (zero), the user program will not read the previously read data.

The last parameter of `taos_subscribe` is the polling period in milliseconds. In synchronous mode, if the interval between two calls to `taos_consume` is less than this time, `taos_consume` blocks until the interval exceeds it. In asynchronous mode, this time is the minimum interval between two calls of the callback function.

The penultimate parameter of `taos_subscribe` is used by the user program to pass additional parameters to the callback function; the subscription API hands it to the callback function as-is without any processing. This parameter is meaningless in synchronous mode.

After the subscription is created, its data can be consumed. In synchronous mode, the sample code is the `else` branch below:

```c
if (async) {
  getchar();
} else while(1) {
  TAOS_RES* res = taos_consume(tsub);
  if (res == NULL) {
    printf("failed to consume data.");
    break;
  } else {
    print_result(res, blockFetch);
    getchar();
  }
}
```

Here is a **while** loop: every time the user presses the Enter key, `taos_consume` is called, and its return value is a query result set that is handled exactly the same way as the result of `taos_use_result`. In the example, the code that uses this result set is the function `print_result`:

```c
void print_result(TAOS_RES* res, int blockFetch) {
  TAOS_ROW    row = NULL;
  int         num_fields = taos_num_fields(res);
  TAOS_FIELD* fields = taos_fetch_fields(res);
  int         nRows = 0;
  if (blockFetch) {
    nRows = taos_fetch_block(res, &row);
    for (int i = 0; i < nRows; i++) {
      char temp[256];
      taos_print_row(temp, row + i, fields, num_fields);
      puts(temp);
    }
  } else {
    while ((row = taos_fetch_row(res))) {
      char temp[256];
      taos_print_row(temp, row, fields, num_fields);
      puts(temp);
      nRows++;
    }
  }
  printf("%d rows consumed.\n", nRows);
}
```

Here, `taos_print_row` is used to process the subscribed data; in our example, it prints out all matching records. In asynchronous mode, consuming the subscribed data is simpler:

```c
void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
  print_result(res, *(int*)param);
}
```

To end a data subscription, you need to call `taos_unsubscribe`:

```c
taos_unsubscribe(tsub, keep);
```

Its second parameter decides whether to keep the subscription progress information on the client. If this parameter is **false** (zero), the subscription can only restart from scratch the next time `taos_subscribe` is called, no matter what its `restart` parameter is. The progress information is saved in the directory {DataDir}/subscribe/; each subscription has a file with the same name as its `topic`, and deleting that file also causes the corresponding subscription to start from scratch when it is created next time.

After introducing the code, let's take a look at the actual running effect. Assume that:

- the sample code has been downloaded locally;
- TDengine has been installed on the same machine;
- all the databases, STables and sub-tables required by the example have been created.

You can compile and start the sample program by executing the following commands in the directory where the sample code is located:

```shell
$ make
$ ./subscribe -sql='select * from meters where current > 10;'
```

After the sample program starts, open another terminal window and start a TDengine shell to insert a record with a current of 12 A into **D1001**:

```shell
$ taos
> use test;
> insert into D1001 values(now, 12, 220, 1);
```

At this time, because the current exceeds 10 A, you should see the sample program output it to the screen. You can continue to insert data and observe the output of the sample program.

### Use data subscription in Java

The subscription function also provides a Java development interface, as described in the [Java Connector](https://www.taosdata.com/cn/documentation/connector/) documentation. Note that the Java interface does not currently provide an asynchronous subscription mode, but user programs can achieve the same effect by creating a TimerTask.

The following example introduces its specific use. It does essentially the same thing as the C language example described earlier: it subscribes to all records in the database whose current exceeds 10 A.

#### Prepare data

```sql
# Create the power database
taos> create database power;
# Switch to the database
taos> use power;
# Create a STable
taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
# Create sub-tables
taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
# Insert test data
taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
# Query all records with current over 10A from the STable meters
taos> select * from meters where current > 10;
           ts            |    current     |    voltage     |  phase  |         location          |   groupid   |
===========================================================================================================
 2020-08-15 12:10:00.000 |       10.30000 |            220 |       1 |     Beijing.Haidian       |           2 |
 2020-08-15 12:20:00.000 |       11.20000 |            220 |       1 |     Beijing.Haidian       |           2 |
 2020-08-15 12:00:00.000 |       12.00000 |            220 |       1 |     Beijing.Chaoyang      |           2 |
 2020-08-15 12:10:00.000 |       12.30000 |            220 |       2 |     Beijing.Chaoyang      |           2 |
 2020-08-15 12:20:00.000 |       12.20000 |            220 |       1 |     Beijing.Chaoyang      |           2 |
Query OK, 5 row(s) in set (0.004896s)
```

#### Example

```java
import java.sql.*;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;

public class SubscribeDemo {
    private static final String topic = "topic-meter-current-bg-10";
    private static final String sql = "select * from meters where current > 10";

    public static void main(String[] args) {
        Connection connection = null;
        TSDBSubscribe subscribe = null;

        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
            String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/power?user=root&password=taosdata";
            connection = DriverManager.getConnection(jdbcUrl, properties);
            subscribe = ((TSDBConnection) connection).subscribe(topic, sql, true); // create a subscription
            int count = 0;
            while (count < 10) {
                TimeUnit.SECONDS.sleep(1); // wait 1 second to avoid calling consume too frequently and putting pressure on the server
                TSDBResultSet resultSet = subscribe.consume(); // consume data
                if (resultSet == null) {
                    continue;
                }
                ResultSetMetaData metaData = resultSet.getMetaData();
                while (resultSet.next()) {
                    int columnCount = metaData.getColumnCount();
                    for (int i = 1; i <= columnCount; i++) {
                        System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i) + "\t");
                    }
                    System.out.println();
                    count++;
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (null != subscribe)
                    subscribe.close(true); // close the subscription
                if (connection != null)
                    connection.close();
            } catch (SQLException throwables) {
                throwables.printStackTrace();
            }
        }
    }
}
```

Run the sample program. First, it consumes all the historical data that meets the query conditions:

```shell
# java -jar subscribe.jar

ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
```

Then, add a record to the table via the taos client:

```sql
# taos
taos> use power;
taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
```

Because the current of this record is greater than 10 A, the sample program will consume it:

```shell
ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
```

## <a class="anchor" id="cache"></a> Cache

TDengine adopts a time-driven cache management strategy (First-In-First-Out, FIFO), also known as a write-driven cache management mechanism. This strategy differs from the read-driven data caching mode (Least-Recently-Used, LRU) and directly keeps the most recently written data in the system buffer; when the buffer reaches a threshold, the oldest data is written to disk in batches. Generally speaking, for IoT data, users are most concerned about the most recently generated data, that is, the current status. TDengine takes full advantage of this characteristic by keeping the most recently arrived (current status) data in the buffer.

TDengine provides millisecond-level data collection capability to users through query functions. Keeping the most recently arrived data directly in the buffer allows faster responses to users' queries and analyses of the latest record or batch of records, and faster database query responses overall. In this way, by setting appropriate configuration parameters, TDengine can be used as a data cache without deploying an additional caching system, which effectively simplifies the system architecture and reduces operating costs. Note that after TDengine restarts, the system buffer is emptied, the previously cached data is written to disk in batches, and, unlike dedicated key-value caching systems, the previously cached data is not reloaded into the buffer.

TDengine allocates a fixed-size memory space as the buffer, which can be configured according to application requirements and hardware resources. By setting the buffer space properly, TDengine can provide extremely high-performance writes and queries. Each virtual node (vnode) in TDengine is allocated its own cache pool when it is created; each vnode manages its own cache pool, and different vnodes do not share a pool. All tables belonging to a vnode share its cache pool.

TDengine manages the memory pool in blocks, and the data within is stored in rows. The memory pool of a vnode is allocated in blocks when the vnode is created, and each memory block is managed according to the First-In-First-Out strategy. The size of the blocks is determined by the system configuration parameter cache, and the number of memory blocks per vnode by the configuration parameter blocks; so for a vnode, the total memory is cache * blocks. To be efficient, a cache block needs to be able to hold at least a few dozen records per table.
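
For example, the statement below is a sketch of setting these two parameters when creating a database; the numbers are illustrative only (cache is the block size in MB, and blocks is the number of blocks per vnode):

```mysql
create database demo cache 16 blocks 6;
```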

You can quickly obtain the last record of a table or a STable through the function last_row, which is very convenient for showing the real-time status or latest collected values of each device on a large screen. For example:

```mysql
select last_row(voltage) from meters where location='Beijing.Chaoyang';
```

This SQL statement obtains the latest recorded voltage value of all smart meters located in Chaoyang District, Beijing.

## <a class="anchor" id="alert"></a> Alert

In TDengine scenarios, alarm monitoring is a common requirement. Conceptually, it requires the program to filter, from the data of the latest period of time, the records that meet certain conditions, calculate a result from them according to a defined formula, and, when the result meets certain conditions and lasts for a certain period of time, notify the user in some form.

In order to meet users' needs for alarm monitoring, TDengine provides this function in the form of an independent module. For its installation and use, please refer to the blog [How to Use TDengine for Alarm Monitoring](https://www.taosdata.com/blog/2020/04/14/1438.html).

# Connections with Other Tools

## <a class="anchor" id="grafana"></a> Grafana

TDengine can be quickly integrated with [Grafana](https://www.grafana.com/), an open source data visualization system, to build a data monitoring and alarm system; the whole process requires no coding. The contents of data tables in TDengine can be visualized on a dashboard.

### Install Grafana

TDengine currently supports Grafana 5.2.4 and above. You can download an installation package for your operating system from the Grafana website:

https://grafana.com/grafana/download.

### Configure Grafana

The TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.

Taking CentOS 7.2 as an example, just copy the grafanaplugin directory to the /var/lib/grafana/plugins directory and restart Grafana:

```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```

### Use Grafana

#### Configure data source

You can log in to the Grafana server (username/password: admin/admin) at localhost:3000 and add a data source through `Configuration -> Data Sources` on the left panel, as shown in the following figure:

![img](page://images/connections/add_datasource1.jpg)

Click `Add data source` to enter the Add Data Source page, type TDengine in the search box and select it, as shown in the following figure:

![img](page://images/connections/add_datasource2.jpg)

Enter the data source configuration page and modify the corresponding configuration according to the default prompts:

![img](page://images/connections/add_datasource3.jpg)

- Host: the IP address of any server in the TDengine cluster and the port number of the TDengine RESTful interface (6041); by default [http://localhost:6041](http://localhost:6041/).
- User: TDengine username.
- Password: TDengine user password.

Click `Save & Test` to test the connection; on success, you will see a prompt as follows:

![img](page://images/connections/add_datasource4.jpg)

#### Create Dashboard

Go back to the home page to create a dashboard, and click `Add Query` to enter the panel query page:

![img](page://images/connections/create_dashboard1.jpg)

As shown in the figure above, select the TDengine data source in Query, and enter the corresponding SQL in the query box below. Details are as follows:

- INPUT SQL: enter the statement to query (the result set of the SQL statement should be two columns and multiple rows), for example `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where `from`, `to` and `interval` are built-in variables of the TDengine plugin, representing the query range and time interval obtained from the Grafana panel. In addition to the built-in variables, custom template variables are also supported.
- ALIAS BY: you can set an alias for the current query.
- GENERATE SQL: clicking this button automatically replaces the corresponding variables and generates the final statement to execute.

Following the default prompts, query the average system memory usage of the server where the current TDengine is deployed at the specified interval, as follows:

![img](page://images/connections/create_dashboard2.jpg)

> Please refer to the Grafana [documentation](https://grafana.com/docs/) for how to use Grafana to create the corresponding monitoring interface and for more about Grafana usage.

#### Import Dashboard

An importable dashboard, `tdengine-grafana.json`, is provided under the Grafana plugin directory /usr/local/taos/connector/grafana/tdengine/dashboard/.

Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:

![img](page://images/connections/import_dashboard1.jpg)

You will see the following after the dashboard is imported:

![img](page://images/connections/import_dashboard2.jpg)

## <a class="anchor" id="matlab"></a> MATLAB

MATLAB can connect directly to TDengine via the JDBC driver provided in the installation package and load data into its local workspace.

### JDBC Interface Adaptation of MATLAB

Several steps are required to adapt MATLAB to TDengine. Taking the adaptation of MATLAB R2017a on Windows 10 as an example:

- Copy the file JDBCDriver-1.0.0-dist.jar from the TDengine package to the directory ${matlab_root}\MATLAB\R2017a\java\jar\toolbox.
- Copy the file taos.lib from the TDengine package to ${matlab_root}\MATLAB\R2017a\lib\win64.
- Add the .jar package just copied to the MATLAB classpath by appending the following line to the end of the file ${matlab_root}\MATLAB\R2017a\toolbox\local\classpath.txt:

  ```
  $matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar
  ```

- Create a file named javalibrarypath.txt in the directory ${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a, and add the taos.dll path in the file. For example, if the file taos.dll is in the directory C:\Windows\System32, then add the following line to javalibrarypath.txt:

  ```
  C:\Windows\System32
  ```

### Connect to TDengine in MATLAB to get data

After the above configuration is completed successfully, open MATLAB.

- Create a connection:

```matlab
conn = database('db', 'root', 'taosdata', 'com.taosdata.jdbc.TSDBDriver', 'jdbc:TSDB://127.0.0.1:0/')
```

- Make a query:

```matlab
sql0 = ['select * from tb']
data = select(conn, sql0);
```

- Insert a record:

```matlab
sql1 = ['insert into tb values (now, 1)']
exec(conn, sql1)
```

For more detailed examples, please refer to the examples\Matlab\TDEngineDemo.m file in the package.

## <a class="anchor" id="r"></a> R

The R language supports connecting to the TDengine database through the JDBC interface. First, install the JDBC package for R: launch the R environment and execute the following command to install the JDBC support library:

```R
install.packages('RJDBC', repos='http://cran.us.r-project.org')
```

After installation, load the RJDBC package by executing the `library('RJDBC')` command.

Then load the TDengine JDBC driver:

```R
drv<-JDBC("com.taosdata.jdbc.TSDBDriver","JDBCDriver-2.0.0-dist.jar", identifier.quote="\"")
```

If this succeeds, no error message will be displayed. Then use the following command to try a database connection:

```R
conn<-dbConnect(drv,"jdbc:TSDB://192.168.0.1:0/?user=root&password=taosdata","root","taosdata")
```

Please replace the IP address in the command above with the correct one. If no error message is shown, the connection is established successfully; otherwise, adjust the connection command according to the error prompt. TDengine supports the following functions of the *RJDBC* package:

- `dbWriteTable(conn, "test", iris, overwrite=FALSE, append=TRUE)`: write the data of data frame iris to table test in the TDengine server. The parameter overwrite must be FALSE and append must be TRUE, and the schema of the data frame iris must be the same as that of the table test.
- `dbGetQuery(conn, "select count(*) from test")`: run a query command.
- `dbSendUpdate(conn, "use db")`: execute any non-query SQL statement, for example `dbSendUpdate(conn, "use db")`, or write data with `dbSendUpdate(conn, "insert into t1 values (now, 99)")`, and the like.
- `dbReadTable(conn, "test")`: read all the data in table test.
- `dbDisconnect(conn)`: close a connection.
- `dbRemoveTable(conn, "test")`: remove table test.

The following functions are not supported currently:

- `dbExistsTable(conn, "test")`: check whether table test exists.
- `dbListTables(conn)`: list all tables in the connection.

# Installation and Management of TDengine Cluster

Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster, ensuring highly reliable operation of TDengine and providing scale-out capability. To understand cluster management in TDengine 2.0, it is necessary to understand the basic concepts of clustering; please refer to the chapter "Overall Architecture of TDengine 2.0". Before deploying a cluster, please first follow the chapter ["Getting started"](https://www.taosdata.com/en/documentation/getting-started/) to install and experience the single-node functionality.

Each data node of the cluster is uniquely identified by its End Point, which is composed of an FQDN (Fully Qualified Domain Name) plus a port, such as h1.taosdata.com:6030. The FQDN is generally the hostname of the server, which can be obtained with the Linux command `hostname -f` (for how to configure an FQDN, please refer to: [All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)). The port is the external service port number of the data node; the default is 6030, but it can be modified by configuring the parameter serverPort in taos.cfg. A physical node may be configured with multiple hostnames; TDengine automatically picks the first one, but the hostname can also be specified through the configuration parameter fqdn in taos.cfg. If you are accustomed to direct IP address access, you can set the parameter fqdn to the IP address of the node.

Cluster management in TDengine is extremely simple: apart from manually adding and deleting nodes, all other tasks are completed automatically, minimizing the operational workload. This chapter describes cluster management operations in detail.

Please refer to the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1961.html) for cluster building.

## <a class="anchor" id="prepare"></a> Preparation

**Step 0:** Plan the FQDNs of all physical nodes in the cluster; add each planned FQDN to /etc/hostname of the corresponding physical node; modify /etc/hosts of every physical node to add the IP-to-FQDN mappings of all cluster physical nodes. (If DNS is deployed, contact your network administrator to configure it on the DNS server.)

**Step 1:** If the physical nodes carry previous test data, or were installed with version 1.x or another version of TDengine, delete that installation first and drop all data. For specific steps, please refer to the blog "[Installation and Uninstallation of Various Packages of TDengine](https://www.taosdata.com/blog/2019/08/09/566.html)".

**Note 1:** Because the FQDN information is written into files, if the FQDN was not configured or was changed before, and TDengine has already been started, be sure to clean up the previous data (`rm -rf /var/lib/taos/*`), on the premise that the data is useless or has been backed up.

**Note 2:** The client also needs to be able to resolve the FQDN of each node correctly, whether through a DNS service or the hosts file.

**Step 2:** It is recommended to close the firewall of all physical nodes, or at least to ensure that TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports after the cluster is built.

**Step 3:** Install TDengine on all physical nodes, and the version must be consistent, **but do not start taosd yet**. During installation, when prompted whether to join an existing TDengine cluster, just press Enter on the first physical node to create a new cluster; on each subsequent physical node, enter the FQDN:port (default 6030) of any physical node already online in the cluster.

**Step 4:** Check the network settings of all data nodes and of the physical nodes where the applications run:

1. Execute the command `hostname -f` on each physical node and confirm that the hostnames of all nodes are different (the node where the application driver is located does not need this check).
2. Execute `ping host` on each physical node, where host is the hostname of another physical node, to check that the other physical nodes can be reached. If not, check the network settings, the /etc/hosts file (the default path on Windows is C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. If the nodes cannot reach each other, the cluster cannot be built.
3. From the physical node where the application runs, ping the data nodes where taosd runs; if the ping fails, the application cannot connect to taosd. In that case, check the DNS settings or the hosts file of the physical node where the application is located.
4. The End Point of each data node is the hostname obtained above plus the port number, for example h1.taosdata.com:6030.

**Step 5:** Modify the TDengine configuration file (the file /etc/taos/taos.cfg needs to be modified on all nodes). Assume that the first data node to be started has End Point h1.taosdata.com:6030; its cluster-related parameters are as follows:

```
// firstEp is the first data node connected after each data node's first launch
firstEp               h1.taosdata.com:6030
// Must be configured as the FQDN of this data node. If this machine has only one hostname, you can comment out this configuration
fqdn                  h1.taosdata.com
// Configure the port number of this data node; the default is 6030
serverPort            6030
// For application scenarios, please refer to the section "Use of Arbitrator"
arbitrator            ha.taosdata.com:6042
```

The parameters that must be modified are firstEp and fqdn. firstEp must be configured to the same value on every data node, **but fqdn must be configured to the FQDN of the data node where it is located**. Other parameters need not be modified unless you have a clear reason.

**A data node (dnode) to be added to the cluster must have exactly the same values as the cluster for the 11 parameters in the following table; otherwise it cannot join the cluster.**

| **#** | **Configuration Parameter Name** | **Description**                                                 |
| ----- | -------------------------------- | --------------------------------------------------------------- |
| 1     | numOfMnodes                      | Number of management nodes in the system                         |
| 2     | mnodeEqualVnodeNum               | Number of vnodes an mnode is counted as (for load calculation)   |
| 3     | offlineThreshold                 | Threshold for judging whether a dnode is offline                 |
| 4     | statusInterval                   | Interval at which a dnode reports its status to the mnode        |
| 5     | arbitrator                       | End Point of the arbitrator in the system                        |
| 6     | timezone                         | Time zone                                                        |
| 7     | locale                           | Location information and coding format of the system             |
| 8     | charset                          | Character set encoding                                           |
| 9     | balance                          | Whether to enable load balancing                                 |
| 10    | maxTablesPerVnode                | Maximum number of tables that can be created in each vnode       |
| 11    | maxVgroupsPerDb                  | Maximum number of vgroups that can be used per DB                |

## <a class="anchor" id="node-one"></a> Launch the First Data Node

Following the instructions in "[Getting started](https://www.taosdata.com/en/documentation/getting-started/)", launch the first data node, for example h1.taosdata.com, then execute taos to start the TAOS shell and run the command "show dnodes", as follows:

```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.

taos> show dnodes;
 id |      end_point      | vnodes | cores | status | role |       create_time       |
=====================================================================================
  1 |  h1.taos.com:6030   |      0 |     2 | ready  | any  | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)

taos>
```

In the above output, you can see that the End Point of the newly launched data node is h1.taos.com:6030, which is the firstEp of the new cluster.

## <a class="anchor" id="node-other"></a> Launch Subsequent Data Nodes

To add subsequent data nodes to the existing cluster, follow these steps:

1. Start taosd on each physical node according to the chapter "[Getting started](https://www.taosdata.com/en/documentation/getting-started/)";

2. On the first data node, use the CLI program taos to log in to TDengine and execute the command:

   ```
   CREATE DNODE "h2.taos.com:6030";
   ```

   This adds the End Point of the new data node (learned in Step 4 of the preparation) to the cluster's EP list. **"fqdn:port" needs to be enclosed in double quotation marks**, otherwise an error will occur. Note that the example h2.taos.com:6030 should be replaced with the End Point of this new data node.

3. Then execute the command

   ```
   SHOW DNODES;
   ```

   to check whether the new node joined successfully. If the added data node is offline, check:

   - whether the taosd of this data node is working properly; if it is not, find out why first;
   - the first few lines of this data node's taosd log file taosdlog.0 (usually in the /var/log/taos directory) to see whether the FQDN and port number output in the log match the End Point just added; if not, add the correct End Point.

According to the above steps, new data nodes can be continuously added to the cluster.

**Tips:**

- Any data node that is already online in the cluster can serve as the firstEp of nodes joining later.
- firstEp is only effective when a data node joins the cluster for the first time; after joining, the data node saves the latest End Point list of the mnodes and no longer relies on this parameter.
- Two dnodes that are not configured with the firstEp parameter will run independently after startup; in this case it is not possible to join one to the other to form a cluster, and **you cannot merge two independent clusters into a new cluster** either.

## <a class="anchor" id="management"></a> Data Node Management

The above introduced how to build a cluster from scratch. After the cluster is formed, new data nodes can be added at any time for expansion, data nodes can be deleted, and the current status of the cluster can be checked.

### Add data nodes

Execute the CLI program taos, log in to the system using the root account, and execute:

```
CREATE DNODE "fqdn:port";
```

This adds the End Point of the new data node to the cluster's EP list. **"fqdn:port" needs to be enclosed in double quotation marks**, otherwise an error will occur. The fqdn and port of a data node's external service can be configured in the file taos.cfg; by default they are obtained automatically. (Relying on automatic acquisition of the FQDN is strongly discouraged, as the resulting End Point of the data node may not be what you expect.)

### Delete data nodes

Execute the CLI program taos, log in to the TDengine system using the root account, and execute:

```
DROP DNODE "fqdn:port";
```

Here fqdn is the FQDN of the node to be deleted, and port is the port number of its external service.

<font color=green>**【Note】**</font>

- Once a data node is dropped, it cannot rejoin the cluster; the node needs to be redeployed (with its data folder emptied). The cluster migrates the data off the dnode before completing the drop dnode operation.
- Note that dropping a dnode and stopping the taosd process are two different concepts; do not confuse them. Because the data migration must be performed before a dnode is deleted, the dnode being deleted must remain online, and its taosd process can only be stopped after the delete operation completes.
- After a data node is dropped, other nodes will perceive the deletion of its dnodeID, and no node in the cluster will accept requests with that dnodeID.
- dnodeIDs are assigned automatically by the cluster and cannot be specified manually; they increase monotonically and are never reused.

### View data nodes

Execute the CLI program taos, log in to the TDengine system using the root account, and execute:

```
SHOW DNODES;
```

This lists all dnodes in the cluster, with each dnode's fqdn:port, status (ready, offline, etc.), number of vnodes, and number of unused vnodes. You can use this command to check the cluster after adding or deleting a data node.

### View virtual node groups

In order to make full use of multi-core technology and provide scalability, data needs to be processed in partitions. Therefore, TDengine splits the data of a DB into multiple parts and stores them in multiple vnodes. These vnodes may be distributed over multiple dnodes, thus achieving scale-out. A vnode belongs to only one DB, but a DB can have multiple vnodes. vnodes are allocated automatically by the mnode according to the current system resources, without any manual intervention.

Execute the CLI program taos, log in to the TDengine system using the root account, and execute:

```
SHOW VGROUPS;
```

## <a class="anchor" id="high-availability"></a> High-availability of vnode

TDengine provides high availability of the system through a multi-replica mechanism, covering both vnodes and mnodes.

The number of replicas of a vnode is associated with a DB; a cluster can contain multiple DBs, and each DB can be configured with its own number of replicas according to operational requirements. When creating a database, specify the number of replicas with the parameter replica (the default is 1). With a single replica, the reliability of the system cannot be guaranteed: as soon as the node holding the data goes down, the service becomes unavailable. The number of nodes in the cluster must be greater than or equal to the number of replicas, otherwise the error "more dnodes are needed" will be returned when creating a table. For example, the following command creates a database demo with 3 replicas:

```
CREATE DATABASE demo replica 3;
```
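
The number of replicas of an existing database can also be changed online. The statement below is a sketch, assuming the demo database created above:

```
ALTER DATABASE demo REPLICA 2;
```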
|
||||
|
||||
The data in a DB will be partitioned and splitted into multiple vnode groups. The number of vnodes in a vnode group is the number of replicas of the DB, and the data of each vnode in the same vnode group is completely consistent. In order to ensure high-availability, the vnodes in a vnode group must be distributed in different dnode data nodes (in actual deployment, they need to be on different physical machines). As long as more than half of the vnodes in a vgroup are working, the vgroup can be normally serving.
|
||||
|
||||
A dnode may hold data from multiple DBs, so when a dnode goes offline, multiple DBs may be affected. If half or more of the vnodes in a vnode group are not working, the vnode group cannot serve externally and cannot insert or read data, which affects the read and write operations of some tables in the DB it belongs to.
|
||||
|
||||
Because of the introduction of vnodes, one cannot simply conclude that "if more than half of the dnodes in the cluster are working, the cluster is operational." But for simple cases it is easy to judge. For example, with 3 replicas and only 3 dnodes, the whole cluster can still work normally if one node is down, but it cannot work normally if two dnodes are down.
|
||||
|
||||
## <a class="anchor" id="mnode"></a> High-availability of mnode
|
||||
|
||||
The TDengine cluster is managed by mnode (a module of taosd, the management node). To ensure high availability of the mnode, multiple mnode replicas can be configured. The number of replicas is determined by the system configuration parameter numOfMnodes, with a valid range of 1-3. To ensure strong consistency of metadata, mnode replicas replicate data synchronously.
|
||||
|
||||
A cluster has multiple dnodes, but a dnode runs at most one mnode instance. With multiple dnodes, which ones act as mnodes? This is decided automatically by the system based on overall resource usage. The user can execute the following command in the TDengine console through the CLI program taos:
|
||||
|
||||
```
|
||||
SHOW MNODES;
|
||||
```
|
||||
|
||||
This shows the list of mnodes, including the End Point and role (master, slave, unsynced, or offline) of each dnode hosting an mnode. When the first data node in the cluster starts, it must run an mnode instance, otherwise it cannot work properly, because a system must have at least one mnode. If numOfMnodes is configured to 2, the second dnode to start also runs an mnode instance.
|
||||
|
||||
To ensure high availability of the mnode service, numOfMnodes must be set to 2 or greater. Because the metadata kept by the mnode must be strongly consistent, if numOfMnodes is greater than 2, the replication parameter quorum is automatically set to 2; that is, at least two replicas must acknowledge a write before the client application is notified of success.
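A minimal taos.cfg sketch for this setting, assuming two dnodes should each host an mnode replica:

```
# taos.cfg on every dnode: run two synchronously replicated mnodes
numOfMnodes 2
```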
|
||||
|
||||
**Note:** For a highly available TDengine system, both vnodes and mnodes must be configured with multiple replicas.
|
||||
|
||||
## <a class="anchor" id="load-balancing"></a> Load Balancing
|
||||
|
||||
There are three situations that trigger load balancing; no manual intervention is required in any of them.
|
||||
|
||||
- When a new data node is added to the cluster, the system will automatically trigger load balancing, and the data on some nodes will be automatically migrated to the new data node without any manual intervention.
|
||||
- When a data node is removed from the cluster, the system will automatically migrate the data on the data node to other data nodes without any manual intervention.
|
||||
- If a data node is overloaded (for example, it holds too much data), the system will automatically perform load balancing and migrate some vnodes of that data node to other nodes.
|
||||
|
||||
When any of the above three situations occurs, the system computes the load of each data node to decide how to migrate.
|
||||
|
||||
**[Tip] Load balancing is controlled by the parameter balance, which determines whether automatic load balancing is enabled.**
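For example, automatic load balancing could be switched off with a one-line change in taos.cfg (a sketch; 1 is the default):

```
# taos.cfg: 0 disables automatic load balancing, 1 enables it (default)
balance 0
```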
|
||||
|
||||
## <a class="anchor" id="offline"></a> Offline Processing of Data Nodes
|
||||
|
||||
If a data node goes offline, the TDengine cluster will automatically detect it. There are two cases:
|
||||
|
||||
- If the data node stays offline longer than a certain period (the configuration parameter offlineThreshold in taos.cfg controls this duration; see the config sketch after this list), the system automatically removes the data node, generates a system alarm, and triggers the load-balancing process. If the removed data node comes online again, it cannot rejoin the cluster; the system administrator must add it to the cluster again.
|
||||
- If the node comes back online within the offlineThreshold duration, the system automatically starts the data recovery process; after the data is fully recovered, the node resumes normal operation.
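As a sketch of the threshold mentioned above, the duration is set in taos.cfg in seconds; the value below is the default of 10 days:

```
# taos.cfg: remove a dnode from the cluster after 10 days offline
offlineThreshold 864000
```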
|
||||
|
||||
**Note:** If all the data nodes belonging to a virtual node group (including the mnode group) are offline or in unsynced state, a master can only be elected after all the data nodes in the group come online and can exchange status information; only then can the virtual node group serve externally. For example, suppose the whole cluster has 3 data nodes and 3 replicas. If all 3 data nodes go down and only 2 restart, the cluster does not work; only when all 3 data nodes have restarted successfully can it serve externally again.
|
||||
|
||||
## <a class="anchor" id="arbitrator"></a> How to Use Arbitrator
|
||||
|
||||
If the number of replicas is even, a master cannot be elected in a vnode group when half of the vnodes are not working. Similarly, when half of the mnodes are not working, a master mnode cannot be elected because of the "split brain" problem. To solve this, TDengine introduces the concept of an Arbitrator. The Arbitrator simulates a vnode or mnode but is only responsible for network connectivity; it does not handle any data insertion or access. As long as more than half of the vnodes or mnodes, including the Arbitrator, are working, the vnode group or mnode group can provide data insertion and query services normally. For example, with 2 replicas, if node A is offline but node B is online and can connect to the Arbitrator, then node B can work normally.
|
||||
|
||||
In short, in the current version, TDengine recommends configuring an Arbitrator in double-replica environments to improve availability.
|
||||
|
||||
The executable for the Arbitrator is named tarbitrator. It has almost no system resource requirements, only needs a network connection, and can run on any Linux server. The installation and configuration steps are briefly described below:
|
||||
|
||||
|
||||
|
||||
1. Click [Package Download](https://www.taosdata.com/cn/all-downloads/), and in the TDengine Arbitrator Linux section, select the appropriate version to download and install.
|
||||
2. The command-line parameter -p of this application specifies the port number of its service; the default is 6042.
|
||||
3. Modify the configuration file of each taosd instance by setting the parameter arbitrator in taos.cfg to the End Point of the tarbitrator (see the sketch after this list). If this parameter is configured, the system automatically connects to the configured Arbitrator when the number of replicas is even; when the number of replicas is odd, the system does not establish a connection even if an Arbitrator is configured.
|
||||
4. The Arbitrator configured in the configuration file appears in the output of the `SHOW DNODES` instruction; the value of the corresponding role column is "arb".
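Putting steps 2 and 3 together, a minimal sketch (the arbitrator host name is hypothetical):

```
# on a spare Linux host: start tarbitrator on its default port 6042
nohup tarbitrator -p 6042 &

# in taos.cfg of every taosd instance: point to the arbitrator's End Point
arbitrator arb01.example.com:6042
```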
|
||||
|
|
@ -0,0 +1,511 @@
|
|||
# TDengine Operation and Maintenance
|
||||
|
||||
## <a class="anchor" id="planning"></a> Capacity Planning
|
||||
|
||||
When using TDengine to build an IoT big data platform, computing and storage resources need to be planned according to business scenarios. The following discusses the memory, CPU and hard disk space required for the system to run.
|
||||
|
||||
### Memory requirements
|
||||
|
||||
Each DB can create a fixed number of vgroups, which defaults to the number of CPU cores and can be configured with maxVgroupsPerDb; each replica in a vgroup is a vnode; each vnode occupies a fixed amount of memory (the size is related to the database configuration parameters blocks and cache); each table occupies memory proportional to its total tag length; in addition, the system has some fixed memory overhead. Therefore, the memory required for each DB can be estimated by the following formula:
|
||||
|
||||
```
|
||||
Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
|
||||
```
|
||||
|
||||
Example: assuming a 4-core machine, cache at the default 16 MB and blocks at the default 6, with 100,000 tables and a total tag length of 256 bytes, the total memory requirement is: 4 * (16 * 6 + 10) + 100,000 * (0.25 + 0.5) / 1000 = 499 MB.
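The same arithmetic can be verified quickly in a shell; this is a sketch of the formula above, not a TDengine tool:

```bash
# 4 vgroups * (16 MB * 6 blocks + 10 MB) + 100,000 tables * (0.25 KB + 0.5 KB) / 1000
echo "4*(16*6+10) + 100000*(0.25+0.5)/1000" | bc -l   # 499 (MB)
```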
|
||||
|
||||
The actual running system often stores the data in different DBs according to different characteristics of the data. All these shall be considered when planning.
|
||||
|
||||
If there is plenty of memory, the blocks setting can be increased so that more data is kept in memory and query speed improves.
|
||||
|
||||
### CPU requirements
|
||||
|
||||
CPU requirements depend on the following two aspects:
|
||||
|
||||
- **Data insertion** A single TDengine core can handle at least 10,000 insertion requests per second. Each insertion request can carry multiple records, and inserting one record at a time consumes almost the same computing resources as inserting 10 records in one request. Therefore, the more records per request, the higher the insertion efficiency. With more than 200 records per request, a single core can insert 1 million records per second. However, the faster the insertion, the higher the demands on front-end data collection, because records must be cached and then inserted in batches.
|
||||
- **Query requirements** TDengine provides efficient queries, but queries vary greatly from scenario to scenario, as does query frequency, making it difficult to give objective figures. Users need to write some query statements for their own scenarios to determine the required resources.
|
||||
|
||||
Therefore, CPU requirements can be estimated only for data insertion; the computing resources consumed by queries cannot be estimated as precisely. In actual operation, it is not recommended to let CPU utilization exceed 50%; beyond that, new nodes should be added to bring in more computing resources.
|
||||
|
||||
### Storage requirements
|
||||
|
||||
Compared with general-purpose databases, TDengine has an ultra-high compression ratio. In most scenarios, the compression ratio of TDengine is at least 5:1, and in some scenarios it may exceed 10:1, depending on the actual data characteristics. The raw data size before compression can be calculated as follows:
|
||||
|
||||
```
|
||||
Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
|
||||
```
|
||||
|
||||
Example: 10 million smart meters, each collecting data every 15 minutes, with 128 bytes collected each time. The raw data volume for one year is: 10,000,000 * 128 * 24 * 60 / 15 * 365 = 44.8512 TB. TDengine then consumes approximately 44.8512 / 5 ≈ 8.97 TB of storage.
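The raw-size formula can be checked the same way (a sketch using the numbers from the example):

```bash
# 10,000,000 meters * 128 bytes * (24*60/15 = 96 collections/day) * 365 days
echo "10000000*128*(24*60/15)*365" | bc   # 44851200000000 bytes ≈ 44.85 TB
echo "44.8512/5" | bc -l                  # ≈ 8.97 TB after 5:1 compression
```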
|
||||
|
||||
Users can set the maximum retention time of data on disk through the parameter `keep`. To further reduce storage cost, TDengine also provides tiered storage: the coldest data can be stored on the cheapest media, with no change needed in application access, only a lower read speed.
|
||||
|
||||
To improve speed, multiple hard disks can be configured so that data can be written and read concurrently. Note that TDengine provides high data reliability through multiple replicas, so expensive disk arrays are no longer necessary.
|
||||
|
||||
### Number of physical or virtual machines
|
||||
|
||||
According to the above estimation of memory, CPU and storage, we can know how many cores, how much memory and storage space the whole system needs. If the number of data replicas is not 1, the total demand needs to be multiplied by the number of replicas.
|
||||
|
||||
Because TDengine scales out very well, it is easy to decide how many physical or virtual machines to acquire based on the total demand and the resources of a single physical/virtual machine.
|
||||
|
||||
**Calculate CPU, memory and storage immediately, see:** [**Resource Estimation**](https://www.taosdata.com/config/config.html)
|
||||
|
||||
### Fault Tolerance and Disaster Recovery
|
||||
|
||||
### Fault tolerance
|
||||
|
||||
TDengine uses a WAL (Write-Ahead Log) mechanism to provide fault tolerance for data and ensure high data availability.
|
||||
|
||||
When TDengine receives a request packet from the application, it first writes the original request packet into the database log file, and deletes the corresponding WAL only after the data has been successfully written. This ensures that TDengine can recover data from the log file when the service restarts after a power failure or other incident, avoiding data loss.
|
||||
|
||||
There are two system configuration parameters involved:
|
||||
|
||||
- walLevel: WAL level. 0: do not write the WAL; 1: write the WAL but do not execute fsync; 2: write the WAL and execute fsync.
|
||||
- fsync: the period at which fsync is executed when walLevel is set to 2. Setting it to 0 means fsync is executed immediately on every write.
|
||||
|
||||
To guarantee 100% data safety, set walLevel to 2 and fsync to 0. This reduces write speed; however, once the number of writing threads on the application side reaches a certain level (more than 50), write performance is still good, only about 30% lower than with fsync set to 3000 milliseconds.
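A taos.cfg sketch of the maximum-durability trade-off just described:

```
# taos.cfg: write the WAL and fsync on every write (slowest, safest)
walLevel 2
fsync 0
```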
|
||||
|
||||
### Disaster recovery
|
||||
|
||||
The TDengine cluster provides high availability of the system and implements disaster recovery through a multi-replica mechanism.
|
||||
|
||||
The TDengine cluster is managed by mnode. To ensure high reliability of the mnode, multiple mnode replicas can be configured; the number is determined by the system configuration parameter numOfMnodes and must be set greater than 1 for high reliability. To guarantee strong consistency of metadata, the mnode replicas replicate data synchronously.
|
||||
|
||||
The number of replicas of time-series data in a TDengine cluster is associated with databases. There can be multiple databases in a cluster, and each database can be configured with a different number of replicas. When creating a database, specify the number of replicas through the parameter replica. To support high reliability, the number of replicas must be set greater than 1.
|
||||
|
||||
The number of nodes in TDengine cluster must be greater than or equal to the number of replicas, otherwise an error will be reported in table creation.
|
||||
|
||||
When the nodes in TDengine cluster are deployed on different physical machines and multiple replicas are set, the high reliability of the system is implemented without using other software or tools. TDengine Enterprise Edition can also deploy replicas in different server rooms, thus realizing remote disaster recovery.
|
||||
|
||||
## <a class="anchor" id="config"></a> Server-side Configuration
|
||||
|
||||
The background service of the TDengine system is provided by taosd, and its configuration parameters can be modified in the configuration file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, and a different directory can be specified with the -c parameter on the taosd command line. For example, `taosd -c /home/user` specifies that the configuration file is located in the /home/user directory.
|
||||
|
||||
You can also use `-C` to show the current server configuration parameters:
|
||||
|
||||
```
|
||||
taosd -C
|
||||
```
|
||||
|
||||
Only some important configuration parameters are listed below; for more, please refer to the comments in the configuration file. Refer to the previous chapters for a detailed introduction to each parameter. The defaults are usable and generally do not need to be changed. A minimal configuration sketch follows the list. **Note: after the configuration is modified, the *taosd* service needs to be restarted to take effect.**
|
||||
|
||||
- firstEp: the end point of the first dnode that taosd actively connects to in the cluster at startup; the default value is localhost:6030.
|
||||
- fqdn: FQDN of the data node, which defaults to the first hostname configured by the operating system. If you are accustomed to IP address access, you can set it to the IP address of the node.
|
||||
- serverPort: the port number taosd serves on after starting; the default value is 6030.
|
||||
- httpPort: the port number used by the RESTful service; all HTTP (TCP) query/write requests are sent to this port. The default value is 6041.
|
||||
- dataDir: the data file directory, to which all data files are written. Default: /var/lib/taos.
|
||||
- logDir: the log file directory, to which the running logs of the client and server are written. Default: /var/log/taos.
|
||||
- arbitrator: the end point of the arbiter in the system; the default value is null.
|
||||
- role: the optional role of the dnode. 0: any, it can serve as an mnode and accept vnodes; 1: mgmt, it can only be an mnode and cannot accept vnodes; 2: dnode, it cannot be an mnode and can only accept vnodes.
|
||||
- debugFlag: the log level switch. 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs). Default: 131 or 135 (different modules have different defaults).
|
||||
- numOfLogLines: the maximum number of lines allowed for a single log file. Default: 10,000,000 lines.
|
||||
- logKeepDays: the maximum retention time of the log file. When it is greater than 0, the log file will be renamed to taosdlog.xxx, where xxx is the timestamp of the last modification of the log file in seconds. Default: 0 days.
|
||||
- maxSQLLength: the maximum length allowed for a single SQL statement. Default: 65380 bytes.
|
||||
- telemetryReporting: whether TDengine is allowed to collect and report basic usage information. 0 means not allowed, and 1 means allowed. Default: 1.
|
||||
- stream: whether continuous query (a stream computing function) is enabled, 0 means not allowed, 1 means allowed. Default: 1.
|
||||
- queryBufferSize: the amount of memory reserved for all concurrent queries. As a rule of thumb, multiply the maximum expected number of concurrent queries in the application by the number of tables involved, and then by 170. The unit is MB (in versions before 2.0.15, the unit of this parameter is bytes).
|
||||
- ratioOfQueryCores: sets the maximum number of query threads. The minimum value 0 means there is only one query thread; the maximum value 2 means the maximum number of query threads is twice the number of CPU cores. The default is 1, meaning the number of query threads equals the number of CPU cores. The value can be a decimal; for example, 0.5 means the number of query threads is half the number of CPU cores.
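As mentioned above the list, here is a minimal taos.cfg sketch for one cluster node; the host names are hypothetical:

```
# minimal cluster-node sketch
# first dnode to contact at startup
firstEp h1.taosdata.com:6030
# this node's own FQDN
fqdn h2.taosdata.com
serverPort 6030
dataDir /var/lib/taos
logDir /var/log/taos
```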
|
||||
|
||||
**Note:** for ports, TDengine will use 13 continuous TCP and UDP port numbers from serverPort, so be sure to open them in the firewall. Therefore, if it is the default configuration, a total of 13 ports from 6030 to 6042 need to be opened, and the same for both TCP and UDP.
|
||||
|
||||
Data in different application scenarios often have different data characteristics, such as retention days, number of replicas, collection frequency, record size, number of collection points, compression, etc. In order to obtain the best efficiency in storage, TDengine provides the following storage-related system configuration parameters:
|
||||
|
||||
- days: the time span for a data file to store data, in days, the default value is 10.
|
||||
- keep: the number of days to keep data in the database, in days, default value: 3650.
|
||||
- minRows: the minimum number of records in a file block. Default: 100.
|
||||
- maxRows: the maximum number of records in a file block. Default: 4096.
|
||||
- comp: file compression flag bit, 0: off; 1: one-stage compression; 2: two-stage compression. Default: 2.
|
||||
- walLevel: WAL level. 1: write wal, but do not execute fsync; 2: write wal and execute fsync. Default: 1.
|
||||
- fsync: the period at which fsync is executed when walLevel is set to 2; 0 means fsync is executed immediately on every write. The unit is milliseconds, and the default value is 3000.
|
||||
- cache: the size of the memory block in megabytes (MB), default: 16.
|
||||
- blocks: how many cache-sized memory blocks each vnode (TSDB) has. A vnode therefore uses roughly (cache * blocks) of memory. Default: 4.
|
||||
- replica: the number of replicas; value range 1-3. Default: 1.
|
||||
- precision: timestamp precision identification, ms for milliseconds and us for microseconds. Default: ms
|
||||
- cacheLast: whether the last_row of sub-tables is cached in memory. 0: off; 1: on. Default: 0. (Supported as of version 2.0.11.)
|
||||
|
||||
In one application scenario, data with multiple different characteristics may coexist. The best design is to put tables with the same data characteristics in one database. Such an application has multiple databases, and each can be configured with different storage parameters, ensuring optimal system performance. TDengine allows the application to specify the above storage parameters at database creation; if specified, they override the corresponding system configuration parameters. For example, consider the following SQL:
|
||||
|
||||
```
|
||||
create database demo days 10 cache 32 blocks 8 replica 3 update 1;
|
||||
```
|
||||
|
||||
This SQL creates a database demo in which each data file stores 10 days of data, each memory block is 32 MB, each vnode uses 8 memory blocks, the number of replicas is 3, updates are allowed, and all other parameters follow the system configuration.
|
||||
|
||||
When adding a new dnode to the TDengine cluster, some parameters related to the cluster must be the same as the configuration of the existing cluster, otherwise it cannot be successfully added to the cluster. The parameters that will be verified are as follows:
|
||||
|
||||
- numOfMnodes: the number of management nodes in the system. Default: 3.
|
||||
- balance: whether to enable load balancing. 0: No, 1: Yes. Default: 1.
|
||||
- mnodeEqualVnodeNum: the number of vnodes an mnode is counted as when allocating resources. Default: 4.
|
||||
- offlineThreshold: the threshold for how long a dnode may stay offline; once exceeded, the dnode is removed from the cluster. The unit is seconds, and the default value is 86400*10 (10 days).
|
||||
- statusInterval: the interval at which a dnode reports its status to mnode. The unit is seconds, and the default value is 1.
|
||||
- maxTablesPerVnode: the maximum number of tables that can be created in each vnode. Default: 1000000.
|
||||
- maxVgroupsPerDb: the maximum number of vgroups that can be used in each database.
|
||||
- arbitrator: the end point of the arbiter in system, which is empty by default.
|
||||
- See Client Configuration for the configuration of timezone, locale and charset.
|
||||
|
||||
For the convenience of debugging, the log configuration of each dnode can be adjusted temporarily through SQL statements; all changes are lost after the system restarts:
|
||||
|
||||
```mysql
|
||||
ALTER DNODE <dnode_id> <config>
|
||||
```
|
||||
|
||||
- dnode_id: available from the SQL statement "SHOW DNODES" command
|
||||
- config: the log parameter to be adjusted, and the value is taken in the following list
|
||||
|
||||
- resetlog: truncate the old log file and create a new one
- debugFlag <131 | 135 | 143>: set debugFlag to 131, 135 or 143
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
alter dnode 1 debugFlag 135;
|
||||
```
|
||||
|
||||
## <a class="anchor" id="client"></a> Client Configuration
|
||||
|
||||
The foreground interactive client of the TDengine system is taos, together with the application drivers; it shares the same configuration file taos.cfg with taosd. When running taos, use the parameter -c to specify the configuration file directory, e.g. `taos -c /home/cfg`, which means using the parameters in the taos.cfg file under the /home/cfg/ directory. The default directory is /etc/taos. For more on how to use taos, see the help information `taos --help`. This section describes the parameters used by the taos client application in the configuration file taos.cfg.
|
||||
|
||||
**Versions after 2.0.10.0 support the following command-line parameters to display the current client configuration:**
|
||||
|
||||
```bash
|
||||
taos -C or taos --dump-config
|
||||
```
|
||||
|
||||
Client configuration parameters:
|
||||
|
||||
- firstEp: the end point of the first taosd instance that taos actively connects to at startup; the default value is localhost:6030.
|
||||
- secondEp: when taos starts, if it cannot connect to firstEp, it tries to connect to secondEp.
|
||||
- locale
|
||||
|
||||
Default value: obtained dynamically from the system. If automatic acquisition fails, the user needs to set it in the configuration file or through the API.
|
||||
|
||||
TDengine provides a special field type, nchar, for storing non-ASCII encoded wide characters such as Chinese, Japanese and Korean. Data written to an nchar field is uniformly encoded in UCS4-LE format and sent to the server. Note that the correctness of the encoding is guaranteed by the client. Therefore, to use nchar fields to store non-ASCII characters such as Chinese, Japanese or Korean correctly, the client's encoding format must be set correctly.
|
||||
|
||||
Characters input on the client are in the operating system's current default encoding, mostly UTF-8 on Linux systems; some Chinese system encodings may be GB18030 or GBK. The default encoding in a Docker environment is POSIX. In Chinese versions of Windows, the encoding is CP936. The client must ensure that the character set it uses is set correctly, i.e. to the current encoding of the operating system the client runs on, so that data in nchar fields is correctly converted to UCS4-LE.
|
||||
|
||||
The naming rule of locale in Linux is <language>_<region>.<charset>, for example zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The character set encoding tells the client how to parse local strings correctly. Linux and Mac OSX determine the system's character encoding by setting the locale. Because the locale used by Windows is not in the POSIX standard locale format, a separate configuration parameter, charset, is needed to specify the character encoding on Windows. charset can also be used to specify the character encoding on Linux systems.
|
||||
|
||||
- charset
|
||||
|
||||
Default value: obtained dynamically from the system. If automatic acquisition fails, the user needs to set it in the configuration file or through the API.
|
||||
|
||||
If charset is not set in the configuration file, then on Linux, when taos starts, it automatically reads the system's current locale and parses the charset encoding format out of it. If reading the locale fails, it tries to read the charset configuration; if that also fails, the startup process is aborted.
|
||||
|
||||
In Linux system, locale information contains character encoding information, so it is unnecessary to set charset separately after setting locale of Linux system correctly. For example:
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
```
|
||||
|
||||
- On Windows systems, the current system encoding cannot be obtained from locale. If string encoding information cannot be read from the configuration file, taos defaults to CP936. It is equivalent to adding the following to the configuration file:
|
||||
|
||||
```
|
||||
charset CP936
|
||||
```
|
||||
|
||||
- If you need to adjust the character encoding, check the encoding used by the current operating system and set it correctly in the configuration file.
|
||||
|
||||
On Linux systems, if the user sets both locale and charset and they are inconsistent, the value set later overrides the one set earlier.
|
||||
|
||||
```
|
||||
locale zh_CN.UTF-8
|
||||
charset GBK
|
||||
```
|
||||
|
||||
- In this case, the effective value of charset is GBK, because charset was set later.
|
||||
|
||||
If the two lines appear in the opposite order (charset first, then locale), the effective value of charset is UTF-8.
|
||||
|
||||
The configuration parameters of log are exactly the same as those of server.
|
||||
|
||||
- timezone
|
||||
|
||||
Default value: the current time zone, obtained dynamically from the system.
|
||||
|
||||
The time zone of the system where the client runs. To handle data writing and queries across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of Unix timestamps guarantees that a timestamp generated at a given moment is the same in any time zone. Note that Unix timestamps are converted and recorded on the client side, so to make sure other time formats on the client are converted into correct Unix timestamps, the time zone must be set correctly.
|
||||
|
||||
On Linux, the client automatically reads the time zone set by the system. The user can also set the time zone in the configuration file in several ways. For example:
|
||||
|
||||
```
|
||||
timezone UTC-8
|
||||
timezone GMT-8
|
||||
timezone Asia/Shanghai
|
||||
```
|
||||
|
||||
- All of the above are valid ways of setting the time zone to UTC+8 (the East Eight Zone).
|
||||
|
||||
The time zone setting affects how non-Unix-timestamp content (timestamp strings, parsing of the keyword now) in SQL statements is interpreted during queries and writes. For example:
|
||||
|
||||
```sql
|
||||
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
|
||||
```
|
||||
|
||||
- In East Eight Zone, the SQL statement is equivalent to
|
||||
|
||||
```sql
|
||||
SELECT count(*) FROM table_name WHERE TS<1554955268000;
|
||||
```
|
||||
|
||||
|
||||
|
||||
In the UTC time zone, the SQL statement is equivalent to
|
||||
|
||||
```sql
|
||||
SELECT count(*) FROM table_name WHERE TS<1554984068000;
|
||||
```
|
||||
|
||||
|
||||
|
||||
To avoid the ambiguity of string time formats, Unix timestamps can also be used directly. In addition, timestamp strings with explicit time zones are accepted in SQL statements, such as RFC3339-format strings (2013-04-12T15:52:01.123+08:00) or ISO-8601-format strings (2013-04-12T15:52:01.123+0800). Converting these two strings into Unix timestamps is not affected by the time zone the system is in.
|
||||
|
||||
When starting taos, an end point of a taosd instance can also be specified on the command line; otherwise it is read from taos.cfg.
|
||||
|
||||
- maxBinaryDisplayWidth
|
||||
|
||||
The upper limit of the display width of binary and nchar fields in the shell; content beyond it is hidden. Default: 30. You can modify this option dynamically in the shell with the command `set max_binary_display_width <nn>`.
|
||||
|
||||
## <a class="anchor" id="user"></a>User Management
|
||||
|
||||
System administrators can add and delete users in CLI, and also modify passwords. The SQL syntax in the CLI is as follows:
|
||||
|
||||
```sql
|
||||
CREATE USER <user_name> PASS <'password'>;
|
||||
```
|
||||
|
||||
Creates a user and specifies the user name and password. The password must be enclosed in single quotes (ASCII single quotes).
|
||||
|
||||
```sql
|
||||
DROP USER <user_name>;
|
||||
```
|
||||
|
||||
Deletes a user (root only).
|
||||
|
||||
```sql
|
||||
ALTER USER <user_name> PASS <'password'>;
|
||||
```
|
||||
|
||||
Modifies the user password. To prevent it from being converted to lowercase, the password must be enclosed in single quotes (ASCII single quotes).
|
||||
|
||||
```sql
|
||||
ALTER USER <user_name> PRIVILEGE <write|read>;
|
||||
```
|
||||
|
||||
Modifies the user privilege to write or read, without single quotes.
|
||||
|
||||
Note: there are three privilege levels in the system: super/write/read, but currently it is not possible to grant super privilege to a user through the ALTER instruction.
|
||||
|
||||
```mysql
|
||||
SHOW USERS;
|
||||
```
|
||||
|
||||
Shows all users.
|
||||
|
||||
**Note:** in the SQL syntax, < > marks the parts that require user input; do not type the < > themselves.
|
||||
|
||||
## <a class="anchor" id="import"></a> Import Data
|
||||
|
||||
TDengine provides a variety of convenient data import methods: import from a script file, import from a data file, and import via the taosdump tool.
|
||||
|
||||
**Import by script file**
|
||||
|
||||
The TDengine shell supports the `source <filename>` command, used to run SQL statements from a file in batch. Users can write SQL commands such as database creation, table creation and data writing in one file, one command per line; running the source command in the shell then executes the SQL statements in the file in order. SQL statements beginning with '#' are treated as comments and ignored by the shell.
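For example, if the SQL commands were saved in the file /tmp/init.sql (a hypothetical path), the batch could be run as:

```
taos> source /tmp/init.sql;
```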
|
||||
|
||||
**Import by data file**
|
||||
|
||||
TDengine also supports importing data from a CSV file into an existing table in the shell. A CSV file belongs to a single table, and the data format in the CSV file must match the structure of the table to be imported. The syntax is as follows:
|
||||
|
||||
```mysql
|
||||
insert into tb1 file 'path/data.csv';
|
||||
```
|
||||
|
||||
Note: if the first line of the CSV file contains descriptive information, please delete it manually before importing.
|
||||
|
||||
For example, there is now a sub-table d1001 whose table structure is as follows:
|
||||
|
||||
```mysql
|
||||
taos> DESCRIBE d1001
|
||||
Field | Type | Length | Note |
|
||||
=================================================================================
|
||||
ts | TIMESTAMP | 8 | |
|
||||
current | FLOAT | 4 | |
|
||||
voltage | INT | 4 | |
|
||||
phase | FLOAT | 4 | |
|
||||
location | BINARY | 64 | TAG |
|
||||
groupid | INT | 4 | TAG |
|
||||
```
|
||||
|
||||
And the format of the data.csv to import is as follows:
|
||||
|
||||
```csv
|
||||
'2018-10-04 06:38:05.000',10.30000,219,0.31000
|
||||
'2018-10-05 06:38:15.000',12.60000,218,0.33000
|
||||
'2018-10-06 06:38:16.800',13.30000,221,0.32000
|
||||
'2018-10-07 06:38:05.000',13.30000,219,0.33000
|
||||
'2018-10-08 06:38:05.000',14.30000,219,0.34000
|
||||
'2018-10-09 06:38:05.000',15.30000,219,0.35000
|
||||
'2018-10-10 06:38:05.000',16.30000,219,0.31000
|
||||
'2018-10-11 06:38:05.000',17.30000,219,0.32000
|
||||
'2018-10-12 06:38:05.000',18.30000,219,0.31000
|
||||
```
|
||||
|
||||
Then we can use the following command to import:
|
||||
|
||||
```mysql
|
||||
taos> insert into d1001 file '~/data.csv';
|
||||
Query OK, 9 row(s) affected (0.004763s)
|
||||
```
|
||||
|
||||
**Import via taosdump tool**
|
||||
|
||||
TDengine provides a convenient database import and export tool, taosdump. Users can import data exported by taosdump from one system into other systems. Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html).
|
||||
|
||||
## <a class="anchor" id="export"></a> Export Data
|
||||
|
||||
To facilitate data export, TDengine provides two methods: export by table and export via taosdump.
|
||||
|
||||
**Export CSV file by table**
|
||||
|
||||
To export data from a table or a STable, run the following in the shell:
|
||||
|
||||
```mysql
|
||||
select * from <tb_name> >> data.csv;
|
||||
```
|
||||
|
||||
In this way, the data in table tb_name will be exported to the file data.csv in CSV format.
|
||||
|
||||
**Export data by taosdump**
|
||||
|
||||
TDengine provides a convenient database export tool, taosdump. Users can choose to export all databases, a database or a table in a database, all data or data for a time period, or even just the definition of a table as needed. Please refer to the blog: [User Guide of TDengine DUMP Tool](https://www.taosdata.com/blog/2020/03/09/1334.html)
|
||||
|
||||
## <a class="anchor" id="status"></a> System Connection and Task Query Management
|
||||
|
||||
The system administrator can query the system's connections, ongoing queries and stream computations from the CLI, and can close connections and stop ongoing queries and stream computations. The SQL syntax in the CLI is as follows:
|
||||
|
||||
```mysql
|
||||
SHOW CONNECTIONS;
|
||||
```
|
||||
|
||||
Shows the connections to the database; one column displays ip:port, the IP address and port number of the connection.
|
||||
|
||||
```mysql
|
||||
KILL CONNECTION <connection-id>;
|
||||
```
|
||||
|
||||
Forcibly closes a database connection; connection-id is the number shown in the first column of SHOW CONNECTIONS.
|
||||
|
||||
```mysql
|
||||
SHOW QUERIES;
|
||||
```
|
||||
|
||||
Shows ongoing queries. In the first column, the two numbers separated by a colon are the connection-id of the connection that initiated the query and the query's sequence number on that connection; together they form the query-id.
|
||||
|
||||
```mysql
|
||||
KILL QUERY <query-id>;
|
||||
```
|
||||
|
||||
Forcibly stops a query; query-id is the connection-id:query-no string shown by SHOW QUERIES, such as "105:2". Copy and paste it.
|
||||
|
||||
```mysql
|
||||
SHOW STREAMS;
|
||||
```
|
||||
|
||||
Shows the stream computations. In the first column, the two numbers separated by a colon are the connection-id of the connection that started the stream and the stream's sequence number on that connection; together they form the stream-id.
|
||||
|
||||
```mysql
|
||||
KILL STREAM <stream-id>;
|
||||
```
|
||||
|
||||
Forcibly stops a stream computation; stream-id is the connection-id:stream-no string shown by SHOW STREAMS, such as 103:2. Copy and paste it.
|
||||
|
||||
## System Monitoring
|
||||
|
||||
After TDengine starts, it automatically creates a monitoring database, log, and regularly writes the server's CPU, memory, disk space, bandwidth, request count, disk I/O speed, slow queries and other information into it. TDengine also records important system operations (such as logins and creating or deleting databases) as well as error and alarm messages in the log database. The system administrator can view this database directly from the CLI, or view the monitoring information through a web GUI.
|
||||
|
||||
Collection of these monitoring metrics is enabled by default, but it can be turned off or on with the option enableMonitor in the configuration file.
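For instance, the monitoring data can be browsed from the CLI roughly as follows (a sketch; the exact tables inside the log database vary by version):

```
taos> use log;
taos> show tables;
```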
|
||||
|
||||
## <a class="anchor" id="directories"></a> File Directory Structure
|
||||
|
||||
After installing TDengine, the following directories or files are generated in the operating system by default:
|
||||
|
||||
|
||||
|
||||
| **Directory/File** | **Description** |
|
||||
| ------------------------- | ------------------------------------------------------------ |
|
||||
| /usr/local/taos/bin | TDengine’s executable directory. The executables are linked to the /usr/bin directory via soft links. |
|
||||
| /usr/local/taos/connector | TDengine’s various connector directories. |
|
||||
| /usr/local/taos/driver | TDengine’s dynamic link library directory. Linked to the /usr/lib directory via soft links. |
|
||||
| /usr/local/taos/examples | TDengine’s application example directory for various languages. |
|
||||
| /usr/local/taos/include | TDengine’s C-interface header files for external use. |
|
||||
| /etc/taos/taos.cfg | TDengine’s default [configuration files]. |
|
||||
| /var/lib/taos | TDengine’s default data file directory; the location can be modified via the [configuration file]. |
|
||||
| /var/log/taos | TDengine’s default log file directory; the location can be modified via the [configuration file]. |
|
||||
|
||||
**Executables**
|
||||
|
||||
All executables of TDengine are stored in the directory /usr/local/taos/bin by default. Including:
|
||||
|
||||
- *taosd*: TDengine server-side executable
|
||||
- *taos*: TDengine Shell executable
|
||||
- *taosdump*: A data import/export tool
|
||||
- remove.sh: the script to uninstall TDengine; execute with caution. It is linked to the rmtaos command in the /usr/bin directory. It removes the installation directory /usr/local/taos, but keeps /etc/taos, /var/lib/taos and /var/log/taos.
|
||||
|
||||
You can configure different data directories and log directories by modifying system configuration file taos.cfg.
|
||||
|
||||
## <a class="anchor" id="keywords"></a> TDengine Parameter Limits and Reserved Keywords
|
||||
|
||||
- Database name: cannot contain "." and other special characters, and cannot exceed 32 characters
|
||||
- Table name: cannot contain "." and other special characters, and cannot exceed 192 characters together with the database name to which it belongs
|
||||
- Table column name: cannot contain special characters, and cannot exceed 64 characters
|
||||
- Database name, table name, column name cannot begin with a number
|
||||
- Number of columns in table: cannot exceed 1024 columns
|
||||
- Maximum record length: including the 8-byte timestamp, no more than 16 KB (each BINARY/NCHAR column occupies an additional 2 bytes of storage space)
|
||||
- Default maximum string length for a single SQL statement: 65480 bytes
|
||||
- Number of database replicas: no more than 3
|
||||
- User name: no more than 23 bytes
|
||||
- User password: no more than 15 bytes
|
||||
- Number of Tags: no more than 128
|
||||
- Total length of tags: cannot exceed 16 KB
|
||||
- Number of records: limited by storage space only
|
||||
- Number of tables: limited only by the number of nodes
|
||||
- Number of databases: limited only by the number of nodes
|
||||
- Number of virtual nodes on a single database: cannot exceed 64
|
||||
|
||||
At the moment, TDengine has nearly 200 internal reserved keywords, which cannot be used as database name, table name, STable name, data column name or tag column name regardless of case. The list of these keywords is as follows:
|
||||
|
||||
| **List of Keywords** | | | | |
|
||||
| -------------------- | ----------- | ------------ | ---------- | --------- |
|
||||
| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
|
||||
| ABORT | COPY | ID | MODULES | SLIMIT |
|
||||
| ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
|
||||
| ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
|
||||
| ADD | CTIME | IMMEDIATE | NONE | STABLE |
|
||||
| AFTER | DATABASE | IMPORT | NOT | STABLES |
|
||||
| ALL | DATABASES | IN | NOTNULL | STAR |
|
||||
| ALTER | DAYS | INITIALLY | NOW | STATEMENT |
|
||||
| AND | DEFERRED | INSERT | OF | STDDEV |
|
||||
| AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
|
||||
| ASC | DESC | INTEGER | OR | STREAMS |
|
||||
| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
|
||||
| AVG | DETACH | INTO | PASS | SUM |
|
||||
| BEFORE | DIFF | IP | PERCENTILE | TABLE |
|
||||
| BEGIN | DISTINCT | IS | PLUS | TABLES |
|
||||
| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
|
||||
| BIGINT | DNODE | JOIN | PREV | TAGS |
|
||||
| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
|
||||
| BITAND | DOT | KEY | QUERIES | TBNAME |
|
||||
| BITNOT | DOUBLE | KILL | QUERY | TIMES |
|
||||
| BITOR | DROP | LAST | RAISE | TIMESTAMP |
|
||||
| BOOL | EACH | LE | REM | TINYINT |
|
||||
| BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
|
||||
| BY | EQ | LIKE | REPLICA | TRIGGER |
|
||||
| CACHE | EXISTS | LIMIT | RESET | UMINUS |
|
||||
| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
|
||||
| CHANGE | FAIL | LOCAL | ROW | USE |
|
||||
| CLOG | FILL | LP | ROWS | USER |
|
||||
| CLUSTER | FIRST | LSHIFT | RP | USERS |
|
||||
| COLON | FLOAT | LT | RSHIFT | USING |
|
||||
| COLUMN | FOR | MATCH | SCORES | VALUES |
|
||||
| COMMA | FROM | MAX | SELECT | VARIABLE |
|
||||
| COMP | GE | METRIC | SEMI | VGROUPS |
|
||||
| CONCAT | GLOB | METRICS | SET | VIEW |
|
||||
| CONFIGS | GRANTS | MIN | SHOW | WAVG |
|
||||
| CONFLICT | GROUP | MINUS | SLASH | WHERE |
|
||||
| CONNECTION | | | | |
|
|
@ -0,0 +1,161 @@
|
|||
# FAQ
|
||||
|
||||
Tutorials & FAQ
|
||||
|
||||
## 0. How to report an issue?
|
||||
|
||||
If the contents in FAQ cannot help you and you need the technical support and assistance of TDengine team, please package the contents in the following two directories:
|
||||
|
||||
1. /var/log/taos (if the default path has not been modified)
|
||||
|
||||
2. /etc/taos
|
||||
|
||||
Provide the necessary description of the problem, including the version of TDengine used, the platform and environment information, the operations performed when the problem appeared, how the problem manifested, and the approximate time, and submit an issue on [GitHub](https://github.com/taosdata/TDengine).
|
||||
|
||||
To ensure there is enough debug information, if the problem can be reproduced, please modify the /etc/taos/taos.cfg file by adding a line "debugFlag 135" at the end (without the quotation marks), restart taosd, reproduce the problem, and then submit the issue. You can also temporarily set the log level of taosd with the following SQL statement:
|
||||
|
||||
```
|
||||
alter dnode <dnode_id> debugFlag 135;
|
||||
```
|
||||
|
||||
However, when the system runs normally, please set debugFlag back to 131; otherwise a large amount of log information will be generated and system efficiency will decrease.
|
||||
|
||||
## 1. What should I pay attention to when upgrading TDengine from older versions to 2.0 and above? ☆☆☆
|
||||
|
||||
Version 2.0 is a complete refactoring of the previous version, and the configuration and data files are incompatible. Be sure to do the following before upgrading:
|
||||
|
||||
1. Delete the configuration file: execute `sudo rm -rf /etc/taos/taos.cfg`
|
||||
2. Delete the log file, execute `sudo rm -rf /var/log/taos/`
|
||||
3. After making sure the data is no longer needed, delete the data files: execute `sudo rm -rf /var/lib/taos/`
|
||||
4. Install the latest stable version of TDengine
|
||||
5. If you need to migrate data or the data file is corrupted, please contact the official technical support team of TAOS Data to assist
|
||||
|
||||
## 2. What can I do when I encounter the error "Unable to establish connection" on Windows?
|
||||
|
||||
See the [technical blog](https://www.taosdata.com/blog/2019/12/03/jdbcdriver%E6%89%BE%E4%B8%8D%E5%88%B0%E5%8A%A8%E6%80%81%E9%93%BE%E6%8E%A5%E5%BA%93/) for this issue.
|
||||
|
||||
## 3. Why do I get “more dnodes are needed” when creating a table?
|
||||
|
||||
See the [technical blog](https://www.taosdata.com/blog/2019/12/03/%E5%88%9B%E5%BB%BA%E6%95%B0%E6%8D%AE%E8%A1%A8%E6%97%B6%E6%8F%90%E7%A4%BAmore-dnodes-are-needed/) for this issue.
|
||||
|
||||
## 4. How do I generate a core file when TDengine crashes?
|
||||
|
||||
See the [technical blog](https://www.taosdata.com/blog/2019/12/06/tdengine-crash%E6%97%B6%E7%94%9F%E6%88%90core%E6%96%87%E4%BB%B6%E7%9A%84%E6%96%B9%E6%B3%95/) for this issue.
|
||||
|
||||
## 5. What should I do if I encounter an error "Unable to establish connection"?
|
||||
|
||||
When the client encounters a connection failure, please check the following step by step:
|
||||
|
||||
1. Check your network environment
|
||||
|
||||
   - Cloud server: check whether the security group of the cloud server opens access to TCP/UDP ports 6030-6042
|
||||
- Local virtual machine: Check whether the network can be pinged, and try to avoid using localhost as hostname
|
||||
- Corporate server: If you are in a NAT network environment, be sure to check whether the server can return messages to the client
|
||||
|
||||
2. Make sure that the client and server version numbers are exactly the same, and the open source Community Edition and Enterprise Edition cannot be mixed.
|
||||
3. On the server, execute `systemctl status taosd` to check the running status of *taosd*. If it is not running, start *taosd*.
|
||||
4. Verify that the correct server FQDN (Fully Qualified Domain Name, obtainable by executing `hostname -f` on the server) is specified when the client connects. For FQDN configuration, see "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
|
||||
5. Ping the server FQDN. If there is no response, please check your network, DNS settings, or the system hosts file of the computer where the client is located.
|
||||
6. Check the firewall settings (Ubuntu: `ufw status`; CentOS: `firewall-cmd --list-ports`) to confirm that TCP/UDP ports 6030-6042 are open.
|
||||
7. For JDBC (ODBC, Python, Go and other interfaces are similar) connections on Linux, make sure that libtaos.so is in the directory /usr/local/taos/driver, and /usr/local/taos/driver is in the system library function search path LD_LIBRARY_PATH.
|
||||
8. For JDBC, ODBC, Python, Go, etc. connections on Windows, make sure that C:\TDengine\driver\taos.dll is in the system's library search path (it is recommended to place taos.dll in the directory C:\Windows\System32)
|
||||
9. If the connection issue still exists:
|
||||
|
||||
   - On Linux, use the command-line tool nc to check whether TCP and UDP connections on the specified port get through. Check a UDP port: `nc -vuz {hostIP} {port}`. Check whether a server-side TCP port is listening (run on the server): `nc -l {port}`. Check a TCP port from the client side: `nc {hostIP} {port}`.
|
||||
   - On Windows, use the PowerShell command `Test-NetConnection -ComputerName {fqdn} -Port {port}` to check whether the server-side port can be reached.
|
||||
|
||||
10. You can also use the built-in network connectivity detection function of taos program to verify whether the specified port connection between the server and the client is unobstructed (including TCP and UDP): [TDengine's Built-in Network Detection Tool Use Guide](https://www.taosdata.com/blog/2020/09/08/1816.html).
|
||||
|
||||
|
||||
|
||||
## 6. What to do if I encounter the error "Unexpected generic error in RPC" or "TDengine error: Unable to resolve FQDN"?
|
||||
|
||||
This error occurs because the client or data node cannot parse the FQDN (Fully Qualified Domain Name). For TAOS shell or client applications, check the following:
|
||||
|
||||
1. Please verify whether the FQDN of the connected server is correct. FQDN configuration reference: "[All about FQDN of TDengine](https://www.taosdata.com/blog/2020/09/11/1824.html)".
|
||||
2. If the network is configured with a DNS server, check that it is working properly.
|
||||
3. If the network does not have a DNS server configured, check the hosts file of the machine where the client is located to see if the FQDN is configured and has the correct IP address.
|
||||
4. If the network configuration is OK, the machine where the client is located must be able to ping the configured FQDN; otherwise the client cannot connect to the server.
|
||||
|
||||
## 7. Although the syntax is correct, why do I still get the "Invalid SQL" error?
|
||||
|
||||
If you have confirmed that the syntax is correct, for versions older than 2.0, please check whether the SQL statement length exceeds 64 KB; exceeding it also returns this error.
|
||||
|
||||
## 8. Are “validation queries” supported?
|
||||
|
||||
TDengine does not yet have a dedicated set of validation queries. However, it is recommended to use the system-monitoring database log for this purpose.
|
||||
|
||||
## 9. Can I delete or update a record?
|
||||
|
||||
TDengine does not currently support deletion; it may be supported in the future depending on user requirements.
|
||||
|
||||
Starting from version 2.0.8.0, TDengine supports updating written data. The update feature requires the parameter UPDATE 1 when creating the database; the INSERT INTO command can then be used to update data already written at the same timestamp. The UPDATE parameter cannot be changed with the ALTER DATABASE command. In a database created without UPDATE 1, writing data with an existing timestamp does not modify the earlier data, and no error is reported.
|
||||
|
||||
Note also that when UPDATE is 0, data sent later with a duplicate timestamp is silently discarded, with no error, and is still counted in the affected rows (so the return value of an INSERT cannot be used to check for duplicate timestamps). The main reason for this design is that TDengine treats written data as a stream: regardless of timestamp conflicts, TDengine assumes the originating device really produced that data. The UPDATE parameter only controls how such a stream is persisted: with UPDATE 0, the data written first takes precedence (later duplicates are discarded); with UPDATE 1, the data written later overwrites the data written first. Which behavior to choose depends on whether the first-written or last-written value is expected in subsequent use and statistics.
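A minimal sketch of the UPDATE 1 behavior; the database and table names are hypothetical:

```mysql
CREATE DATABASE power UPDATE 1;
USE power;
CREATE TABLE t1 (ts TIMESTAMP, v INT);
INSERT INTO t1 VALUES ('2021-07-01 08:00:00.000', 1);
-- with UPDATE 1 the later write wins: v becomes 2 for this timestamp
INSERT INTO t1 VALUES ('2021-07-01 08:00:00.000', 2);
```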
|
||||
|
||||
## 10. How to create a table with more than 1024 columns?
|
||||
|
||||
With version 2.0 and above, 1024 columns are supported by default; older versions of TDengine allowed at most 250 columns per table. If the limit is exceeded, it is recommended to split the wide table logically into several smaller ones according to the data characteristics.
|
||||
|
||||
## 11. What is the most effective way to write data?
|
||||
|
||||
Insert in batches. Each write statement can insert multiple records into one table, or into multiple tables at the same time.
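A sketch of such a batched write, reusing the d1001 table from the import example above; several records go into one table in a single request:

```mysql
INSERT INTO d1001 VALUES
  ('2018-10-13 06:38:05.000', 10.3, 219, 0.31)
  ('2018-10-13 06:38:15.000', 12.6, 218, 0.33);
```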
|
||||
|
||||
## 12. How to solve the problem that Chinese characters in nchar inserted under Windows systems are parsed into messy code?
|
||||
|
||||
If there are Chinese characters in nchar data on Windows, first make sure the system region is set to China (this can be set in the Control Panel); the taos client in cmd should then work normally. If you are developing a Java application in an IDE such as Eclipse or IntelliJ, make sure the file encoding in the IDE is set to GBK (the default encoding type of Java), and then initialize the client configuration when creating the Connection. The specific statements are as follows:
|
||||
|
||||
```JAVA
|
||||
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;

Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
|
||||
Connection connection = DriverManager.getConnection(url, properties);
|
||||
```
|
||||
|
||||
## 13. JDBC error: the executed SQL is not a DML or a DDL?
|
||||
|
||||
Please update to the latest JDBC driver.
|
||||
|
||||
```xml
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.27</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
## 14. taos connect failed, reason: invalid timestamp.
|
||||
|
||||
The common reason is that the server and client clocks are not synchronized. This can be fixed by synchronizing with a time server (use the ntpdate command on Linux, or select automatic synchronization in the Windows time settings).
|
||||
|
||||
## 15. Incomplete display of table name
|
||||
|
||||
Due to the limited display width of the taos shell in the terminal, a relatively long table name may not be displayed completely. If operations are performed based on the displayed incomplete table name, a "Table does not exist" error occurs. The workaround is to modify the setting maxBinaryDisplayWidth in the taos.cfg file, or to enter the command `set max_binary_display_width 100` directly, or to append the `\G` parameter to the command to change how the results are displayed.
|
||||
|
||||
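For example, in the taos shell (the table name is illustrative):

```mysql
SET MAX_BINARY_DISPLAY_WIDTH 100;
SELECT * FROM my_table LIMIT 1 \G;
```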
## 16. How to migrate data?
TDengine uniquely identifies a machine by its hostname. When moving data files from machine A to machine B, pay attention to the following three points:
- For versions 2.0.0.0 through 2.0.6.x, reconfigure machine B's hostname to that of machine A.
- For version 2.0.7.0 and later, go to /var/lib/taos/dnode, fix the FQDN corresponding to each dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines.
- The storage structures of versions 1.x and 2.x are incompatible; use a migration tool or your own application to export and re-import the data.
## 17. How to temporarily adjust the log level in command line program taos?
For the convenience of debugging, since version 2.0.16 the command line program taos provides two new logging-related commands:
```mysql
ALTER LOCAL flag_name flag_value;
```
This modifies the log level of a specific module in the current command line program only (if taos is restarted, the setting must be applied again):
- The values of flag_name can be: debugFlag, cDebugFlag, tmrDebugFlag, uDebugFlag, rpcDebugFlag
- The values of flag_value can be: 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); an example follows this list
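For example, to make the current taos shell output error, warning, and debug logs for the client module:

```mysql
ALTER LOCAL cDebugFlag 135;
```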
```mysql
ALTER LOCAL RESETLOG;
```
This command deletes all log files generated by the client on this machine.
|
@ -123,6 +123,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
|
|||
*/
|
||||
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsDiffQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
|
||||
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
|
||||
|
@ -132,12 +133,12 @@ bool hasTagValOutput(SQueryInfo* pQueryInfo);
|
|||
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
|
||||
bool isStabledev(SQueryInfo* pQueryInfo);
|
||||
bool isTsCompQuery(SQueryInfo* pQueryInfo);
|
||||
bool isSimpleAggregate(SQueryInfo* pQueryInfo);
|
||||
bool isBlockDistQuery(SQueryInfo* pQueryInfo);
|
||||
bool isSimpleAggregateRv(SQueryInfo* pQueryInfo);
|
||||
|
||||
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
|
||||
bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
|
||||
|
@ -214,7 +215,7 @@ void tscColumnListDestroy(SArray* pColList);
|
|||
void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid);
|
||||
void tscColumnListCopyAll(SArray* dst, const SArray* src);
|
||||
|
||||
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId);
|
||||
|
||||
void tscDequoteAndTrimToken(SStrToken* pToken);
|
||||
int32_t tscValidateName(SStrToken* pToken);
|
||||
|
@ -329,9 +330,7 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
|
|||
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
|
||||
|
||||
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
|
||||
|
||||
void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema);
|
||||
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
|
||||
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);
|
||||
|
||||
void* malloc_throw(size_t size);
|
||||
void* calloc_throw(size_t nmemb, size_t size);
|
||||
|
|
|
@ -266,6 +266,7 @@ typedef struct SSqlObj {
|
|||
|
||||
typedef struct SSqlStream {
|
||||
SSqlObj *pSql;
|
||||
void * cqhandle; // stream belong to SCQContext handle
|
||||
const char* dstTable;
|
||||
uint32_t streamId;
|
||||
char listed;
|
||||
|
@ -320,7 +321,7 @@ int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
|
|||
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
|
||||
|
||||
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput);
|
||||
void handleDownstreamOperator(SSqlObj** pSqlList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pParent);
|
||||
void destroyTableNameList(SInsertStatementParam* pInsertParam);
|
||||
|
||||
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
|
||||
|
|
|
@ -144,7 +144,7 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows) {
|
|||
}
|
||||
|
||||
// local merge has handle this situation during super table non-projection query.
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE) {
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_GLOBALMERGE) {
|
||||
pRes->numOfClauseTotal += pRes->numOfRows;
|
||||
}
|
||||
|
||||
|
@ -174,7 +174,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
|
|||
}
|
||||
|
||||
pSql->fp = fp;
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_GLOBALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
|
||||
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
|
||||
}
|
||||
|
||||
|
@ -257,14 +257,14 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
|
|||
}
|
||||
|
||||
return;
|
||||
} else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE) {
|
||||
} else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE) {
|
||||
// in case of show command, return no data
|
||||
(*pSql->fetchFp)(param, pSql, 0);
|
||||
} else {
|
||||
assert(0);
|
||||
}
|
||||
} else { // current query is not completed, continue retrieve from node
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
|
||||
if (pCmd->command != TSDB_SQL_RETRIEVE_GLOBALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
|
||||
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
|
||||
}
|
||||
|
||||
|
|
|
@ -323,7 +323,7 @@ TAOS_ROW tscFetchRow(void *param) {
|
|||
// current data set are exhausted, fetch more data from node
|
||||
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
|
||||
(pCmd->command == TSDB_SQL_RETRIEVE ||
|
||||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
|
||||
pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE ||
|
||||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
|
||||
pCmd->command == TSDB_SQL_FETCH ||
|
||||
pCmd->command == TSDB_SQL_SHOW ||
|
||||
|
|
|
@ -107,14 +107,10 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
|
|||
return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
|
||||
}
|
||||
|
||||
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
|
||||
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, timePrec) != TSDB_CODE_SUCCESS) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
if (timePrec == TSDB_TIME_PRECISION_MILLI) {
|
||||
interval /= 1000;
|
||||
}
|
||||
|
||||
if (sToken.type == TK_PLUS) {
|
||||
useconds += interval;
|
||||
} else {
|
||||
|
@ -468,6 +464,10 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
|
|||
|
||||
int32_t cnt = 0;
|
||||
int32_t j = 0;
|
||||
if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
|
||||
return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z);
|
||||
}
|
||||
|
||||
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
|
||||
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
|
||||
tmpTokenBuf[j] = sToken.z[k + 1];
|
||||
|
@ -711,7 +711,7 @@ static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char
|
|||
}
|
||||
|
||||
code = TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
|
||||
char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
|
||||
|
||||
int32_t numOfRows = 0;
|
||||
code = tsParseValues(str, dataBuf, maxNumOfRows, pInsertParam, &numOfRows, tmpTokenBuf);
|
||||
|
|
|
@ -78,6 +78,16 @@ typedef struct STscStmt {
|
|||
SNormalStmt normal;
|
||||
} STscStmt;
|
||||
|
||||
#define STMT_RET(c) do { \
|
||||
int32_t _code = c; \
|
||||
if (pStmt && pStmt->pSql) { pStmt->pSql->res.code = _code; } else {terrno = _code;} \
|
||||
return _code; \
|
||||
} while (0)
|
||||
|
||||
static int32_t invalidOperationMsg(char* dstBuffer, const char* errMsg) {
|
||||
return tscInvalidOperationMsg(dstBuffer, errMsg, NULL);
|
||||
}
|
||||
|
||||
static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_t len) {
|
||||
uint16_t size = stmt->numParts + 1;
|
||||
if (size > stmt->sizeParts) {
|
||||
|
@ -163,8 +173,8 @@ static int normalStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
|||
break;
|
||||
|
||||
default:
|
||||
tscDebug("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i);
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
tscError("0x%"PRIx64" bind column%d: type mismatch or invalid", stmt->pSql->self, i);
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind type mismatch or invalid");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -727,6 +737,7 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
|
|||
#endif
|
||||
|
||||
if (bind->buffer_type != param->type) {
|
||||
tscError("column type mismatch");
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
|
@ -754,6 +765,7 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
|
|||
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
if ((*bind->length) > (uintptr_t)param->bytes) {
|
||||
tscError("column length is too big");
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
size = (short)*bind->length;
|
||||
|
@ -763,6 +775,7 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
|
|||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
int32_t output = 0;
|
||||
if (!taosMbsToUcs4(bind->buffer, *bind->length, varDataVal(data + param->offset), param->bytes - VARSTR_HEADER_SIZE, &output)) {
|
||||
tscError("convert nchar failed");
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
varDataSetLen(data + param->offset, output);
|
||||
|
@ -787,6 +800,7 @@ static int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param,
|
|||
|
||||
static int doBindBatchParam(STableDataBlocks* pBlock, SParamInfo* param, TAOS_MULTI_BIND* bind, int32_t rowNum) {
|
||||
if (bind->buffer_type != param->type || !isValidDataType(param->type)) {
|
||||
tscError("column mismatch or invalid");
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
}
|
||||
|
||||
|
@ -892,8 +906,8 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
|
|||
|
||||
int code = doBindParam(pBlock, data, param, &bind[param->idx], 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||
return code;
|
||||
tscDebug("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -957,13 +971,13 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
|
|||
SParamInfo* param = &pBlock->params[j];
|
||||
if (bind[param->idx].num != rowNum) {
|
||||
tscError("0x%"PRIx64" param %d: num[%d:%d] not match", pStmt->pSql->self, param->idx, rowNum, bind[param->idx].num);
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind row num mismatch");
|
||||
}
|
||||
|
||||
int code = doBindBatchParam(pBlock, param, &bind[param->idx], pCmd->batchSize);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||
return code;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -974,7 +988,7 @@ static int insertStmtBindParamBatch(STscStmt* stmt, TAOS_MULTI_BIND* bind, int c
|
|||
int code = doBindBatchParam(pBlock, param, bind, pCmd->batchSize);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("0x%"PRIx64" bind column %d: type mismatch or invalid", pStmt->pSql->self, param->idx);
|
||||
return code;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "bind column type mismatch or invalid");
|
||||
}
|
||||
|
||||
if (colIdx == (pBlock->numOfParams - 1)) {
|
||||
|
@ -993,7 +1007,7 @@ static int insertStmtUpdateBatch(STscStmt* stmt) {
|
|||
|
||||
if (pCmd->batchSize > INT16_MAX) {
|
||||
tscError("too many record:%d", pCmd->batchSize);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "too many records");
|
||||
}
|
||||
|
||||
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) == 0) {
|
||||
|
@ -1057,7 +1071,8 @@ static int insertStmtReset(STscStmt* pStmt) {
|
|||
static int insertStmtExecute(STscStmt* stmt) {
|
||||
SSqlCmd* pCmd = &stmt->pSql->cmd;
|
||||
if (pCmd->batchSize == 0) {
|
||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||
tscError("no records bind");
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&stmt->pSql->cmd), "no records bind");
|
||||
}
|
||||
|
||||
if (taosHashGetSize(pCmd->insertParam.pTableBlockHashList) == 0) {
|
||||
|
@ -1174,7 +1189,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
|
|||
|
||||
if(pStmt->mtb.nameSet == false) {
|
||||
tscError("0x%"PRIx64" no table name set", pStmt->pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no table name set");
|
||||
}
|
||||
|
||||
pStmt->pSql->retry = pStmt->pSql->maxRetry + 1; //no retry
|
||||
|
@ -1215,7 +1230,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
|||
int32_t index = 0;
|
||||
SStrToken sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
|
||||
if (sToken.n == 0) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
tscError("table is is expected, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "table name is expected", pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
if (sToken.n == 1 && sToken.type == TK_QUESTION) {
|
||||
|
@ -1237,24 +1253,28 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (sToken.n <= 0 || sToken.type != TK_USING) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z);
|
||||
if (sToken.n <= 0 || sToken.type != TK_USING) {
|
||||
tscError("keywords USING is expected, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "keywords USING is expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
|
||||
if (sToken.n <= 0 || ((sToken.type != TK_ID) && (sToken.type != TK_STRING))) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z);
|
||||
tscError("invalid token, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
|
||||
}
|
||||
pStmt->mtb.stbname = sToken;
|
||||
|
||||
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
|
||||
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
|
||||
tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
|
||||
if (sToken.n <= 0 || sToken.type != TK_LP) {
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
|
||||
tscError("( expected, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "( expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
pStmt->mtb.tags = taosArrayInit(4, sizeof(SStrToken));
|
||||
|
@ -1264,7 +1284,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
|||
while (loopCont) {
|
||||
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
|
||||
if (sToken.n <= 0) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
tscError("unexpected sql end, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected sql end", pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
switch (sToken.type) {
|
||||
|
@ -1272,7 +1293,8 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
|||
loopCont = 0;
|
||||
break;
|
||||
case TK_VALUES:
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
tscError("unexpected token values, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "unexpected token", sToken.z);
|
||||
case TK_QUESTION:
|
||||
pStmt->mtb.tagSet = false; //continue
|
||||
default:
|
||||
|
@ -1282,12 +1304,14 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
|
|||
}
|
||||
|
||||
if (taosArrayGetSize(pStmt->mtb.tags) <= 0) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
tscError("no tags, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "no tags", pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
|
||||
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
tscError("sql error, sql:%s", pCmd->insertParam.sql);
|
||||
return tscSQLSyntaxErrMsg(pCmd->payload, "sql error", sToken.z ? sToken.z : pCmd->insertParam.sql);
|
||||
}
|
||||
|
||||
pStmt->mtb.values = sToken;
|
||||
|
@ -1329,8 +1353,8 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
|
|||
} else {
|
||||
if (tags[j].buffer == NULL) {
|
||||
free(str);
|
||||
tscError("empty");
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
tscError("empty tag value in params");
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "empty tag value in params");
|
||||
}
|
||||
|
||||
ret = converToStr(str + len, tags[j].buffer_type, tags[j].buffer, tags[j].length ? (int32_t)*tags[j].length : -1, &l);
|
||||
|
@ -1387,13 +1411,15 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
|
|||
|
||||
TAOS_STMT* taos_stmt_init(TAOS* taos) {
|
||||
STscObj* pObj = (STscObj*)taos;
|
||||
STscStmt* pStmt = NULL;
|
||||
|
||||
if (pObj == NULL || pObj->signature != pObj) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
tscError("connection disconnected");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
STscStmt* pStmt = calloc(1, sizeof(STscStmt));
|
||||
pStmt = calloc(1, sizeof(STscStmt));
|
||||
if (pStmt == NULL) {
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscError("failed to allocate memory for statement");
|
||||
|
@ -1410,6 +1436,14 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
|
||||
free(pSql);
|
||||
free(pStmt);
|
||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscError("failed to malloc payload buffer");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tsem_init(&pSql->rspSem, 0, 0);
|
||||
pSql->signature = pSql;
|
||||
pSql->pTscObj = pObj;
|
||||
|
@ -1425,13 +1459,12 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (pStmt->last != STMT_INIT) {
|
||||
tscError("prepare status error, last:%d", pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "prepare status error"));
|
||||
}
|
||||
|
||||
pStmt->last = STMT_PREPARE;
|
||||
|
@ -1447,17 +1480,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
|
||||
pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT;
|
||||
|
||||
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
|
||||
tscError("%p failed to malloc payload buffer", pSql);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
|
||||
|
||||
if (pSql->sqlstr == NULL) {
|
||||
tscError("%p failed to malloc sql string buffer", pSql);
|
||||
free(pCmd->payload);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
pRes->qId = 0;
|
||||
|
@ -1476,11 +1503,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
|
||||
int32_t ret = stmtParseInsertTbTags(pSql, pStmt);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
STMT_RET(ret);
|
||||
}
|
||||
|
||||
if (pStmt->multiTbInsert) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
memset(&pStmt->mtb, 0, sizeof(pStmt->mtb));
|
||||
|
@ -1489,14 +1516,14 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
|
|||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
// wait for the callback function to post the semaphore
|
||||
tsem_wait(&pSql->rspSem);
|
||||
return pSql->res.code;
|
||||
STMT_RET(pSql->res.code);
|
||||
}
|
||||
|
||||
return code;
|
||||
STMT_RET(code);
|
||||
}
|
||||
|
||||
pStmt->isInsert = false;
|
||||
return normalStmtPrepare(pStmt);
|
||||
STMT_RET(normalStmtPrepare(pStmt));
|
||||
}
|
||||
|
||||
int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags) {
|
||||
|
@ -1505,25 +1532,22 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
|
|||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (name == NULL) {
|
||||
terrno = TSDB_CODE_TSC_APP_ERROR;
|
||||
tscError("0x%"PRIx64" name is NULL", pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "name is NULL"));
|
||||
}
|
||||
|
||||
if (pStmt->multiTbInsert == false || !tscIsInsertData(pSql->sqlstr)) {
|
||||
terrno = TSDB_CODE_TSC_APP_ERROR;
|
||||
tscError("0x%"PRIx64" not multi table insert", pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
tscError("0x%"PRIx64" not multiple table insert", pSql->self);
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "not multiple table insert"));
|
||||
}
|
||||
|
||||
if (pStmt->last == STMT_INIT || pStmt->last == STMT_BIND || pStmt->last == STMT_BIND_COL) {
|
||||
tscError("0x%"PRIx64" settbname status error, last:%d", pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
tscError("0x%"PRIx64" set_tbname_tags status error, last:%d", pSql->self, pStmt->last);
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "set_tbname_tags status error"));
|
||||
}
|
||||
|
||||
pStmt->last = STMT_SETTBNAME;
|
||||
|
@ -1535,7 +1559,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
|
|||
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pStmt->mtb.pTableBlockHashList, (const char*)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid));
|
||||
if (t1 == NULL) {
|
||||
tscError("0x%"PRIx64" no table data block in hash list, uid:%" PRId64 , pSql->self, pStmt->mtb.currentUid);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(TSDB_CODE_TSC_APP_ERROR);
|
||||
}
|
||||
|
||||
SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
|
||||
|
@ -1544,7 +1568,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
|
|||
taosHashPut(pCmd->insertParam.pTableBlockHashList, (void *)&pStmt->mtb.currentUid, sizeof(pStmt->mtb.currentUid), (void*)t1, POINTER_BYTES);
|
||||
|
||||
tscDebug("0x%"PRIx64" table:%s is already prepared, uid:%" PRIu64, pSql->self, name, pStmt->mtb.currentUid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
if (pStmt->mtb.tagSet) {
|
||||
|
@ -1552,12 +1576,12 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
|
|||
} else {
|
||||
if (tags == NULL) {
|
||||
tscError("No tags set");
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "no tags set"));
|
||||
}
|
||||
|
||||
int32_t ret = stmtGenInsertStatement(pSql, pStmt, name, tags);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
STMT_RET(ret);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1591,7 +1615,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
|
|||
code = tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
STMT_RET(code);
|
||||
}
|
||||
|
||||
SSubmitBlk* blk = (SSubmitBlk*)pBlock->pData;
|
||||
|
@ -1606,7 +1630,7 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
|
|||
tscDebug("0x%"PRIx64" table:%s is prepared, uid:%" PRIx64, pSql->self, name, pStmt->mtb.currentUid);
|
||||
}
|
||||
|
||||
return code;
|
||||
STMT_RET(code);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1639,35 +1663,34 @@ int taos_stmt_close(TAOS_STMT* stmt) {
|
|||
}
|
||||
|
||||
taos_free_result(pStmt->pSql);
|
||||
free(pStmt);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
tfree(pStmt);
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (pStmt->isInsert) {
|
||||
if (pStmt->multiTbInsert) {
|
||||
if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
|
||||
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
|
||||
}
|
||||
} else {
|
||||
if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
|
||||
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
|
||||
}
|
||||
}
|
||||
|
||||
pStmt->last = STMT_BIND;
|
||||
|
||||
return insertStmtBindParam(pStmt, bind);
|
||||
STMT_RET(insertStmtBindParam(pStmt, bind));
|
||||
} else {
|
||||
return normalStmtBindParam(pStmt, bind);
|
||||
STMT_RET(normalStmtBindParam(pStmt, bind));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1676,69 +1699,67 @@ int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
|
|||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
|
||||
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
|
||||
}
|
||||
|
||||
if (!pStmt->isInsert) {
|
||||
tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "not or invalid batch insert"));
|
||||
}
|
||||
|
||||
if (pStmt->multiTbInsert) {
|
||||
if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH) {
|
||||
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
|
||||
}
|
||||
} else {
|
||||
if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_EXECUTE) {
|
||||
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
|
||||
}
|
||||
}
|
||||
|
||||
pStmt->last = STMT_BIND;
|
||||
|
||||
return insertStmtBindParamBatch(pStmt, bind, -1);
|
||||
STMT_RET(insertStmtBindParamBatch(pStmt, bind, -1));
|
||||
}
|
||||
|
||||
int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (bind == NULL || bind->num <= 0 || bind->num > INT16_MAX) {
|
||||
tscError("0x%"PRIx64" invalid parameter", pStmt->pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "invalid bind param"));
|
||||
}
|
||||
|
||||
if (!pStmt->isInsert) {
|
||||
tscError("0x%"PRIx64" not or invalid batch insert", pStmt->pSql->self);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "not or invalid batch insert"));
|
||||
}
|
||||
|
||||
if (pStmt->multiTbInsert) {
|
||||
if (pStmt->last != STMT_SETTBNAME && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL) {
|
||||
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
|
||||
}
|
||||
} else {
|
||||
if (pStmt->last != STMT_PREPARE && pStmt->last != STMT_ADD_BATCH && pStmt->last != STMT_BIND_COL && pStmt->last != STMT_EXECUTE) {
|
||||
tscError("0x%"PRIx64" bind param status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "bind param status error"));
|
||||
}
|
||||
}
|
||||
|
||||
pStmt->last = STMT_BIND_COL;
|
||||
|
||||
return insertStmtBindParamBatch(pStmt, bind, colIdx);
|
||||
STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx));
|
||||
}
|
||||
|
||||
|
||||
|
@ -1746,44 +1767,42 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
|
|||
int taos_stmt_add_batch(TAOS_STMT* stmt) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (pStmt->isInsert) {
|
||||
if (pStmt->last != STMT_BIND && pStmt->last != STMT_BIND_COL) {
|
||||
tscError("0x%"PRIx64" add batch status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "add batch status error"));
|
||||
}
|
||||
|
||||
pStmt->last = STMT_ADD_BATCH;
|
||||
|
||||
return insertStmtAddBatch(pStmt);
|
||||
STMT_RET(insertStmtAddBatch(pStmt));
|
||||
}
|
||||
|
||||
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
|
||||
STMT_RET(TSDB_CODE_COM_OPS_NOT_SUPPORT);
|
||||
}
|
||||
|
||||
int taos_stmt_reset(TAOS_STMT* stmt) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (pStmt->isInsert) {
|
||||
return insertStmtReset(pStmt);
|
||||
STMT_RET(insertStmtReset(pStmt));
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int taos_stmt_execute(TAOS_STMT* stmt) {
|
||||
int ret = 0;
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
if (stmt == NULL || pStmt->pSql == NULL || pStmt->taos == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (pStmt->isInsert) {
|
||||
if (pStmt->last != STMT_ADD_BATCH) {
|
||||
tscError("0x%"PRIx64" exec status error, last:%d", pStmt->pSql->self, pStmt->last);
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "exec status error"));
|
||||
}
|
||||
|
||||
pStmt->last = STMT_EXECUTE;
|
||||
|
@ -1809,7 +1828,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
|
|||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
STMT_RET(ret);
|
||||
}
|
||||
|
||||
TAOS_RES *taos_stmt_use_result(TAOS_STMT* stmt) {
|
||||
|
@ -1833,32 +1852,30 @@ int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert) {
|
|||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (insert) *insert = pStmt->isInsert;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int taos_stmt_num_params(TAOS_STMT *stmt, int *nums) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (pStmt->isInsert) {
|
||||
SSqlObj* pSql = pStmt->pSql;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
*nums = pCmd->insertParam.numOfParams;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
} else {
|
||||
SNormalStmt* normal = &pStmt->normal;
|
||||
*nums = normal->numParams;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1866,8 +1883,7 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
|
|||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
if (stmt == NULL || pStmt->taos == NULL || pStmt->pSql == NULL) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return TSDB_CODE_TSC_DISCONNECTED;
|
||||
STMT_RET(TSDB_CODE_TSC_DISCONNECTED);
|
||||
}
|
||||
|
||||
if (pStmt->isInsert) {
|
||||
|
@ -1884,24 +1900,37 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
|
|||
tscGetDataBlockFromList(pCmd->insertParam.pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
|
||||
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
|
||||
if (ret != 0) {
|
||||
// todo handle error
|
||||
STMT_RET(ret);
|
||||
}
|
||||
|
||||
if (idx<0 || idx>=pBlock->numOfParams) {
|
||||
tscError("0x%"PRIx64" param %d: out of range", pStmt->pSql->self, idx);
|
||||
abort();
|
||||
STMT_RET(invalidOperationMsg(tscGetErrorMsgPayload(&pStmt->pSql->cmd), "idx out of range"));
|
||||
}
|
||||
|
||||
SParamInfo* param = &pBlock->params[idx];
|
||||
if (type) *type = param->type;
|
||||
if (bytes) *bytes = param->bytes;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
STMT_RET(TSDB_CODE_SUCCESS);
|
||||
} else {
|
||||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
STMT_RET(TSDB_CODE_COM_OPS_NOT_SUPPORT);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
char *taos_stmt_errstr(TAOS_STMT *stmt) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
if (stmt == NULL) {
|
||||
return (char*) tstrerror(terrno);
|
||||
}
|
||||
|
||||
return taos_errstr(pStmt->pSql);
|
||||
}
|
||||
|
||||
|
||||
|
||||
const char *taos_data_type(int type) {
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL";
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "ttimer.h"
|
||||
#include "tutil.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tcq.h"
|
||||
|
||||
#include "taos.h"
|
||||
|
||||
|
@ -294,24 +295,34 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
|
|||
return msgLen;
|
||||
}
|
||||
|
||||
// this callback is invoked after cqContext->dbConn is killed
|
||||
void cqConnKilledNotify(void* handle, void* conn) {
|
||||
if (handle == NULL || conn == NULL){
|
||||
return ;
|
||||
}
|
||||
|
||||
SCqContext* pContext = (SCqContext*) handle;
|
||||
if (pContext->dbConn == conn){
|
||||
atomic_store_ptr(&(pContext->dbConn), NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void tscKillConnection(STscObj *pObj) {
|
||||
// fetch the stream list head while holding the lock
|
||||
pthread_mutex_lock(&pObj->mutex);
|
||||
|
||||
SSqlObj *pSql = pObj->sqlList;
|
||||
while (pSql) {
|
||||
pSql = pSql->next;
|
||||
}
|
||||
|
||||
|
||||
SSqlStream *pStream = pObj->streamList;
|
||||
pthread_mutex_unlock(&pObj->mutex);
|
||||
|
||||
while (pStream) {
|
||||
SSqlStream *tmp = pStream->next;
|
||||
// clear the associated connection pointer
|
||||
cqConnKilledNotify(pStream->cqhandle, pObj);
|
||||
// taos_close_stream() takes the pObj->mutex lock; be careful to avoid deadlock
|
||||
taos_close_stream(pStream);
|
||||
pStream = tmp;
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&pObj->mutex);
|
||||
|
||||
tscDebug("connection:%p is killed", pObj);
|
||||
taos_close(pObj);
|
||||
}
|
||||
|
||||
|
|
|
@ -63,6 +63,9 @@ static SExprInfo* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int3
|
|||
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
|
||||
static char* getAccountId(SSqlObj* pSql);
|
||||
|
||||
static bool serializeExprListToVariant(SArray* pList, tVariant **dest, int16_t colType);
|
||||
static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType);
|
||||
|
||||
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
|
||||
static char* cloneCurrentDBName(SSqlObj* pSql);
|
||||
static int32_t getDelimiterIndex(SStrToken* pTableName);
|
||||
|
@ -120,7 +123,7 @@ static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQue
|
|||
static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
|
||||
|
||||
static int32_t getTableIndexImpl(SStrToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
|
||||
static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
|
||||
static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg);
|
||||
static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode);
|
||||
static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate);
|
||||
|
||||
|
@ -135,14 +138,97 @@ static bool validateDebugFlag(int32_t v);
|
|||
static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
|
||||
static int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo);
|
||||
|
||||
static bool isTimeWindowQuery(SQueryInfo* pQueryInfo) {
|
||||
static bool isTimeWindowQuery(SQueryInfo* pQueryInfo) {
|
||||
return pQueryInfo->interval.interval > 0 || pQueryInfo->sessionWindow.gap > 0;
|
||||
}
|
||||
|
||||
|
||||
int16_t getNewResColId(SSqlCmd* pCmd) {
|
||||
return pCmd->resColumnId--;
|
||||
}
|
||||
|
||||
// serialize expr in exprlist to binary
|
||||
// format: "type | size | value"
|
||||
bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType) {
|
||||
bool ret = false;
|
||||
if (!pList || pList->size <= 0) {
|
||||
return ret;
|
||||
}
|
||||
if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
tSqlExprItem* item = (tSqlExprItem *)taosArrayGet(pList, 0);
|
||||
int32_t firstTokenType = item->pNode->token.type;
|
||||
int32_t type = firstTokenType;
|
||||
|
||||
// nchar and binary are treated as compatible here
|
||||
toTSDBType(type);
|
||||
if (type != colType && (type != TSDB_DATA_TYPE_BINARY || colType != TSDB_DATA_TYPE_NCHAR)) {
|
||||
return false;
|
||||
}
|
||||
type = colType;
|
||||
|
||||
SBufferWriter bw = tbufInitWriter( NULL, false );
|
||||
tbufEnsureCapacity(&bw, 512);
|
||||
|
||||
int32_t size = (int32_t)(pList->size);
|
||||
tbufWriteUint32(&bw, type);
|
||||
tbufWriteInt32(&bw, size);
|
||||
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
tSqlExpr* pSub = ((tSqlExprItem*)(taosArrayGet(pList, i)))->pNode;
|
||||
|
||||
// check all the token type in expr list same or not
|
||||
if (firstTokenType != pSub->token.type) {
|
||||
break;
|
||||
}
|
||||
|
||||
toTSDBType(pSub->token.type);
|
||||
|
||||
tVariant var;
|
||||
tVariantCreate(&var, &pSub->token);
|
||||
if (type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_SMALLINT
|
||||
|| type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_INT) {
|
||||
tbufWriteInt64(&bw, var.i64);
|
||||
} else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
|
||||
tbufWriteDouble(&bw, var.dKey);
|
||||
} else if (type == TSDB_DATA_TYPE_BINARY){
|
||||
tbufWriteBinary(&bw, var.pz, var.nLen);
|
||||
} else if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
char *buf = (char *)calloc(1, (var.nLen + 1)*TSDB_NCHAR_SIZE);
|
||||
if (tVariantDump(&var, buf, type, false) != TSDB_CODE_SUCCESS) {
|
||||
free(buf);
|
||||
tVariantDestroy(&var);
|
||||
break;
|
||||
}
|
||||
tbufWriteBinary(&bw, buf, twcslen((wchar_t *)buf) * TSDB_NCHAR_SIZE);
|
||||
free(buf);
|
||||
}
|
||||
tVariantDestroy(&var);
|
||||
|
||||
if (i == size - 1) { ret = true;}
|
||||
}
|
||||
|
||||
if (ret == true) {
|
||||
if ((*dst = calloc(1, sizeof(tVariant))) != NULL) {
|
||||
tVariantCreateFromBinary(*dst, tbufGetData(&bw, false), tbufTell(&bw), TSDB_DATA_TYPE_BINARY);
|
||||
} else {
|
||||
ret = false;
|
||||
}
|
||||
}
|
||||
tbufCloseWriter(&bw);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int32_t validateParamOfRelationIn(tVariant *pVar, int32_t colType) {
|
||||
if (pVar->nType != TSDB_DATA_TYPE_BINARY) {
|
||||
return -1;
|
||||
}
|
||||
SBufferReader br = tbufInitReader(pVar->pz, pVar->nLen, false);
|
||||
return colType == TSDB_DATA_TYPE_NCHAR ? 0 : (tbufReadUint32(&br) == colType ? 0: -1);
|
||||
}
|
||||
|
||||
static uint8_t convertOptr(SStrToken *pToken) {
|
||||
switch (pToken->type) {
|
||||
case TK_LT:
|
||||
|
@ -163,6 +249,7 @@ static uint8_t convertOptr(SStrToken *pToken) {
|
|||
return TSDB_RELATION_EQUAL;
|
||||
case TK_PLUS:
|
||||
return TSDB_BINARY_OP_ADD;
|
||||
|
||||
case TK_MINUS:
|
||||
return TSDB_BINARY_OP_SUBTRACT;
|
||||
case TK_STAR:
|
||||
|
@ -178,6 +265,8 @@ static uint8_t convertOptr(SStrToken *pToken) {
|
|||
return TSDB_RELATION_ISNULL;
|
||||
case TK_NOTNULL:
|
||||
return TSDB_RELATION_NOTNULL;
|
||||
case TK_IN:
|
||||
return TSDB_RELATION_IN;
|
||||
default: { return 0; }
|
||||
}
|
||||
}
|
||||
|
@ -350,7 +439,9 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
|
||||
pzName->n = strdequote(pzName->z);
|
||||
if (pzName->type == TK_STRING) {
|
||||
pzName->n = strdequote(pzName->z);
|
||||
}
|
||||
strncpy(pCmd->payload, pzName->z, pzName->n);
|
||||
} else { // drop user/account
|
||||
if (pzName->n >= TSDB_USER_LEN) {
|
||||
|
@ -428,7 +519,9 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
}
|
||||
|
||||
SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
|
||||
id->n = strdequote(id->z);
|
||||
if (id->type == TK_STRING) {
|
||||
id->n = strdequote(id->z);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -839,18 +932,15 @@ int32_t validateIntervalNode(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNode* pS
|
|||
|
||||
// interval is not null
|
||||
SStrToken *t = &pSqlNode->interval.interval;
|
||||
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) {
|
||||
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval,
|
||||
&pQueryInfo->interval.intervalUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
|
||||
// if the unit of time window value is millisecond, change the value from microsecond
|
||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||
pQueryInfo->interval.interval = pQueryInfo->interval.interval / 1000;
|
||||
}
|
||||
|
||||
// interval cannot be less than 10 milliseconds
|
||||
if (pQueryInfo->interval.interval < tsMinIntervalTime) {
|
||||
if (convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MILLI) < tsMinIntervalTime) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
}
|
||||
}
|
||||
|
@ -926,6 +1016,8 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
|
|||
const char* msg3 = "invalid column name";
|
||||
const char* msg4 = "invalid time window";
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
// no session window
|
||||
if (!TPARSER_HAS_TOKEN(pSqlNode->sessionVal.gap)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -935,7 +1027,7 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
|
|||
SStrToken* gap = &pSqlNode->sessionVal.gap;
|
||||
|
||||
char timeUnit = 0;
|
||||
if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit) != TSDB_CODE_SUCCESS) {
|
||||
if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
|
||||
}
|
||||
|
||||
|
@ -943,13 +1035,6 @@ int32_t validateSessionNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode * pS
|
|||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
}
|
||||
|
||||
// if the unit of time window value is millisecond, change the value from microsecond
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||
pQueryInfo->sessionWindow.gap = pQueryInfo->sessionWindow.gap / 1000;
|
||||
}
|
||||
|
||||
if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
}
|
||||
|
@ -986,7 +1071,8 @@ int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* of
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit) != TSDB_CODE_SUCCESS) {
|
||||
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset,
|
||||
&pQueryInfo->interval.offsetUnit, tinfo.precision) != TSDB_CODE_SUCCESS) {
|
||||
return TSDB_CODE_TSC_INVALID_OPERATION;
|
||||
}
|
||||
|
||||
|
@ -995,10 +1081,6 @@ int32_t parseIntervalOffset(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* of
|
|||
}
|
||||
|
||||
if (pQueryInfo->interval.offsetUnit != 'n' && pQueryInfo->interval.offsetUnit != 'y') {
|
||||
// if the unit of time window value is millisecond, change the value from microsecond
|
||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||
pQueryInfo->interval.offset = pQueryInfo->interval.offset / 1000;
|
||||
}
|
||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
|
||||
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
|
@ -1043,12 +1125,10 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl
|
|||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
|
||||
parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding);
|
||||
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
|
||||
pQueryInfo->interval.sliding /= 1000;
|
||||
}
|
||||
parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding, tinfo.precision);
|
||||
|
||||
if (pQueryInfo->interval.sliding < tsMinSlidingTime) {
|
||||
if (pQueryInfo->interval.sliding <
|
||||
convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, tinfo.precision)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);
|
||||
}
|
||||
|
||||
|
@ -2075,7 +2155,10 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
const char* msg6 = "function applied to tags not allowed";
|
||||
const char* msg7 = "normal table can not apply this function";
|
||||
const char* msg8 = "multi-columns selection does not support alias column name";
|
||||
const char* msg9 = "diff can no be applied to unsigned numeric type";
|
||||
const char* msg9 = "diff/derivative can no be applied to unsigned numeric type";
|
||||
const char* msg10 = "derivative duration should be greater than 1 Second";
|
||||
const char* msg11 = "third parameter in derivative should be 0 or 1";
|
||||
const char* msg12 = "parameter is out of range [1, 100]";
|
||||
|
||||
switch (functionId) {
|
||||
case TSDB_FUNC_COUNT: {
|
||||
|
@ -2164,11 +2247,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
case TSDB_FUNC_MIN:
|
||||
case TSDB_FUNC_MAX:
|
||||
case TSDB_FUNC_DIFF:
|
||||
case TSDB_FUNC_DERIVATIVE:
|
||||
case TSDB_FUNC_STDDEV:
|
||||
case TSDB_FUNC_LEASTSQR: {
|
||||
// 1. valid the number of parameters
|
||||
if (pItem->pNode->pParam == NULL || (functionId != TSDB_FUNC_LEASTSQR && taosArrayGetSize(pItem->pNode->pParam) != 1) ||
|
||||
(functionId == TSDB_FUNC_LEASTSQR && taosArrayGetSize(pItem->pNode->pParam) != 3)) {
|
||||
int32_t numOfParams = (pItem->pNode->pParam == NULL)? 0: (int32_t) taosArrayGetSize(pItem->pNode->pParam);
|
||||
if (pItem->pNode->pParam == NULL ||
|
||||
(functionId != TSDB_FUNC_LEASTSQR && functionId != TSDB_FUNC_DERIVATIVE && numOfParams != 1) ||
|
||||
((functionId == TSDB_FUNC_LEASTSQR || functionId == TSDB_FUNC_DERIVATIVE) && numOfParams != 3)) {
|
||||
/* no parameters or more than one parameter for function */
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
}
|
||||
|
@ -2189,11 +2275,13 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
// 2. check if sql function can be applied on this column data type
|
||||
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
|
||||
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
|
||||
SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex);
|
||||
|
||||
if (!IS_NUMERIC_TYPE(pSchema->type)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
|
||||
} else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && functionId == TSDB_FUNC_DIFF) {
|
||||
} else if (IS_UNSIGNED_NUMERIC_TYPE(pSchema->type) && (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE)) {
|
||||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
|
||||
}
|
||||
|
||||
|
@ -2207,11 +2295,11 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
}
|
||||
|
||||
// set the first column ts for diff query
|
||||
if (functionId == TSDB_FUNC_DIFF) {
|
||||
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
|
||||
colIndex += 1;
|
||||
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE,
|
||||
getNewResColId(pCmd), TSDB_KEYSIZE, false);
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP,
|
||||
TSDB_KEYSIZE, getNewResColId(pCmd), TSDB_KEYSIZE, false);
|
||||
|
||||
SColumnList ids = createColumnList(1, 0, 0);
|
||||
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].name, pExpr);
|
||||
|
@ -2222,7 +2310,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), resultSize, false);
|
||||
SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), intermediateResSize, false);
|
||||
|
||||
if (functionId == TSDB_FUNC_LEASTSQR) { // set the leastsquares parameters
|
||||
char val[8] = {0};
|
||||
|
@@ -2237,12 +2325,41 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
return TSDB_CODE_TSC_INVALID_OPERATION;
}

tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES);
} else if (functionId == TSDB_FUNC_IRATE) {
STableComInfo info = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int64_t prec = info.precision;

tscExprAddParams(&pExpr->base, (char*)&prec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
} else if (functionId == TSDB_FUNC_DERIVATIVE) {
char val[8] = {0};

int64_t tickPerSec = 0;
if (tVariantDump(&pParamElem[1].pNode->value, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, true) < 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

if (info.precision == TSDB_TIME_PRECISION_MILLI) {
tickPerSec /= 1000000;
} else if (info.precision == TSDB_TIME_PRECISION_MICRO) {
tickPerSec /= 1000;
}

if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10);
}

tscExprAddParams(&pExpr->base, (char*) &tickPerSec, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
memset(val, 0, tListLen(val));

if (tVariantDump(&pParamElem[2].pNode->value, val, TSDB_DATA_TYPE_BIGINT, true) < 0) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

int64_t v = *(int64_t*) val;
if (v != 0 && v != 1) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
}

tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_BIGINT, LONG_BYTES);
}

SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex);
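The DERIVATIVE branch above normalizes the user-supplied time-unit parameter to the table's precision and rejects intervals shorter than one second (msg10). A minimal self-contained sketch of that normalization, assuming the parser hands the interval over in nanoseconds and the usual 1000/1000000/1000000000 ticks-per-second values (both assumptions; the real constants live in TDengine's headers):

```c
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the TSDB_TIME_PRECISION_* codes */
enum { PREC_MILLI = 0, PREC_MICRO = 1, PREC_NANO = 2 };

static int64_t ticks_per_second(int precision) {
  switch (precision) {
    case PREC_MILLI: return 1000;
    case PREC_MICRO: return 1000000;
    default:         return 1000000000;  /* PREC_NANO */
  }
}

/* Normalize an interval given in nanoseconds to the table's precision and
 * reject anything below one second, mirroring the msg10 check above. */
static int normalize_interval(int64_t intervalNs, int precision, int64_t* out) {
  int64_t ticks = intervalNs;
  if (precision == PREC_MILLI)      ticks /= 1000000;
  else if (precision == PREC_MICRO) ticks /= 1000;
  if (ticks <= 0 || ticks < ticks_per_second(precision)) return -1;
  *out = ticks;
  return 0;
}

int main(void) {
  int64_t t;
  /* 1s in nanoseconds passes for a millisecond-precision table */
  printf("%d\n", normalize_interval(1000000000LL, PREC_MILLI, &t));  /* 0 */
  /* 500ms is rejected */
  printf("%d\n", normalize_interval(500000000LL, PREC_MILLI, &t));   /* -1 */
  return 0;
}
```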
@@ -2447,7 +2564,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col

int64_t nTop = GET_INT32_VAL(val);
if (nTop <= 0 || nTop > 100) { // todo use macro
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
}

// todo REFACTOR

@@ -2947,8 +3064,8 @@ void tscRestoreFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}

bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "TWA not allowed to apply to super table directly";
const char* msg2 = "TWA only support group by tbname for super table query";
const char* msg1 = "TWA/Diff not allowed to apply to super table directly";
const char* msg2 = "TWA/Diff only support group by tbname for super table query";
const char* msg3 = "function not support for super table query";

// filter sql function not supported by metric query yet.

@@ -2961,7 +3078,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
}
}

if (tscIsTWAQuery(pQueryInfo)) {
if (tscIsTWAQuery(pQueryInfo) || tscIsDiffQuery(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;

@@ -3201,8 +3318,9 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
return retVal;
}
} else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BIGINT == pRight->value.nType)) {
if ((timePrecision == TSDB_TIME_PRECISION_MILLI) && (pRight->flags & (1 << EXPR_FLAG_US_TIMESTAMP))) {
pRight->value.i64 /= 1000;
if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
pRight->value.i64 =
convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
}
}

@@ -3219,6 +3337,25 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
retVal = tVariantDump(&pRight->value, (char*)&pColumnFilter->upperBndd, colType, false);

// TK_GT,TK_GE,TK_EQ,TK_NE are based on the pColumn->lowerBndd
} else if (pExpr->tokenId == TK_IN) {
tVariant *pVal;
if (pRight->tokenId != TK_SET || !serializeExprListToVariant(pRight->pParam, &pVal, colType) || colType == TSDB_DATA_TYPE_TIMESTAMP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (validateParamOfRelationIn(pVal, colType) != TSDB_CODE_SUCCESS) {
tVariantDestroy(pVal);
free(pVal);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
pColumnFilter->pz = (int64_t)calloc(1, pVal->nLen + 1);
pColumnFilter->len = pVal->nLen;
pColumnFilter->filterstr = 1;
memcpy((char *)(pColumnFilter->pz), (char *)(pVal->pz), pVal->nLen);
//retVal = tVariantDump(pVal, (char *)(pColumnFilter->pz), TSDB_DATA_TYPE_BINARY, false);

tVariantDestroy(pVal);
free(pVal);

} else if (colType == TSDB_DATA_TYPE_BINARY) {
pColumnFilter->pz = (int64_t)calloc(1, bufLen * TSDB_NCHAR_SIZE);
pColumnFilter->len = pRight->value.nLen;
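The TK_IN branch above serializes the parsed value list into a flat buffer (via serializeExprListToVariant) and stores it on the filter as a string payload that the filter side later walks. A minimal sketch of that round trip using a local, hypothetical layout (a count followed by raw int64 values) — not TDengine's actual tbuf encoding:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical flat layout: [uint32 count][int64 x count] */
static size_t serialize_in_list(const int64_t* vals, uint32_t n, char** out) {
  size_t len = sizeof(uint32_t) + (size_t)n * sizeof(int64_t);
  char* buf = malloc(len);
  memcpy(buf, &n, sizeof(uint32_t));
  memcpy(buf + sizeof(uint32_t), vals, (size_t)n * sizeof(int64_t));
  *out = buf;
  return len;
}

/* The filter side walks the same buffer to test membership. */
static int in_list_contains(const char* buf, int64_t key) {
  uint32_t n;
  memcpy(&n, buf, sizeof(uint32_t));
  const char* p = buf + sizeof(uint32_t);
  for (uint32_t i = 0; i < n; ++i) {
    int64_t v;
    memcpy(&v, p + (size_t)i * sizeof(int64_t), sizeof(int64_t));
    if (v == key) return 1;
  }
  return 0;
}

int main(void) {
  const int64_t vals[] = {1, 5, 9};
  char* buf = NULL;
  serialize_in_list(vals, 3, &buf);
  printf("%d %d\n", in_list_contains(buf, 5), in_list_contains(buf, 7)); /* 1 0 */
  free(buf);
  return 0;
}
```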
@@ -3267,6 +3404,9 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
case TK_NOTNULL:
pColumnFilter->lowerRelOptr = TSDB_RELATION_NOTNULL;
break;
case TK_IN:
pColumnFilter->lowerRelOptr = TSDB_RELATION_IN;
break;
default:
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}

@@ -3382,7 +3522,7 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC
&& pExpr->tokenId != TK_ISNULL
&& pExpr->tokenId != TK_NOTNULL
&& pExpr->tokenId != TK_LIKE
) {
&& pExpr->tokenId != TK_IN) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {

@@ -3392,7 +3532,7 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC

if (pSchema->type == TSDB_DATA_TYPE_BOOL) {
int32_t t = pExpr->tokenId;
if (t != TK_EQ && t != TK_NE && t != TK_NOTNULL && t != TK_ISNULL) {
if (t != TK_EQ && t != TK_NE && t != TK_NOTNULL && t != TK_ISNULL && t != TK_IN) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}

@@ -4434,7 +4574,11 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
free(tmp);
} else {
double tmp;
retVal = tVariantDump(vVariant, (char*)&tmp, schemaType, false);
if (p->_node.optr == TSDB_RELATION_IN) {
retVal = validateParamOfRelationIn(vVariant, schemaType);
} else {
retVal = tVariantDump(vVariant, (char*)&tmp, schemaType, false);
}
}

if (retVal != TSDB_CODE_SUCCESS) {

@@ -4719,7 +4863,7 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t

char* seg = strnchr(pRight->value.pz, '-', pRight->value.nLen, false);
if (seg != NULL) {
if (taosParseTime(pRight->value.pz, &val, pRight->value.nLen, TSDB_TIME_PRECISION_MICRO, tsDaylight) == TSDB_CODE_SUCCESS) {
if (taosParseTime(pRight->value.pz, &val, pRight->value.nLen, timePrecision, tsDaylight) == TSDB_CODE_SUCCESS) {
parsed = true;
} else {
return TSDB_CODE_TSC_INVALID_OPERATION;

@@ -4732,18 +4876,6 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}
} else if (pRight->tokenId == TK_INTEGER && timePrecision == TSDB_TIME_PRECISION_MILLI) {
/*
* if the pRight->tokenId == TK_INTEGER/TK_FLOAT, the value is adaptive, we
* need the time precision in metermeta to transfer the value in MICROSECOND
*
* Additional check to avoid data overflow
*/
if (pRight->value.i64 <= INT64_MAX / 1000) {
pRight->value.i64 *= 1000;
}
} else if (pRight->tokenId == TK_FLOAT && timePrecision == TSDB_TIME_PRECISION_MILLI) {
pRight->value.dKey *= 1000;
}

if (!parsed) {
@@ -4751,33 +4883,19 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
* failed to parse timestamp in regular formation, try next
* it may be a epoch time in string format
*/
tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);

/*
* transfer it into MICROSECOND format if it is a string, since for
* TK_INTEGER/TK_FLOAT the value has been transferred
*
* additional check to avoid data overflow
*/
if (pRight->tokenId == TK_STRING && timePrecision == TSDB_TIME_PRECISION_MILLI) {
if (val <= INT64_MAX / 1000) {
val *= 1000;
}
if (pRight->flags & (1 << EXPR_FLAG_NS_TIMESTAMP)) {
pRight->value.i64 = convertTimePrecision(pRight->value.i64, TSDB_TIME_PRECISION_NANO, timePrecision);
}
}

int32_t delta = 1;
/* for millisecond, delta is 1ms=1000us */
if (timePrecision == TSDB_TIME_PRECISION_MILLI) {
delta *= 1000;
tVariantDump(&pRight->value, (char*)&val, TSDB_DATA_TYPE_BIGINT, true);
}

if (optr == TK_LE) {
win->ekey = val;
} else if (optr == TK_LT) {
win->ekey = val - delta;
win->ekey = val - 1;
} else if (optr == TK_GT) {
win->skey = val + delta;
win->skey = val + 1;
} else if (optr == TK_GE) {
win->skey = val;
} else if (optr == TK_EQ) {
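With every timestamp already normalized to the table's native precision, the rewritten range logic above can shrink strict bounds by exactly one tick instead of a precision-dependent delta. A small sketch of that mapping (operator codes are stand-ins for the TK_* tokens):

```c
#include <stdint.h>
#include <stdio.h>

enum { OP_LT, OP_LE, OP_GT, OP_GE };

typedef struct { int64_t skey, ekey; } TimeWindow;

/* Strict comparisons shrink the window by one tick in native precision. */
static void apply_time_filter(TimeWindow* win, int op, int64_t val) {
  switch (op) {
    case OP_LE: win->ekey = val;     break;
    case OP_LT: win->ekey = val - 1; break;
    case OP_GE: win->skey = val;     break;
    case OP_GT: win->skey = val + 1; break;
  }
}

int main(void) {
  TimeWindow w = {INT64_MIN, INT64_MAX};
  apply_time_filter(&w, OP_GT, 1000);  /* ts > 1000  => skey = 1001 */
  apply_time_filter(&w, OP_LE, 2000);  /* ts <= 2000 => ekey = 2000 */
  printf("%lld %lld\n", (long long)w.skey, (long long)w.ekey);
  return 0;
}
```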
@@ -5500,8 +5618,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";

if (pQueryInfo->interval.interval != 0 && pQueryInfo->interval.interval < 10 &&
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
if (pQueryInfo->interval.interval != 0 &&
convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MILLI)< 10 &&
pQueryInfo->interval.intervalUnit != 'n' &&
pQueryInfo->interval.intervalUnit != 'y') {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0);

@@ -5520,7 +5640,7 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {

int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
bool isProjectionFunction = false;
const char* msg1 = "column projection is not compatible with interval";
const char* msg1 = "functions not compatible with interval";

// multi-output set/ todo refactor
size_t size = taosArrayGetSize(pQueryInfo->exprList);

@@ -5544,8 +5664,8 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQu
}
}

if ((pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || pExpr->base.functionId == TSDB_FUNC_DIFF ||
pExpr->base.functionId == TSDB_FUNC_ARITHM) {
int32_t f = pExpr->base.functionId;
if ((f == TSDB_FUNC_PRJ && pExpr->base.numOfParams == 0) || f == TSDB_FUNC_DIFF || f == TSDB_FUNC_ARITHM || f == TSDB_FUNC_DERIVATIVE) {
isProjectionFunction = true;
}
}
@@ -5831,48 +5951,40 @@ int32_t validateLimitNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlN
}

static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
const char* msg = "invalid number of options";
const char* msg1 = "invalid number of keep options";
const char* msg2 = "invalid keep value";
const char* msg3 = "invalid keep value, should be keep0 <= keep1 <= keep2";

pMsg->daysToKeep = htonl(-1);
pMsg->daysToKeep0 = htonl(-1);
pMsg->daysToKeep1 = htonl(-1);
pMsg->daysToKeep2 = htonl(-1);

SArray* pKeep = pCreateDb->keep;
if (pKeep != NULL) {
size_t s = taosArrayGetSize(pKeep);
tVariantListItem* p0 = taosArrayGet(pKeep, 0);
switch (s) {
case 1: {
if ((int32_t)p0->pVar.i64 <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
}
break;
case 2: {
tVariantListItem* p1 = taosArrayGet(pKeep, 1);
if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
break;
}
case 3: {
tVariantListItem* p1 = taosArrayGet(pKeep, 1);
tVariantListItem* p2 = taosArrayGet(pKeep, 2);

if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}

pMsg->daysToKeep = htonl((int32_t)p0->pVar.i64);
pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);
break;
}
default: { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg); }
#ifdef _STORAGE
if (s >= 4 ||s <= 0) {
#else
if (s != 1) {
#endif
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

tVariantListItem* p0 = taosArrayGet(pKeep, 0);
tVariantListItem* p1 = (s > 1) ? taosArrayGet(pKeep, 1) : p0;
tVariantListItem* p2 = (s > 2) ? taosArrayGet(pKeep, 2) : p1;

if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (!(((int32_t)p0->pVar.i64 <= (int32_t)p1->pVar.i64) && ((int32_t)p1->pVar.i64 <= (int32_t)p2->pVar.i64))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}

pMsg->daysToKeep0 = htonl((int32_t)p0->pVar.i64);
pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);

}

return TSDB_CODE_SUCCESS;
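The rewritten setKeepOption replaces the per-count switch with a backfill rule: a missing keep1/keep2 inherits the previous value, then positivity and non-decreasing order are enforced. A minimal sketch of that rule (names are illustrative, not TDengine API):

```c
#include <stdint.h>
#include <stdio.h>

/* Backfill up to three KEEP values: a missing keep1/keep2 inherits the
 * previous value, then each must be positive and non-decreasing. */
static int fill_keep(const int32_t* vals, size_t n, int32_t out[3]) {
  if (n < 1 || n > 3) return -1;               /* "invalid number of keep options" */
  out[0] = vals[0];
  out[1] = (n > 1) ? vals[1] : out[0];
  out[2] = (n > 2) ? vals[2] : out[1];
  for (int i = 0; i < 3; ++i) {
    if (out[i] <= 0) return -2;                /* "invalid keep value" */
  }
  if (!(out[0] <= out[1] && out[1] <= out[2])) return -3;  /* order violated */
  return 0;
}

int main(void) {
  int32_t k[3];
  const int32_t one[] = {3650};
  if (fill_keep(one, 1, k) == 0)
    printf("%d %d %d\n", k[0], k[1], k[2]);    /* 3650 3650 3650 */
  return 0;
}
```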
@@ -5894,11 +6006,15 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo
} else if (strncmp(pToken->z, TSDB_TIME_PRECISION_MICRO_STR, pToken->n) == 0 &&
strlen(TSDB_TIME_PRECISION_MICRO_STR) == pToken->n) {
pMsg->precision = TSDB_TIME_PRECISION_MICRO;
} else if (strncmp(pToken->z, TSDB_TIME_PRECISION_NANO_STR, pToken->n) == 0 &&
strlen(TSDB_TIME_PRECISION_NANO_STR) == pToken->n) {
pMsg->precision = TSDB_TIME_PRECISION_NANO;
} else {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}

return TSDB_CODE_SUCCESS;
}

@@ -6141,7 +6257,7 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
* 2. if selectivity function and tagprj function both exist, there should be only
* one selectivity function exists.
*/
static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "only one selectivity function allowed in presence of tags function";
const char* msg3 = "aggregation function should not be mixed up with projection";

@@ -6164,10 +6280,11 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)

int16_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TS ||
functionId == TSDB_FUNC_ARITHM) {
functionId == TSDB_FUNC_ARITHM || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}

if ((aAggs[functionId].status & TSDB_FUNCSTATE_SELECTIVITY) != 0) {
numOfSelectivity++;
} else {

@@ -6179,7 +6296,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
// When the tag projection function on tag column that is not in the group by clause, aggregation function and
// selectivity function exist in select clause is not allowed.
if (numOfAggregation > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidOperationMsg(msg, msg1);
}

/*

@@ -6208,7 +6325,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
(functionId == TSDB_FUNC_LAST_DST && (pExpr->base.colInfo.flag & TSDB_COL_NULL) != 0)) {
// do nothing
} else {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidOperationMsg(msg, msg1);
}
}

@@ -6221,7 +6338,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd)
} else {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidOperationMsg(msg, msg3);
}

if (numOfAggregation > 0 || numOfSelectivity > 0) {
@@ -6269,9 +6386,14 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
size_t size = tscNumOfExprs(pQueryInfo);

if (TSDB_COL_IS_TAG(pColIndex->flag)) {

int32_t f = TSDB_FUNC_TAG;
if (tscIsDiffDerivQuery(pQueryInfo)) {
f = TSDB_FUNC_TAGPRJ;
}

SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
SExprInfo* pExpr = tscExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, s->type, s->bytes,
getNewResColId(pCmd), s->bytes, true);
SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, &index, s->type, s->bytes, getNewResColId(pCmd), s->bytes, true);

memset(pExpr->base.aliasName, 0, sizeof(pExpr->base.aliasName));
tstrncpy(pExpr->base.aliasName, s->name, sizeof(pExpr->base.aliasName));

@@ -6331,7 +6453,7 @@ static int32_t doTagFunctionCheck(SQueryInfo* pQueryInfo) {
return (tableCounting && tagProjection)? -1:0;
}

int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char* msg) {
const char* msg1 = "functions/columns not allowed in group by query";
const char* msg2 = "projection query on columns not allowed";
const char* msg3 = "group by/session/state_window not allowed on projection query";

@@ -6341,17 +6463,17 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
// only retrieve tags, group by is not supportted
if (tscQueryTags(pQueryInfo)) {
if (doTagFunctionCheck(pQueryInfo) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
return invalidOperationMsg(msg, msg5);
}

if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || isTimeWindowQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
return invalidOperationMsg(msg, msg4);
} else {
return TSDB_CODE_SUCCESS;
}
}
if (tscIsProjectionQuery(pQueryInfo) && tscIsSessionWindowQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
return invalidOperationMsg(msg, msg3);
}

if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {

@@ -6359,6 +6481,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
// It is a groupby aggregate query, the tag project function is not suitable for this case.
updateTagPrjFunction(pQueryInfo);

return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
}

@@ -6366,13 +6489,13 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t functId = pExpr->base.functionId;
int32_t f = pExpr->base.functionId;

/*
* group by normal columns.
* Check if the column projection is identical to the group by column or not
*/
if (functId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
if (f == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
bool qualified = false;
for (int32_t j = 0; j < pQueryInfo->groupbyExpr.numOfGroupCols; ++j) {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, j);

@@ -6383,21 +6506,21 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}

if (!qualified) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
return invalidOperationMsg(msg, msg2);
}
}

if (IS_MULTIOUTPUT(aAggs[functId].status) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
if (IS_MULTIOUTPUT(aAggs[f].status) && f != TSDB_FUNC_TOP && f != TSDB_FUNC_BOTTOM &&
f != TSDB_FUNC_DIFF && f != TSDB_FUNC_DERIVATIVE && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_PRJ) {
return invalidOperationMsg(msg, msg1);
}

if (functId == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
if (f == TSDB_FUNC_COUNT && pExpr->base.colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidOperationMsg(msg, msg1);
}
}

if (checkUpdateTagPrjFunctions(pQueryInfo, pCmd) != TSDB_CODE_SUCCESS) {
if (checkUpdateTagPrjFunctions(pQueryInfo, msg) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

@@ -6406,13 +6529,13 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}

// projection query on super table does not compatible with "group by" syntax
if (tscIsProjectionQuery(pQueryInfo)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
if (tscIsProjectionQuery(pQueryInfo) && !(tscIsDiffDerivQuery(pQueryInfo))) {
return invalidOperationMsg(msg, msg3);
}

return TSDB_CODE_SUCCESS;
} else {
return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
return checkUpdateTagPrjFunctions(pQueryInfo, msg);
}
}
int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode) {
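Note the signature change running through the hunks above: checkUpdateTagPrjFunctions and doFunctionsCompatibleCheck now take the error-message buffer (char* msg) directly instead of the whole SSqlCmd, so they can be called from contexts that only hold the payload. A sketch of that decoupling pattern, with illustrative names:

```c
#include <stdio.h>

#define ERR_LEN 128

/* The validator only needs somewhere to write the message, not the full
 * command object, so it takes the buffer directly. */
static int validate_something(int numOfAggregation, char* msg) {
  if (numOfAggregation > 0) {
    snprintf(msg, ERR_LEN, "aggregation function should not be mixed up with projection");
    return -1;
  }
  return 0;
}

int main(void) {
  char payload[ERR_LEN] = {0};  /* stands in for tscGetErrorMsgPayload(pCmd) */
  if (validate_something(2, payload) != 0) {
    printf("error: %s\n", payload);
  }
  return 0;
}
```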
@@ -6498,6 +6621,13 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}

int32_t blocks = ntohl(pCreate->totalBlocks);
if (blocks != -1 && (blocks < TSDB_MIN_TOTAL_BLOCKS || blocks > TSDB_MAX_TOTAL_BLOCKS)) {
snprintf(msg, tListLen(msg), "invalid db option totalBlocks: %d valid range: [%d, %d]", blocks,
TSDB_MIN_TOTAL_BLOCKS, TSDB_MAX_TOTAL_BLOCKS);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}

if (pCreate->quorum != -1 &&
(pCreate->quorum < TSDB_MIN_DB_QUORUM_OPTION || pCreate->quorum > TSDB_MAX_DB_QUORUM_OPTION)) {
snprintf(msg, tListLen(msg), "invalid db option quorum: %d valid range: [%d, %d]", pCreate->quorum,

@@ -6526,9 +6656,10 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg* pCreate) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}

if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO) {
snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d]", pCreate->precision,
TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO);
if (pCreate->precision != TSDB_TIME_PRECISION_MILLI && pCreate->precision != TSDB_TIME_PRECISION_MICRO &&
pCreate->precision != TSDB_TIME_PRECISION_NANO) {
snprintf(msg, tListLen(msg), "invalid db option timePrecision: %d valid value: [%d, %d, %d]", pCreate->precision,
TSDB_TIME_PRECISION_MILLI, TSDB_TIME_PRECISION_MICRO, TSDB_TIME_PRECISION_NANO);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg);
}
@@ -6870,7 +7001,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
const char* msg6 = "from missing in subclause";
const char* msg7 = "time interval is required";
const char* msg8 = "the first column should be primary timestamp column";

SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd);
assert(pQueryInfo->numOfTables == 1);

@@ -6927,6 +7058,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}

// project query primary column must be timestamp type
if (tscIsProjectionQuery(pQueryInfo)) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, 0);
if (pExpr->base.colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {

@@ -6935,7 +7067,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
} else {
if (pQueryInfo->interval.interval == 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
}

// set the created table[stream] name

@@ -7606,7 +7738,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
const char* msg2 = "too many tables in from clause";
const char* msg3 = "start(end) time of query range required or time range too large";
const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column";
const char* msg9 = "only tag query not compatible with normal column filter";
const char* msg5 = "only tag query not compatible with normal column filter";
const char* msg6 = "not support stddev/percentile in outer query yet";

int32_t code = TSDB_CODE_SUCCESS;
@@ -7647,24 +7780,27 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}

// todo NOT support yet
for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
int32_t f = pExpr->base.functionId;
if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
}

// validate the query filter condition info
if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (pTableMeta->tableInfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
}

// validate the interval info
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
if (isTimeWindowQuery(pQueryInfo)) {
if (isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) {
// check if the first column of the nest query result is timestamp column
SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0);
if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) {

@@ -7679,10 +7815,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf

// set order by info
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) !=
TSDB_CODE_SUCCESS) {
if (validateOrderbyNode(pCmd, pQueryInfo, pSqlNode, tscGetTableSchema(pTableMeta)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo, tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
return code;
}
} else {
pQueryInfo->command = TSDB_SQL_SELECT;

@@ -7708,18 +7847,12 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}

// set where info
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);

if (pSqlNode->pWhere != NULL) {
if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

pSqlNode->pWhere = NULL;
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->window.skey = pQueryInfo->window.skey / 1000;
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
} else {
if (taosArrayGetSize(pSqlNode->from->list) > 1) { // Cross join not allowed yet
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "cross join not supported yet");

@@ -7747,11 +7880,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
// set interval value
if (validateIntervalNode(pSql, pQueryInfo, pSqlNode) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
} else {
if (isTimeWindowQuery(pQueryInfo) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
}

if (tscQueryTags(pQueryInfo)) {

@@ -7762,7 +7890,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
for (int32_t i = 0; i < numOfCols; ++i) {
SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
if (pCols->info.flist.numOfFilters > 0) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
}
}

@@ -7782,6 +7910,11 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return TSDB_CODE_TSC_INVALID_OPERATION;
}

if ((isTimeWindowQuery(pQueryInfo) || pQueryInfo->sessionWindow.gap > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}

if (isSTable) {
tscTansformFuncForSTableQuery(pQueryInfo);
if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {

@@ -7811,7 +7944,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
return code;
}

if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) {
if ((code = doFunctionsCompatibleCheck(pCmd, pQueryInfo,tscGetErrorMsgPayload(pCmd))) != TSDB_CODE_SUCCESS) {
return code;
}
@@ -7824,14 +7957,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}

{ // set the query info
pQueryInfo->projectionQuery = tscIsProjectionQuery(pQueryInfo);
pQueryInfo->hasFilter = tscHasColumnFilter(pQueryInfo);
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
pQueryInfo->projectionQuery = tscIsProjectionQuery(pQueryInfo);
pQueryInfo->hasFilter = tscHasColumnFilter(pQueryInfo);
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);

pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
// pQueryInfo->diffQuery = tscIsDiffQuery(pQueryInfo);

SExprInfo** p = NULL;
int32_t numOfExpr = 0;

@@ -7942,6 +8076,24 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSqlExpr* pS
}

return TSDB_CODE_SUCCESS;
} else if (pSqlExpr->tokenId == TK_SET) {
int32_t type = -1;
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
if (pCols != NULL) {
SColIndex* idx = taosArrayGet(pCols, 0);
SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, idx->colIndex);
if (pSchema != NULL) {
type = pSchema->type;
}
}

tVariant *pVal;
if (serializeExprListToVariant(pSqlExpr->pParam, &pVal, type) == false) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
}
*pExpr = calloc(1, sizeof(tExprNode));
(*pExpr)->nodeType = TSQL_NODE_VALUE;
(*pExpr)->pVal = pVal;
} else {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), "not support filter expression");
}
@@ -937,7 +937,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}

SGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr;
if (pGroupbyExpr->numOfGroupCols > 0) {
if (pGroupbyExpr != NULL && pGroupbyExpr->numOfGroupCols > 0) {
pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex);
pQueryMsg->orderType = htons(pGroupbyExpr->orderType);

@@ -1668,7 +1668,7 @@ int tscProcessLocalRetrieveRsp(SSqlObj *pSql) {
return tscLocalResultCommonBuilder(pSql, numOfRes);
}

int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
int tscProcessRetrieveGlobalMergeRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd* pCmd = &pSql->cmd;

@@ -1695,12 +1695,13 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
taosArrayPush(group, &tableKeyInfo);
taosArrayPush(tableGroupInfo.pGroupList, &group);

pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE);
tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute query processing", pSql->self, pSql->self);
pQueryInfo->pQInfo = createQInfoFromQueryNode(pQueryInfo, &tableGroupInfo, NULL, NULL, pRes->pMerger, MERGE_STAGE, pSql->self);
}

uint64_t localQueryId = 0;
uint64_t localQueryId = pSql->self;
qTableQuery(pQueryInfo->pQInfo, &localQueryId);
convertQueryResult(pRes, pQueryInfo);
convertQueryResult(pRes, pQueryInfo, pSql->self);

code = pRes->code;
if (pRes->code == TSDB_CODE_SUCCESS) {

@@ -2774,7 +2775,7 @@ void tscInitMsgsFp() {

tscProcessMsgRsp[TSDB_SQL_RETRIEVE_EMPTY_RESULT] = tscProcessEmptyResultRsp;

tscProcessMsgRsp[TSDB_SQL_RETRIEVE_LOCALMERGE] = tscProcessRetrieveLocalMergeRsp;
tscProcessMsgRsp[TSDB_SQL_RETRIEVE_GLOBALMERGE] = tscProcessRetrieveGlobalMergeRsp;

tscProcessMsgRsp[TSDB_SQL_ALTER_TABLE] = tscProcessAlterTableMsgRsp;
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
@@ -456,7 +456,7 @@ static bool needToFetchNewBlock(SSqlObj* pSql) {

return (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_RETRIEVE_GLOBALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
|
@ -53,9 +53,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
|
|||
|
||||
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
|
||||
// change to ms
|
||||
if (prec == TSDB_TIME_PRECISION_MICRO) {
|
||||
slidingTime = slidingTime / 1000;
|
||||
}
|
||||
slidingTime = convertTimePrecision(slidingTime, pStream->precision, TSDB_TIME_PRECISION_MILLI);
|
||||
|
||||
if (slidingTime < retryDelta) {
|
||||
return slidingTime;
|
||||
|
@ -139,8 +137,13 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
|
||||
pStream->numOfRes = 0; // reset the numOfRes.
|
||||
SSqlObj *pSql = pStream->pSql;
|
||||
|
||||
// pSql == NULL maybe killStream already called
|
||||
if(pSql == NULL) {
|
||||
return ;
|
||||
}
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
|
||||
tscDebug("0x%"PRIx64" timer launch query", pSql->self);
|
||||
tscDebug("0x%"PRIx64" add into timer", pSql->self);
|
||||
|
||||
if (pStream->isProject) {
|
||||
/*
|
||||
|
@ -157,11 +160,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
pQueryInfo->window.skey = pStream->stime;
|
||||
int64_t etime = taosGetTimestamp(pStream->precision);
|
||||
// delay to wait all data in last time window
|
||||
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
etime -= tsMaxStreamComputDelay * 1000l;
|
||||
} else {
|
||||
etime -= tsMaxStreamComputDelay;
|
||||
}
|
||||
etime -= convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
|
||||
if (etime > pStream->etime) {
|
||||
etime = pStream->etime;
|
||||
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
|
||||
|
@ -178,8 +177,8 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
int64_t timer = pStream->interval.sliding;
|
||||
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
|
||||
timer = 86400 * 1000l;
|
||||
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
|
||||
timer /= 1000l;
|
||||
} else {
|
||||
timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
|
||||
}
|
||||
tscSetRetryTimer(pStream, pSql, timer);
|
||||
return;
|
||||
|
@@ -339,8 +338,12 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
if (pStream->isProject) {
int64_t now = taosGetTimestamp(pStream->precision);
int64_t etime = now > pStream->etime ? pStream->etime : now;

if (pStream->etime < now && now - pStream->etime > tsMaxRetentWindow) {
int64_t maxRetent = tsMaxRetentWindow * 1000;
if(pStream->precision == TSDB_TIME_PRECISION_MICRO) {
maxRetent *= 1000;
}

if (pStream->etime < now && now - pStream->etime > maxRetent) {
/*
* current time window will be closed, since it too early to exceed the maxRetentWindow value
*/

@@ -369,9 +372,8 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
}

static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;

int64_t maxDelay = convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);

int64_t delayDelta = maxDelay;
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);

@@ -438,16 +440,14 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {

timer += getLaunchTimeDelay(pStream);

if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer = timer / 1000L;
}
timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);

tscSetRetryTimer(pStream, pSql, timer);
}

static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
convertTimePrecision(tsMinIntervalTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);

SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);

@@ -471,7 +471,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
}

int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);

if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,

@@ -539,13 +539,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
timer = pStream->stime - now;
}

int64_t startDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;

int64_t startDelay = convertTimePrecision(tsStreamCompStartDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);

timer += getLaunchTimeDelay(pStream);
timer += startDelay;

return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
return convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
}

static void tscCreateStream(void *param, TAOS_RES *res, int code) {
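The stream-timer hunks above replace the scattered `* 1000L` / `/ 1000L` precision fixups with a single convertTimePrecision call, which also covers the new nanosecond precision. A self-contained sketch of what such a helper plausibly does (the real implementation lives in TDengine's time utilities; the precision codes and factors here are assumptions):

```c
#include <stdint.h>
#include <stdio.h>

enum { PREC_MILLI = 0, PREC_MICRO = 1, PREC_NANO = 2 };

/* Scale a duration/timestamp between precisions via a ticks-per-second table. */
static int64_t convert_time_precision(int64_t t, int from, int to) {
  static const int64_t factor[] = {1000LL, 1000000LL, 1000000000LL};
  if (from == to) return t;
  if (from < to)  return t * (factor[to] / factor[from]);  /* coarser -> finer */
  return t / (factor[from] / factor[to]);                  /* finer -> coarser */
}

int main(void) {
  /* 500 ms expressed in microseconds, then converted back */
  int64_t us = convert_time_precision(500, PREC_MILLI, PREC_MICRO);
  printf("%lld %lld\n", (long long)us,
         (long long)convert_time_precision(us, PREC_MICRO, PREC_MILLI));
  return 0;
}
```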
@@ -664,7 +663,7 @@ void cbParseSql(void* param, TAOS_RES* res, int code) {
}

TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *)) {
int64_t stime, void *param, void (*callback)(void *), void* cqhandle) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) return NULL;

@@ -697,6 +696,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pStream->callback = callback;
pStream->param = param;
pStream->pSql = pSql;
pStream->cqhandle = cqhandle;
pSql->pStream = pStream;
pSql->param = pStream;
pSql->maxRetry = TSDB_MAX_REPLICA;

@@ -745,7 +745,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c

TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *)) {
return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback);
return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback, NULL);
}

void taos_close_stream(TAOS_STREAM *handle) {
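The public taos_open_stream entry point keeps its old signature and just threads a NULL cqhandle through. A hedged usage sketch against the signature shown above (connection parameters and the stream SQL are placeholders; assumes the TDengine client header and library are available):

```c
#include <stdio.h>
#include <taos.h>  /* TDengine client header */

/* Callback invoked for each result row produced by the stream. */
static void stream_cb(void *param, TAOS_RES *res, TAOS_ROW row) {
  (void)param; (void)res;
  if (row != NULL) {
    printf("stream produced a row\n");
  }
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
  if (conn == NULL) return 1;

  /* stime = 0 starts from now; no completion callback */
  TAOS_STREAM *st = taos_open_stream(conn, "select count(*) from tb interval(10s)",
                                     stream_cb, 0, NULL, NULL);
  /* ... run the application's event loop, then ... */
  if (st != NULL) taos_close_stream(st);
  taos_close(conn);
  return 0;
}
```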
@@ -265,7 +265,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {

SSqlCmd* pCmd = &pSql->cmd;

pSub->lastSyncTime = taosGetTimestampMs();
TSDB_QUERY_CLEAR_TYPE(tscGetQueryInfo(pCmd)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);

STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {

@@ -276,6 +276,8 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
taosArrayClear(pSub->progress);
taosArrayPush(pSub->progress, &target);
}

pSub->lastSyncTime = taosGetTimestampMs();
return 1;
}

@@ -305,7 +307,11 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
}
taosArrayDestroy(tables);

TSDB_QUERY_SET_TYPE(tscGetQueryInfo(pCmd)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
if (pTableMetaInfo->pVgroupTables && taosArrayGetSize(pTableMetaInfo->pVgroupTables) > 0) {
TSDB_QUERY_SET_TYPE(tscGetQueryInfo(pCmd)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
}

pSub->lastSyncTime = taosGetTimestampMs();
return 1;
}
@@ -1977,9 +1977,8 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
}

memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
tscDebug("0x%"PRIx64" reset all sub states to 0", pSql->self);
tscDebug("0x%"PRIx64" reset all sub states to 0, start subquery, total:%d", pSql->self, pQueryInfo->numOfTables);

tscDebug("0x%"PRIx64" start subquery, total:%d", pSql->self, pQueryInfo->numOfTables);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
SJoinSupporter *pSupporter = tscCreateJoinSupporter(pSql, i);
if (pSupporter == NULL) { // failed to create support struct, abort current query

@@ -2424,7 +2423,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {

// pRes->code check only serves in launching metric sub-queries
if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
pCmd->command = TSDB_SQL_RETRIEVE_LOCALMERGE; // enable the abort of kill super table function.
pCmd->command = TSDB_SQL_RETRIEVE_GLOBALMERGE; // enable the abort of kill super table function.
return pRes->code;
}

@@ -2780,7 +2779,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
if (code == TSDB_CODE_SUCCESS && trsupport->pExtMemBuffer == NULL) {
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; // no result, set the result empty
} else {
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_LOCALMERGE;
pParentSql->cmd.command = TSDB_SQL_RETRIEVE_GLOBALMERGE;
}

tscCreateResPointerInfo(&pParentSql->res, pPQueryInfo);

@@ -3502,7 +3501,7 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
}

void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
char* sql, void* merger, int32_t stage) {
char* sql, void* merger, int32_t stage, uint64_t qId) {
assert(pQueryInfo != NULL);
SQInfo *pQInfo = (SQInfo *)calloc(1, sizeof(SQInfo));
if (pQInfo == NULL) {

@@ -3511,7 +3510,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr

// to make sure third party won't overwrite this structure
pQInfo->signature = pQInfo;

pQInfo->qId = qId;
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryAttr *pQueryAttr = &pQInfo->query;
@@ -222,6 +222,9 @@ bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex) {
functionId != TSDB_FUNC_TS &&
functionId != TSDB_FUNC_ARITHM &&
functionId != TSDB_FUNC_TS_COMP &&
functionId != TSDB_FUNC_DIFF &&
functionId != TSDB_FUNC_DERIVATIVE &&
functionId != TSDB_FUNC_TS_DUMMY &&
functionId != TSDB_FUNC_TID_TAG) {
return false;
}

@@ -253,10 +256,14 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);

for (int32_t i = 0; i < size; ++i) {
int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
if (f == TSDB_FUNC_TS_DUMMY) {
continue;
}

if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG &&
functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG &&
f != TSDB_FUNC_TS && f != TSDB_FUNC_ARITHM && f != TSDB_FUNC_DIFF &&
f != TSDB_FUNC_DERIVATIVE) {
return false;
}
}

@@ -264,6 +271,24 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
return true;
}

bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);

for (int32_t i = 0; i < size; ++i) {
int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
if (f == TSDB_FUNC_TS_DUMMY) {
continue;
}

if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE) {
return true;
}
}

return false;
}

bool tscHasColumnFilter(SQueryInfo* pQueryInfo) {
// filter on primary timestamp column
if (pQueryInfo->window.skey != INT64_MIN || pQueryInfo->window.ekey != INT64_MAX) {
@@ -434,6 +459,25 @@ bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {

return false;
}

bool tscIsDiffQuery(SQueryInfo* pQueryInfo) {
size_t num = tscNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < num; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {  // check the pointer before reading functionId
continue;
}

int32_t f = pExpr->base.functionId;
if (f == TSDB_FUNC_TS_DUMMY) {
continue;
}

if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE) {
return true;
}
}

return false;
}

bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo) {
return pQueryInfo->sessionWindow.gap > 0;
}
@@ -467,42 +511,12 @@ bool tscNeedReverseScan(SQueryInfo* pQueryInfo) {
return false;
}

bool isSimpleAggregate(SQueryInfo* pQueryInfo) {
if (pQueryInfo->interval.interval > 0) {
return false;
}

// Note:top/bottom query is fixed output query
if (tscIsTopBotQuery(pQueryInfo) || tscGroupbyColumn(pQueryInfo) || isTsCompQuery(pQueryInfo)) {
return true;
}

size_t numOfExprs = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
if (pExpr == NULL) {
continue;
}

int32_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}

if (!IS_MULTIOUTPUT(aAggs[functionId].status)) {
return true;
}
}

return false;
}

bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
if (pQueryInfo->interval.interval > 0 || pQueryInfo->sessionWindow.gap > 0) {
return false;
}

if (tscGroupbyColumn(pQueryInfo) || isTsCompQuery(pQueryInfo)) {
if (tscIsDiffQuery(pQueryInfo)) {
return false;
}

@@ -518,13 +532,13 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo) {
continue;
}

if (!IS_MULTIOUTPUT(aAggs[functionId].status)) {
if ((!IS_MULTIOUTPUT(aAggs[functionId].status)) ||
(functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_TS_COMP)) {
return true;
}
}

return false;
}

bool isBlockDistQuery(SQueryInfo* pQueryInfo) {
@@ -812,6 +826,7 @@ static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup)
for (int32_t i = 0; i < pOperator->numOfUpstream; ++i) {
SJoinStatus* pStatus = &pJoinInfo->status[i];
if (pStatus->pBlock == NULL || pStatus->index >= pStatus->pBlock->info.rows) {
tscDebug("Retrieve nest query result, index:%d, total:%d", i, pOperator->numOfUpstream);
pStatus->pBlock = pOperator->upstream[i]->exec(pOperator->upstream[i], newgroup);
pStatus->index = 0;

@@ -972,6 +987,9 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {

pInfo->block = destroyOutputBuf(pInfo->block);
pInfo->pSql = NULL;

cleanupResultRowInfo(&pInfo->pTableQueryInfo->resInfo);
tfree(pInfo->pTableQueryInfo);
}

// todo this operator servers as the adapter for Operator tree and SqlRes result, remove it later

@@ -1056,7 +1074,7 @@ SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUp
return pOperator;
}

void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId) {
// set the correct result
SSDataBlock* p = pQueryInfo->pQInfo->runtimeEnv.outputBuf;
pRes->numOfRows = (p != NULL)? p->info.rows: 0;

@@ -1066,6 +1084,7 @@ void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
tscSetResRawPtrRv(pRes, pQueryInfo, p);
}

tscDebug("0x%"PRIx64" retrieve result in pRes, numOfRows:%d", objId, pRes->numOfRows);
pRes->row = 0;
pRes->completed = (pRes->numOfRows == 0);
}

@@ -1088,7 +1107,9 @@ static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t
tfree(tableCols);
}

void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlRes* pOutput) {
void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
SSqlRes* pOutput = &pSql->res;

// handle the following query process
if (px->pQInfo == NULL) {
SColumnInfo* pColumnInfo = extractColumnInfoFromResult(px->colList);

@@ -1168,7 +1189,9 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
}
}

px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN);
tscDebug("0x%"PRIx64" create QInfo 0x%"PRIx64" to execute the main query while all nest queries are ready", pSql->self, pSql->self);
px->pQInfo = createQInfoFromQueryNode(px, &tableGroupInfo, pSourceOperator, NULL, NULL, MASTER_SCAN, pSql->self);

tfree(pColumnInfo);
tfree(schema);

@@ -1176,9 +1199,9 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue
pSourceOperator->pRuntimeEnv = &px->pQInfo->runtimeEnv;
}

uint64_t qId = 0;
uint64_t qId = pSql->self;
qTableQuery(px->pQInfo, &qId);
convertQueryResult(pOutput, px);
convertQueryResult(pOutput, px, pSql->self);
}

static void tscDestroyResPointerInfo(SSqlRes* pRes) {

@@ -1374,7 +1397,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {

SSqlCmd* pCmd = &pSql->cmd;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_GLOBALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscRemoveFromSqlList(pSql);
}
@@ -2177,6 +2200,7 @@ size_t tscNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}

// todo REFACTOR
void tscExprAddParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
assert (pExpr != NULL || argument != NULL || bytes != 0);

@@ -3278,7 +3302,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->numOfTables = 0;
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;

pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
if (pNewQueryInfo->buf == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;

@@ -3438,7 +3461,7 @@ void doRetrieveSubqueryData(SSchedMsg *pMsg) {
}

SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd);
handleDownstreamOperator(pSql->pSubs, pSql->subState.numOfSub, pQueryInfo, &pSql->res);
handleDownstreamOperator(pSql->pSubs, pSql->subState.numOfSub, pQueryInfo, pSql);

pSql->res.qId = -1;
if (pSql->res.code == TSDB_CODE_SUCCESS) {

@@ -3468,13 +3491,12 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) {
}

pParentSql->cmd.active = pParentSql->cmd.pQueryInfo;

SSchedMsg schedMsg = {0};
schedMsg.fp = doRetrieveSubqueryData;
schedMsg.ahandle = (void *)pParentSql;
schedMsg.thandle = (void *)1;
schedMsg.msg = 0;
taosScheduleTask(tscQhandle, &schedMsg);
pParentSql->res.qId = -1;
if (pSql->res.code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pParentSql->param, pParentSql, pParentSql->res.numOfRows);
} else {
tscAsyncResultOnError(pParentSql);
}
}

// todo handle the failure

@@ -4238,7 +4260,8 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->hasTagResults = hasTagValOutput(pQueryInfo);
pQueryAttr->stabledev = isStabledev(pQueryInfo);
pQueryAttr->tsCompQuery = isTsCompQuery(pQueryInfo);
pQueryAttr->simpleAgg = isSimpleAggregate(pQueryInfo);
pQueryAttr->diffQuery = tscIsDiffQuery(pQueryInfo);
pQueryAttr->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryAttr->needReverseScan = tscNeedReverseScan(pQueryInfo);
pQueryAttr->stableQuery = QUERY_IS_STABLE_QUERY(pQueryInfo->type);
pQueryAttr->groupbyColumn = (!pQueryInfo->stateWindow) && tscGroupbyColumn(pQueryInfo);

@@ -4257,7 +4280,6 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->fillType = pQueryInfo->fillType;
pQueryAttr->havingNum = pQueryInfo->havingFieldNum;

if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor
pQueryAttr->window = pQueryInfo->window;
} else {

@@ -4269,10 +4291,9 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt

STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];

pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SGroupbyExpr));
*(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr;

if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SGroupbyExpr));
*(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr;
pQueryAttr->pGroupbyExpr->columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
} else {
assert(pQueryInfo->groupbyExpr.columnInfo == NULL);

@@ -4351,7 +4372,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
return TSDB_CODE_TSC_INVALID_OPERATION;
}

if (pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
if (pQueryAttr->pGroupbyExpr != NULL && pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
tscError("%p illegal value of numOfGroupCols in query msg: %d", addr, pQueryInfo->groupbyExpr.numOfGroupCols);
return TSDB_CODE_TSC_INVALID_OPERATION;
}
@ -78,7 +78,7 @@ enum {
// SQL below for client local
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_LOCAL, "local" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DESCRIBE_TABLE, "describe-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_LOCALMERGE, "retrieve-localmerge" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_GLOBALMERGE, "retrieve-globalmerge" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )

TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")

@ -94,6 +94,8 @@ bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));

void buildFilterSetFromBinary(void **q, const char *buf, int32_t len);

#ifdef __cplusplus
}
#endif

@ -466,6 +466,32 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
return expr;
}

void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) {
SBufferReader br = tbufInitReader(buf, len, false);
uint32_t type = tbufReadUint32(&br);
SHashObj *pObj = taosHashInit(256, taosGetDefaultHashFunction(type), true, false);
int dummy = -1;
int32_t sz = tbufReadInt32(&br);
for (int32_t i = 0; i < sz; i++) {
if (type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_SMALLINT || type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_INT) {
int64_t val = tbufReadInt64(&br);
taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy));
} else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) {
double val = tbufReadDouble(&br);
taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy));
} else if (type == TSDB_DATA_TYPE_BINARY) {
size_t t = 0;
const char *val = tbufReadBinary(&br, &t);
taosHashPut(pObj, (char *)val, t, &dummy, sizeof(dummy));
} else if (type == TSDB_DATA_TYPE_NCHAR) {
size_t t = 0;
const char *val = tbufReadBinary(&br, &t);
taosHashPut(pObj, (char *)val, t, &dummy, sizeof(dummy));
}
}
*q = (void *)pObj;
}

tExprNode* exprdup(tExprNode* pNode) {
if (pNode == NULL) {
return NULL;
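The serialized form consumed by buildFilterSetFromBinary is a flat tbuf stream: a uint32 type tag, an int32 element count, then one encoded value per element (int64 for the integer family, double for floating point, length-prefixed binary for BINARY/NCHAR). A hedged sketch of that layout for the integer case; the little-endian byte order is an assumption here, not something the tbuf API above guarantees:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch of the IN-set layout read by buildFilterSetFromBinary (integer case).
// Byte order is ASSUMED little-endian; field widths mirror the tbufRead* calls.
public class FilterSetLayoutSketch {
    public static byte[] encodeIntSet(int tsdbType, long[] values) {
        ByteBuffer buf = ByteBuffer.allocate(8 + 8 * values.length)
                .order(ByteOrder.LITTLE_ENDIAN);
        buf.putInt(tsdbType);       // uint32 type tag      (tbufReadUint32)
        buf.putInt(values.length);  // int32 element count  (tbufReadInt32)
        for (long v : values) {
            buf.putLong(v);         // int64 value          (tbufReadInt64)
        }
        return buf.array();
    }
}
```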
@ -831,6 +831,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

cfg.option = "precision";
cfg.ptr = &tsTimePrecision;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_PRECISION;
cfg.maxValue = TSDB_MAX_PRECISION;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

cfg.option = "comp";
cfg.ptr = &tsCompression;
cfg.valType = TAOS_CFG_VTYPE_INT8;

@ -901,6 +911,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

cfg.option = "cachelast";
cfg.ptr = &tsCacheLastRow;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_CACHE_LAST_ROW;
cfg.maxValue = TSDB_MAX_DB_CACHE_LAST_ROW;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);

cfg.option = "mqttHostName";
cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING;

@ -74,7 +74,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {

case TSDB_DATA_TYPE_BINARY: {
pVar->pz = strndup(token->z, token->n);
pVar->nLen = strdequote(pVar->pz);
pVar->nLen = strRmquote(pVar->pz, token->n);
break;
}
@ -8,10 +8,9 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.30.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
ENDIF ()

@ -5,7 +5,7 @@

<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.29</version>
<version>2.0.30</version>
<packaging>jar</packaging>

<name>JDBCDriver</name>

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.29</version>
<version>2.0.30</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>

@ -47,7 +47,7 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.8</version>
<version>4.5.13</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
@ -1,5 +1,7 @@
package com.taosdata.jdbc;

import com.sun.org.apache.xpath.internal.operations.Bool;

import java.sql.ParameterMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;

@ -49,6 +51,22 @@ public abstract class AbstractParameterMetaData extends WrapperImpl implements P
if (param < 1 && param >= parameters.length)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);

if (parameters[param - 1] instanceof Boolean)
return TSDBConstants.BOOLEAN_PRECISION;
if (parameters[param - 1] instanceof Byte)
return TSDBConstants.TINYINT_PRECISION;
if (parameters[param - 1] instanceof Short)
return TSDBConstants.SMALLINT_PRECISION;
if (parameters[param - 1] instanceof Integer)
return TSDBConstants.INT_PRECISION;
if (parameters[param - 1] instanceof Long)
return TSDBConstants.BIGINT_PRECISION;
if (parameters[param - 1] instanceof Timestamp)
return TSDBConstants.TIMESTAMP_MS_PRECISION;
if (parameters[param - 1] instanceof Float)
return TSDBConstants.FLOAT_PRECISION;
if (parameters[param - 1] instanceof Double)
return TSDBConstants.DOUBLE_PRECISION;
if (parameters[param - 1] instanceof String)
return ((String) parameters[param - 1]).length();
if (parameters[param - 1] instanceof byte[])

@ -60,6 +78,11 @@ public abstract class AbstractParameterMetaData extends WrapperImpl implements P
public int getScale(int param) throws SQLException {
if (param < 1 && param >= parameters.length)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);

if (parameters[param - 1] instanceof Float)
return TSDBConstants.FLOAT_SCALE;
if (parameters[param - 1] instanceof Double)
return TSDBConstants.DOUBLE_SCALE;
return 0;
}
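Parameter metadata now reports type-dependent precision and scale from the runtime class of each bound value. A hypothetical usage sketch, assuming the driver populates its parameters array from the values bound so far; the table name and values are illustrative, not from this patch:

```java
import java.sql.Connection;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;

// Hypothetical usage; the table "weather" and bound values are illustrative.
public class ParameterMetaDataSketch {
    static void inspect(Connection conn) throws SQLException {
        PreparedStatement ps = conn.prepareStatement("insert into weather values(?, ?)");
        ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        ps.setDouble(2, 23.5);
        ParameterMetaData pmd = ps.getParameterMetaData();
        System.out.println(pmd.getPrecision(1)); // 23, TIMESTAMP_MS_PRECISION
        System.out.println(pmd.getPrecision(2)); // 22, DOUBLE_PRECISION
        System.out.println(pmd.getScale(2));     // 31, DOUBLE_SCALE
    }
}
```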
@ -66,10 +66,16 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
public abstract byte[] getBytes(int columnIndex) throws SQLException;

@Override
public abstract Date getDate(int columnIndex) throws SQLException;
public Date getDate(int columnIndex) throws SQLException {
Timestamp timestamp = getTimestamp(columnIndex);
return timestamp == null ? null : new Date(timestamp.getTime());
}

@Override
public abstract Time getTime(int columnIndex) throws SQLException;
public Time getTime(int columnIndex) throws SQLException {
Timestamp timestamp = getTimestamp(columnIndex);
return timestamp == null ? null : new Time(timestamp.getTime());
}

@Override
public abstract Timestamp getTimestamp(int columnIndex) throws SQLException;
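getDate and getTime stop being abstract and are instead derived from getTimestamp, so DATE and TIME reads are views over the same underlying timestamp value. A minimal sketch of the resulting behavior:

```java
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;

// Minimal sketch: all three accessors resolve through the same timestamp value.
class DateTimeViewSketch {
    static void read(ResultSet rs) throws SQLException {
        Timestamp ts = rs.getTimestamp(1); // column 1 assumed to be a timestamp
        Date d = rs.getDate(1);            // null, or new Date(ts.getTime())
        Time t = rs.getTime(1);            // null, or new Time(ts.getTime())
    }
}
```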
@ -41,15 +41,15 @@ public abstract class TSDBConstants {
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
/*
The system adds new unsigned data types:
unsigned tinyint, value range: 0-254, NULL is 255
unsigned smallint, value range: 0-65534, NULL is 65535
unsigned int, value range: 0-4294967294, NULL is 4294967295
unsigned bigint, value range: 0-18446744073709551614, NULL is 18446744073709551615.
example:
create table tb(ts timestamp, a tinyint unsigned, b smallint unsigned, c int unsigned, d bigint unsigned);
*/
/**
* The system adds new unsigned data types:
* unsigned tinyint, value range: 0-254, NULL is 255
* unsigned smallint, value range: 0-65534, NULL is 65535
* unsigned int, value range: 0-4294967294, NULL is 4294967295
* unsigned bigint, value range: 0-18446744073709551614, NULL is 18446744073709551615.
* example:
* create table tb(ts timestamp, a tinyint unsigned, b smallint unsigned, c int unsigned, d bigint unsigned);
*/
public static final int TSDB_DATA_TYPE_UTINYINT = 11; //unsigned tinyint
public static final int TSDB_DATA_TYPE_USMALLINT = 12; //unsigned smallint
public static final int TSDB_DATA_TYPE_UINT = 13; //unsigned int
@ -57,6 +57,47 @@ public abstract class TSDBConstants {
// nchar column max length
public static final int maxFieldSize = 16 * 1024;

// precision for data types
public static final int BOOLEAN_PRECISION = 1;
public static final int TINYINT_PRECISION = 4;
public static final int SMALLINT_PRECISION = 6;
public static final int INT_PRECISION = 11;
public static final int BIGINT_PRECISION = 20;
public static final int FLOAT_PRECISION = 12;
public static final int DOUBLE_PRECISION = 22;
public static final int TIMESTAMP_MS_PRECISION = 23;
public static final int TIMESTAMP_US_PRECISION = 26;
// scale for data types
public static final int FLOAT_SCALE = 31;
public static final int DOUBLE_SCALE = 31;

public static int typeName2JdbcType(String type) {
switch (type.toUpperCase()) {
case "TIMESTAMP":
return Types.TIMESTAMP;
case "INT":
return Types.INTEGER;
case "BIGINT":
return Types.BIGINT;
case "FLOAT":
return Types.FLOAT;
case "DOUBLE":
return Types.DOUBLE;
case "BINARY":
return Types.BINARY;
case "SMALLINT":
return Types.SMALLINT;
case "TINYINT":
return Types.TINYINT;
case "BOOL":
return Types.BOOLEAN;
case "NCHAR":
return Types.NCHAR;
default:
return Types.NULL;
}
}

public static int taosType2JdbcType(int taosType) throws SQLException {
switch (taosType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:

@ -88,7 +129,7 @@ public abstract class TSDBConstants {
}

public static String taosType2JdbcTypeName(int taosType) throws SQLException {
switch (taosType){
switch (taosType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return "BOOL";
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT:

@ -119,7 +160,7 @@ public abstract class TSDBConstants {
}

public static int jdbcType2TaosType(int jdbcType) throws SQLException {
switch (jdbcType){
switch (jdbcType) {
case Types.BOOLEAN:
return TSDBConstants.TSDB_DATA_TYPE_BOOL;
case Types.TINYINT:

@ -145,7 +186,7 @@ public abstract class TSDBConstants {
}

public static String jdbcType2TaosTypeName(int jdbcType) throws SQLException {
switch (jdbcType){
switch (jdbcType) {
case Types.BOOLEAN:
return "BOOL";
case Types.TINYINT:
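typeName2JdbcType adds a string-keyed entry point beside the existing int-keyed mappings; unknown names fall through to Types.NULL instead of throwing, unlike taosType2JdbcType. A short usage sketch:

```java
import java.sql.Types;

// Usage sketch for the string-keyed mapping added above.
class TypeMappingSketch {
    public static void main(String[] args) throws Exception {
        System.out.println(TSDBConstants.typeName2JdbcType("nchar") == Types.NCHAR); // true: case-insensitive
        System.out.println(TSDBConstants.typeName2JdbcType("JSON") == Types.NULL);   // true: unknown names map to NULL
        System.out.println(TSDBConstants.jdbcType2TaosTypeName(Types.BOOLEAN));      // "BOOL"
    }
}
```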
@ -16,13 +16,13 @@
*/
package com.taosdata.jdbc;

import com.taosdata.jdbc.utils.TaosInfo;

import java.nio.ByteBuffer;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.List;

import com.taosdata.jdbc.utils.TaosInfo;

/**
* JNI connector
*/

@ -30,10 +30,10 @@ public class TSDBJNIConnector {
private static volatile Boolean isInitialized = false;

private TaosInfo taosInfo = TaosInfo.getInstance();

// Connection pointer used in C
private long taos = TSDBConstants.JNI_NULL_POINTER;

// result set status in current connection
private boolean isResultsetClosed;

@ -194,7 +194,9 @@ public class TSDBJNIConnector {
* Get schema metadata
*/
public int getSchemaMetaData(long resultSet, List<ColumnMetaData> columnMetaData) {
return this.getSchemaMetaDataImp(this.taos, resultSet, columnMetaData);
int ret = this.getSchemaMetaDataImp(this.taos, resultSet, columnMetaData);
columnMetaData.stream().forEach(column -> column.setColIndex(column.getColIndex() + 1));
return ret;
}

private native int getSchemaMetaDataImp(long connection, long resultSet, List<ColumnMetaData> columnMetaData);
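The added stream().forEach shifts the 0-based column indexes reported by the native layer to the 1-based indexes JDBC callers expect, which is what lets findColumn (changed further below) return getColIndex() directly instead of adding one. A sketch of the convention; only setColIndex/getColIndex are taken from this patch, the rest is assumed:

```java
// Hypothetical sketch of the index normalization done in getSchemaMetaData.
class ColIndexSketch {
    static void normalize(ColumnMetaData column) {
        // JNI fills 0-based indexes; JDBC exposes 1-based ones.
        column.setColIndex(column.getColIndex() + 1);
        // From here on, ResultSet.findColumn can return getColIndex() unchanged.
    }
}
```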
@ -221,7 +223,7 @@ public class TSDBJNIConnector {
*/
public void closeConnection() throws SQLException {
int code = this.closeConnectionImp(this.taos);

if (code < 0) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} else if (code == 0) {

@ -229,7 +231,7 @@ public class TSDBJNIConnector {
} else {
throw new SQLException("Undefined error code returned by TDengine when closing a connection");
}

// invoke closeConnectionImpl only here
taosInfo.connect_close_increment();
}
@ -274,67 +276,76 @@ public class TSDBJNIConnector {
}

private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes);

public long prepareStmt(String sql) throws SQLException {
Long stmt = prepareStmtImp(sql.getBytes(), this.taos);
if (stmt == TSDBConstants.JNI_TDENGINE_ERROR) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL);
} else if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
public long prepareStmt(String sql) throws SQLException {
Long stmt;
try {
stmt = prepareStmtImp(sql.getBytes(), this.taos);
} catch (Exception e) {
e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING);
}
if (stmt == TSDBConstants.JNI_CONNECTION_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL);
} else if (stmt == TSDBConstants.JNI_SQL_NULL) {
}
if (stmt == TSDBConstants.JNI_SQL_NULL) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_SQL_NULL);
} else if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
}
if (stmt == TSDBConstants.JNI_OUT_OF_MEMORY) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_OUT_OF_MEMORY);
}
return stmt;
return stmt;
}

private native long prepareStmtImp(byte[] sql, long con);

public void setBindTableName(long stmt, String tableName) throws SQLException {
int code = setBindTableNameImp(stmt, tableName, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
int code = setBindTableNameImp(stmt, tableName, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to set table name");
}
}
}
}

private native int setBindTableNameImp(long stmt, String name, long conn);

public void setBindTableNameAndTags(long stmt, String tableName, int numOfTags, ByteBuffer tags, ByteBuffer typeList, ByteBuffer lengthList, ByteBuffer nullList) throws SQLException {
int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(),
nullList.array(), this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind table name and corresponding tags");
}
int code = setTableNameTagsImp(stmt, tableName, numOfTags, tags.array(), typeList.array(), lengthList.array(),
nullList.array(), this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind table name and corresponding tags");
}
}

private native int setTableNameTagsImp(long stmt, String name, int numOfTags, byte[] tags, byte[] typeList, byte[] lengthList, byte[] nullList, long conn);

public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows,int columnIndex) throws SQLException {
int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
public void bindColumnDataArray(long stmt, ByteBuffer colDataList, ByteBuffer lengthList, ByteBuffer isNullList, int type, int bytes, int numOfRows, int columnIndex) throws SQLException {
int code = bindColDataImp(stmt, colDataList.array(), lengthList.array(), isNullList.array(), type, bytes, numOfRows, columnIndex, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to bind column data");
}
}
}
}

private native int bindColDataImp(long stmt, byte[] colDataList, byte[] lengthList, byte[] isNullList, int type, int bytes, int numOfRows, int columnIndex, long conn);

public void executeBatch(long stmt) throws SQLException {
int code = executeBatchImp(stmt, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
int code = executeBatchImp(stmt, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to execute batch bind");
}
}
}

private native int executeBatchImp(long stmt, long con);

public void closeBatch(long stmt) throws SQLException {
int code = closeStmt(stmt, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
int code = closeStmt(stmt, this.taos);
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to close batch bind");
}
}
}

private native int closeStmt(long stmt, long con);
}
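Taken together, these methods are the JNI half of batch parameter binding. A hedged sketch of the call sequence a higher-level statement implementation might drive; the SQL, table name, and buffer layout are illustrative placeholders, and the ByteBuffers must be array-backed since the methods call array() on them:

```java
import java.nio.ByteBuffer;
import java.sql.SQLException;

// Hedged sketch of the batch-bind sequence exposed by TSDBJNIConnector.
public class StmtBindSketch {
    static void insertBatch(TSDBJNIConnector connector, ByteBuffer data,
                            ByteBuffer lengths, ByteBuffer nulls, int rows) throws SQLException {
        long stmt = connector.prepareStmt("insert into ? values(?)"); // illustrative SQL
        connector.setBindTableName(stmt, "d1001");                    // placeholder table
        connector.bindColumnDataArray(stmt, data, lengths, nulls,
                TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP, 8, rows, 0);  // column 0
        connector.executeBatch(stmt);
        connector.closeBatch(stmt);
    }
}
```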
@ -133,9 +133,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.getString(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = this.rowData.getString(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getString(columnIndex, nativeType);
}
return res;
}

@ -147,9 +148,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.getBoolean(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = this.rowData.getBoolean(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getBoolean(columnIndex, nativeType);
}
return res;
}

@ -161,9 +163,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return (byte) this.blockData.getInt(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = (byte) this.rowData.getInt(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = (byte) this.rowData.getInt(columnIndex, nativeType);
}
return res;
}

@ -175,9 +178,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return (short) this.blockData.getInt(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = (short) this.rowData.getInt(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = (short) this.rowData.getInt(columnIndex, nativeType);
}
return res;
}

@ -189,9 +193,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.getInt(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = this.rowData.getInt(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getInt(columnIndex, nativeType);
}
return res;
}

@ -203,13 +209,15 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.getLong(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
Object value = this.rowData.get(columnIndex - 1);
if (value instanceof Timestamp)
Object value = this.rowData.getObject(columnIndex);
if (value instanceof Timestamp) {
res = ((Timestamp) value).getTime();
else
res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
} else {
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getLong(columnIndex, nativeType);
}
}
return res;
}

@ -221,9 +229,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return (float) this.blockData.getDouble(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
if (!lastWasNull)
res = this.rowData.getFloat(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getFloat(columnIndex, nativeType);
}

return res;
}

@ -235,9 +245,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.getDouble(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = this.rowData.getDouble(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getDouble(columnIndex, nativeType);
}
return res;
}
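One consequence of the getLong change above: on a TIMESTAMP column the raw object is a Timestamp, and getLong now returns its epoch time instead of attempting a numeric cast. A minimal usage sketch, with column 1 assumed to be the timestamp column:

```java
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

// Minimal sketch: getLong on a TIMESTAMP column yields Timestamp.getTime().
class TimestampAsLongSketch {
    static void read(ResultSet rs) throws SQLException {
        long epochMillis = rs.getLong(1);   // millisecond precision assumed
        Timestamp ts = rs.getTimestamp(1);
        assert epochMillis == ts.getTime();
    }
}
```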
@ -245,34 +256,27 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public byte[] getBytes(int columnIndex) throws SQLException {
checkAvailability(columnIndex, this.columnMetaDataList.size());

Object value = this.rowData.get(columnIndex - 1);
Object value = this.rowData.getObject(columnIndex);
if (value == null)
return null;

int colType = this.columnMetaDataList.get(columnIndex - 1).getColType();
switch (colType) {
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return Longs.toByteArray((Long) value);
return Longs.toByteArray((long) value);
case TSDBConstants.TSDB_DATA_TYPE_INT:
return Ints.toByteArray((int) value);
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
return Shorts.toByteArray((Short) value);
return Shorts.toByteArray((short) value);
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
return new byte[]{(byte) value};
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return (byte[]) value;
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
default:
return value.toString().getBytes();
}
return value.toString().getBytes();
}

@Override
public Date getDate(int columnIndex) throws SQLException {
Timestamp timestamp = getTimestamp(columnIndex);
return timestamp == null ? null : new Date(timestamp.getTime());
}

@Override
public Time getTime(int columnIndex) throws SQLException {
Timestamp timestamp = getTimestamp(columnIndex);
return timestamp == null ? null : new Time(timestamp.getTime());
}

public Timestamp getTimestamp(int columnIndex) throws SQLException {

@ -282,9 +286,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.getTimestamp(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
res = this.rowData.getTimestamp(columnIndex - 1);
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
res = this.rowData.getTimestamp(columnIndex, nativeType);
}
return res;
}
@ -304,13 +309,9 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return this.blockData.get(columnIndex - 1);

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
if (!lastWasNull) {
int colType = this.columnMetaDataList.get(columnIndex - 1).getColType();
if (colType == TSDBConstants.TSDB_DATA_TYPE_BINARY)
res = ((String) this.rowData.get(columnIndex - 1)).getBytes();
else
res = this.rowData.get(columnIndex - 1);
res = this.rowData.getObject(columnIndex);
}
return res;
}

@ -318,7 +319,7 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public int findColumn(String columnLabel) throws SQLException {
for (ColumnMetaData colMetaData : this.columnMetaDataList) {
if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) {
return colMetaData.getColIndex() + 1;
return colMetaData.getColIndex();
}
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);

@ -329,25 +330,25 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
if (this.getBatchFetch())
return new BigDecimal(this.blockData.getLong(columnIndex - 1));

this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
this.lastWasNull = this.rowData.wasNull(columnIndex);
BigDecimal res = null;
if (!lastWasNull) {
int colType = this.columnMetaDataList.get(columnIndex - 1).getColType();
switch (colType) {
int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
case TSDBConstants.TSDB_DATA_TYPE_INT:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
res = new BigDecimal(Long.valueOf(this.rowData.get(columnIndex - 1).toString()));
res = new BigDecimal(Long.valueOf(this.rowData.getObject(columnIndex).toString()));
break;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
res = new BigDecimal(Double.valueOf(this.rowData.get(columnIndex - 1).toString()));
res = new BigDecimal(Double.valueOf(this.rowData.getObject(columnIndex).toString()));
break;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return new BigDecimal(((Timestamp) this.rowData.get(columnIndex - 1)).getTime());
return new BigDecimal(((Timestamp) this.rowData.getObject(columnIndex)).getTime());
default:
res = new BigDecimal(this.rowData.get(columnIndex - 1).toString());
res = new BigDecimal(this.rowData.getObject(columnIndex).toString());
}
}
return res;
@ -113,6 +113,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD

ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1);
switch (columnMetaData.getColType()) {

case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return 5;
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:

@ -14,6 +14,8 @@
*****************************************************************************/
package com.taosdata.jdbc;

import com.taosdata.jdbc.utils.NullType;

import java.math.BigDecimal;
import java.sql.SQLException;
import java.sql.Timestamp;

@ -22,11 +24,13 @@ import java.util.ArrayList;
import java.util.Collections;

public class TSDBResultSetRowData {

private ArrayList<Object> data;
private int colSize = 0;
private int colSize;

public TSDBResultSetRowData(int colSize) {
this.setColSize(colSize);
this.colSize = colSize;
this.clear();
}

public void clear() {
@ -41,68 +45,104 @@ public class TSDBResultSetRowData {
}

public boolean wasNull(int col) {
return data.get(col) == null;
return data.get(col - 1) == null;
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setBooleanValue(int col, boolean value) {
setBoolean(col - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setBoolean(int col, boolean value) {
data.set(col, value);
}

public boolean getBoolean(int col, int srcType) throws SQLException {
Object obj = data.get(col);
public boolean getBoolean(int col, int nativeType) throws SQLException {
Object obj = data.get(col - 1);

switch (srcType) {
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return (Boolean) obj;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return ((Float) obj) == 1.0 ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return ((Double) obj) == 1.0 ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
return ((Byte) obj) == 1 ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
return ((Short) obj) == 1 ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_INT:
return ((Integer) obj) == 1 ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return ((Long) obj) == 1L ? Boolean.TRUE : Boolean.FALSE;
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
case TSDBConstants.TSDB_DATA_TYPE_NCHAR: {
return obj.toString().contains("1");
}
default:
return false;
}
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setByteValue(int colIndex, byte value) {
setByte(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setByte(int col, byte value) {
data.set(col, value);
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setShortValue(int colIndex, short value) {
setShort(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setShort(int col, short value) {
data.set(col, value);
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setIntValue(int colIndex, int value) {
setInt(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setInt(int col, int value) {
data.set(col, value);
}

@SuppressWarnings("deprecation")
public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
public int getInt(int col, int nativeType) throws SQLException {
Object obj = data.get(col - 1);
if (obj == null)
return NullType.getIntNull();

switch (srcType) {
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return Boolean.TRUE.equals(obj) ? 1 : 0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return ((Float) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return ((Double) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
return (Byte) obj;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
return (Short) obj;
case TSDBConstants.TSDB_DATA_TYPE_INT:
return (Integer) obj;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return ((Long) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
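The $$$/!!! comments pin down two index conventions inside one class: JNI-facing setters are 0-based, while the *Value wrappers and all getters take the 1-based indexes JDBC callers use and subtract one internally. A small illustrative sketch of the contract, assuming clear() pre-fills the backing list as the constructor suggests:

```java
// Illustrative only: the same first column addressed through both conventions.
class RowIndexSketch {
    static void demo() throws java.sql.SQLException {
        TSDBResultSetRowData row = new TSDBResultSetRowData(1);
        row.setInt(0, 42);       // JNI-style setter: 0-based
        row.setIntValue(1, 42);  // JDBC-style wrapper: 1-based, same slot
        int v = row.getInt(1, TSDBConstants.TSDB_DATA_TYPE_INT); // getters: 1-based
        assert v == 42;
    }
}
```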
@ -131,33 +171,46 @@ public class TSDBResultSetRowData {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return Long.valueOf(value).intValue();
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return ((Float) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return ((Double) obj).intValue();
default:
return 0;
}

return 0;
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setLongValue(int colIndex, long value) {
setLong(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setLong(int col, long value) {
data.set(col, value);
}

public long getLong(int col, int srcType) throws SQLException {
Object obj = data.get(col);
public long getLong(int col, int nativeType) throws SQLException {
Object obj = data.get(col - 1);
if (obj == null) {
return NullType.getBigIntNull();
}

switch (srcType) {
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return Boolean.TRUE.equals(obj) ? 1 : 0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return ((Float) obj).longValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return ((Double) obj).longValue();
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
return (Byte) obj;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
return (Short) obj;
case TSDBConstants.TSDB_DATA_TYPE_INT:
return (Integer) obj;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return (Long) obj;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
@ -186,19 +239,35 @@ public class TSDBResultSetRowData {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE);
return value;
}
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return ((Float) obj).longValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return ((Double) obj).longValue();
default:
return 0;
}

return 0;
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setFloatValue(int colIndex, float value) {
setFloat(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setFloat(int col, float value) {
data.set(col, value);
}

public float getFloat(int col, int srcType) {
Object obj = data.get(col);
public float getFloat(int col, int nativeType) {
Object obj = data.get(col - 1);
if (obj == null)
return NullType.getFloatNull();

switch (srcType) {
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return Boolean.TRUE.equals(obj) ? 1 : 0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:

@ -214,19 +283,31 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return (Long) obj;
default:
return NullType.getFloatNull();
}

return 0;
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setDoubleValue(int colIndex, double value) {
setDouble(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setDouble(int col, double value) {
data.set(col, value);
}

public double getDouble(int col, int srcType) {
Object obj = data.get(col);
public double getDouble(int col, int nativeType) {
Object obj = data.get(col - 1);
if (obj == null)
return NullType.getDoubleNull();

switch (srcType) {
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return Boolean.TRUE.equals(obj) ? 1 : 0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
@ -242,16 +323,46 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return (Long) obj;
default:
return NullType.getDoubleNull();
}

return 0;
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setStringValue(int colIndex, String value) {
data.set(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setString(int col, String value) {
data.set(col, value);
// TODO:
// !!!NOTE!!!
// this is a very confusing problem related to the JNI-method implementation:
// the JNI method returns a String (encoded in UTF) for a BINARY value, which means the JNI method will invoke
// this setString(int, String) to handle the BINARY value, so we need to build a byte[] with the default charsetEncoding
data.set(col, value == null ? null : value.getBytes());
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setByteArrayValue(int colIndex, byte[] value) {
setByteArray(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setByteArray(int col, byte[] value) {
// TODO:
// !!!NOTE!!!
// this is a very confusing problem related to the JNI-method implementation:
// the JNI method returns a byte[] for an NCHAR value, which means the JNI method will invoke
// this setByteArr(int, byte[]) to handle the NCHAR value, so we need to build a String with the charsetEncoding from TaosGlobalConfig
try {
data.set(col, new String(value, TaosGlobalConfig.getCharset()));
} catch (Exception e) {
@ -259,47 +370,56 @@ public class TSDBResultSetRowData {
}
}

/**
* The original type may not be a string type; it is converted to one by calling this method
*
* @param col column index
* @return
*/
public String getString(int col, int srcType) {
switch (srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return (String) data.get(col);
public String getString(int col, int nativeType) {
Object obj = data.get(col - 1);
if (obj == null)
return null;

switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: {
Byte value = new Byte(String.valueOf(data.get(col)));
Byte value = new Byte(String.valueOf(obj));
if (value >= 0)
return value.toString();
return Integer.toString(value & 0xff);
}
case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: {
Short value = new Short(String.valueOf(data.get(col)));
Short value = new Short(String.valueOf(obj));
if (value >= 0)
return value.toString();
return Integer.toString(value & 0xffff);
}
case TSDBConstants.TSDB_DATA_TYPE_UINT: {
Integer value = new Integer(String.valueOf(data.get(col)));
Integer value = new Integer(String.valueOf(obj));
if (value >= 0)
return value.toString();
return Long.toString(value & 0xffffffffl);
}
case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: {
Long value = new Long(String.valueOf(data.get(col)));
Long value = new Long(String.valueOf(obj));
if (value >= 0)
return value.toString();
long lowValue = value & 0x7fffffffffffffffL;
return BigDecimal.valueOf(lowValue).add(BigDecimal.valueOf(Long.MAX_VALUE)).add(BigDecimal.valueOf(1)).toString();
}
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return new String((byte[]) obj);
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return (String) obj;
default:
return String.valueOf(data.get(col));
return String.valueOf(obj);
}
}

/**
* $$$ this method is invoked by databaseMetaDataResultSet and so on, which use an index starting from 1 as in the JDBC API
*/
public void setTimestampValue(int colIndex, long value) {
setTimestamp(colIndex - 1, value);
}

/**
* !!! this method is invoked by JNI methods, where the index starts from 0 in the C implementation
*/
public void setTimestamp(int col, long ts) {
//TODO: this implementation contains a logical error:
// when the precision is us, the (long ts) is a 16-digit number

@ -316,28 +436,20 @@ public class TSDBResultSetRowData {
}
}

public Timestamp getTimestamp(int col) {
return (Timestamp) data.get(col);
public Timestamp getTimestamp(int col, int nativeType) {
Object obj = data.get(col - 1);
if (obj == null)
return null;
switch (nativeType) {
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return new Timestamp((Long) obj);
default:
return (Timestamp) obj;
}
}

public Object get(int col) {
return data.get(col);
public Object getObject(int col) {
return data.get(col - 1);
}

public int getColSize() {
return colSize;
}

private void setColSize(int colSize) {
this.colSize = colSize;
this.clear();
}

public ArrayList<Object> getData() {
return data;
}

public void setData(ArrayList<Object> data) {
this.data = (ArrayList<Object>) data.clone();
}
}
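In getString above, unsigned columns arrive in signed Java primitives, so values that print as negative are re-widened with a mask: a raw byte of -2 is the unsigned tinyint 254 (255 itself is reserved for NULL per the comment in TSDBConstants). A small arithmetic sketch of the masking:

```java
// Unsigned re-widening as used by getString for the unsigned column types.
class UnsignedWideningSketch {
    public static void main(String[] args) {
        byte rawTiny = (byte) 0xFE;               // signed -2 from the native layer
        System.out.println(rawTiny & 0xff);       // 254: unsigned tinyint value
        short rawSmall = (short) 0xFFFE;          // signed -2
        System.out.println(rawSmall & 0xffff);    // 65534: unsigned smallint
        int rawInt = 0xFFFFFFFE;                  // signed -2
        System.out.println(rawInt & 0xffffffffL); // 4294967294: unsigned int
    }
}
```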
@ -32,14 +32,15 @@ public class TSDBStatement extends AbstractStatement {
}

public ResultSet executeQuery(String sql) throws SQLException {
// check if closed
if (isClosed()) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
}

//TODO: if an insert statement is executed via executeQuery, the SQL runs first and only then is pSql checked to see whether it is an insert statement, but by that point the insert has already executed successfully

// execute query
//TODO:
// this is an unreasonable implementation: if the parameter is an insert statement,
// the JNI connector will execute the sql first and return a pointer: pSql,
// we use this pSql and invoke the isUpdateQuery(long pSql) method to decide,
// but the insert sql has already been executed in the database.
//execute query
long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
if (this.connection.getConnector().isUpdateQuery(pSql)) {
@ -95,16 +95,7 @@ public class Utils {
public static String getNativeSql(String rawSql, Object[] parameters) {
// toLowerCase
String preparedSql = rawSql.trim().toLowerCase();

String[] clause = new String[0];
if (SqlSyntaxValidator.isInsertSql(preparedSql)) {
// insert or import
clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)"};
}
if (SqlSyntaxValidator.isSelectSql(preparedSql)) {
// select
clause = new String[]{"where\\s*.*"};
}
String[] clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)", "where\\s*.*"};
Map<Integer, Integer> placeholderPositions = new HashMap<>();
RangeSet<Integer> clauseRangeSet = TreeRangeSet.create();
findPlaceholderPosition(preparedSql, placeholderPositions);
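With the clause patterns applied unconditionally, one code path now serves insert/import (values/tags clauses) and select (where clause) alike. A hypothetical call; the SQL and parameter values are illustrative:

```java
// Hypothetical usage of the clause-based placeholder substitution.
class NativeSqlSketch {
    public static void main(String[] args) {
        Object[] params = new Object[]{1609459200000L, 23.5};
        String nativeSql = Utils.getNativeSql("insert into d1001 values(?, ?)", params);
        // Placeholders inside the values(...) clause are replaced with the bound values.
        System.out.println(nativeSql);
    }
}
```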
@ -32,20 +32,34 @@ public class TSDBConnectionTest {
}

@Test
public void subscribe() {
public void runSubscribe() {
try {
// given
TSDBConnection unwrap = conn.unwrap(TSDBConnection.class);
TSDBSubscribe subscribe = unwrap.subscribe("topic1", "select * from log.log", false);
// when
TSDBResultSet rs = subscribe.consume();
ResultSetMetaData metaData = rs.getMetaData();
for (int count = 0; count < 10 && rs.next(); count++) {
for (int i = 1; i <= metaData.getColumnCount(); i++) {
String value = rs.getString(i);
System.out.print(metaData.getColumnLabel(i) + ":" + value + "\t");
}
System.out.println();
}

// then
Assert.assertNotNull(rs);
Assert.assertEquals(4, metaData.getColumnCount());
Assert.assertEquals("ts", metaData.getColumnLabel(1));
Assert.assertEquals("level", metaData.getColumnLabel(2));
Assert.assertEquals("content", metaData.getColumnLabel(3));
Assert.assertEquals("ipaddr", metaData.getColumnLabel(4));
rs.next();
// row 1
{
Assert.assertNotNull(rs.getTimestamp(1));
Assert.assertNotNull(rs.getTimestamp("ts"));
Assert.assertNotNull(rs.getByte(2));
Assert.assertNotNull(rs.getByte("level"));
Assert.assertNotNull(rs.getString(3));
Assert.assertNotNull(rs.getString("content"));
Assert.assertNotNull(rs.getString(4));
Assert.assertNotNull(rs.getString("ipaddr"));
}
subscribe.close(false);
} catch (SQLException e) {
e.printStackTrace();
@ -7,9 +7,11 @@ import java.util.Properties;
|
|||
|
||||
public class TSDBDatabaseMetaDataTest {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
|
||||
private static Connection connection;
|
||||
private static TSDBDatabaseMetaData metaData;
|
||||
|
||||
|
||||
@Test
|
||||
public void unwrap() throws SQLException {
|
||||
TSDBDatabaseMetaData unwrap = metaData.unwrap(TSDBDatabaseMetaData.class);
|
||||
|
@ -33,7 +35,7 @@ public class TSDBDatabaseMetaDataTest {
|
|||
|
||||
@Test
|
||||
public void getURL() throws SQLException {
|
||||
Assert.assertEquals("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", metaData.getURL());
|
||||
Assert.assertEquals(url, metaData.getURL());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -627,17 +629,32 @@ public class TSDBDatabaseMetaDataTest {
|
|||
|
||||
@Test
|
||||
public void getTables() throws SQLException {
|
||||
System.out.println("****************************************************");
|
||||
ResultSet tables = metaData.getTables("log", "", null, null);
|
||||
ResultSetMetaData metaData = tables.getMetaData();
|
||||
while (tables.next()) {
|
||||
System.out.print(metaData.getColumnLabel(1) + ":" + tables.getString(1) + "\t");
|
||||
System.out.print(metaData.getColumnLabel(3) + ":" + tables.getString(3) + "\t");
|
||||
System.out.print(metaData.getColumnLabel(4) + ":" + tables.getString(4) + "\t");
|
||||
System.out.print(metaData.getColumnLabel(5) + ":" + tables.getString(5) + "\n");
|
||||
ResultSet rs = metaData.getTables("log", "", null, null);
|
||||
ResultSetMetaData meta = rs.getMetaData();
|
||||
Assert.assertNotNull(rs);
|
||||
rs.next();
|
||||
{
|
||||
// TABLE_CAT
|
||||
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
|
||||
Assert.assertEquals("log", rs.getString(1));
|
||||
Assert.assertEquals("log", rs.getString("TABLE_CAT"));
|
||||
// TABLE_SCHEM
|
||||
Assert.assertEquals("TABLE_SCHEM", meta.getColumnLabel(2));
|
||||
Assert.assertEquals(null, rs.getString(2));
|
||||
Assert.assertEquals(null, rs.getString("TABLE_SCHEM"));
|
||||
// TABLE_NAME
|
||||
Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
|
||||
Assert.assertNotNull(rs.getString(3));
|
||||
Assert.assertNotNull(rs.getString("TABLE_NAME"));
|
||||
// TABLE_TYPE
|
||||
Assert.assertEquals("TABLE_TYPE", meta.getColumnLabel(4));
|
||||
Assert.assertEquals("TABLE", rs.getString(4));
|
||||
Assert.assertEquals("TABLE", rs.getString("TABLE_TYPE"));
|
||||
// REMARKS
|
||||
Assert.assertEquals("REMARKS", meta.getColumnLabel(5));
|
||||
Assert.assertEquals("", rs.getString(5));
|
||||
Assert.assertEquals("", rs.getString("REMARKS"));
|
||||
}
|
||||
System.out.println();
|
||||
Assert.assertNotNull(tables);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -647,46 +664,130 @@ public class TSDBDatabaseMetaDataTest {
|
|||
|
||||
    @Test
    public void getCatalogs() throws SQLException {
        System.out.println("****************************************************");

        ResultSet catalogs = metaData.getCatalogs();
        ResultSetMetaData meta = catalogs.getMetaData();
        while (catalogs.next()) {
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                System.out.print(meta.getColumnLabel(i) + ": " + catalogs.getString(i));
            }
            System.out.println();
        ResultSet rs = metaData.getCatalogs();
        ResultSetMetaData meta = rs.getMetaData();
        rs.next();
        {
            // TABLE_CAT
            Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
            Assert.assertNotNull(rs.getString(1));
            Assert.assertNotNull(rs.getString("TABLE_CAT"));
        }
    }

    @Test
    public void getTableTypes() throws SQLException {
        System.out.println("****************************************************");

        ResultSet tableTypes = metaData.getTableTypes();
        while (tableTypes.next()) {
            System.out.println(tableTypes.getString("TABLE_TYPE"));
        tableTypes.next();
        // tableTypes: table
        {
            Assert.assertEquals("TABLE", tableTypes.getString(1));
            Assert.assertEquals("TABLE", tableTypes.getString("TABLE_TYPE"));
        }
        tableTypes.next();
        // tableTypes: stable
        {
            Assert.assertEquals("STABLE", tableTypes.getString(1));
            Assert.assertEquals("STABLE", tableTypes.getString("TABLE_TYPE"));
        }
        Assert.assertNotNull(metaData.getTableTypes());
    }
    @Test
    public void getColumns() throws SQLException {
        System.out.println("****************************************************");

        // when
        ResultSet columns = metaData.getColumns("log", "", "dn", "");
        // then
        ResultSetMetaData meta = columns.getMetaData();
        while (columns.next()) {
            System.out.print(meta.getColumnLabel(1) + ": " + columns.getString(1) + "\t");
            System.out.print(meta.getColumnLabel(3) + ": " + columns.getString(3) + "\t");
            System.out.print(meta.getColumnLabel(4) + ": " + columns.getString(4) + "\t");
            System.out.print(meta.getColumnLabel(5) + ": " + columns.getString(5) + "\t");
            System.out.print(meta.getColumnLabel(6) + ": " + columns.getString(6) + "\t");
            System.out.print(meta.getColumnLabel(7) + ": " + columns.getString(7) + "\t");
            System.out.print(meta.getColumnLabel(9) + ": " + columns.getString(9) + "\t");
            System.out.print(meta.getColumnLabel(10) + ": " + columns.getString(10) + "\t");
            System.out.print(meta.getColumnLabel(11) + ": " + columns.getString(11) + "\n");
            System.out.print(meta.getColumnLabel(12) + ": " + columns.getString(12) + "\n");
        columns.next();
        // column: 1
        {
            // TABLE_CAT
            Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
            Assert.assertEquals("log", columns.getString(1));
            Assert.assertEquals("log", columns.getString("TABLE_CAT"));
            // TABLE_NAME
            Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
            Assert.assertEquals("dn", columns.getString(3));
            Assert.assertEquals("dn", columns.getString("TABLE_NAME"));
            // COLUMN_NAME
            Assert.assertEquals("COLUMN_NAME", meta.getColumnLabel(4));
            Assert.assertEquals("ts", columns.getString(4));
            Assert.assertEquals("ts", columns.getString("COLUMN_NAME"));
            // DATA_TYPE
            Assert.assertEquals("DATA_TYPE", meta.getColumnLabel(5));
            Assert.assertEquals(Types.TIMESTAMP, columns.getInt(5));
            Assert.assertEquals(Types.TIMESTAMP, columns.getInt("DATA_TYPE"));
            // TYPE_NAME
            Assert.assertEquals("TYPE_NAME", meta.getColumnLabel(6));
            Assert.assertEquals("TIMESTAMP", columns.getString(6));
            Assert.assertEquals("TIMESTAMP", columns.getString("TYPE_NAME"));
            // COLUMN_SIZE
            Assert.assertEquals("COLUMN_SIZE", meta.getColumnLabel(7));
            Assert.assertEquals(26, columns.getInt(7));
            Assert.assertEquals(26, columns.getInt("COLUMN_SIZE"));
            // DECIMAL_DIGITS
            Assert.assertEquals("DECIMAL_DIGITS", meta.getColumnLabel(9));
            Assert.assertEquals(Integer.MIN_VALUE, columns.getInt(9));
            Assert.assertEquals(Integer.MIN_VALUE, columns.getInt("DECIMAL_DIGITS"));
            Assert.assertEquals(null, columns.getString(9));
            Assert.assertEquals(null, columns.getString("DECIMAL_DIGITS"));
            // NUM_PREC_RADIX
            Assert.assertEquals("NUM_PREC_RADIX", meta.getColumnLabel(10));
            Assert.assertEquals(10, columns.getInt(10));
            Assert.assertEquals(10, columns.getInt("NUM_PREC_RADIX"));
            // NULLABLE
            Assert.assertEquals("NULLABLE", meta.getColumnLabel(11));
            Assert.assertEquals(DatabaseMetaData.columnNoNulls, columns.getInt(11));
            Assert.assertEquals(DatabaseMetaData.columnNoNulls, columns.getInt("NULLABLE"));
            // REMARKS
            Assert.assertEquals("REMARKS", meta.getColumnLabel(12));
            Assert.assertEquals(null, columns.getString(12));
            Assert.assertEquals(null, columns.getString("REMARKS"));
        }
        columns.next();
        // column: 2
        {
            // TABLE_CAT
            Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
            Assert.assertEquals("log", columns.getString(1));
            Assert.assertEquals("log", columns.getString("TABLE_CAT"));
            // TABLE_NAME
            Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
            Assert.assertEquals("dn", columns.getString(3));
            Assert.assertEquals("dn", columns.getString("TABLE_NAME"));
            // COLUMN_NAME
            Assert.assertEquals("COLUMN_NAME", meta.getColumnLabel(4));
            Assert.assertEquals("cpu_taosd", columns.getString(4));
            Assert.assertEquals("cpu_taosd", columns.getString("COLUMN_NAME"));
            // DATA_TYPE
            Assert.assertEquals("DATA_TYPE", meta.getColumnLabel(5));
            Assert.assertEquals(Types.FLOAT, columns.getInt(5));
            Assert.assertEquals(Types.FLOAT, columns.getInt("DATA_TYPE"));
            // TYPE_NAME
            Assert.assertEquals("TYPE_NAME", meta.getColumnLabel(6));
            Assert.assertEquals("FLOAT", columns.getString(6));
            Assert.assertEquals("FLOAT", columns.getString("TYPE_NAME"));
            // COLUMN_SIZE
            Assert.assertEquals("COLUMN_SIZE", meta.getColumnLabel(7));
            Assert.assertEquals(12, columns.getInt(7));
            Assert.assertEquals(12, columns.getInt("COLUMN_SIZE"));
            // DECIMAL_DIGITS
            Assert.assertEquals("DECIMAL_DIGITS", meta.getColumnLabel(9));
            Assert.assertEquals(Integer.MIN_VALUE, columns.getInt(9));
            Assert.assertEquals(Integer.MIN_VALUE, columns.getInt("DECIMAL_DIGITS"));
            Assert.assertEquals(null, columns.getString(9));
            Assert.assertEquals(null, columns.getString("DECIMAL_DIGITS"));
            // NUM_PREC_RADIX
            Assert.assertEquals("NUM_PREC_RADIX", meta.getColumnLabel(10));
            Assert.assertEquals(10, columns.getInt(10));
            Assert.assertEquals(10, columns.getInt("NUM_PREC_RADIX"));
            // NULLABLE
            Assert.assertEquals("NULLABLE", meta.getColumnLabel(11));
            Assert.assertEquals(DatabaseMetaData.columnNullable, columns.getInt(11));
            Assert.assertEquals(DatabaseMetaData.columnNullable, columns.getInt("NULLABLE"));
            // REMARKS
            Assert.assertEquals("REMARKS", meta.getColumnLabel(12));
            Assert.assertEquals(null, columns.getString(12));
        }
    }
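The assertions above depend on the driver mapping TDengine column types onto generic `java.sql.Types` codes (`DATA_TYPE`) alongside driver-specific names (`TYPE_NAME`). A minimal sketch that walks the same `getColumns` result generically, under the same fixture assumptions (address, `log` database, `dn` table):

```java
import java.sql.*;

public class GetColumnsSketch {
    public static void main(String[] args) throws SQLException {
        // Address and table names are assumptions from the test fixture above.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")) {
            DatabaseMetaData dbMeta = conn.getMetaData();
            try (ResultSet rs = dbMeta.getColumns("log", "", "dn", "")) {
                while (rs.next()) {
                    // DATA_TYPE is an int code from java.sql.Types;
                    // TYPE_NAME is the driver's name ("TIMESTAMP", "FLOAT", ...).
                    System.out.printf("%-12s type=%d (%s) size=%d nullable=%d%n",
                            rs.getString("COLUMN_NAME"),
                            rs.getInt("DATA_TYPE"),
                            rs.getString("TYPE_NAME"),
                            rs.getInt("COLUMN_SIZE"),
                            rs.getInt("NULLABLE"));
                }
            }
        }
    }
}
```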

@ -712,17 +813,35 @@ public class TSDBDatabaseMetaDataTest {
    @Test
    public void getPrimaryKeys() throws SQLException {
        System.out.println("****************************************************");

        ResultSet rs = metaData.getPrimaryKeys("log", "", "dn1");
        while (rs.next()) {
            System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
            System.out.println("COLUMN_NAME: " + rs.getString("COLUMN_NAME"));
            System.out.println("KEY_SEQ: " + rs.getString("KEY_SEQ"));
            System.out.println("PK_NAME: " + rs.getString("PK_NAME"));
        ResultSetMetaData meta = rs.getMetaData();
        rs.next();
        {
            // TABLE_CAT
            Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
            Assert.assertEquals("log", rs.getString(1));
            Assert.assertEquals("log", rs.getString("TABLE_CAT"));
            // TABLE_SCHEM
            Assert.assertEquals("TABLE_SCHEM", meta.getColumnLabel(2));
            Assert.assertEquals(null, rs.getString(2));
            Assert.assertEquals(null, rs.getString("TABLE_SCHEM"));
            // TABLE_NAME
            Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
            Assert.assertEquals("dn1", rs.getString(3));
            Assert.assertEquals("dn1", rs.getString("TABLE_NAME"));
            // COLUMN_NAME
            Assert.assertEquals("COLUMN_NAME", meta.getColumnLabel(4));
            Assert.assertEquals("ts", rs.getString(4));
            Assert.assertEquals("ts", rs.getString("COLUMN_NAME"));
            // KEY_SEQ
            Assert.assertEquals("KEY_SEQ", meta.getColumnLabel(5));
            Assert.assertEquals(1, rs.getShort(5));
            Assert.assertEquals(1, rs.getShort("KEY_SEQ"));
            // PK_NAME
            Assert.assertEquals("PK_NAME", meta.getColumnLabel(6));
            Assert.assertEquals("ts", rs.getString(6));
            Assert.assertEquals("ts", rs.getString("PK_NAME"));
        }

        Assert.assertNotNull(rs);
    }
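In TDengine every table's first column is the timestamp and serves as its primary key, which is why the test expects exactly one key row named `ts` with `KEY_SEQ` 1. A minimal sketch of the same lookup, with host and table names assumed from the fixture:

```java
import java.sql.*;

public class PrimaryKeySketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")) {
            // "log" and "dn1" are assumptions from the test fixture above.
            try (ResultSet rs = conn.getMetaData().getPrimaryKeys("log", "", "dn1")) {
                while (rs.next()) {
                    System.out.printf("%s.%s key#%d -> %s%n",
                            rs.getString("TABLE_NAME"),
                            rs.getString("COLUMN_NAME"),
                            rs.getShort("KEY_SEQ"),
                            rs.getString("PK_NAME"));
                }
            }
        }
    }
}
```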

    @Test

@ -847,14 +966,27 @@ public class TSDBDatabaseMetaDataTest {
    @Test
    public void getSuperTables() throws SQLException {
        System.out.println("****************************************************");

        ResultSet rs = metaData.getSuperTables("log", "", "dn1");
        while (rs.next()) {
            System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
            System.out.println("SUPERTABLE_NAME: " + rs.getString("SUPERTABLE_NAME"));
        ResultSetMetaData meta = rs.getMetaData();
        rs.next();
        {
            // TABLE_CAT
            Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
            Assert.assertEquals("log", rs.getString(1));
            Assert.assertEquals("log", rs.getString("TABLE_CAT"));
            // TABLE_SCHEM
            Assert.assertEquals("TABLE_SCHEM", meta.getColumnLabel(2));
            Assert.assertEquals(null, rs.getString(2));
            Assert.assertEquals(null, rs.getString("TABLE_SCHEM"));
            // TABLE_NAME
            Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
            Assert.assertEquals("dn1", rs.getString(3));
            Assert.assertEquals("dn1", rs.getString("TABLE_NAME"));
            // SUPERTABLE_NAME
            Assert.assertEquals("SUPERTABLE_NAME", meta.getColumnLabel(4));
            Assert.assertEquals("dn", rs.getString(4));
            Assert.assertEquals("dn", rs.getString("SUPERTABLE_NAME"));
        }
        Assert.assertNotNull(rs);
    }

    @Test

@ -951,15 +1083,12 @@ public class TSDBDatabaseMetaDataTest {

    @BeforeClass
    public static void beforeClass() {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
            connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties);
            connection = DriverManager.getConnection(url, properties);
            metaData = connection.getMetaData().unwrap(TSDBDatabaseMetaData.class);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (SQLException e) {
            e.printStackTrace();
        }
@ -45,9 +45,9 @@ public class TSDBJNIConnectorTest {

        rowData = new TSDBResultSetRowData(columnSize);
        // iterate resultSet
        for (int i = 0; next(connector, pSql); i++) {
            System.out.println("col[" + i + "] size: " + rowData.getColSize());
            rowData.getData().stream().forEach(col -> System.out.print(col + "\t"));
            System.out.println();
            // System.out.println("col[" + i + "] size: " + rowData.getColSize());
            // rowData.getData().stream().forEach(col -> System.out.print(col + "\t"));
            // System.out.println();
        }
        // close resultSet
        code = connector.freeResultSet(pSql);
@ -54,16 +54,17 @@ public class TSDBParameterMetaDataTest {

    @Test
    public void getPrecision() throws SQLException {
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(1));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(2));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(3));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(4));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(5));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(6));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(7));
        Assert.assertEquals(0, parameterMetaData_insert.getPrecision(8));
        Assert.assertEquals(5, parameterMetaData_insert.getPrecision(9));
        Assert.assertEquals(5, parameterMetaData_insert.getPrecision(10));
        //create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))
        Assert.assertEquals(TSDBConstants.TIMESTAMP_MS_PRECISION, parameterMetaData_insert.getPrecision(1));
        Assert.assertEquals(TSDBConstants.INT_PRECISION, parameterMetaData_insert.getPrecision(2));
        Assert.assertEquals(TSDBConstants.BIGINT_PRECISION, parameterMetaData_insert.getPrecision(3));
        Assert.assertEquals(TSDBConstants.FLOAT_PRECISION, parameterMetaData_insert.getPrecision(4));
        Assert.assertEquals(TSDBConstants.DOUBLE_PRECISION, parameterMetaData_insert.getPrecision(5));
        Assert.assertEquals(TSDBConstants.SMALLINT_PRECISION, parameterMetaData_insert.getPrecision(6));
        Assert.assertEquals(TSDBConstants.TINYINT_PRECISION, parameterMetaData_insert.getPrecision(7));
        Assert.assertEquals(TSDBConstants.BOOLEAN_PRECISION, parameterMetaData_insert.getPrecision(8));
        Assert.assertEquals("hello".getBytes().length, parameterMetaData_insert.getPrecision(9));
        Assert.assertEquals("涛思数据".length(), parameterMetaData_insert.getPrecision(10));
    }

    @Test
@ -71,8 +72,8 @@ public class TSDBParameterMetaDataTest {

        Assert.assertEquals(0, parameterMetaData_insert.getScale(1));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(2));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(3));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(4));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(5));
        Assert.assertEquals(31, parameterMetaData_insert.getScale(4));
        Assert.assertEquals(31, parameterMetaData_insert.getScale(5));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(6));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(7));
        Assert.assertEquals(0, parameterMetaData_insert.getScale(8));
@ -124,10 +125,16 @@ public class TSDBParameterMetaDataTest {

    @Test
    public void getParameterMode() throws SQLException {
        for (int i = 1; i <= parameterMetaData_insert.getParameterCount(); i++) {
            int parameterMode = parameterMetaData_insert.getParameterMode(i);
            Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMode);
        }
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(1));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(2));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(3));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(4));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(5));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(6));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(7));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(8));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(9));
        Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMetaData_insert.getParameterMode(10));
    }

    @Test
@ -144,7 +151,6 @@ public class TSDBParameterMetaDataTest {

    @BeforeClass
    public static void beforeClass() {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("drop database if exists test_pstmt");

@ -164,7 +170,7 @@ public class TSDBParameterMetaDataTest {

            pstmt_insert.setObject(7, Byte.MAX_VALUE);
            pstmt_insert.setObject(8, true);
            pstmt_insert.setObject(9, "hello".getBytes());
            pstmt_insert.setObject(10, "Hello");
            pstmt_insert.setObject(10, "涛思数据");
            parameterMetaData_insert = pstmt_insert.getParameterMetaData();

            pstmt_select = conn.prepareStatement(sql_select);

@ -173,7 +179,7 @@ public class TSDBParameterMetaDataTest {

            pstmt_select.setInt(3, 0);
            parameterMetaData_select = pstmt_select.getParameterMetaData();

        } catch (ClassNotFoundException | SQLException e) {
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
@ -1,102 +1,301 @@

package com.taosdata.jdbc;

import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.*;

import java.io.IOException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.sql.*;
import java.time.LocalTime;
import java.util.ArrayList;
import java.util.Random;

public class TSDBPreparedStatementTest {

    private static final String host = "127.0.0.1";
    private static Connection conn;
    private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    private static PreparedStatement pstmt_insert;
    private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?";
    private static PreparedStatement pstmt_select;
    private static final String sql_select = "select * from t1 where ts >= ? and ts < ? and f1 >= ?";

    private PreparedStatement pstmt_insert;
    private PreparedStatement pstmt_select;
    //create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))
    @Test
    public void executeQuery() throws SQLException {
        long end = System.currentTimeMillis();
        long start = end - 1000 * 60 * 60;
        // given
        long ts = System.currentTimeMillis();
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setInt(2, 2);
        pstmt_insert.setLong(3, 3l);
        pstmt_insert.setFloat(4, 3.14f);
        pstmt_insert.setDouble(5, 3.1415);
        pstmt_insert.setShort(6, (short) 6);
        pstmt_insert.setByte(7, (byte) 7);
        pstmt_insert.setBoolean(8, true);
        pstmt_insert.setBytes(9, "abc".getBytes());
        pstmt_insert.setString(10, "涛思数据");
        pstmt_insert.executeUpdate();
        long start = ts - 1000 * 60 * 60;
        long end = ts + 1000 * 60 * 60;
        pstmt_select.setTimestamp(1, new Timestamp(start));
        pstmt_select.setTimestamp(2, new Timestamp(end));
        pstmt_select.setInt(3, 0);

        // when
        ResultSet rs = pstmt_select.executeQuery();
        Assert.assertNotNull(rs);
        ResultSetMetaData meta = rs.getMetaData();
        while (rs.next()) {
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
            }
            System.out.println();
        rs.next();

        // then
        assertMetaData(meta);
        {
            Assert.assertNotNull(rs);
            Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
            Assert.assertEquals(2, rs.getInt(2));
            Assert.assertEquals(2, rs.getInt("f1"));
            Assert.assertEquals(3l, rs.getLong(3));
            Assert.assertEquals(3l, rs.getLong("f2"));
            Assert.assertEquals(3.14f, rs.getFloat(4), 0.0);
            Assert.assertEquals(3.14f, rs.getFloat("f3"), 0.0);
            Assert.assertEquals(3.1415, rs.getDouble(5), 0.0);
            Assert.assertEquals(3.1415, rs.getDouble("f4"), 0.0);
            Assert.assertEquals((short) 6, rs.getShort(6));
            Assert.assertEquals((short) 6, rs.getShort("f5"));
            Assert.assertEquals((byte) 7, rs.getByte(7));
            Assert.assertEquals((byte) 7, rs.getByte("f6"));
            Assert.assertTrue(rs.getBoolean(8));
            Assert.assertTrue(rs.getBoolean("f7"));
            Assert.assertArrayEquals("abc".getBytes(), rs.getBytes(9));
            Assert.assertArrayEquals("abc".getBytes(), rs.getBytes("f8"));
            Assert.assertEquals("涛思数据", rs.getString(10));
            Assert.assertEquals("涛思数据", rs.getString("f9"));
        }
    }

    private void assertMetaData(ResultSetMetaData meta) throws SQLException {
        Assert.assertEquals(10, meta.getColumnCount());
        Assert.assertEquals("ts", meta.getColumnLabel(1));
        Assert.assertEquals("f1", meta.getColumnLabel(2));
        Assert.assertEquals("f2", meta.getColumnLabel(3));
        Assert.assertEquals("f3", meta.getColumnLabel(4));
        Assert.assertEquals("f4", meta.getColumnLabel(5));
        Assert.assertEquals("f5", meta.getColumnLabel(6));
        Assert.assertEquals("f6", meta.getColumnLabel(7));
        Assert.assertEquals("f7", meta.getColumnLabel(8));
        Assert.assertEquals("f8", meta.getColumnLabel(9));
        Assert.assertEquals("f9", meta.getColumnLabel(10));
    }
    @Test
    public void setNullForTimestamp() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(2, Types.INTEGER);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }

    private void assertAllNullExceptTimestamp(ResultSet rs, long ts) throws SQLException {
        Assert.assertNotNull(rs);
        Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
        Assert.assertEquals(0, rs.getInt(2));
        Assert.assertEquals(0, rs.getInt("f1"));
        Assert.assertEquals(0, rs.getLong(3));
        Assert.assertEquals(0, rs.getLong("f2"));
        Assert.assertEquals(0, rs.getFloat(4), 0.0);
        Assert.assertEquals(0, rs.getFloat("f3"), 0.0);
        Assert.assertEquals(0, rs.getDouble(5), 0.0);
        Assert.assertEquals(0, rs.getDouble("f4"), 0.0);
        Assert.assertEquals(0, rs.getShort(6));
        Assert.assertEquals(0, rs.getShort("f5"));
        Assert.assertEquals(0, rs.getByte(7));
        Assert.assertEquals(0, rs.getByte("f6"));
        Assert.assertFalse(rs.getBoolean(8));
        Assert.assertFalse(rs.getBoolean("f7"));
        Assert.assertNull(rs.getBytes(9));
        Assert.assertNull(rs.getBytes("f8"));
        Assert.assertNull(rs.getString(10));
        Assert.assertNull(rs.getString("f9"));
    }

    @Test
    public void setNullForInteger() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(3, Types.BIGINT);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void executeUpdate() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setFloat(4, 3.14f);
    public void setNullForFloat() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(4, Types.FLOAT);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void setNull() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setNull(2, Types.INTEGER);
        int result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
    public void setNullForDouble() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setNull(3, Types.BIGINT);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setNull(4, Types.FLOAT);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(5, Types.DOUBLE);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void setNullForSmallInt() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(6, Types.SMALLINT);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void setNullForTinyInt() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(7, Types.TINYINT);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void setNullForBoolean() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(8, Types.BOOLEAN);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void setNullForBinary() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(9, Types.BINARY);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
    @Test
    public void setNullForNchar() throws SQLException {
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setNull(10, Types.NCHAR);
        result = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, result);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setNull(10, Types.OTHER);
        result = pstmt_insert.executeUpdate();
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            assertAllNullExceptTimestamp(rs, ts);
        }
    }
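Each setNullForXxx test above repeats the same given/when/then shape: bind a fresh timestamp, null out one column, insert, then re-read and verify every other column is empty. If the duplication ever becomes a maintenance burden, it could be folded into one parameterized helper inside the test class; a hypothetical sketch (the helper name and structure are illustrative, not part of this patch — `pstmt_insert`, `conn`, `assertMetaData`, and `assertAllNullExceptTimestamp` are the fixtures defined above):

```java
// Hypothetical helper that each setNullForXxx test could delegate to.
private void insertNullAndVerify(int parameterIndex, int sqlType) throws SQLException {
    long ts = System.currentTimeMillis();
    pstmt_insert.setTimestamp(1, new java.sql.Timestamp(ts));
    pstmt_insert.setNull(parameterIndex, sqlType);
    Assert.assertEquals(1, pstmt_insert.executeUpdate());
    try (Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("select * from t1")) {
        assertMetaData(rs.getMetaData());
        rs.next();
        assertAllNullExceptTimestamp(rs, ts);
    }
}
```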
    @Test

@ -388,136 +587,358 @@ public class TSDBPreparedStatementTest {

        Assert.assertEquals(numOfRows, rows);
    }

    @Test
    public void createTwoSameDbTest() throws SQLException {
        Statement stmt = conn.createStatement();

        stmt.execute("create database dbtest");
        Assert.assertThrows(SQLException.class, () -> stmt.execute("create database dbtest"));
    }
    @Test
    public void setBoolean() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setBoolean(8, true);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertTrue(rs.getBoolean(8));
                Assert.assertTrue(rs.getBoolean("f7"));
            }
        }
    }
    @Test
    public void setByte() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setByte(7, (byte) 0x001);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals((byte) 0x001, rs.getByte(7));
                Assert.assertEquals((byte) 0x001, rs.getByte("f6"));
            }
        }
    }
    @Test
    public void setShort() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setShort(6, (short) 2);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals((short) 2, rs.getByte(6));
                Assert.assertEquals((short) 2, rs.getByte("f5"));
            }
        }
    }
    @Test
    public void setInt() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setInt(2, 10086);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals(10086, rs.getInt(2));
                Assert.assertEquals(10086, rs.getInt("f1"));
            }
        }
    }
    @Test
    public void setLong() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setLong(3, Long.MAX_VALUE);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals(Long.MAX_VALUE, rs.getLong(3));
                Assert.assertEquals(Long.MAX_VALUE, rs.getLong("f2"));
            }
        }
    }
    @Test
    public void setFloat() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setFloat(4, 3.14f);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals(3.14f, rs.getFloat(4), 0.0f);
                Assert.assertEquals(3.14f, rs.getFloat("f3"), 0.0f);
            }
        }
    }
    @Test
    public void setDouble() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setDouble(5, 3.14444);
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals(3.14444, rs.getDouble(5), 0.0);
                Assert.assertEquals(3.14444, rs.getDouble("f4"), 0.0);
            }
        }
    }
    @Test(expected = SQLFeatureNotSupportedException.class)
    @Test
    public void setBigDecimal() throws SQLException {
        pstmt_insert.setBigDecimal(1, null);
        // given
        long ts = System.currentTimeMillis();
        BigDecimal bigDecimal = new BigDecimal(3.14444);

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setBigDecimal(5, bigDecimal);
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals(3.14444, rs.getDouble(5), 0.0);
                Assert.assertEquals(3.14444, rs.getDouble("f4"), 0.0);
            }
        }
    }
    @Test
    public void setString() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setString(10, "aaaa");
        boolean execute = pstmt_insert.execute();
        Assert.assertFalse(execute);
        // given
        long ts = System.currentTimeMillis();
        String f9 = "{\"name\": \"john\", \"age\": 10, \"address\": \"192.168.1.100\"}";

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setString(10, new Person("john", 33, true).toString());
        Assert.assertFalse(pstmt_insert.execute());
        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setString(10, f9);
        int result = pstmt_insert.executeUpdate();

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setString(10, new Person("john", 33, true).toString().replaceAll("'", "\""));
        Assert.assertFalse(pstmt_insert.execute());
    }

    class Person {
        String name;
        int age;
        boolean sex;

        public Person(String name, int age, boolean sex) {
            this.name = name;
            this.age = age;
            this.sex = sex;
        }

        @Override
        public String toString() {
            return "Person{" +
                    "name='" + name + '\'' +
                    ", age=" + age +
                    ", sex=" + sex +
                    '}';
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertEquals(f9, rs.getString(10));
                Assert.assertEquals(f9, rs.getString("f9"));
            }
        }
    }
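The new setString test round-trips a JSON string through the nchar column, which doubles as a check that embedded double quotes and braces survive parameter binding. The same idea in isolation (a fragment against the fixture above; names are assumptions from the test, not new API):

```java
// Sketch: binding arbitrary text (here JSON) through a prepared statement
// avoids hand-escaping the embedded double quotes in a SQL literal.
String json = "{\"name\": \"john\", \"age\": 10}";
pstmt_insert.setTimestamp(1, new java.sql.Timestamp(System.currentTimeMillis()));
pstmt_insert.setString(10, json);
pstmt_insert.executeUpdate();
```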
    @Test
    public void setBytes() throws SQLException, IOException {
        // given
        long ts = System.currentTimeMillis();
        byte[] f8 = "{\"name\": \"john\", \"age\": 10, \"address\": \"192.168.1.100\"}".getBytes();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setBytes(9, f8);
        int result = pstmt_insert.executeUpdate();

//        ByteArrayOutputStream baos = new ByteArrayOutputStream();
//        ObjectOutputStream oos = new ObjectOutputStream(baos);
//        oos.writeObject(new Person("john", 33, true));
//        oos.flush();
//        byte[] bytes = baos.toByteArray();
//        pstmt_insert.setBytes(9, bytes);

        pstmt_insert.setBytes(9, new Person("john", 33, true).toString().getBytes());
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
                Assert.assertArrayEquals(f8, rs.getBytes(9));
                Assert.assertArrayEquals(f8, rs.getBytes("f8"));
            }
        }
    }
    @Test(expected = SQLFeatureNotSupportedException.class)
    @Test
    public void setDate() throws SQLException {
        pstmt_insert.setDate(1, new Date(System.currentTimeMillis()));
        // given
        long ts = new java.util.Date().getTime();

        // when
        pstmt_insert.setDate(1, new Date(ts));
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
            }
        }
    }
    @Test(expected = SQLFeatureNotSupportedException.class)
    @Test
    public void setTime() throws SQLException {
        pstmt_insert.setTime(1, new Time(System.currentTimeMillis()));
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTime(1, new Time(ts));
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
            }
        }
    }
    @Test
    public void setTimestamp() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        int result = pstmt_insert.executeUpdate();

        // then
        Assert.assertEquals(1, result);
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select * from t1");
            ResultSetMetaData meta = rs.getMetaData();
            assertMetaData(meta);
            rs.next();
            {
                Assert.assertNotNull(rs);
                Assert.assertEquals(ts, rs.getTimestamp(1).getTime());
                Assert.assertEquals(ts, rs.getTimestamp("ts").getTime());
            }
        }
    }
    @Test(expected = SQLFeatureNotSupportedException.class)

@ -530,72 +951,6 @@ public class TSDBPreparedStatementTest {

        pstmt_insert.setBinaryStream(1, null);
    }

    @Test
    public void clearParameters() throws SQLException {
        pstmt_insert.clearParameters();
    }

    @Test
    public void setObject() throws SQLException {
        pstmt_insert.setObject(1, new Timestamp(System.currentTimeMillis()));
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(2, 111);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(3, Long.MAX_VALUE);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(4, 3.14159265354f);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(5, Double.MAX_VALUE);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(6, Short.MAX_VALUE);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(7, Byte.MAX_VALUE);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(8, true);
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(9, "hello".getBytes());
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        pstmt_insert.setObject(10, "Hello");
        ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);
    }

    @Test
    public void execute() throws SQLException {
        pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
        int ret = pstmt_insert.executeUpdate();
        Assert.assertEquals(1, ret);

        executeQuery();
    }

    @Test(expected = SQLFeatureNotSupportedException.class)
    public void setCharacterStream() throws SQLException {
        pstmt_insert.setCharacterStream(1, null);
@ -621,9 +976,17 @@ public class TSDBPreparedStatementTest {

        pstmt_insert.setArray(1, null);
    }

    @Test(expected = SQLFeatureNotSupportedException.class)
    @Test
    public void getMetaData() throws SQLException {
        pstmt_insert.getMetaData();
        // given
        long ts = System.currentTimeMillis();

        // when
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        ResultSetMetaData metaData = pstmt_insert.getMetaData();

        // then
        Assert.assertNull(metaData);
    }

    @Test(expected = SQLFeatureNotSupportedException.class)
@ -633,9 +996,46 @@ public class TSDBPreparedStatementTest {

    @Test
    public void getParameterMetaData() throws SQLException {
        // given
        long ts = System.currentTimeMillis();
        pstmt_insert.setTimestamp(1, new Timestamp(ts));
        pstmt_insert.setInt(2, 2);
        pstmt_insert.setLong(3, 3l);
        pstmt_insert.setFloat(4, 3.14f);
        pstmt_insert.setDouble(5, 3.1415);
        pstmt_insert.setShort(6, (short) 6);
        pstmt_insert.setByte(7, (byte) 7);
        pstmt_insert.setBoolean(8, true);
        pstmt_insert.setBytes(9, "abc".getBytes());
        pstmt_insert.setString(10, "涛思数据");

        // when
        ParameterMetaData parameterMetaData = pstmt_insert.getParameterMetaData();

        // then
        Assert.assertNotNull(parameterMetaData);
        //TODO: modify the test case
        Assert.assertEquals(10, parameterMetaData.getParameterCount());
        Assert.assertEquals(Types.TIMESTAMP, parameterMetaData.getParameterType(1));
        Assert.assertEquals(Types.INTEGER, parameterMetaData.getParameterType(2));
        Assert.assertEquals(Types.BIGINT, parameterMetaData.getParameterType(3));
        Assert.assertEquals(Types.FLOAT, parameterMetaData.getParameterType(4));
        Assert.assertEquals(Types.DOUBLE, parameterMetaData.getParameterType(5));
        Assert.assertEquals(Types.SMALLINT, parameterMetaData.getParameterType(6));
        Assert.assertEquals(Types.TINYINT, parameterMetaData.getParameterType(7));
        Assert.assertEquals(Types.BOOLEAN, parameterMetaData.getParameterType(8));
        Assert.assertEquals(Types.BINARY, parameterMetaData.getParameterType(9));
        Assert.assertEquals(Types.NCHAR, parameterMetaData.getParameterType(10));

        Assert.assertEquals("TIMESTAMP", parameterMetaData.getParameterTypeName(1));
        Assert.assertEquals("INT", parameterMetaData.getParameterTypeName(2));
        Assert.assertEquals("BIGINT", parameterMetaData.getParameterTypeName(3));
        Assert.assertEquals("FLOAT", parameterMetaData.getParameterTypeName(4));
        Assert.assertEquals("DOUBLE", parameterMetaData.getParameterTypeName(5));
        Assert.assertEquals("SMALLINT", parameterMetaData.getParameterTypeName(6));
        Assert.assertEquals("TINYINT", parameterMetaData.getParameterTypeName(7));
        Assert.assertEquals("BOOL", parameterMetaData.getParameterTypeName(8));
        Assert.assertEquals("BINARY", parameterMetaData.getParameterTypeName(9));
        Assert.assertEquals("NCHAR", parameterMetaData.getParameterTypeName(10));
    }
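For comparison with the explicit assertions above, the same information can be dumped generically through the standard `ParameterMetaData` interface; a fragment sketch against the fixture (`pstmt_insert` as assumed above):

```java
// Sketch: generic dump of parameter metadata for the insert statement.
ParameterMetaData pmd = pstmt_insert.getParameterMetaData();
for (int i = 1; i <= pmd.getParameterCount(); i++) {
    System.out.printf("param %d: type=%d name=%s mode=%d%n",
            i,
            pmd.getParameterType(i),       // java.sql.Types code
            pmd.getParameterTypeName(i),   // driver-specific name
            pmd.getParameterMode(i));      // parameterModeUnknown for this driver
}
```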
    @Test(expected = SQLFeatureNotSupportedException.class)

@ -643,9 +1043,9 @@ public class TSDBPreparedStatementTest {

        pstmt_insert.setRowId(1, null);
    }

    @Test(expected = SQLFeatureNotSupportedException.class)
    @Test
    public void setNString() throws SQLException {
        pstmt_insert.setNString(1, null);
        setString();
    }

    @Test(expected = SQLFeatureNotSupportedException.class)
@ -663,22 +1063,45 @@ public class TSDBPreparedStatementTest {

        pstmt_insert.setSQLXML(1, null);
    }

    @Before
    public void before() {
        try {
            Statement stmt = conn.createStatement();
            stmt.execute("drop table if exists weather");
            stmt.execute("create table if not exists weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))");
            stmt.execute("create table if not exists t1 using weather tags('beijing')");
            stmt.close();

            pstmt_insert = conn.prepareStatement(sql_insert);
            pstmt_select = conn.prepareStatement(sql_select);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @After
    public void after() {
        try {
            if (pstmt_insert != null)
                pstmt_insert.close();
            if (pstmt_select != null)
                pstmt_select.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @BeforeClass
    public static void beforeClass() {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("drop database if exists test_pstmt_jni");
                stmt.execute("create database if not exists test_pstmt_jni");
                stmt.execute("use test_pstmt_jni");
                stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))");
                stmt.execute("create table t1 using weather tags('beijing')");
            }
            pstmt_insert = conn.prepareStatement(sql_insert);
            pstmt_select = conn.prepareStatement(sql_select);
        } catch (ClassNotFoundException | SQLException e) {
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
@ -686,10 +1109,6 @@ public class TSDBPreparedStatementTest {

    @AfterClass
    public static void afterClass() {
        try {
            if (pstmt_insert != null)
                pstmt_insert.close();
            if (pstmt_select != null)
                pstmt_select.close();
            if (conn != null)
                conn.close();
        } catch (SQLException e) {
@ -14,6 +14,7 @@ import java.math.BigDecimal;

import java.sql.*;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Arrays;

public class TSDBResultSetTest {

@ -133,7 +134,7 @@ public class TSDBResultSetTest {

        Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f);

        byte[] f6 = rs.getBytes("f6");
        Assert.assertEquals("abc", new String(f6));
        Assert.assertTrue(Arrays.equals("abc".getBytes(), f6));

        byte[] f7 = rs.getBytes("f7");
        Assert.assertEquals((short) 10, Shorts.fromByteArray(f7));
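The replaced assertion compares raw bytes with `Arrays.equals` rather than decoding through `new String(f6)`, so the check no longer depends on the JVM's default charset. The same idea in isolation (fragment; `rs` as in the test above):

```java
import java.util.Arrays;

// Sketch: charset-independent comparison of a binary column value.
byte[] expected = "abc".getBytes();
byte[] actual = rs.getBytes("f6");
Assert.assertTrue(Arrays.equals(expected, actual));
// JUnit also offers Assert.assertArrayEquals(expected, actual),
// which produces a more descriptive failure message.
```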

@ -646,7 +647,6 @@ public class TSDBResultSetTest {

    @BeforeClass
    public static void beforeClass() {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
            stmt = conn.createStatement();
            stmt.execute("create database if not exists restful_test");

@ -656,10 +656,9 @@ public class TSDBResultSetTest {

            stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')");
            rs = stmt.executeQuery("select * from restful_test.weather");
            rs.next();
        } catch (ClassNotFoundException | SQLException e) {
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @AfterClass
@ -387,15 +387,12 @@ public class TSDBStatementTest {

    @BeforeClass
    public static void beforeClass() {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
            properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
            conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties);
            stmt = conn.createStatement();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (SQLException e) {
            e.printStackTrace();
        }
@ -10,6 +10,7 @@ import java.sql.*;
|
|||
import java.util.Properties;
|
||||
|
||||
public class RestfulDatabaseMetaDataTest {
|
||||
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
|
||||
private static Connection connection;
|
||||
|

@ -632,17 +633,32 @@ public class RestfulDatabaseMetaDataTest {

@Test
public void getTables() throws SQLException {
System.out.println("****************************************************");
ResultSet tables = metaData.getTables("log", "", null, null);
ResultSetMetaData metaData = tables.getMetaData();
while (tables.next()) {
System.out.print(metaData.getColumnLabel(1) + ":" + tables.getString(1) + "\t");
System.out.print(metaData.getColumnLabel(3) + ":" + tables.getString(3) + "\t");
System.out.print(metaData.getColumnLabel(4) + ":" + tables.getString(4) + "\t");
System.out.print(metaData.getColumnLabel(5) + ":" + tables.getString(5) + "\n");
ResultSet rs = metaData.getTables("log", "", null, null);
ResultSetMetaData meta = rs.getMetaData();
Assert.assertNotNull(rs);
rs.next();
{
// TABLE_CAT
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
Assert.assertEquals("log", rs.getString(1));
Assert.assertEquals("log", rs.getString("TABLE_CAT"));
// TABLE_SCHEM
Assert.assertEquals("TABLE_SCHEM", meta.getColumnLabel(2));
Assert.assertEquals(null, rs.getString(2));
Assert.assertEquals(null, rs.getString("TABLE_SCHEM"));
// TABLE_NAME
Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
Assert.assertNotNull(rs.getString(3));
Assert.assertNotNull(rs.getString("TABLE_NAME"));
// TABLE_TYPE
Assert.assertEquals("TABLE_TYPE", meta.getColumnLabel(4));
Assert.assertEquals("TABLE", rs.getString(4));
Assert.assertEquals("TABLE", rs.getString("TABLE_TYPE"));
// REMARKS
Assert.assertEquals("REMARKS", meta.getColumnLabel(5));
Assert.assertEquals("", rs.getString(5));
Assert.assertEquals("", rs.getString("REMARKS"));
}
System.out.println();
Assert.assertNotNull(tables);
}
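For reference, the rewritten test exercises the standard JDBC metadata API; a minimal standalone sketch of the same call pattern, assuming a reachable server behind the RESTful URL and the `log` database used in these tests:

```java
import java.sql.*;

// Minimal sketch, not part of the patch: walks DatabaseMetaData.getTables()
// the same way the rewritten test asserts it, printing catalog, name, and type.
public class ListTables {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            DatabaseMetaData dbMeta = conn.getMetaData();
            try (ResultSet rs = dbMeta.getTables("log", "", null, null)) {
                while (rs.next()) {
                    System.out.printf("%s.%s (%s)%n",
                            rs.getString("TABLE_CAT"),   // catalog, e.g. "log"
                            rs.getString("TABLE_NAME"),
                            rs.getString("TABLE_TYPE")); // "TABLE" or "STABLE"
                }
            }
        }
    }
}
```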

@Test

@ -652,46 +668,130 @@ public class RestfulDatabaseMetaDataTest {

@Test
public void getCatalogs() throws SQLException {
System.out.println("****************************************************");

ResultSet catalogs = metaData.getCatalogs();
ResultSetMetaData meta = catalogs.getMetaData();
while (catalogs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + catalogs.getString(i));
}
System.out.println();
ResultSet rs = metaData.getCatalogs();
ResultSetMetaData meta = rs.getMetaData();
rs.next();
{
// TABLE_CAT
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
Assert.assertNotNull(rs.getString(1));
Assert.assertNotNull(rs.getString("TABLE_CAT"));
}
}

@Test
public void getTableTypes() throws SQLException {
System.out.println("****************************************************");

ResultSet tableTypes = metaData.getTableTypes();
while (tableTypes.next()) {
System.out.println(tableTypes.getString("TABLE_TYPE"));
tableTypes.next();
// tableTypes: table
{
Assert.assertEquals("TABLE", tableTypes.getString(1));
Assert.assertEquals("TABLE", tableTypes.getString("TABLE_TYPE"));
}
tableTypes.next();
// tableTypes: stable
{
Assert.assertEquals("STABLE", tableTypes.getString(1));
Assert.assertEquals("STABLE", tableTypes.getString("TABLE_TYPE"));
}
Assert.assertNotNull(metaData.getTableTypes());
}

@Test
public void getColumns() throws SQLException {
System.out.println("****************************************************");

// when
ResultSet columns = metaData.getColumns("log", "", "dn", "");
// then
ResultSetMetaData meta = columns.getMetaData();
while (columns.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + columns.getString(1) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + columns.getString(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + columns.getString(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + columns.getString(5) + "\t");
System.out.print(meta.getColumnLabel(6) + ": " + columns.getString(6) + "\t");
System.out.print(meta.getColumnLabel(7) + ": " + columns.getString(7) + "\t");
System.out.print(meta.getColumnLabel(9) + ": " + columns.getString(9) + "\t");
System.out.print(meta.getColumnLabel(10) + ": " + columns.getString(10) + "\t");
System.out.print(meta.getColumnLabel(11) + ": " + columns.getString(11) + "\n");
System.out.print(meta.getColumnLabel(12) + ": " + columns.getString(12) + "\n");
columns.next();
// column: 1
{
// TABLE_CAT
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
Assert.assertEquals("log", columns.getString(1));
Assert.assertEquals("log", columns.getString("TABLE_CAT"));
// TABLE_NAME
Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
Assert.assertEquals("dn", columns.getString(3));
Assert.assertEquals("dn", columns.getString("TABLE_NAME"));
// COLUMN_NAME
Assert.assertEquals("COLUMN_NAME", meta.getColumnLabel(4));
Assert.assertEquals("ts", columns.getString(4));
Assert.assertEquals("ts", columns.getString("COLUMN_NAME"));
// DATA_TYPE
Assert.assertEquals("DATA_TYPE", meta.getColumnLabel(5));
Assert.assertEquals(Types.TIMESTAMP, columns.getInt(5));
Assert.assertEquals(Types.TIMESTAMP, columns.getInt("DATA_TYPE"));
// TYPE_NAME
Assert.assertEquals("TYPE_NAME", meta.getColumnLabel(6));
Assert.assertEquals("TIMESTAMP", columns.getString(6));
Assert.assertEquals("TIMESTAMP", columns.getString("TYPE_NAME"));
// COLUMN_SIZE
Assert.assertEquals("COLUMN_SIZE", meta.getColumnLabel(7));
Assert.assertEquals(26, columns.getInt(7));
Assert.assertEquals(26, columns.getInt("COLUMN_SIZE"));
// DECIMAL_DIGITS
Assert.assertEquals("DECIMAL_DIGITS", meta.getColumnLabel(9));
Assert.assertEquals(Integer.MIN_VALUE, columns.getInt(9));
Assert.assertEquals(Integer.MIN_VALUE, columns.getInt("DECIMAL_DIGITS"));
Assert.assertEquals(null, columns.getString(9));
Assert.assertEquals(null, columns.getString("DECIMAL_DIGITS"));
// NUM_PREC_RADIX
Assert.assertEquals("NUM_PREC_RADIX", meta.getColumnLabel(10));
Assert.assertEquals(10, columns.getInt(10));
Assert.assertEquals(10, columns.getInt("NUM_PREC_RADIX"));
// NULLABLE
Assert.assertEquals("NULLABLE", meta.getColumnLabel(11));
Assert.assertEquals(DatabaseMetaData.columnNoNulls, columns.getInt(11));
Assert.assertEquals(DatabaseMetaData.columnNoNulls, columns.getInt("NULLABLE"));
// REMARKS
Assert.assertEquals("REMARKS", meta.getColumnLabel(12));
Assert.assertEquals(null, columns.getString(12));
Assert.assertEquals(null, columns.getString("REMARKS"));
}
columns.next();
// column: 2
{
// TABLE_CAT
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
Assert.assertEquals("log", columns.getString(1));
Assert.assertEquals("log", columns.getString("TABLE_CAT"));
// TABLE_NAME
Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
Assert.assertEquals("dn", columns.getString(3));
Assert.assertEquals("dn", columns.getString("TABLE_NAME"));
// COLUMN_NAME
Assert.assertEquals("COLUMN_NAME", meta.getColumnLabel(4));
Assert.assertEquals("cpu_taosd", columns.getString(4));
Assert.assertEquals("cpu_taosd", columns.getString("COLUMN_NAME"));
// DATA_TYPE
Assert.assertEquals("DATA_TYPE", meta.getColumnLabel(5));
Assert.assertEquals(Types.FLOAT, columns.getInt(5));
Assert.assertEquals(Types.FLOAT, columns.getInt("DATA_TYPE"));
// TYPE_NAME
Assert.assertEquals("TYPE_NAME", meta.getColumnLabel(6));
Assert.assertEquals("FLOAT", columns.getString(6));
Assert.assertEquals("FLOAT", columns.getString("TYPE_NAME"));
// COLUMN_SIZE
Assert.assertEquals("COLUMN_SIZE", meta.getColumnLabel(7));
Assert.assertEquals(12, columns.getInt(7));
Assert.assertEquals(12, columns.getInt("COLUMN_SIZE"));
// DECIMAL_DIGITS
Assert.assertEquals("DECIMAL_DIGITS", meta.getColumnLabel(9));
Assert.assertEquals(Integer.MIN_VALUE, columns.getInt(9));
Assert.assertEquals(Integer.MIN_VALUE, columns.getInt("DECIMAL_DIGITS"));
Assert.assertEquals(null, columns.getString(9));
Assert.assertEquals(null, columns.getString("DECIMAL_DIGITS"));
// NUM_PREC_RADIX
Assert.assertEquals("NUM_PREC_RADIX", meta.getColumnLabel(10));
Assert.assertEquals(10, columns.getInt(10));
Assert.assertEquals(10, columns.getInt("NUM_PREC_RADIX"));
// NULLABLE
Assert.assertEquals("NULLABLE", meta.getColumnLabel(11));
Assert.assertEquals(DatabaseMetaData.columnNullable, columns.getInt(11));
Assert.assertEquals(DatabaseMetaData.columnNullable, columns.getInt("NULLABLE"));
// REMARKS
Assert.assertEquals("REMARKS", meta.getColumnLabel(12));
Assert.assertEquals(null, columns.getString(12));
}
}
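Worth noting: the DATA_TYPE values asserted above are the generic java.sql.Types codes, so they can be decoded with the stock JDK enum rather than driver-specific constants. A tiny illustrative sketch, not part of the patch:

```java
import java.sql.JDBCType;
import java.sql.Types;

// Maps the integer DATA_TYPE codes returned by getColumns() back to names.
public class TypeNames {
    public static void main(String[] args) {
        System.out.println(JDBCType.valueOf(Types.TIMESTAMP)); // TIMESTAMP
        System.out.println(JDBCType.valueOf(Types.FLOAT));     // FLOAT
    }
}
```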

@ -717,17 +817,35 @@ public class RestfulDatabaseMetaDataTest {

@Test
public void getPrimaryKeys() throws SQLException {
System.out.println("****************************************************");

ResultSet rs = metaData.getPrimaryKeys("log", "", "dn1");
while (rs.next()) {
System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
System.out.println("COLUMN_NAME: " + rs.getString("COLUMN_NAME"));
System.out.println("KEY_SEQ: " + rs.getString("KEY_SEQ"));
System.out.println("PK_NAME: " + rs.getString("PK_NAME"));
ResultSetMetaData meta = rs.getMetaData();
rs.next();
{
// TABLE_CAT
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
Assert.assertEquals("log", rs.getString(1));
Assert.assertEquals("log", rs.getString("TABLE_CAT"));
// TABLE_SCHEM
Assert.assertEquals("TABLE_SCHEM", meta.getColumnLabel(2));
Assert.assertEquals(null, rs.getString(2));
Assert.assertEquals(null, rs.getString("TABLE_SCHEM"));
// TABLE_NAME
Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
Assert.assertEquals("dn1", rs.getString(3));
Assert.assertEquals("dn1", rs.getString("TABLE_NAME"));
// COLUMN_NAME
Assert.assertEquals("COLUMN_NAME", meta.getColumnLabel(4));
Assert.assertEquals("ts", rs.getString(4));
Assert.assertEquals("ts", rs.getString("COLUMN_NAME"));
// KEY_SEQ
Assert.assertEquals("KEY_SEQ", meta.getColumnLabel(5));
Assert.assertEquals(1, rs.getShort(5));
Assert.assertEquals(1, rs.getShort("KEY_SEQ"));
// PK_NAME
Assert.assertEquals("PK_NAME", meta.getColumnLabel(6));
Assert.assertEquals("ts", rs.getString(6));
Assert.assertEquals("ts", rs.getString("PK_NAME"));
}

Assert.assertNotNull(rs);
}

@Test

@ -852,14 +970,27 @@ public class RestfulDatabaseMetaDataTest {

@Test
public void getSuperTables() throws SQLException {
System.out.println("****************************************************");

ResultSet rs = metaData.getSuperTables("log", "", "dn1");
while (rs.next()) {
System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
System.out.println("SUPERTABLE_NAME: " + rs.getString("SUPERTABLE_NAME"));
ResultSetMetaData meta = rs.getMetaData();
rs.next();
{
// TABLE_CAT
Assert.assertEquals("TABLE_CAT", meta.getColumnLabel(1));
Assert.assertEquals("log", rs.getString(1));
Assert.assertEquals("log", rs.getString("TABLE_CAT"));
// TABLE_SCHEM
Assert.assertEquals("TABLE_SCHEM", meta.getColumnLabel(2));
Assert.assertEquals(null, rs.getString(2));
Assert.assertEquals(null, rs.getString("TABLE_SCHEM"));
// TABLE_NAME
Assert.assertEquals("TABLE_NAME", meta.getColumnLabel(3));
Assert.assertEquals("dn1", rs.getString(3));
Assert.assertEquals("dn1", rs.getString("TABLE_NAME"));
// SUPERTABLE_NAME
Assert.assertEquals("SUPERTABLE_NAME", meta.getColumnLabel(4));
Assert.assertEquals("dn", rs.getString(4));
Assert.assertEquals("dn", rs.getString("SUPERTABLE_NAME"));
}
Assert.assertNotNull(rs);
}

@Test

@ -54,16 +54,17 @@ public class RestfulParameterMetaDataTest {

@Test
public void getPrecision() throws SQLException {
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(1));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(2));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(3));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(4));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(5));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(6));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(7));
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(8));
Assert.assertEquals(5, parameterMetaData_insert.getPrecision(9));
Assert.assertEquals(5, parameterMetaData_insert.getPrecision(10));
//create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))
Assert.assertEquals(TSDBConstants.TIMESTAMP_MS_PRECISION, parameterMetaData_insert.getPrecision(1));
Assert.assertEquals(TSDBConstants.INT_PRECISION, parameterMetaData_insert.getPrecision(2));
Assert.assertEquals(TSDBConstants.BIGINT_PRECISION, parameterMetaData_insert.getPrecision(3));
Assert.assertEquals(TSDBConstants.FLOAT_PRECISION, parameterMetaData_insert.getPrecision(4));
Assert.assertEquals(TSDBConstants.DOUBLE_PRECISION, parameterMetaData_insert.getPrecision(5));
Assert.assertEquals(TSDBConstants.SMALLINT_PRECISION, parameterMetaData_insert.getPrecision(6));
Assert.assertEquals(TSDBConstants.TINYINT_PRECISION, parameterMetaData_insert.getPrecision(7));
Assert.assertEquals(TSDBConstants.BOOLEAN_PRECISION, parameterMetaData_insert.getPrecision(8));
Assert.assertEquals("hello".getBytes().length, parameterMetaData_insert.getPrecision(9));
Assert.assertEquals("涛思数据".length(), parameterMetaData_insert.getPrecision(10));
}
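The new expectations make getPrecision() report a type-derived width for fixed-size columns and the bound value's length for binary/nchar parameters. A hedged sketch of how that surfaces through plain JDBC, assuming the weather table and the full set of bindings from this test's setup:

```java
// Sketch only: assumes parameters 1-8 are already bound as in the test's setup.
PreparedStatement ps = conn.prepareStatement(
        "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
ps.setBytes(9, "hello".getBytes());   // binary(64) parameter
ps.setString(10, "涛思数据");          // nchar(64) parameter
ParameterMetaData pmd = ps.getParameterMetaData();
System.out.println(pmd.getPrecision(9));  // 5, the byte length of "hello"
System.out.println(pmd.getPrecision(10)); // 4, the character length of "涛思数据"
```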

@Test

@ -71,8 +72,8 @@ public class RestfulParameterMetaDataTest {
Assert.assertEquals(0, parameterMetaData_insert.getScale(1));
Assert.assertEquals(0, parameterMetaData_insert.getScale(2));
Assert.assertEquals(0, parameterMetaData_insert.getScale(3));
Assert.assertEquals(0, parameterMetaData_insert.getScale(4));
Assert.assertEquals(0, parameterMetaData_insert.getScale(5));
Assert.assertEquals(31, parameterMetaData_insert.getScale(4));
Assert.assertEquals(31, parameterMetaData_insert.getScale(5));
Assert.assertEquals(0, parameterMetaData_insert.getScale(6));
Assert.assertEquals(0, parameterMetaData_insert.getScale(7));
Assert.assertEquals(0, parameterMetaData_insert.getScale(8));

@ -164,7 +165,7 @@ public class RestfulParameterMetaDataTest {
pstmt_insert.setObject(7, Byte.MAX_VALUE);
pstmt_insert.setObject(8, true);
pstmt_insert.setObject(9, "hello".getBytes());
pstmt_insert.setObject(10, "Hello");
pstmt_insert.setObject(10, "涛思数据");
parameterMetaData_insert = pstmt_insert.getParameterMetaData();

pstmt_select = conn.prepareStatement(sql_select);

@ -242,7 +242,7 @@ def _load_taos_linux():

def _load_taos_darwin():
    return ctypes.cDLL('libtaos.dylib')
    return ctypes.CDLL('libtaos.dylib')


def _load_taos_windows():

@ -38,21 +38,6 @@
#define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}

typedef struct {
int32_t vgId;
int32_t master;
int32_t num; // number of continuous streams
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char db[TSDB_DB_NAME_LEN];
FCqWrite cqWrite;
struct SCqObj *pHead;
void *dbConn;
void *tmrCtrl;
pthread_mutex_t mutex;
int32_t delete;
int32_t cqObjNum;
} SCqContext;

typedef struct SCqObj {
tmr_h tmrId;

@ -439,7 +424,7 @@ static void cqProcessCreateTimer(void *param, void *tmrId) {

// inner implement in tscStream.c
TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* desName, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *));
int64_t stime, void *param, void (*callback)(void *), void* cqhandle);

static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pContext = pContext;

@ -453,7 +438,8 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->tmrId = 0;

if (pObj->pStream == NULL) {
pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);
pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, \
INT64_MIN, (void *)pObj->rid, NULL, pContext);

// TODO the pObj->pStream may be released if error happens
if (pObj->pStream) {

@ -156,7 +156,6 @@ static SCreateVnodeMsg* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) {

static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
SCreateVnodeMsg *pCreate = dnodeParseVnodeMsg(rpcMsg);

void *pVnode = vnodeAcquire(pCreate->cfg.vgId);
if (pVnode != NULL) {
dDebug("vgId:%d, already exist, return success", pCreate->cfg.vgId);

@ -124,6 +124,7 @@ int taos_stmt_add_batch(TAOS_STMT *stmt);
int taos_stmt_execute(TAOS_STMT *stmt);
TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
int taos_stmt_close(TAOS_STMT *stmt);
char * taos_stmt_errstr(TAOS_STMT *stmt);

DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);

@ -244,6 +244,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk")
#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message")
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value")
#define TSDB_CODE_TDB_NO_CACHE_LAST_ROW TAOS_DEF_ERROR_CODE(0, 0x0616) //"TSDB no cache last row data")

// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle")

@ -551,7 +551,7 @@ typedef struct {
int32_t totalBlocks;
int32_t maxTables;
int32_t daysPerFile;
int32_t daysToKeep;
int32_t daysToKeep0;
int32_t daysToKeep1;
int32_t daysToKeep2;
int32_t minRowsPerFileBlock;

@ -31,6 +31,23 @@ typedef struct {
FCqWrite cqWrite;
} SCqCfg;

// SCqContext
typedef struct {
int32_t vgId;
int32_t master;
int32_t num; // number of continuous streams
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char db[TSDB_DB_NAME_LEN];
FCqWrite cqWrite;
struct SCqObj *pHead;
void *dbConn;
void *tmrCtrl;
pthread_mutex_t mutex;
int32_t delete;
int32_t cqObjNum;
} SCqContext;

// the following API shall be called by vnode
void *cqOpen(void *ahandle, const SCqCfg *pCfg);
void cqClose(void *handle);

@ -94,7 +94,7 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH);
int tsdbCloseRepo(STsdbRepo *repo, int toCommit);
int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg);
int tsdbGetState(STsdbRepo *repo);

bool tsdbInCompact(STsdbRepo *repo);
// --------- TSDB TABLE DEFINITION
typedef struct {
uint64_t uid; // the unique table ID

@ -106,26 +106,26 @@
#define TK_QTIME 88
#define TK_CONNS 89
#define TK_STATE 90
#define TK_KEEP 91
#define TK_CACHE 92
#define TK_REPLICA 93
#define TK_QUORUM 94
#define TK_DAYS 95
#define TK_MINROWS 96
#define TK_MAXROWS 97
#define TK_BLOCKS 98
#define TK_CTIME 99
#define TK_WAL 100
#define TK_FSYNC 101
#define TK_COMP 102
#define TK_PRECISION 103
#define TK_UPDATE 104
#define TK_CACHELAST 105
#define TK_PARTITIONS 106
#define TK_UNSIGNED 107
#define TK_TAGS 108
#define TK_USING 109
#define TK_COMMA 110
#define TK_COMMA 91
#define TK_KEEP 92
#define TK_CACHE 93
#define TK_REPLICA 94
#define TK_QUORUM 95
#define TK_DAYS 96
#define TK_MINROWS 97
#define TK_MAXROWS 98
#define TK_BLOCKS 99
#define TK_CTIME 100
#define TK_WAL 101
#define TK_FSYNC 102
#define TK_COMP 103
#define TK_PRECISION 104
#define TK_UPDATE 105
#define TK_CACHELAST 106
#define TK_PARTITIONS 107
#define TK_UNSIGNED 108
#define TK_TAGS 109
#define TK_USING 110
#define TK_AS 111
#define TK_NULL 112
#define TK_SELECT 113

@ -207,7 +207,6 @@
#define TK_VALUES 189


#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302

@ -398,7 +398,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {

time_t tt;
int32_t ms = 0;
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
ms = val % 1000000000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
ms = val % 1000000;
} else {

@ -419,7 +422,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
#endif
if (tt <= 0 && ms < 0) {
tt--;
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
ms += 1000000000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
ms += 1000000;
} else {
ms += 1000;

@ -427,9 +432,11 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}

struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm);

if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", ms);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", ms);
} else {
sprintf(buf + pos, ".%03d", ms);
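The same three-way split (epoch seconds plus a ms/us/ns remainder, padded to 3, 6, or 9 fractional digits) recurs in the taosdemo hunks below. For reference, a compact Java restatement of the logic; the digit widths 3/6/9 stand in for the TSDB_TIME_PRECISION_* constants, and this is not part of the patch:

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

// Splits a raw timestamp into epoch seconds plus a fractional remainder whose
// width depends on the declared precision (3 digits for ms, 6 for us, 9 for ns).
public class FormatTs {
    static String format(long val, int digits) {
        long div = digits == 9 ? 1_000_000_000L : digits == 6 ? 1_000_000L : 1_000L;
        long secs = Math.floorDiv(val, div);  // floor keeps pre-1970 values correct
        long frac = Math.floorMod(val, div);
        String base = LocalDateTime
                .ofInstant(Instant.ofEpochSecond(secs), ZoneId.systemDefault())
                .format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
        return String.format("%s.%0" + digits + "d", base, frac);
    }

    public static void main(String[] args) {
        System.out.println(format(1609430400123456789L, 9)); // nanosecond input
        System.out.println(format(1609430400123456L, 6));    // microsecond input
        System.out.println(format(1609430400123L, 3));       // millisecond input
    }
}
```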

@ -778,6 +785,8 @@ static int calcColWidth(TAOS_FIELD* field, int precision) {
case TSDB_DATA_TYPE_TIMESTAMP:
if (args.is_raw_time) {
return MAX(14, width);
} else if (precision == TSDB_TIME_PRECISION_NANO) {
return MAX(29, width);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
return MAX(26, width); // '2020-01-01 00:00:00.000000'
} else {

@ -625,6 +625,10 @@ static int64_t g_totalChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;

#if _MSC_VER <= 1900
#define __func__ __FUNCTION__
#endif

#define debugPrint(fmt, ...) \
do { if (g_args.debug_print || g_args.verbose_print) \
fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0)

@ -1209,7 +1213,6 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
}

int totalLen = 0;
char temp[16000];

// fetch the records row by row
while((row = taos_fetch_row(res))) {

@ -1220,6 +1223,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
char temp[16000] = {0};
int len = taos_print_row(temp, row, fields, num_fields);
len += sprintf(temp + len, "\n");
//printf("query result:%s\n", temp);

@ -1848,7 +1852,9 @@ static void printfQueryMeta() {

static char* formatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
} else {
tt = (time_t)(val / 1000);

@ -1869,7 +1875,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);

if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", (int)(val % 1000000));
} else {
sprintf(buf + pos, ".%03d", (int)(val % 1000));

@ -6249,9 +6257,11 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if (0 != precision[0]) {
if (0 == strncasecmp(precision, "ms", 2)) {
timePrec = TSDB_TIME_PRECISION_MILLI;
} else if (0 == strncasecmp(precision, "us", 2)) {
} else if (0 == strncasecmp(precision, "us", 2)) {
timePrec = TSDB_TIME_PRECISION_MICRO;
} else {
} else if (0 == strncasecmp(precision, "ns", 2)) {
timePrec = TSDB_TIME_PRECISION_NANO;
} else {
errorPrint("Not support precision: %s\n", precision);
exit(-1);
}

@ -40,6 +40,7 @@ void mnodeIncDbRef(SDbObj *pDb);
void mnodeDecDbRef(SDbObj *pDb);
bool mnodeCheckIsMonitorDB(char *db, char *monitordb);
void mnodeDropAllDbs(SAcctObj *pAcct);
int mnodeInsertAlterDbRow(SDbObj *pDb, void *pMsg);

int32_t mnodeCompactDbs();