Merge branch 'develop' into feature/TD-2577

dapan1121 2021-03-08 17:17:03 +08:00
commit 359ec2776a
56 changed files with 2085 additions and 1200 deletions

.gitignore (vendored, 1 changed line)

@ -27,7 +27,6 @@ tests/hdfs/
nmake/
sln/
hdfs/
c/
taoshebei/
taosdalipu/
Target/

Jenkinsfile (vendored, 2 changed lines)

@ -227,6 +227,8 @@ pipeline {
./test-all.sh p4
cd ${WKC}/tests
./test-all.sh full jdbc
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}


@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.20-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.21-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")


@ -31,6 +31,20 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
* [Create a Super Table](/model#create-stable): create one super table for all data collection points of the same type
* [Create a Table](/model#create-table): use the super table as a template to create a separate table for each data collection point
## [TAOS SQL](/taos-sql)
* [Supported Data Types](/taos-sql#data-type): timestamp, integer, floating-point, boolean, string, and other data types
* [Database Management](/taos-sql#management): create, drop, and show databases
* [Table Management](/taos-sql#table): create, drop, show, and alter tables
* [Super Table Management](/taos-sql#super-table): create, drop, show, and alter super tables
* [Tag Management](/taos-sql#tags): add, drop, and modify tags
* [Data Ingestion](/taos-sql#insert): write single or multiple records into one table, multiple records into multiple tables, and historical data
* [Data Query](/taos-sql#select): filter by time range or value, sort, page results manually, and more
* [SQL Functions](/taos-sql#functions): aggregation, selection, and computation functions such as avg, min, diff
* [Time-Dimension Aggregation](/taos-sql#aggregation): split table data into time windows and aggregate, reducing dimensionality
* [Boundary Limits](/taos-sql#limitation): boundary limits on databases, tables, SQL, and more
* [Error Codes](/taos-sql/error-code): TDengine 2.0 error codes and their decimal equivalents
## [Efficient Data Ingestion](/insert)
* [Ingestion via SQL](/insert#sql): use the SQL INSERT command to write one or more records into one or more tables
@ -94,20 +108,6 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
* [Directory Structure](/administrator#directories): where TDengine data files, configuration files, and others are located
* [Parameter Limits and Reserved Keywords](/administrator#keywords): TDengine's parameter limits and the list of reserved keywords
## [TAOS SQL](/taos-sql)
* [Supported Data Types](/taos-sql#data-type): timestamp, integer, floating-point, boolean, string, and other data types
* [Database Management](/taos-sql#management): create, drop, and show databases
* [Table Management](/taos-sql#table): create, drop, show, and alter tables
* [Super Table Management](/taos-sql#super-table): create, drop, show, and alter super tables
* [Tag Management](/taos-sql#tags): add, drop, and modify tags
* [Data Ingestion](/taos-sql#insert): write single or multiple records into one table, multiple records into multiple tables, and historical data
* [Data Query](/taos-sql#select): filter by time range or value, sort, page results manually, and more
* [SQL Functions](/taos-sql#functions): aggregation, selection, and computation functions such as avg, min, diff
* [Time-Dimension Aggregation](/taos-sql#aggregation): split table data into time windows and aggregate, reducing dimensionality
* [Boundary Limits](/taos-sql#limitation): boundary limits on databases, tables, SQL, and more
* [Error Codes](/taos-sql/error-code): TDengine 2.0 error codes and their decimal equivalents
## TDengine's Technical Design
* [System Modules](/architecture/taosd): the functions and module layout of taosd
@ -120,6 +120,7 @@ TDengine is an efficient platform for storing, querying, and analyzing time-series big data
* [TDengine Performance Benchmark Tools](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [Using TDengine visually from the IDEA database management tool](https://www.taosdata.com/blog/2020/08/27/1767.html)
* [A cross-platform TDengine GUI management tool built with Electron](https://github.com/skye0207/TDengineGUI)
* [DataX, an offline data collection/synchronization tool with TDengine support](https://github.com/alibaba/DataX)
## Benchmarks Comparing TDengine with Other Databases


@ -178,11 +178,11 @@ The logical structure of TDengine's distributed architecture is shown below:
**FQDN configuration:** A data node has one or more FQDNs, which can be specified in the system configuration file taos.cfg via the parameter "fqdn". If it is not specified, the system automatically uses the computer's hostname as the FQDN. If a node has no configured FQDN, its fqdn parameter can simply be set to its IP address. Using an IP is not recommended, however, because IP addresses can change, and once one changes the cluster can no longer work properly. A data node's EP (End Point) consists of FQDN + Port. Using FQDNs requires a working DNS service, or properly configured hosts files on the nodes and on the machines where applications run.
**Port configuration:** A data node's external port is determined by the TDengine system configuration parameter serverPort; the port for internal cluster communication is serverPort+5. Data replication between the cluster's data nodes occupies one more TCP port, serverPort+10. To process UDP data efficiently with multiple threads, each internal and external UDP connection occupies 5 consecutive ports. A data node's total port range is therefore serverPort through serverPort+10, 11 TCP/UDP ports in all. Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.
**Port configuration:** A data node's external port is determined by the TDengine system configuration parameter serverPort; the port for internal cluster communication is serverPort+5. Data replication between the cluster's data nodes occupies one more TCP port, serverPort+10. To process UDP data efficiently with multiple threads, each internal and external UDP connection occupies 5 consecutive ports. A data node's total port range is therefore serverPort through serverPort+10, 11 TCP/UDP ports in all. (There may also be ports used by the RESTful service and the Arbitrator, in which case the total is 13.) Make sure the firewall keeps these ports open for use. Each data node can be configured with a different serverPort.
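To make the port arithmetic concrete, here is a minimal sketch (assuming the default serverPort of 6030; the class itself is illustrative, not part of TDengine):

```java
public class PortLayout {
    public static void main(String[] args) {
        int serverPort = 6030;               // external port, configurable per data node
        int internalPort = serverPort + 5;   // intra-cluster communication
        int syncPort = serverPort + 10;      // data replication between data nodes
        // The node occupies the whole range [serverPort, serverPort + 10]: 11 TCP/UDP ports.
        for (int p = serverPort; p <= serverPort + 10; p++) {
            System.out.println("keep TCP/UDP port open: " + p);
        }
        System.out.println("internal: " + internalPort + ", sync: " + syncPort);
    }
}
```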
**External cluster connection:** A TDengine cluster can hold one, several, or even thousands of data nodes. An application only needs to initiate a connection to any one data node in the cluster. The network parameter the connection requires is that data node's End Point (FQDN plus the configured port number). When starting the CLI application taos from the command line, the data node's FQDN can be specified with the -h option and its configured port with -P; if the port is not configured, the TDengine system configuration parameter serverPort is used.
**Internal cluster communication:** Data nodes connect to one another over TCP/UDP. When a data node starts, it obtains the EP information of the dnode hosting the mnode, then establishes a connection with the mnode in the system and exchanges information. Obtaining the mnode's EP information takes three steps: 1. check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to step 2; 2. check the system configuration file taos.cfg for the parameters firstEp and secondEp (the nodes these specify may be ordinary nodes without an mnode, in which case the node, when connected, tries to redirect to the mnode node); if these parameters do not exist, are missing from taos.cfg, or are invalid, go to step 3; 3. set the node's own EP as the mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection; if the connection succeeds, it joins the working cluster; if not, it tries the next entry in the mnode EP list. If every entry has been tried and all connections still fail, it sleeps for a few seconds and tries again.
**Internal cluster communication:** Data nodes connect to one another over TCP/UDP. When a data node starts, it obtains the EP information of the dnode hosting the mnode, then establishes a connection with the mnode in the system and exchanges information. Obtaining the mnode's EP information takes three steps: 1. check whether the mnodeEpSet file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to step 2; 2. check the system configuration file taos.cfg for the parameters firstEp and secondEp (the nodes these specify may be ordinary nodes without an mnode, in which case the node, when connected, tries to redirect to the mnode node); if these parameters do not exist, are missing from taos.cfg, or are invalid, go to step 3; 3. set the node's own EP as the mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection; if the connection succeeds, it joins the working cluster; if not, it tries the next entry in the mnode EP list. If every entry has been tried and all connections still fail, it sleeps for a few seconds and tries again.
**Choosing the MNODE:** TDengine logically has a management node, but there is no separate executable for it; on the server side there is only one executable, taosd. So which data node becomes the management node? The system decides this automatically, with no manual intervention. The rule is: when a data node starts, it checks its own End Point against the obtained mnode EP List; if its EP is in the list, the data node decides it should start the mnode module and become an mnode; if not, it does not start the mnode module. During operation, because of load balancing, outages, and so on, the mnode may migrate to a new dnode, but all of this is transparent and requires no manual intervention; changes to configuration parameters are decisions the mnode itself makes based on resources. A rough sketch of this discovery-and-selection flow follows below.
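All names in the sketch are hypothetical stand-ins; the real logic lives in taosd's C code:

```java
import java.util.List;

public class MnodeBootstrapSketch {
    // Hypothetical stand-ins for reading the mnodeEpSet file and taos.cfg.
    static List<String> readMnodeEpFile() { return List.of(); }
    static List<String> readFirstAndSecondEp() { return List.of(); }

    // Steps 1-3: file, then firstEp/secondEp from taos.cfg, then run standalone.
    static List<String> resolveMnodeEps(String ownEp) {
        List<String> eps = readMnodeEpFile();
        if (eps.isEmpty()) eps = readFirstAndSecondEp();
        if (eps.isEmpty()) eps = List.of(ownEp);
        return eps;
    }

    // A node starts its mnode module only if its own EP appears in the list.
    static boolean shouldStartMnodeModule(String ownEp, List<String> mnodeEps) {
        return mnodeEps.contains(ownEp);
    }

    public static void main(String[] args) {
        String ownEp = "h1.taosdata.com:6030";
        List<String> eps = resolveMnodeEps(ownEp);
        System.out.println("mnode EPs: " + eps + ", start mnode: " + shouldStartMnodeModule(ownEp, eps));
    }
}
```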


@ -209,7 +209,7 @@ The C/C++ API is similar to MySQL's C API. To use it, an application needs to include the TDengine
- `TAOS_RES* taos_query(TAOS *taos, const char *sql)`
This API executes an SQL statement, which can be a DQL, DML, or DDL statement. The `taos` parameter is the pointer obtained via `taos_connect`. A NULL return value indicates failure.
This API executes an SQL statement, which can be a DQL, DML, or DDL statement. The `taos` parameter is the pointer obtained via `taos_connect`. Whether execution failed cannot be determined by checking the return value for NULL; instead, parse the error code out of the result set with the `taos_errno` function.
- `int taos_result_precision(TAOS_RES *res)`
@ -591,7 +591,8 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
```json
{
"status": "succ",
"head": ["Time Stamp","current", …],
"head": ["ts","current", …],
"column_meta": [["ts",9,8],["current",6,4], …],
"data": [
["2018-10-03 14:38:05.000", 10.3, …],
["2018-10-03 14:38:15.000", 12.6, …]
@ -602,10 +603,23 @@ curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql
Notes:
- status: tells whether the operation succeeded or failed
- head: the table's definition; if no result set is returned, there is only a single column "affected_rows"
- data: the returned data, presented row by row; if no result set is returned, it is just [[affected_rows]]
- rows: how many rows of data in total
- status: tells whether the operation succeeded or failed.
- head: the table's definition; if no result set is returned, there is only a single column "affected_rows". (Starting with version 2.0.17, we recommend not relying on the head value to determine column data types; use column_meta instead. The head field may be removed from the response in a future version.)
- column_meta: added to the response in version 2.0.17 to describe the data type of each column in data. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means the column named "current" has type 6, i.e. float, and type length 4, i.e. a float represented in 4 bytes. For columns of type binary or nchar, the type length is the maximum content length the column can store, not the length of the data in this particular response. For nchar columns, the type length is the number of unicode characters that can be stored, not bytes.
- data: the returned data, presented row by row; if no result set is returned, it is just [[affected_rows]]. The column order of each row in data matches exactly the order in which column_meta describes the columns.
- rows: how many rows of data in total.
Column types in column_meta (see the sketch after this list):
* 1: BOOL
* 2: TINYINT
* 3: SMALLINT
* 4: INT
* 5: BIGINT
* 6: FLOAT
* 7: DOUBLE
* 8: BINARY
* 9: TIMESTAMP
* 10: NCHAR
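The JDBC changes in this same commit add exactly this mapping (see taosType2JdbcType further down in the diff). As a self-contained sketch, a REST client could translate a column_meta type code to java.sql.Types like this (the class name is illustrative):

```java
import java.sql.Types;

public class ColumnMetaTypes {
    // Map a TDengine column_meta type code (the list above) to java.sql.Types.
    public static int toJdbcType(int taosType) {
        switch (taosType) {
            case 1:  return Types.BOOLEAN;    // BOOL
            case 2:  return Types.TINYINT;    // TINYINT
            case 3:  return Types.SMALLINT;   // SMALLINT
            case 4:  return Types.INTEGER;    // INT
            case 5:  return Types.BIGINT;     // BIGINT
            case 6:  return Types.FLOAT;      // FLOAT
            case 7:  return Types.DOUBLE;     // DOUBLE
            case 8:  return Types.BINARY;     // BINARY
            case 9:  return Types.TIMESTAMP;  // TIMESTAMP
            case 10: return Types.NCHAR;      // NCHAR
            default: throw new IllegalArgumentException("unknown column_meta type: " + taosType);
        }
    }

    public static void main(String[] args) {
        // ["current",6,4] from column_meta: name "current", type 6 (FLOAT), length 4 bytes
        System.out.println(toJdbcType(6) == Types.FLOAT); // prints true
    }
}
```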
### Custom Authorization Token
@ -651,7 +665,8 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
```json
{
"status": "succ",
"head": ["Time Stamp","current","voltage","phase"],
"head": ["ts","current","voltage","phase"],
"column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
"data": [
["2018-10-03 14:38:05.000",10.3,219,0.31],
["2018-10-03 14:38:15.000",12.6,218,0.33]
@ -671,8 +686,9 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 19
{
"status": "succ",
"head": ["affected_rows"],
"column_meta": [["affected_rows",4,4]],
"data": [[1]],
"rows": 1,
"rows": 1
}
```
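As a reference for client authors, a minimal Java call against this endpoint might look like the sketch below. The host and RESTful port 6041 are assumptions about your deployment, and the Basic token is the root:taosdata example from the curl commands above:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RestSqlDemo {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://127.0.0.1:6041/rest/sql");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Authorization", "Basic cm9vdDp0YW9zZGF0YQ=="); // root:taosdata
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            out.write("select * from demo.d1001".getBytes(StandardCharsets.UTF_8)); // SQL goes in the body
        }
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // JSON with status, head, column_meta, data, rows
            }
        }
    }
}
```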
@ -691,7 +707,8 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
```json
{
"status": "succ",
"head": ["column1","column2","column3"],
"head": ["ts","current","voltage","phase"],
"column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
"data": [
[1538548685000,10.3,219,0.31],
[1538548695000,12.6,218,0.33]
@ -712,7 +729,8 @@ When the HTTP request URL uses `sqlutc`, timestamps in the returned result set use UTC time
```json
{
"status": "succ",
"head": ["column1","column2","column3"],
"head": ["ts","current","voltage","phase"],
"column_meta": [["ts",9,8],["current",6,4],["voltage",4,4],["phase",6,4]],
"data": [
["2018-10-03T14:38:05.000+0800",10.3,219,0.31],
["2018-10-03T14:38:15.000+0800",12.6,218,0.33]


@ -155,11 +155,3 @@ The TDengine client does not yet support the following functions
- dbExistsTable(conn, "test"): whether the table test exists
- dbListTables(conn): list all tables in the connection
## <a class="anchor" id="datax"></a>DataX
[DataX](https://github.com/alibaba/DataX) is a general-purpose offline data collection/synchronization tool open-sourced by Alibaba Group that can integrate with TDengine simply and efficiently for both writing and reading data.
* For integrating data reads, see the [TSDBReader plugin documentation](https://github.com/alibaba/DataX/blob/master/tsdbreader/doc/tsdbreader.md)
* For integrating data writes, see the [TSDBWriter plugin documentation](https://github.com/alibaba/DataX/blob/master/tsdbwriter/doc/tsdbhttpwriter.md)


@ -13,7 +13,7 @@ TDengine cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention
**Step 0:** Plan the FQDNs of all of the cluster's physical nodes; add the planned FQDN to each physical node's /etc/hostname; and in each physical node's /etc/hosts, add the IP-to-FQDN mappings for all cluster nodes. [If DNS is deployed, contact your network administrator to configure it on the DNS server.]
**Step 1:** If any physical node used to build the cluster holds old test data, or ever had a 1.x or any other version of TDengine installed, delete it first and wipe all data; for detailed steps see the blog post [Installing and Uninstalling TDengine's Various Packages](https://www.taosdata.com/blog/2019/08/09/566.html)
**Note 1:** Because FQDN information is written to files, if the FQDN was never configured or was changed after TDengine had been started, be sure the data is disposable or backed up, then clean up the old data: rm -rf /var/lib/taos/;
**Note 1:** Because FQDN information is written to files, if the FQDN was never configured or was changed after TDengine had been started, be sure the data is disposable or backed up, then clean up the old data: `rm -rf /var/lib/taos/*`;
**Note 2:** The client also needs configuration: make sure it can correctly resolve each node's FQDN, whether through a DNS service or a hosts file.
**Step 2:** It is recommended to close the firewall on all physical nodes, or at minimum ensure that TCP and UDP ports 6030-6042 are open. **We strongly recommend** turning the firewall off first and configuring the ports only after the cluster is built;


@ -627,19 +627,21 @@ Query OK, 1 row(s) in set (0.001091s)
### Supported Filtering Operations
| Operation | Note | Applicable Data Types |
| --------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
| Operation | Note | Applicable Data Types |
| ----------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. To apply range filters to several fields at once, use the keyword AND to join the conditions; filters on different columns joined by OR are not yet supported.
2. For a single field, only one time-based filter condition may be set per statement; for other (regular) columns or tag columns, however, the ``` OR``` keyword can combine filter conditions, e.g. ((value > 20 and value < 30) OR (value < 12))
2. For a single field, only one time-based filter condition may be set per statement; for other (regular) columns or tag columns, however, the `OR` keyword can combine filter conditions, e.g. ((value > 20 AND value < 30) OR (value < 12))
3. Starting with version 2.0.17, filtering supports the BETWEEN AND syntax; for example, `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the condition "1.5 ≤ col2 ≤ 3.25". A hedged JDBC sketch of such a filter follows below.
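The sketch assumes the JDBC-JNI connection URL form used elsewhere in these docs; the table and column names are illustrative only:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class BetweenFilterDemo {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/demo?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             // 1.5 <= col2 <= 3.25, supported since version 2.0.17
             ResultSet rs = stmt.executeQuery("SELECT * FROM t1 WHERE col2 BETWEEN 1.5 AND 3.25")) {
            while (rs.next()) {
                System.out.println(rs.getTimestamp(1) + "\t" + rs.getDouble(2));
            }
        }
    }
}
```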
### SQL Examples


@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.20-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.21-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})


@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.20</version>
<version>2.0.21</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>


@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.20</version>
<version>2.0.21</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>


@ -308,7 +308,7 @@ public class DatabaseMetaDataResultSet implements ResultSet {
return colMetaData.getColIndex() + 1;
}
}
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
@Override


@ -14,16 +14,13 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.SQLException;
import java.sql.Types;
import java.util.HashMap;
import java.util.Map;
public abstract class TSDBConstants {
public static final String STATEMENT_CLOSED = "statement is closed";
public static final String UNSUPPORTED_METHOD_EXCEPTION_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static final String RESULT_SET_IS_CLOSED = "resultSet is closed";
public static final String DEFAULT_PORT = "6200";
public static Map<Integer, String> DATATYPE_MAP = null;
@ -77,8 +74,65 @@ public abstract class TSDBConstants {
return WrapErrMsg("unknown error!");
}
public static int taosType2JdbcType(int taosType) throws SQLException {
switch (taosType) {
case TSDBConstants.TSDB_DATA_TYPE_NULL:
return Types.NULL;
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return Types.BOOLEAN;
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
return Types.TINYINT;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
return Types.SMALLINT;
case TSDBConstants.TSDB_DATA_TYPE_INT:
return Types.INTEGER;
case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
return Types.BIGINT;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
return Types.FLOAT;
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return Types.DOUBLE;
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return Types.BINARY;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return Types.TIMESTAMP;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return Types.NCHAR;
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
}
public static int jdbcType2TaosType(int jdbcType) throws SQLException {
switch (jdbcType){
case Types.NULL:
return TSDBConstants.TSDB_DATA_TYPE_NULL;
case Types.BOOLEAN:
return TSDBConstants.TSDB_DATA_TYPE_BOOL;
case Types.TINYINT:
return TSDBConstants.TSDB_DATA_TYPE_TINYINT;
case Types.SMALLINT:
return TSDBConstants.TSDB_DATA_TYPE_SMALLINT;
case Types.INTEGER:
return TSDBConstants.TSDB_DATA_TYPE_INT;
case Types.BIGINT:
return TSDBConstants.TSDB_DATA_TYPE_BIGINT;
case Types.FLOAT:
return TSDBConstants.TSDB_DATA_TYPE_FLOAT;
case Types.DOUBLE:
return TSDBConstants.TSDB_DATA_TYPE_DOUBLE;
case Types.BINARY:
return TSDBConstants.TSDB_DATA_TYPE_BINARY;
case Types.TIMESTAMP:
return TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP;
case Types.NCHAR:
return TSDBConstants.TSDB_DATA_TYPE_NCHAR;
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
}
static {
DATATYPE_MAP = new HashMap<>();
DATATYPE_MAP.put(0, "NULL");
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
@ -90,4 +144,8 @@ public abstract class TSDBConstants {
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
public static String jdbcType2TaosTypeName(int type) throws SQLException {
return DATATYPE_MAP.get(jdbcType2TaosType(type));
}
}
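A brief usage sketch for the two conversion helpers added above (assumes com.taosdata.jdbc.TSDBConstants from this connector is on the classpath):

```java
import com.taosdata.jdbc.TSDBConstants;

import java.sql.SQLException;
import java.sql.Types;

public class TypeMappingDemo {
    public static void main(String[] args) throws SQLException {
        // TDengine type code -> java.sql.Types
        int jdbc = TSDBConstants.taosType2JdbcType(TSDBConstants.TSDB_DATA_TYPE_FLOAT);
        System.out.println(jdbc == Types.FLOAT);                              // true
        // and back again
        int taos = TSDBConstants.jdbcType2TaosType(jdbc);
        System.out.println(taos == TSDBConstants.TSDB_DATA_TYPE_FLOAT);       // true
        // human-readable TDengine type name for a JDBC type
        System.out.println(TSDBConstants.jdbcType2TaosTypeName(Types.NCHAR)); // NCHAR
    }
}
```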


@ -18,6 +18,7 @@ public class TSDBErrorNumbers {
public static final int ERROR_INVALID_FOR_EXECUTE = 0x230c; //not a valid sql for execute: (SQL)
public static final int ERROR_PARAMETER_INDEX_OUT_RANGE = 0x230d; // parameter index out of range
public static final int ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED = 0x230e; // connection already closed
public static final int ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE = 0x230f; //unknown sql type in tdengine
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@ -49,6 +50,7 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_INVALID_FOR_EXECUTE);
errorNumbers.add(ERROR_PARAMETER_INDEX_OUT_RANGE);
errorNumbers.add(ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
errorNumbers.add(ERROR_UNKNOWN_SQL_TYPE_IN_TDENGINE);
/*****************************************************/
errorNumbers.add(ERROR_SUBSCRIBE_FAILED);


@ -20,7 +20,7 @@ import java.sql.Timestamp;
import java.sql.Types;
import java.util.List;
public class TSDBResultSetMetaData implements ResultSetMetaData {
public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaData {
List<ColumnMetaData> colMetaDataList = null;
@ -28,14 +28,6 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
this.colMetaDataList = metaDataList;
}
public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
}
public int getColumnCount() throws SQLException {
return colMetaDataList.size();
}
@ -94,7 +86,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
}
public String getSchemaName(int column) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public int getPrecision(int column) throws SQLException {
@ -125,18 +117,18 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
}
public String getTableName(int column) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public String getCatalogName(int column) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public int getColumnType(int column) throws SQLException {
ColumnMetaData meta = this.colMetaDataList.get(column - 1);
switch (meta.getColType()) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
return java.sql.Types.BIT;
return Types.BOOLEAN;
case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
return java.sql.Types.TINYINT;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
@ -150,13 +142,13 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
return java.sql.Types.DOUBLE;
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
return java.sql.Types.CHAR;
return Types.BINARY;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
return java.sql.Types.BIGINT;
return java.sql.Types.TIMESTAMP;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
return java.sql.Types.CHAR;
return Types.NCHAR;
}
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
public String getColumnTypeName(int column) throws SQLException {
@ -173,7 +165,7 @@ public class TSDBResultSetMetaData implements ResultSetMetaData {
}
public boolean isDefinitelyWritable(int column) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public String getColumnClassName(int column) throws SQLException {


@ -1153,11 +1153,11 @@ public class TSDBResultSetWrapper implements ResultSet {
}
public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORTED_METHOD_EXCEPTION_MSG);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@Override


@ -14,12 +14,11 @@
*****************************************************************************/
package com.taosdata.jdbc;
import javax.management.OperationsException;
import java.sql.SQLException;
public class TSDBSubscribe {
private TSDBJNIConnector connecter = null;
private long id = 0;
private final TSDBJNIConnector connecter;
private final long id;
TSDBSubscribe(TSDBJNIConnector connecter, long id) throws SQLException {
if (null != connecter) {


@ -18,10 +18,10 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
private final String database;
private final Statement statement;
// data
private ArrayList<ArrayList<Object>> resultSet = new ArrayList<>();
private ArrayList<ArrayList<Object>> resultSet;
// meta
private ArrayList<String> columnNames = new ArrayList<>();
private ArrayList<Field> columns = new ArrayList<>();
private ArrayList<String> columnNames;
private ArrayList<Field> columns;
private RestfulResultSetMetaData metaData;
/**
@ -29,11 +29,36 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
*
* @param resultJson: the result set JSON containing the data field, as returned by a SQL statement
***/
public RestfulResultSet(String database, Statement statement, JSONObject resultJson) {
public RestfulResultSet(String database, Statement statement, JSONObject resultJson) throws SQLException {
this.database = database;
this.statement = statement;
// column metadata
JSONArray columnMeta = resultJson.getJSONArray("column_meta");
columnNames = new ArrayList<>();
columns = new ArrayList<>();
for (int colIndex = 0; colIndex < columnMeta.size(); colIndex++) {
JSONArray col = columnMeta.getJSONArray(colIndex);
String col_name = col.getString(0);
int col_type = TSDBConstants.taosType2JdbcType(col.getInteger(1));
int col_length = col.getInteger(2);
columnNames.add(col_name);
columns.add(new Field(col_name, col_type, col_length, ""));
}
this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
// row data
JSONArray data = resultJson.getJSONArray("data");
resultSet = new ArrayList<>();
for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) {
ArrayList row = new ArrayList();
JSONArray jsonRow = data.getJSONArray(rowIndex);
for (int colIndex = 0; colIndex < jsonRow.size(); colIndex++) {
row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).type));
}
resultSet.add(row);
}
/*
int columnIndex = 0;
for (; columnIndex < data.size(); columnIndex++) {
ArrayList oneRow = new ArrayList<>();
@ -52,50 +77,77 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
columns.add(new Field(name, "", 0, ""));
}
this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
*/
}
/**
* Construct a result set from the JSON of several resultSets
*
* @param resultJson: the result set JSON containing the data field, as returned by a SQL statement
* @param fieldJson: result sets containing several (at most 2) pieces of meta information, as returned by describe xxx
**/
public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List<JSONObject> fieldJson) {
this(database, statement, resultJson);
ArrayList<Field> newColumns = new ArrayList<>();
for (Field column : columns) {
Field field = findField(column.name, fieldJson);
if (field != null) {
newColumns.add(field);
} else {
newColumns.add(column);
}
private Object parseColumnData(JSONArray row, int colIndex, int sqlType) {
switch (sqlType) {
case Types.NULL:
return null;
case Types.BOOLEAN:
return row.getBoolean(colIndex);
case Types.TINYINT:
case Types.SMALLINT:
return row.getShort(colIndex);
case Types.INTEGER:
return row.getInteger(colIndex);
case Types.BIGINT:
return row.getBigInteger(colIndex);
case Types.FLOAT:
return row.getFloat(colIndex);
case Types.DOUBLE:
return row.getDouble(colIndex);
case Types.TIMESTAMP:
return new Timestamp(row.getDate(colIndex).getTime());
case Types.BINARY:
case Types.NCHAR:
default:
return row.getString(colIndex);
}
this.columns = newColumns;
this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this);
}
public Field findField(String columnName, List<JSONObject> fieldJsonList) {
for (JSONObject fieldJSON : fieldJsonList) {
JSONArray fieldDataJson = fieldJSON.getJSONArray("data");
for (int i = 0; i < fieldDataJson.size(); i++) {
JSONArray field = fieldDataJson.getJSONArray(i);
if (columnName.equalsIgnoreCase(field.getString(0))) {
return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3));
}
}
}
return null;
}
// /**
// * Construct a result set from the JSON of several resultSets
// *
// * @param resultJson: the result set JSON containing the data field, as returned by a SQL statement
// * @param fieldJson: result sets containing several (at most 2) pieces of meta information, as returned by describe xxx
// **/
// public RestfulResultSet(String database, Statement statement, JSONObject resultJson, List<JSONObject> fieldJson) throws SQLException {
// this(database, statement, resultJson);
// ArrayList<Field> newColumns = new ArrayList<>();
//
// for (Field column : columns) {
// Field field = findField(column.name, fieldJson);
// if (field != null) {
// newColumns.add(field);
// } else {
// newColumns.add(column);
// }
// }
// this.columns = newColumns;
// this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this);
// }
// public Field findField(String columnName, List<JSONObject> fieldJsonList) {
// for (JSONObject fieldJSON : fieldJsonList) {
// JSONArray fieldDataJson = fieldJSON.getJSONArray("data");
// for (int i = 0; i < fieldDataJson.size(); i++) {
// JSONArray field = fieldDataJson.getJSONArray(i);
// if (columnName.equalsIgnoreCase(field.getString(0))) {
// return new Field(field.getString(0), field.getString(1), field.getInteger(2), field.getString(3));
// }
// }
// }
// return null;
// }
public class Field {
String name;
String type;
int type;
int length;
String note;
public Field(String name, String type, int length, String note) {
public Field(String name, int type, int length, String note) {
this.name = name;
this.type = type;
this.length = length;


@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBConstants;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
public class RestfulResultSetMetaData implements ResultSetMetaData {
@ -53,14 +54,14 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public boolean isSigned(int column) throws SQLException {
String type = this.fields.get(column - 1).type.toUpperCase();
int type = this.fields.get(column - 1).type;
switch (type) {
case "TINYINT":
case "SMALLINT":
case "INT":
case "BIGINT":
case "FLOAT":
case "DOUBLE":
case Types.TINYINT:
case Types.SMALLINT:
case Types.INTEGER:
case Types.BIGINT:
case Types.FLOAT:
case Types.DOUBLE:
return true;
default:
return false;
@ -89,14 +90,14 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int getPrecision(int column) throws SQLException {
String type = this.fields.get(column - 1).type.toUpperCase();
int type = this.fields.get(column - 1).type;
switch (type) {
case "FLOAT":
case Types.FLOAT:
return 5;
case "DOUBLE":
case Types.DOUBLE:
return 9;
case "BINARY":
case "NCHAR":
case Types.BINARY:
case Types.NCHAR:
return this.fields.get(column - 1).length;
default:
return 0;
@ -105,11 +106,11 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int getScale(int column) throws SQLException {
String type = this.fields.get(column - 1).type.toUpperCase();
int type = this.fields.get(column - 1).type;
switch (type) {
case "FLOAT":
case Types.FLOAT:
return 5;
case "DOUBLE":
case Types.DOUBLE:
return 9;
default:
return 0;
@ -128,36 +129,13 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int getColumnType(int column) throws SQLException {
String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "BOOL":
return java.sql.Types.BOOLEAN;
case "TINYINT":
return java.sql.Types.TINYINT;
case "SMALLINT":
return java.sql.Types.SMALLINT;
case "INT":
return java.sql.Types.INTEGER;
case "BIGINT":
return java.sql.Types.BIGINT;
case "FLOAT":
return java.sql.Types.FLOAT;
case "DOUBLE":
return java.sql.Types.DOUBLE;
case "BINARY":
return java.sql.Types.BINARY;
case "TIMESTAMP":
return java.sql.Types.TIMESTAMP;
case "NCHAR":
return java.sql.Types.NCHAR;
}
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
return this.fields.get(column - 1).type;
}
@Override
public String getColumnTypeName(int column) throws SQLException {
String type = fields.get(column - 1).type;
return type.toUpperCase();
int type = fields.get(column - 1).type;
return TSDBConstants.jdbcType2TaosTypeName(type);
}
@Override
@ -177,26 +155,26 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnClassName(int column) throws SQLException {
String type = this.fields.get(column - 1).type;
int type = this.fields.get(column - 1).type;
String columnClassName = "";
switch (type) {
case "BOOL":
case Types.BOOLEAN:
return Boolean.class.getName();
case "TINYINT":
case "SMALLINT":
case Types.TINYINT:
case Types.SMALLINT:
return Short.class.getName();
case "INT":
case Types.INTEGER:
return Integer.class.getName();
case "BIGINT":
case Types.BIGINT:
return Long.class.getName();
case "FLOAT":
case Types.FLOAT:
return Float.class.getName();
case "DOUBLE":
case Types.DOUBLE:
return Double.class.getName();
case "TIMESTAMP":
case Types.TIMESTAMP:
return Timestamp.class.getName();
case "BINARY":
case "NCHAR":
case Types.BINARY:
case Types.NCHAR:
return String.class.getName();
}
return columnClassName;


@ -151,22 +151,21 @@ public class RestfulStatement extends AbstractStatement {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
// parse table name from sql
String[] tableIdentifiers = parseTableIdentifier(sql);
if (tableIdentifiers != null) {
List<JSONObject> fieldJsonList = new ArrayList<>();
for (String tableIdentifier : tableIdentifiers) {
// field meta
String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
JSONObject fieldJson = JSON.parseObject(fields);
if (fieldJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
}
fieldJsonList.add(fieldJson);
}
this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
} else {
this.resultSet = new RestfulResultSet(database, this, resultJson);
}
// String[] tableIdentifiers = parseTableIdentifier(sql);
// if (tableIdentifiers != null) {
// List<JSONObject> fieldJsonList = new ArrayList<>();
// for (String tableIdentifier : tableIdentifiers) {
// String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
// JSONObject fieldJson = JSON.parseObject(fields);
// if (fieldJson.getString("status").equals("error")) {
// throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
// }
// fieldJsonList.add(fieldJson);
// }
// this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
// } else {
this.resultSet = new RestfulResultSet(database, this, resultJson);
// }
this.affectedRows = 0;
return resultSet;
}
@ -201,7 +200,7 @@ public class RestfulStatement extends AbstractStatement {
@Override
public ResultSet getResultSet() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
return resultSet;
}


@ -13,7 +13,6 @@ import java.util.HashMap;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class ResultSetTest {
static Connection connection;


@ -48,29 +48,28 @@ public class SubscribeTest {
@Test
public void subscribe() {
try {
String rawSql = "select * from " + dbName + "." + tName + ";";
System.out.println(rawSql);
TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
// TSDBSubscribe subscribe = ((TSDBConnection) connection).subscribe(topic, rawSql, false);
int a = 0;
while (true) {
TimeUnit.MILLISECONDS.sleep(1000);
TSDBResultSet resSet = subscribe.consume();
while (resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf(i + ": " + resSet.getString(i) + "\t");
}
System.out.println("\n======" + a + "==========");
}
a++;
if (a >= 2) {
break;
}
// int a = 0;
// while (true) {
// TimeUnit.MILLISECONDS.sleep(1000);
// TSDBResultSet resSet = subscribe.consume();
// while (resSet.next()) {
// for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
// System.out.printf(i + ": " + resSet.getString(i) + "\t");
// }
// System.out.println("\n======" + a + "==========");
// }
// a++;
// if (a >= 2) {
// break;
// }
// resSet.close();
}
subscribe.close(true);
// }
//
// subscribe.close(true);
} catch (Exception e) {
e.printStackTrace();
}


@ -10,7 +10,7 @@ import java.util.Random;
public class RestfulJDBCTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
// private static final String host = "master";
private static Connection connection;
private Random random = new Random(System.currentTimeMillis());


@ -12,7 +12,7 @@ import java.sql.*;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class SQLTest {
private static final String host = "127.0.0.1";
// private static final String host = "master";
// private static final String host = "master";
private static Connection connection;
@Test
@ -323,6 +323,18 @@ public class SQLTest {
SQLExecutor.executeQuery(connection, sql);
}
@Test
public void testCase052() {
String sql = "select server_status()";
SQLExecutor.executeQuery(connection, sql);
}
@Test
public void testCase053() {
String sql = "select avg(cpu_taosd), avg(cpu_system), max(cpu_cores), avg(mem_taosd), avg(mem_system), max(mem_total), avg(disk_used), max(disk_total), avg(band_speed), avg(io_read), avg(io_write), sum(req_http), sum(req_select), sum(req_insert) from log.dn1 where ts> now - 60m and ts<= now interval(1m) fill(value, 0)";
SQLExecutor.executeQuery(connection, sql);
}
@BeforeClass
public static void before() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");


@ -8,10 +8,12 @@
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 100,
"databases": [{
"dbinfo": {
"name": "dbx",
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
@ -30,14 +32,13 @@
},
"super_tables": [{
"name": "stb",
"child_table_exists":"no",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rate": 0,
"insert_rows": 1000,
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 100,

File diff suppressed because it is too large.


@ -769,6 +769,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
}
@ -1523,6 +1524,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
}
sprintf(tmpBuf, ".show-tables.tmp");
(void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
}


@ -832,12 +832,13 @@ static int32_t mnodeProcessBatchCreateTableMsg(SMnodeMsg *pMsg) {
return code;
} else if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
++pMsg->pBatchMasterMsg->received;
pMsg->pBatchMasterMsg->code = code;
mnodeDestroySubMsg(pMsg);
}
if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
>= pMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, TSDB_CODE_SUCCESS);
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, pMsg->pBatchMasterMsg->code);
}
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
@ -1908,7 +1909,8 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
sdbDeleteRow(&desc);
if (pMsg->pBatchMasterMsg) {
++pMsg->pBatchMasterMsg->successed;
++pMsg->pBatchMasterMsg->received;
pMsg->pBatchMasterMsg->code = code;
if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
>= pMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code);
@ -2690,6 +2692,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
if (pMsg->pBatchMasterMsg) {
++pMsg->pBatchMasterMsg->received;
pMsg->pBatchMasterMsg->code = code;
if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
>= pMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, code);
@ -2728,6 +2731,7 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
if (pMsg->pBatchMasterMsg) {
++pMsg->pBatchMasterMsg->received;
pMsg->pBatchMasterMsg->code = rpcMsg->code;
if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
>= pMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, rpcMsg->code);


@ -537,6 +537,7 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
if (pMsg->pBatchMasterMsg) {
++pMsg->pBatchMasterMsg->received;
pMsg->pBatchMasterMsg->code = pMsg->code;
if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
>= pMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, pMsg->code);
@ -1002,6 +1003,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
if (mnodeMsg->pBatchMasterMsg) {
++mnodeMsg->pBatchMasterMsg->received;
mnodeMsg->pBatchMasterMsg->code = code;
if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received
>= mnodeMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, code);
@ -1024,6 +1026,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
if (mnodeMsg->pBatchMasterMsg) {
++mnodeMsg->pBatchMasterMsg->received;
mnodeMsg->pBatchMasterMsg->code = mnodeMsg->code;
if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received
>= mnodeMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, mnodeMsg->code);


@ -83,6 +83,20 @@ extern "C" {
} \
} while (0)
#define DEFAULT_DOUBLE_COMP(x, y) \
do { \
if (isnan(x) && isnan(y)) { return 0; } \
if (isnan(x)) { return -1; } \
if (isnan(y)) { return 1; } \
if ((x) == (y)) { \
return 0; \
} else { \
return (x) < (y) ? -1 : 1; \
} \
} while (0)
#define DEFAULT_FLOAT_COMP(x, y) DEFAULT_DOUBLE_COMP(x, y)
#define ALIGN_NUM(n, align) (((n) + ((align)-1)) & (~((align)-1)))
// align to 8bytes


@ -362,20 +362,10 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
return (first < second) ? -1 : 1;
};
case TSDB_DATA_TYPE_DOUBLE: {
double first = GET_DOUBLE_VAL(f1);
double second = GET_DOUBLE_VAL(f2);
if (first == second) {
return 0;
}
return (first < second) ? -1 : 1;
DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
};
case TSDB_DATA_TYPE_FLOAT: {
float first = GET_FLOAT_VAL(f1);
float second = GET_FLOAT_VAL(f2);
if (first == second) {
return 0;
}
return (first < second) ? -1 : 1;
DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
};
case TSDB_DATA_TYPE_BIGINT: {
int64_t first = *(int64_t *)f1;


@ -58,6 +58,15 @@ SSqlInfo qSQLParse(const char *pStr) {
sqlInfo.valid = false;
goto abort_parse;
}
case TK_HEX:
case TK_OCT:
case TK_BIN:{
snprintf(sqlInfo.msg, tListLen(sqlInfo.msg), "unsupported token: \"%s\"", t0.z);
sqlInfo.valid = false;
goto abort_parse;
}
default:
Parse(pParser, t0.type, t0, &sqlInfo);
if (sqlInfo.valid == false) {


@ -1281,7 +1281,7 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) {
SRpcConn *pConn = rpcSetupConnToServer(pContext);
if (pConn == NULL) {
pContext->code = terrno;
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
taosTmrStart(rpcProcessConnError, 1, pContext, pRpc->tmrCtrl);
return;
}


@ -392,8 +392,8 @@ __compar_fn_t getKeyComparFunc(int32_t keyType) {
int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
switch (type) {
case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2));
case TSDB_DATA_TYPE_DOUBLE: DEFAULT_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
case TSDB_DATA_TYPE_FLOAT: DEFAULT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
case TSDB_DATA_TYPE_FLOAT: DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
case TSDB_DATA_TYPE_BIGINT: DEFAULT_COMP(GET_INT64_VAL(f1), GET_INT64_VAL(f2));
case TSDB_DATA_TYPE_SMALLINT: DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2));
case TSDB_DATA_TYPE_TINYINT:

tests/Jenkinsfile (vendored, 6 changed lines)

@ -55,9 +55,15 @@ pipeline {
sh '''
cd ${WKC}/tests
./test-all.sh b1
date'''
sh '''
cd ${WKC}/tests
./test-all.sh full jdbc
date'''
sh '''
cd ${WKC}/tests
./test-all.sh full unit
date'''
}
}


@ -63,7 +63,9 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.18</version>
<version>2.0.20</version>
<!-- <scope>system</scope>-->
<!-- <systemPath>${project.basedir}/src/main/resources/taos-jdbcdriver-2.0.20-dist.jar</systemPath>-->
</dependency>
<dependency>


@ -10,4 +10,4 @@ public class SpringbootdemoApplication {
public static void main(String[] args) {
SpringApplication.run(SpringbootdemoApplication.class, args);
}
}
}


@ -6,6 +6,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.List;
import java.util.Map;
@RequestMapping("/weather")
@RestController
@ -20,7 +21,7 @@ public class WeatherController {
* @return
*/
@GetMapping("/init")
public boolean init() {
public int init() {
return weatherService.init();
}
@ -44,19 +45,23 @@ public class WeatherController {
* @return
*/
@PostMapping("/{temperature}/{humidity}")
public int saveWeather(@PathVariable int temperature, @PathVariable float humidity) {
public int saveWeather(@PathVariable float temperature, @PathVariable int humidity) {
return weatherService.save(temperature, humidity);
}
/**
* upload multi weather info
*
* @param weatherList
* @return
*/
@PostMapping("/batch")
public int batchSaveWeather(@RequestBody List<Weather> weatherList) {
return weatherService.save(weatherList);
@GetMapping("/count")
public int count() {
return weatherService.count();
}
@GetMapping("/subTables")
public List<String> getSubTables() {
return weatherService.getSubTables();
}
@GetMapping("/avg")
public List<Weather> avg() {
return weatherService.avg();
}
}


@ -4,16 +4,26 @@ import com.taosdata.example.springbootdemo.domain.Weather;
import org.apache.ibatis.annotations.Param;
import java.util.List;
import java.util.Map;
public interface WeatherMapper {
int insert(Weather weather);
int batchInsert(List<Weather> weatherList);
List<Weather> select(@Param("limit") Long limit, @Param("offset")Long offset);
void dropDB();
void createDB();
void createTable();
void createSuperTable();
void createTable(Weather weather);
List<Weather> select(@Param("limit") Long limit, @Param("offset") Long offset);
int insert(Weather weather);
int count();
List<String> getSubTables();
List<Weather> avg();
}


@ -4,28 +4,29 @@
<mapper namespace="com.taosdata.example.springbootdemo.dao.WeatherMapper">
<resultMap id="BaseResultMap" type="com.taosdata.example.springbootdemo.domain.Weather">
<id column="ts" jdbcType="TIMESTAMP" property="ts" />
<result column="temperature" jdbcType="INTEGER" property="temperature" />
<result column="humidity" jdbcType="FLOAT" property="humidity" />
<id column="ts" jdbcType="TIMESTAMP" property="ts"/>
<result column="temperature" jdbcType="FLOAT" property="temperature"/>
<result column="humidity" jdbcType="FLOAT" property="humidity"/>
</resultMap>
<update id="createDB" >
create database if not exists test;
<update id="dropDB">
drop database if exists test
</update>
<update id="createTable" >
create table if not exists test.weather(ts timestamp, temperature int, humidity float);
<update id="createDB">
create database if not exists test
</update>
<sql id="Base_Column_List">
ts, temperature, humidity
</sql>
<update id="createSuperTable">
create table if not exists test.weather(ts timestamp, temperature float, humidity float) tags(location nchar(64), groupId int)
</update>
<update id="createTable" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
create table if not exists test.t#{groupId} using test.weather tags(#{location}, #{groupId})
</update>
<select id="select" resultMap="BaseResultMap">
select
<include refid="Base_Column_List" />
from test.weather
order by ts desc
select * from test.weather order by ts desc
<if test="limit != null">
limit #{limit,jdbcType=BIGINT}
</if>
@ -34,16 +35,26 @@
</if>
</select>
<insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather" >
insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT})
<insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
insert into test.t#{groupId} (ts, temperature, humidity) values (#{ts}, ${temperature}, ${humidity})
</insert>
<insert id="batchInsert" parameterType="java.util.List" >
insert into test.weather (ts, temperature, humidity) values
<foreach separator=" " collection="list" item="weather" index="index" >
(now + #{index}a, #{weather.temperature}, #{weather.humidity})
</foreach>
</insert>
<select id="getSubTables" resultType="String">
select tbname from test.weather
</select>
<select id="count" resultType="int">
select count(*) from test.weather
</select>
<resultMap id="avgResultSet" type="com.taosdata.example.springbootdemo.domain.Weather">
<id column="ts" jdbcType="TIMESTAMP" property="ts" />
<result column="avg(temperature)" jdbcType="FLOAT" property="temperature" />
<result column="avg(humidity)" jdbcType="FLOAT" property="humidity" />
</resultMap>
<select id="avg" resultMap="avgResultSet">
select avg(temperature), avg(humidity) from test.weather interval(1m)
</select>
</mapper>


@ -6,12 +6,21 @@ import java.sql.Timestamp;
public class Weather {
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS",timezone = "GMT+8")
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8")
private Timestamp ts;
private int temperature;
private float temperature;
private float humidity;
private String location;
private int groupId;
public Weather() {
}
public Weather(Timestamp ts, float temperature, float humidity) {
this.ts = ts;
this.temperature = temperature;
this.humidity = humidity;
}
public Timestamp getTs() {
return ts;
@ -21,11 +30,11 @@ public class Weather {
this.ts = ts;
}
public int getTemperature() {
public float getTemperature() {
return temperature;
}
public void setTemperature(int temperature) {
public void setTemperature(float temperature) {
this.temperature = temperature;
}
@ -36,4 +45,20 @@ public class Weather {
public void setHumidity(float humidity) {
this.humidity = humidity;
}
public String getLocation() {
return location;
}
public void setLocation(String location) {
this.location = location;
}
public int getGroupId() {
return groupId;
}
public void setGroupId(int groupId) {
this.groupId = groupId;
}
}


@ -5,25 +5,41 @@ import com.taosdata.example.springbootdemo.domain.Weather;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.sql.Timestamp;
import java.util.List;
import java.util.Map;
import java.util.Random;
@Service
public class WeatherService {
@Autowired
private WeatherMapper weatherMapper;
private Random random = new Random(System.currentTimeMillis());
private String[] locations = {"北京", "上海", "广州", "深圳", "天津"};
public boolean init() {
public int init() {
weatherMapper.dropDB();
weatherMapper.createDB();
weatherMapper.createTable();
return true;
weatherMapper.createSuperTable();
long ts = System.currentTimeMillis();
long thirtySec = 1000 * 30;
int count = 0;
for (int i = 0; i < 20; i++) {
Weather weather = new Weather(new Timestamp(ts + (thirtySec * i)), 30 * random.nextFloat(), random.nextInt(100));
weather.setLocation(locations[random.nextInt(locations.length)]);
weather.setGroupId(i % locations.length);
weatherMapper.createTable(weather);
count += weatherMapper.insert(weather);
}
return count;
}
public List<Weather> query(Long limit, Long offset) {
return weatherMapper.select(limit, offset);
}
public int save(int temperature, float humidity) {
public int save(float temperature, int humidity) {
Weather weather = new Weather();
weather.setTemperature(temperature);
weather.setHumidity(humidity);
@ -31,8 +47,15 @@ public class WeatherService {
return weatherMapper.insert(weather);
}
public int save(List<Weather> weatherList) {
return weatherMapper.batchInsert(weatherList);
public int count() {
return weatherMapper.count();
}
public List<String> getSubTables() {
return weatherMapper.getSubTables();
}
public List<Weather> avg() {
return weatherMapper.avg();
}
}


@ -1,12 +1,14 @@
# datasource config - JDBC-JNI
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
spring.datasource.username=root
spring.datasource.password=taosdata
#spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
#spring.datasource.url=jdbc:TAOS://127.0.0.1:6030/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
#spring.datasource.username=root
#spring.datasource.password=taosdata
# datasource config - JDBC-RESTful
#spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
#spring.datasource.url=jdbc:TAOS-RS://master:6041/test?user=root&password=taosdata
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
spring.datasource.url=jdbc:TAOS-RS://master:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
spring.datasource.username=root
spring.datasource.password=taosdata
spring.datasource.druid.initial-size=5
spring.datasource.druid.min-idle=5


@ -4,7 +4,7 @@ import com.taosdata.taosdemo.components.DataSourceFactory;
import com.taosdata.taosdemo.components.JdbcTaosdemoConfig;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.DatabaseService;
import com.taosdata.taosdemo.service.QueryService;
import com.taosdata.taosdemo.service.SqlExecuteTask;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
@ -32,6 +32,17 @@ public class TaosDemoApplication {
}
// initialization
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password);
if (config.executeSql != null && !config.executeSql.isEmpty() && !config.executeSql.replaceAll("\\s", "").isEmpty()) {
Thread task = new Thread(new SqlExecuteTask(dataSource, config.executeSql));
task.start();
try {
task.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
return;
}
final DatabaseService databaseService = new DatabaseService(dataSource);
final SuperTableService superTableService = new SuperTableService(dataSource);
final SubTableService subTableService = new SubTableService(dataSource);
@ -96,7 +107,6 @@ public class TaosDemoApplication {
// query
/**********************************************************************************/
// drop tables
if (config.dropTable) {


@ -42,7 +42,7 @@ public final class JdbcTaosdemoConfig {
public int rate = 10;
public long range = 1000l;
// select task
public String executeSql;
// drop task
public boolean dropTable = false;
@ -89,7 +89,7 @@ public final class JdbcTaosdemoConfig {
System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10");
System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms");
// query task
// System.out.println("-sqlFile The select sql file");
System.out.println("-executeSql execute a specific sql.");
// drop task
System.out.println("-dropTable Drop data before quit. Default is false");
System.out.println("--help Give this help list");
@ -207,6 +207,9 @@ public final class JdbcTaosdemoConfig {
range = Integer.parseInt(args[++i]);
}
// select task
if ("-executeSql".equals(args[i]) && i < args.length - 1) {
executeSql = args[++i];
}
// drop task
if ("-dropTable".equals(args[i]) && i < args.length - 1) {


@ -0,0 +1,36 @@
package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.utils.Printer;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
public class SqlExecuteTask implements Runnable {
private final DataSource dataSource;
private final String sql;
public SqlExecuteTask(DataSource dataSource, String sql) {
this.dataSource = dataSource;
this.sql = sql;
}
@Override
public void run() {
try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement()) {
long start = System.currentTimeMillis();
boolean execute = stmt.execute(sql);
long end = System.currentTimeMillis();
if (execute) {
ResultSet rs = stmt.getResultSet();
Printer.printResult(rs);
} else {
Printer.printSql(sql, true, (end - start));
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}


@ -0,0 +1,27 @@
package com.taosdata.taosdemo.utils;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
public class Printer {
public static void printResult(ResultSet resultSet) throws SQLException {
ResultSetMetaData metaData = resultSet.getMetaData();
while (resultSet.next()) {
for (int i = 1; i <= metaData.getColumnCount(); i++) {
String columnLabel = metaData.getColumnLabel(i);
String value = resultSet.getString(i);
System.out.printf("%s: %s\t", columnLabel, value);
}
System.out.println();
}
}
public static void printSql(String sql, boolean succeed, long cost) {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
private Printer() {
}
}


@ -92,15 +92,14 @@ void Test(TAOS *taos, char *qstr, int index) {
// printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
// }
TAOS_RES *result1 = taos_query(taos, qstr);
if (result1) {
printf("insert row: %i\n", i);
} else {
printf("failed to insert row: %i, reason:%s\n", i, "null result"/*taos_errstr(result)*/);
if (result1 == NULL || taos_errno(result1) != 0) {
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
taos_free_result(result1);
exit(1);
} else {
printf("insert row: %i\n", i);
}
taos_free_result(result1);
}
printf("success to insert rows, total %d rows\n", i);


@ -11,15 +11,9 @@
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from fabric import Connection
from util.sql import *
from util.log import *
import taos
import random
import threading
import time
import logging
class Node:
@ -76,6 +70,19 @@ class Node:
print("remove taosd error for node %d " % self.index)
logging.exception(e)
def forceStopOneTaosd(self):
try:
self.conn.run("kill -9 $(ps -ax|grep taosd|awk '{print $1}')")
except Exception as e:
print("kill taosd error on node%d " % self.index)
def startOneTaosd(self):
try:
self.conn.run("nohup taosd -c /etc/taos/ > /dev/null 2>&1 &")
except Exception as e:
print("start taosd error on node%d " % self.index)
logging.exception(e)
def installTaosd(self, packagePath):
self.conn.put(packagePath, self.homeDir)
self.conn.cd(self.homeDir)
@ -122,100 +129,51 @@ class Node:
class Nodes:
    def __init__(self):
        self.node1 = Node(1, 'root', '52.151.60.239', 'node1', 'r', '/root/')
        self.node2 = Node(2, 'root', '52.183.32.246', 'node1', 'r', '/root/')
        self.node3 = Node(3, 'root', '51.143.46.79', 'node1', 'r', '/root/')
        self.node4 = Node(4, 'root', '52.183.2.76', 'node1', 'r', '/root/')
        self.node5 = Node(5, 'root', '13.66.225.87', 'node1', 'r', '/root/')
        self.tdnodes = []
        self.tdnodes.append(Node(0, 'root', '52.143.103.7', 'node1', 'a', '/root/'))
        self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
        self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
        self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
        self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))

    def stopOneNode(self, index):
        self.tdnodes[index].forceStopOneTaosd()

    def startOneNode(self, index):
        self.tdnodes[index].startOneTaosd()

    def stopAllTaosd(self):
        self.node1.stopTaosd()
        self.node2.stopTaosd()
        self.node3.stopTaosd()
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].stopTaosd()

    def startAllTaosd(self):
        self.node1.startTaosd()
        self.node2.startTaosd()
        self.node3.startTaosd()
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].startTaosd()

    def restartAllTaosd(self):
        self.node1.restartTaosd()
        self.node2.restartTaosd()
        self.node3.restartTaosd()
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].restartTaosd()

    def addConfigs(self, configKey, configValue):
        self.node1.configTaosd(configKey, configValue)
        self.node2.configTaosd(configKey, configValue)
        self.node3.configTaosd(configKey, configValue)
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].configTaosd(configKey, configValue)

    def removeConfigs(self, configKey, configValue):
        self.node1.removeTaosConfig(configKey, configValue)
        self.node2.removeTaosConfig(configKey, configValue)
        self.node3.removeTaosConfig(configKey, configValue)

    def removeConfigs(self, configKey, configValue):
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].removeTaosConfig(configKey, configValue)

    def removeAllDataFiles(self):
        self.node1.removeData()
        self.node2.removeData()
        self.node3.removeData()
        for i in range(len(self.tdnodes)):
            self.tdnodes[i].removeData()
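    # Refactoring note: the five hard-coded node1..node5 attributes are replaced
    # by the tdnodes list, so the cluster membership is declared once in
    # __init__ and every start/stop/config helper simply iterates the list.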
class ClusterTest:
    def __init__(self, hostName):
        self.host = hostName
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos"
        self.dbName = "mytest"
        self.stbName = "meters"
        self.numberOfThreads = 20
        self.numberOfTables = 10000
        self.numberOfRecords = 1000
        self.tbPrefix = "t"
        self.ts = 1538548685000
        self.repeat = 1

    def connectDB(self):
        self.conn = taos.connect(
            host=self.host,
            user=self.user,
            password=self.password,
            config=self.config)

    def createSTable(self, replica):
        cursor = self.conn.cursor()
        tdLog.info("drop database if exists %s" % self.dbName)
        cursor.execute("drop database if exists %s" % self.dbName)
        tdLog.info("create database %s replica %d" % (self.dbName, replica))
        cursor.execute("create database %s replica %d" % (self.dbName, replica))
        tdLog.info("use %s" % self.dbName)
        cursor.execute("use %s" % self.dbName)
        tdLog.info("drop table if exists %s" % self.stbName)
        cursor.execute("drop table if exists %s" % self.stbName)
        tdLog.info("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName)
        cursor.execute("create table %s(ts timestamp, current float, voltage int, phase int) tags(id int)" % self.stbName)
        cursor.close()

    def insertData(self, threadID):
        print("Thread %d: starting" % threadID)
        cursor = self.conn.cursor()
        tablesPerThread = int(self.numberOfTables / self.numberOfThreads)
        baseTableID = tablesPerThread * threadID
        for i in range(tablesPerThread):
            cursor.execute("create table %s%d using %s tags(%d)" % (self.tbPrefix, baseTableID + i, self.stbName, baseTableID + i))
            query = "insert into %s%d values" % (self.tbPrefix, baseTableID + i)
            base = self.numberOfRecords * i
            for j in range(self.numberOfRecords):
                query += "(%d, %f, %d, %d)" % (self.ts + base + j, random.random(), random.randint(210, 230), random.randint(0, 10))
            cursor.execute(query)
        cursor.close()
        print("Thread %d: finishing" % threadID)

    def run(self):
        threads = []
        tdLog.info("Inserting data")
        for i in range(self.numberOfThreads):
            thread = threading.Thread(target=self.insertData, args=(i,))
            threads.append(thread)
            thread.start()
        for i in range(self.numberOfThreads):
            threads[i].join()

# kill taosd randomly every 10 mins
nodes = Nodes()
loop = 0
while True:
    loop = loop + 1
    index = random.randint(0, 4)
    print("loop: %d, kill taosd on node%d" % (loop, index))
    nodes.stopOneNode(index)
    time.sleep(60)
    nodes.startOneNode(index)
    time.sleep(600)
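# Net effect of the loop above: once every ~11 minutes one random node (index
# 0-4) is SIGKILLed, stays down for about 60 seconds, and is then restarted,
# repeating indefinitely as a replica-failover soak test.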

View File

@ -69,15 +69,37 @@ class TDTestCase:
        tdSql.checkData(10, 0, None)

        # test for tarithoperator.c coverage
        col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col11' , 'col12' , 'col13' , 'col14' , '1' ]
        tdSql.execute("insert into test1 values(1537146000010,1,NULL,9,8,1.2,1.3,0,1,1,5,4,3,2)")
        tdSql.execute("insert into test1 values(1537146000011,2,1,NULL,9,1.2,1.3,1,2,2,6,5,4,3)")
        tdSql.execute("insert into test1 values(1537146000012,3,2,1,NULL,1.2,1.3,0,3,3,7,6,5,4)")
        tdSql.execute("insert into test1 values(1537146000013,4,3,2,1,1.2,1.3,1,4,4,8,7,6,5)")
        tdSql.execute("insert into test1 values(1537146000014,5,4,3,2,1.2,1.3,0,5,5,9,8,7,6)")
        tdSql.execute("insert into test1 values(1537146000015,6,5,4,3,1.2,1.3,1,6,6,NULL,9,8,7)")
        tdSql.execute("insert into test1 values(1537146000016,7,6,5,4,1.2,1.3,0,7,7,1,NULL,9,8)")
        tdSql.execute("insert into test1 values(1537146000017,8,7,6,5,1.2,1.3,1,8,8,2,1,NULL,9)")
        tdSql.execute("insert into test1 values(1537146000018,9,8,7,6,1.2,1.3,0,9,9,3,2,1,NULL)")
        tdSql.execute("insert into test1 values(1537146000019,NULL,9,8,7,1.2,1.3,1,10,10,4,3,2,1)")
        self.ts = self.ts + self.rowNum + 10
        tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % (self.ts + self.rowNum + 1))
        tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % (self.ts + self.rowNum + 2))
        tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % (self.ts + self.rowNum + 3))
        tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % (self.ts + self.rowNum + 4))
        tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % (self.ts + self.rowNum + 5))
        self.rowNum = self.rowNum + 5
        col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' ]
        op_list = [ '+' , '-' , '*' , '/' , '%' ]
        err_list = [ 'col7' , 'col8' , 'col9' , 'NULL' ]
        for i in col_list :
            for j in col_list :
                for k in op_list :
                    sql = " select %s %s %s from test1 " % ( i , k , j )
                    print(sql)
                    tdSql.query(sql)
                    if i in err_list or j in err_list:
                        tdSql.error(sql)
                    else:
                        tdSql.query(sql)
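        # The triple loop above generates 16 x 16 x 5 = 1280 arithmetic
        # expressions; any operand drawn from err_list (bool col7, binary col8,
        # nchar col9, or a bare NULL literal) is expected to be rejected, while
        # every other combination must execute cleanly.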
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

View File

@ -41,24 +41,24 @@ class TDTestCase:
tdSql.error("select col1 + col9 from test1")
tdSql.query("select col1 + col2 from test1")
tdSql.checkRows(11)
tdSql.checkRows(25)
tdSql.checkData(0, 0, 2.0)
tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 + col11 + col12 + col13 + col14 from test1")
tdSql.checkRows(11)
tdSql.checkRows(25)
tdSql.checkData(0, 0, 7.2)
#tdSql.execute("insert into test1(ts, col1) values(%d, 11)" % (self.ts + 11))
tdSql.query("select col1 + col2 from test1")
tdSql.checkRows(11)
tdSql.checkRows(25)
tdSql.checkData(10, 0, None)
tdSql.query("select col1 + col2 * col3 from test1")
tdSql.checkRows(11)
tdSql.checkRows(25)
tdSql.checkData(10, 0, None)
tdSql.query("select col1 + col2 * col3 + col3 / col4 + col5 + col6 + col11 + col12 + col13 + col14 from test1")
tdSql.checkRows(11)
tdSql.checkRows(25)
tdSql.checkData(10, 0, None)

View File

@ -52,8 +52,8 @@ class TDTestCase:
        p.start()
        p.join()
        p.terminate()

        tdSql.execute("insert into tb values(%d, 1, 2)" % (self.ts + 1))
        tdSql.execute("insert into tb(ts, col1, col2) values(%d, 1, 2)" % (self.ts + 2))

        print("==============step2")
        tdSql.query("select * from tb")

View File

@ -28,12 +28,13 @@ class TDTestCase:
    def run(self):
        tdSql.prepare()

        tdSql.execute("CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int)")
        tdSql.execute("CREATE TABLE D1001 USING meters TAGS ('Beijing.Chaoyang', 2)")
        tdSql.execute("CREATE TABLE D1002 USING meters TAGS ('Beijing.Chaoyang', 3)")
        tdSql.execute("CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int, t3 float, t4 double)")
        tdSql.execute("CREATE TABLE D1001 USING meters TAGS ('Beijing.Chaoyang', 2 , NULL, NULL)")
        tdSql.execute("CREATE TABLE D1002 USING meters TAGS ('Beijing.Chaoyang', 3 , NULL , 1.7)")
        tdSql.execute("CREATE TABLE D1003 USING meters TAGS ('Beijing.Chaoyang', 3 , 1.1 , 1.7)")
        tdSql.execute("INSERT INTO D1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) (1538548696800, 12.3, 221, 0.31)")
        tdSql.execute("INSERT INTO D1002 VALUES (1538548685001, 10.5, 220, 0.28) (1538548696800, 12.3, 221, 0.31)")
        tdSql.execute("INSERT INTO D1003 VALUES (1538548685001, 10.5, 220, 0.28) (1538548696800, 12.3, 221, 0.31)")

        tdSql.query("SELECT SUM(current), AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by ts DESC")
        tdSql.checkRows(3)
        tdSql.checkData(0, 0, "2018-10-03 14:38:16")
@ -49,6 +50,12 @@ class TDTestCase:
tdSql.error("SELECT SUM(current) as s, AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by s ASC")
tdSql.error("SELECT SUM(current) as s, AVG(voltage) FROM meters WHERE groupId > 1 INTERVAL(1s) GROUP BY location order by s DESC")
#add for TD-3170
tdSql.query("select avg(current) from meters group by t3;")
tdSql.checkData(0, 0, 11.6)
tdSql.query("select avg(current) from meters group by t4;")
tdSql.query("select avg(current) from meters group by t3,t4;")
    def stop(self):
        tdSql.close()

View File

@ -24,7 +24,7 @@ class TDTestCase:
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.numberOfTables = 10000
        self.numberOfTables = 1000
        self.numberOfRecords = 100

    def getBuildPath(self):

View File

@ -160,9 +160,10 @@ function runPyCaseOneByOnefq {
totalFailed=0
totalPyFailed=0
totalJDBCFailed=0
totalUnitFailed=0

corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`

if [ "$2" != "jdbc" ] && [ "$2" != "python" ]; then
if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
  echo "### run TSIM test case ###"
  cd $tests_dir/script
@ -231,7 +232,7 @@ if [ "$2" != "jdbc" ] && [ "$2" != "python" ]; then
  fi
fi

if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] ; then
if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ]; then
  echo "### run Python test case ###"
  cd $tests_dir
@ -300,8 +301,8 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] ; then
fi

if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$1" == "full" ]; then
  echo "### run JDBC test case ###"
if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$1" == "full" ]; then
  echo "### run JDBC test cases ###"
  cd $tests_dir
@ -318,7 +319,7 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$1" == "full" ]; then
  nohup build/bin/taosd -c /etc/taos/ > /dev/null 2>&1 &
  sleep 30

cd $tests_dir/../src/connector/jdbc
  cd $tests_dir/../src/connector/jdbc
  mvn test > jdbc-out.log 2>&1
  tail -n 20 jdbc-out.log
@ -343,4 +344,40 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$1" == "full" ]; then
  dohavecore 1
fi

exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed))

if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$1" == "full" ]; then
  echo "### run Unit tests ###"

  stopTaosd
  cd $tests_dir
  if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
    cd ../../
  else
    cd ../
  fi
  pwd
  cd debug/build/bin
  nohup ./taosd -c /etc/taos/ > /dev/null 2>&1 &
  sleep 30
  pwd
  ./queryTest > unittest-out.log 2>&1
  tail -n 20 unittest-out.log

  totalUnitTests=`grep "Running" unittest-out.log | awk '{print $3}'`
  totalUnitSuccess=`grep 'PASSED' unittest-out.log | awk '{print $4}'`
  totalUnitFailed=`expr $totalUnitTests - $totalUnitSuccess`

  if [ "$totalUnitSuccess" -gt "0" ]; then
    echo -e "\n${GREEN} ### Total $totalUnitSuccess Unit test succeed! ### ${NC}"
  fi

  if [ "$totalUnitFailed" -ne "0" ]; then
    echo -e "\n${RED} ### Total $totalUnitFailed Unit test failed! ### ${NC}"
  fi
  dohavecore 1
fi
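# Note on the counting above: it assumes googletest-style summary lines in
# unittest-out.log, e.g.
#   [==========] Running 24 tests from 3 test suites.
#   [  PASSED  ] 23 tests.
# so `awk '{print $3}'` and `awk '{print $4}'` pick out 24 and 23 respectively;
# a runner with a different summary format would leave the totals empty.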
exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed))