[td-225] merge develop.

This commit is contained in:
Haojun Liao 2021-07-16 10:20:42 +08:00
commit dbaaee3365
42 changed files with 932 additions and 257 deletions

View File

@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.32-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-*-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

View File

@ -13,7 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <tscompression.h>
#include "os.h"
#include "qPlan.h"
#include "qTableMeta.h"
#include "tcmdtype.h"
#include "tlockfree.h"
#include "trpc.h"
@ -21,10 +24,8 @@
#include "tscLog.h"
#include "tscProfile.h"
#include "tscUtil.h"
#include "qTableMeta.h"
#include "tsclient.h"
#include "ttimer.h"
#include "qPlan.h"
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@ -2182,16 +2183,27 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
}
SSqlCmd *pParentCmd = &pParentSql->cmd;
SHashObj *pSet = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
char* pMsg = pMultiMeta->meta;
char* buf = NULL;
if (pMultiMeta->compressed) {
buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta));
int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1,
buf, pMultiMeta->rawLen - sizeof(SMultiTableMeta), ONE_STAGE_COMP, NULL, 0);
assert(len == pMultiMeta->rawLen - sizeof(SMultiTableMeta));
pMsg = buf;
}
for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) {
STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg;
int32_t code = tableMetaMsgConvert(pMetaMsg);
if (code != TSDB_CODE_SUCCESS) {
taosHashCleanup(pSet);
taosReleaseRef(tscObjRef, pParentSql->self);
tfree(buf);
return code;
}
@ -2200,6 +2212,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, pMetaMsg->tableFname);
taosHashCleanup(pSet);
taosReleaseRef(tscObjRef, pParentSql->self);
tfree(buf);
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -2280,6 +2294,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) {
taosHashCleanup(pSet);
taosReleaseRef(tscObjRef, pParentSql->self);
tfree(buf);
return TSDB_CODE_SUCCESS;
}

View File

@ -948,8 +948,6 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
pSql->pTscObj = taos;
pSql->signature = pSql;
pSql->fp = NULL; // todo set the correct callback function pointer
pSql->cmd.pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
int32_t length = (int32_t)strlen(tableNameList);

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.32-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-*-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.32</version>
<version>2.0.33</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -40,7 +40,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<!-- for restful -->
@ -57,7 +57,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>29.0-jre</version>
<version>30.0-jre</version>
</dependency>
</dependencies>

View File

@ -1,54 +1,62 @@
# Java Connector
## TAOS-JDBCDriver Overview
TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC standard (3.0) API specification; it can be searched for and downloaded from Maven's central repository, the [Sonatype Repository][1].
To make it easier for Java applications to use TDengine, `taos-jdbcdriver` implements the JDBC standard (3.0) API specification. It can currently be searched for and downloaded via the [Sonatype Repository][1].
`taos-jdbcdriver` comes in two forms: JDBC-JNI and JDBC-RESTful (JDBC-RESTful is supported since taos-jdbcdriver-2.0.18). JDBC-JNI calls the native methods of the client library (libtaos.so or taos.dll), while JDBC-RESTful wraps the RESTful interface internally.
Since TDengine is developed in C, using the taos-jdbcdriver package requires the native library that matches your system.
![tdengine-connector](https://www.taosdata.com/cn/documentation/user/pages/images/tdengine-jdbc-connector.png)
* libtaos.so
After TDengine is successfully installed on Linux, the dependent native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux search path and needs no extra configuration.
The figure above shows the three ways a Java application can use the connector to access TDengine:
* taos.dll
After the client is installed on Windows, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32 and likewise needs no extra configuration.
* JDBC-JNI: the Java application on physical node 1 (pnode1) uses the JDBC-JNI API and calls the client API (libtaos.so or taos.dll) directly to send write and query requests to the taosd instance on physical node 2 (pnode2).
* RESTful: the application sends SQL to the RESTful connector on physical node 2 (pnode2), which in turn calls the client API (libtaos.so).
* JDBC-RESTful: the Java application uses the JDBC-RESTful API to wrap SQL into a RESTful request and send it to the RESTful connector on physical node 2.
> Note: when developing on Windows you need to install the matching Windows client of TDengine; since no standalone Linux client is currently provided, TDengine itself must be installed on Linux.
TDengine's JDBC driver stays as close to relational database drivers as possible, but the objects and technical characteristics served by a time-series/spatial database differ from those of a relational database, so `taos-jdbcdriver` also differs somewhat from a traditional JDBC driver. Keep the following points in mind:
TDengine's JDBC driver stays as close to relational database drivers as possible, but the differences between the objects and technical characteristics of a time-series/spatial database and a relational database mean taos-jdbcdriver does not fully implement the JDBC standard. Keep the following points in mind:
* TDengine does not support deleting or updating individual data records, and the driver provides no such methods.
* Because delete and update are not supported, transactions are not supported either.
* union operations across tables are not currently supported.
* Nested queries are not currently supported. `Each Connection instance may have at most one open ResultSet; if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet`.
* TDengine does not currently support deleting individual data records.
* Transactions are not currently supported.
* Nested queries are not currently supported.
* Each Connection instance may have at most one open ResultSet; if a new query is executed while a ResultSet is still open, taos-jdbcdriver automatically closes the previous ResultSet, as the sketch below illustrates.
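A minimal Java sketch of that last point; the connection URL and the `weather` table are illustrative, not from this commit:
```java
Connection conn = DriverManager.getConnection(
        "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata");
Statement stmt = conn.createStatement();
ResultSet rs1 = stmt.executeQuery("select * from weather");
// Executing a second query while rs1 is still open...
ResultSet rs2 = stmt.executeQuery("select count(*) from weather");
// ...closes the first ResultSet automatically.
System.out.println(rs1.isClosed()); // expected: true
rs2.close();
stmt.close();
conn.close();
```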
## TAOS-JDBCDriver versions and the supported TDengine and JDK versions
## JDBC-JNI vs. JDBC-RESTful
| taos-jdbcdriver version | TDengine version | JDK version |
| --- | --- | --- |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
<table >
<tr align="center"><th>Comparison item</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
<tr align="center">
<td>Supported operating systems</td>
<td>Linux, Windows</td>
<td>All platforms</td>
</tr>
<tr align="center">
<td>Client installation required</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr align="center">
<td>Client upgrade required after server upgrade</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr align="center">
<td>Write performance</td>
<td colspan="2">JDBC-RESTful reaches 50%–90% of JDBC-JNI</td>
</tr>
<tr align="center">
<td>Query performance</td>
<td colspan="2">No difference between JDBC-RESTful and JDBC-JNI</td>
</tr>
</table>
## TDengine DataType vs. Java DataType
Note: unlike the JNI approach, the RESTful interface is stateless, so the `USE db_name` command has no effect; under RESTful, every reference to a table or super table name must be prefixed with the database name, as the sketch below illustrates.
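A minimal sketch of that prefix rule over JDBC-RESTful, reusing the `db.tb` names from the create-table example later in this document (the endpoint is illustrative):
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
Connection conn = DriverManager.getConnection(
        "jdbc:TAOS-RS://taosdemo.com:6041/?user=root&password=taosdata");
try (Statement stmt = conn.createStatement()) {
    // "use db" has no effect over RESTful, so every table name is qualified:
    stmt.executeUpdate("insert into db.tb values(now, 24, 11.2)");
    ResultSet rs = stmt.executeQuery("select * from db.tb");
    while (rs.next()) { /* consume rows */ }
}
```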
TDengine currently supports timestamp, numeric, character, and boolean types, which map to Java types as follows:
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to obtain TAOS-JDBCDriver
## How to obtain taos-jdbcdriver
### Maven repository
taos-jdbcdriver has been published to the [Sonatype Repository][1], and the major mirror repositories are all synchronized with it.
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@ -56,56 +64,86 @@ TDengine currently supports timestamp, numeric, character, and boolean types, w
In a Maven project, add the following to pom.xml:
```xml
<dependencies>
<dependency>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>1.0.3</version>
</dependency>
</dependencies>
<version>2.0.18</version>
</dependency>
```
### Building from source
After downloading the [TDengine][3] source, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to produce the jar.
After downloading the [TDengine][3] source, enter the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package -Dmaven.test.skip=true` to produce the jar.
## Usage notes
## Using JDBC
### Getting a connection
With the configuration shown below, a TDengine Connection can be obtained:
#### Connecting with a URL
Obtain a connection by specifying the URL, as shown below:
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
> Port 6030 is the default connection port; the `log` in the JDBC URL is the system's own monitoring database.
The example above uses the **JDBC-RESTful** driver to establish a connection to hostname taosdemo.com, port 6041, database test. The URL sets the user name (user) to root and the password (password) to taosdata.
The JDBC-RESTful interface does not depend on the native library. Compared with JDBC-JNI, you only need to:
1. specify driverClass as "com.taosdata.jdbc.rs.RestfulDriver";
2. begin jdbcUrl with "jdbc:TAOS-RS://";
3. use 6041 as the connection port.
For better write and query performance, a Java application can use the **JDBC-JNI** driver, as shown below:
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
The example above uses the JDBC-JNI driver to establish a connection to hostname taosdemo.com, port 6030 (TDengine's default port), database test. The URL sets the user name (user) to root and the password (password) to taosdata.
**Note**: with the JDBC-JNI driver, the taos-jdbcdriver package depends on the native library that matches your system.
* libtaos.so
After TDengine is successfully installed on Linux, the dependent native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on the default Linux search path and needs no extra configuration.
* taos.dll
After the client is installed on Windows, the taos.dll file that the driver depends on is automatically copied to the default system search path C:/Windows/System32 and likewise needs no extra configuration.
> When developing on Windows, install the matching TDengine [Windows client][14]. On a Linux server, the client is installed by default along with TDengine; you can also install just the [Linux client][15] to connect to a remote TDengine Server.
For JDBC-JNI usage, see the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
The canonical format of a TDengine JDBC URL is:
`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
Here the parts in `{}` are required and those in `[]` are optional. The configuration parameters are described below:
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
The configuration parameters in the URL are as follows (a combined example follows the list):
* user: TDengine login name; default root.
* password: login password; default taosdata.
* charset: character set used by the client; defaults to the system charset.
* cfgdir: directory of the client configuration file; default /etc/taos on Linux, C:/TDengine/cfg on Windows.
* charset: character set used by the client; defaults to the system charset.
* locale: client locale; defaults to the current system locale.
* timezone: time zone used by the client; defaults to the current system time zone.
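For illustration, a URL that sets several of the optional parameters above might look like this (hostname and database are placeholders):
```java
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"
        + "&cfgdir=/etc/taos&charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8";
Connection conn = DriverManager.getConnection(jdbcUrl);
```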
These parameters can be configured in three places, in `descending order of priority`:
1. JDBC URL parameters
As described above, they can be specified in the JDBC URL.
2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
#### Connecting with a URL and Properties
Besides connecting with a URL alone, a connection can also be established with Properties that specify parameters, as shown below:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
// Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
@ -114,15 +152,46 @@ public Connection getConn() throws Exception{
}
```
3. the client configuration file taos.cfg
The example above establishes a connection to hostname taosdemo.com, port 6030, database test. The commented-out lines show the JDBC-RESTful variant. The URL sets the user name (user) to root and the password (password) to taosdata, while connProps supplies the character set, locale, time zone, and so on.
On Linux the default configuration file is /var/lib/taos/taos.cfg; on Windows it is C:\TDengine\cfg\taos.cfg.
```properties
# client default username
# defaultUser root
The configuration parameters in properties are as follows:
* TSDBDriver.PROPERTY_KEY_USER: TDengine login name; default root.
* TSDBDriver.PROPERTY_KEY_PASSWORD: login password; default taosdata.
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: directory of the client configuration file; default /etc/taos on Linux, C:/TDengine/cfg on Windows.
* TSDBDriver.PROPERTY_KEY_CHARSET: character set used by the client; defaults to the system charset.
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale; defaults to the current system locale.
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: time zone used by the client; defaults to the current system time zone.
# client default password
# defaultPass taosdata
#### Connecting via the client configuration file
When JDBC-JNI is used to connect to a TDengine cluster, the client configuration file can be used, with the cluster's firstEp and secondEp parameters specified there.
As shown below:
1. Do not specify hostname or port in the Java application:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
return conn;
}
```
2. Specify firstEp and secondEp in the configuration file:
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
# second fully qualified domain name (FQDN) for TDengine system, for cluster only
secondEp cluster_node2:6030
# default system charset
# charset UTF-8
@ -130,6 +199,21 @@ public Connection getConn() throws Exception{
# system locale
# locale en_US.UTF-8
```
In the example above, JDBC uses the client configuration file to establish a connection to hostname cluster_node1, port 6030, database test. When the cluster's firstEp node fails, JDBC attempts to connect to the cluster via secondEp.
In TDengine, as long as one of firstEp and secondEp is alive, the connection to the cluster can be established normally.
> Note: the configuration file here is the one on the machine where the application calling the JDBC connector runs: /etc/taos/taos.cfg by default on Linux, C://TDengine/cfg/taos.cfg by default on Windows.
#### Priority of configuration parameters
When a connection is obtained in any of the three ways above and a configuration parameter is duplicated across the URL, Properties, and the client configuration file, the `priority, from high to low`, is:
1. JDBC URL parameters, as described above.
2. Properties connProps
3. the client configuration file taos.cfg
For example: if the URL specifies password as taosdata while Properties specifies password as taosdemo, JDBC uses the password from the URL to establish the connection, as the sketch below illustrates.
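A minimal sketch of that precedence rule (connection details are illustrative):
```java
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo");
// The URL parameter wins: the connection is established with password "taosdata".
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
```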
> For more configuration details, see [client configuration][13].
### Creating a database and table
@ -146,6 +230,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: if you do not select a database with `use db`, every subsequent table operation must prefix the table name with the database name, e.g. db.tb.
### Inserting data
@ -156,6 +241,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
> now is a built-in function that defaults to the current server time.
> `now + 1s` means the current server time plus 1 second; the letter after the number is the time unit: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year).
@ -177,8 +263,150 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Querying works as it does against a relational database; when fetching returned fields by index, the index starts at 1, and fetching by column name is recommended, as the sketch below shows.
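A short sketch of both access styles, assuming the `tb` table (ts, temperature, humidity) created in the example above:
```java
ResultSet rs = stmt.executeQuery("select * from tb");
while (rs.next()) {
    Timestamp ts = rs.getTimestamp(1);          // by 1-based index
    int temperature = rs.getInt("temperature"); // by column name (recommended)
    float humidity = rs.getFloat("humidity");
    System.out.printf("%s, %d, %s%n", ts, temperature, humidity);
}
```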
### Handling exceptions
After an error is reported, the error message and error code can be obtained from the SQLException:
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
The JDBC connector may report three kinds of error codes: errors from the JDBC driver itself (codes between 0x2301 and 0x2350), errors from JNI methods (codes between 0x2351 and 0x2400), and errors from other TDengine modules.
For the specific error codes, see:
* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h
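A hedged sketch that sorts an error into those ranges; the range boundaries are the ones stated above, and the failing query is just an illustration:
```java
try (Statement statement = connection.createStatement()) {
    statement.executeQuery("select * from no_such_table"); // deliberately fails
} catch (SQLException e) {
    int code = e.getErrorCode();
    if (code >= 0x2301 && code <= 0x2350) {
        System.out.println("JDBC driver error: " + e.getMessage());
    } else if (code >= 0x2351 && code <= 0x2400) {
        System.out.println("JNI error: " + e.getMessage());
    } else {
        System.out.println("TDengine module error: " + e.getMessage());
    }
}
```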
### <a class="anchor" id="stmt-java"></a>通过参数绑定写入数据
从 2.1.2.0 版本开始TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入INSERT场景的支持。采用这种方式写入数据时能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。)
```java
Statement stmt = conn.createStatement();
Random r = new Random();
// In the INSERT statement, the VALUES part may name specific data columns; with automatic table creation, the TAGS part must supply values for all TAGS columns:
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)");
// Set the table name:
s.setTableName("w1");
// Set the TAGS values:
s.setTagInt(0, r.nextInt(10));
s.setTagString(1, "Beijing");
int numOfRows = 10;
// The VALUES part is bound column by column:
ArrayList<Long> ts = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
ArrayList<Integer> s1 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s1.add(r.nextInt(100));
}
s.setInt(1, s1);
ArrayList<String> s2 = new ArrayList<>();
for (int i = 0; i < numOfRows; i++){
s2.add("test" + r.nextInt(100));
}
s.setString(2, s2, 10);
// After AddBatch the cache is not cleared; to avoid confusion, binding another batch of data before ExecuteBatch is not recommended:
s.columnDataAddBatch();
// Execute the statement with the bound data:
s.columnDataExecuteBatch();
// Clear the cache after execution; the object can then be reused to bind a new batch of data (new table name, new TAGS values, new VALUES values):
s.columnDataClearBatch();
// Done; release resources:
s.columnDataCloseBatch();
```
The methods for setting TAGS values are:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
The methods for setting the VALUES data columns are:
```java
public void setInt(int columnIndex, ArrayList<Integer> list) throws SQLException
public void setFloat(int columnIndex, ArrayList<Float> list) throws SQLException
public void setTimestamp(int columnIndex, ArrayList<Long> list) throws SQLException
public void setLong(int columnIndex, ArrayList<Long> list) throws SQLException
public void setDouble(int columnIndex, ArrayList<Double> list) throws SQLException
public void setBoolean(int columnIndex, ArrayList<Boolean> list) throws SQLException
public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
Both setString and setNString require the caller to declare, via the size parameter, the width of the corresponding column in the table definition; the snippet below illustrates.
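For instance, if column c2 in the example above were defined as BINARY(10) (an assumed width), the binding would declare that width in size; an NCHAR column follows the same rule via setNString:
```java
s.setString(2, s2, 10);      // size = width of the assumed BINARY(10) column
// s.setNString(3, s3, 32); // likewise for an assumed NCHAR(32) column
```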
### <a class="anchor" id="subscribe"></a>订阅
#### 创建
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
```
The three parameters of the `subscribe` method mean:
* topic: the subscription's subject (name); this parameter uniquely identifies the subscription
* sql: the subscription's query statement; it can only be a `select` statement, should query only raw data, and can only query data in ascending time order
* restart: if the subscription already exists, whether to restart it or continue the previous subscription
The example above creates a subscription named `topic` using the SQL statement `select * from meters`; if this subscription already exists, it continues from the previous query position instead of consuming all data from the beginning.
#### Consuming data
```java
int total = 0;
while(true) {
TSDBResultSet rs = sub.consume();
int count = 0;
while(rs.next()) {
count++;
}
total += count;
System.out.printf("%d rows consumed, total %d\n", count, total);
Thread.sleep(1000);
}
```
The `consume` method returns a result set containing all new data since the previous `consume`. Be sure to call `consume` at a rate that matches your needs (e.g. `Thread.sleep(1000)` in the example); otherwise it puts needless pressure on the server side.
#### Closing a subscription
```java
sub.close(true);
```
The `close` method closes a subscription. If its argument is `true`, the subscription progress is retained and a subscription with the same name can be created later to continue consuming data; if `false`, the progress is not retained. The sketch below shows the resume path.
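A minimal sketch combining `close(true)` with `restart = false` from the creation section, using the same names as the examples above:
```java
// Keep the progress when closing...
sub.close(true);
// ...then resume later: the same topic with restart=false continues
// from the saved position instead of starting over.
TSDBSubscribe resumed = ((TSDBConnection) conn).subscribe("topic", "select * from meters", false);
```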
### Closing resources
@ -187,12 +415,17 @@ resultSet.close();
stmt.close();
conn.close();
```
> `Note: be sure to close the connection`; otherwise connections will leak.
## Using with connection pools
**HikariCP**
* Add the corresponding HikariCP Maven dependency:
```xml
<dependency>
<groupId>com.zaxxer</groupId>
@ -202,19 +435,21 @@ conn.close();
```
* Usage example:
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
// jdbc properties
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
// connection pool configurations
config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource
@ -227,6 +462,7 @@ conn.close();
connection.close(); // put back to connection pool
}
```
> After obtaining a connection via HikariDataSource.getConnection(), call close() when you are done; this does not actually close the connection, it just returns it to the pool.
> For more on using HikariCP, see the [official documentation][5].
@ -243,40 +479,32 @@ conn.close();
```
* Usage example:
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
DruidDataSource dataSource = new DruidDataSource();
// jdbc properties
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
dataSource.setUrl(url);
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
// pool configurations
dataSource.setInitialSize(10);
dataSource.setMinIdle(10);
dataSource.setMaxActive(10);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("select server_status()");
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
Connection connection = dataSource.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> For more on using Druid, see the [official documentation][6].
**Notes**
@ -291,18 +519,55 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
## Using with frameworks
* For taos-jdbcdriver in Spring JdbcTemplate, see [SpringJdbcTemplate][11]
* For Spring Boot + MyBatis, see [springbootdemo][12]
## <a class="anchor" id="version"></a>TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
| 2.0.31 | 2.1.3.0 及以上 | 1.8.x |
| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x |
| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
## TDengine DataType vs. Java DataType
TDengine currently supports timestamp, numeric, character, and boolean types, which map to Java types as follows:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
## FAQ
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**Cause**: the program did not find the dependent native library taos.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll into the C:\Windows\System32\ directory; on Linux, create the following symlink: ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll into the C:\Windows\System32\ directory; on Linux, create the following symlink: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@ -312,8 +577,6 @@ Query OK, 1 row(s) in set (0.000141s)
* For other problems, see [Issues][7]
[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver
[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver
[3]: https://github.com/taosdata/TDengine
@ -326,4 +589,7 @@ Query OK, 1 row(s) in set (0.000141s)
[10]: https://maven.aliyun.com/mvn/search
[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate
[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo
[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[13]: https://www.taosdata.com/cn/documentation/administrator/#client
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF

View File

@ -2,6 +2,7 @@ package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public abstract class AbstractStatement extends WrapperImpl implements Statement {
@ -196,14 +197,45 @@ public abstract class AbstractStatement extends WrapperImpl implements Statement
if (batchedArgs == null || batchedArgs.isEmpty())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY);
String clientInfo = getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE);
boolean batchErrorIgnore = clientInfo == null ? TSDBConstants.DEFAULT_BATCH_ERROR_IGNORE : Boolean.parseBoolean(clientInfo);
if (batchErrorIgnore) {
return executeBatchIgnoreException();
}
return executeBatchThrowException();
}
private int[] executeBatchIgnoreException() {
return batchedArgs.stream().mapToInt(sql -> {
try {
boolean isSelect = execute(sql);
if (isSelect) {
return SUCCESS_NO_INFO;
} else {
return getUpdateCount();
}
} catch (SQLException e) {
return EXECUTE_FAILED;
}
}).toArray();
}
private int[] executeBatchThrowException() throws BatchUpdateException {
int[] res = new int[batchedArgs.size()];
for (int i = 0; i < batchedArgs.size(); i++) {
try {
boolean isSelect = execute(batchedArgs.get(i));
if (isSelect) {
res[i] = SUCCESS_NO_INFO;
} else {
res[i] = getUpdateCount();
}
} catch (SQLException e) {
String reason = e.getMessage();
int[] updateCounts = Arrays.copyOfRange(res, 0, i);
throw new BatchUpdateException(reason, updateCounts, e);
}
}
return res;
}

View File

@ -74,6 +74,8 @@ public abstract class TSDBConstants {
public static final String DEFAULT_PRECISION = "ms";
public static final boolean DEFAULT_BATCH_ERROR_IGNORE = false;
public static int typeName2JdbcType(String type) {
switch (type.toUpperCase()) {
case "TIMESTAMP":

View File

@ -100,6 +100,11 @@ public class TSDBDriver extends AbstractDriver {
*/
public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat";
/**
* continue process commands in executeBatch
*/
public static final String PROPERTY_KEY_BATCH_ERROR_IGNORE = "batchErrorIgnore";
private TSDBDatabaseMetaData dbMetaData = null;
static {

View File

@ -841,13 +841,13 @@ public class TSDBPreparedStatementTest {
}
@Test
public void setBytes() throws SQLException, IOException {
public void setBytes() throws SQLException {
// given
long ts = System.currentTimeMillis();
byte[] f8 = "{\"name\": \"john\", \"age\": 10, \"address\": \"192.168.1.100\"}".getBytes();
// when
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setTimestamp(1, new Timestamp(ts));
pstmt_insert.setBytes(9, f8);
int result = pstmt_insert.executeUpdate();

View File

@ -0,0 +1,144 @@
package com.taosdata.jdbc.cases;
import org.junit.*;
import java.sql.*;
import java.util.stream.IntStream;
public class BatchErrorIgnoreTest {
private static final String host = "127.0.0.1";
@Test
public void batchErrorThrowException() throws SQLException {
// given
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
// when
try (Statement stmt = conn.createStatement()) {
IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("insert into t11 values(now, 11)");
IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("select count(*) from test.weather");
stmt.executeBatch();
} catch (BatchUpdateException e) {
int[] updateCounts = e.getUpdateCounts();
Assert.assertEquals(5, updateCounts.length);
Assert.assertEquals(1, updateCounts[0]);
Assert.assertEquals(1, updateCounts[1]);
Assert.assertEquals(1, updateCounts[2]);
Assert.assertEquals(1, updateCounts[3]);
Assert.assertEquals(1, updateCounts[4]);
}
}
@Test
public void batchErrorIgnore() throws SQLException {
// given
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&batchErrorIgnore=true");
// when
int[] results = null;
try (Statement stmt = conn.createStatement()) {
IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("insert into t11 values(now, 11)");
IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> {
try {
stmt.addBatch(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.addBatch("select count(*) from test.weather");
results = stmt.executeBatch();
} catch (SQLException e) {
e.printStackTrace();
}
// then
assert results != null;
Assert.assertEquals(12, results.length);
Assert.assertEquals(1, results[0]);
Assert.assertEquals(1, results[1]);
Assert.assertEquals(1, results[2]);
Assert.assertEquals(1, results[3]);
Assert.assertEquals(1, results[4]);
Assert.assertEquals(Statement.EXECUTE_FAILED, results[5]);
Assert.assertEquals(2, results[6]);
Assert.assertEquals(2, results[7]);
Assert.assertEquals(2, results[8]);
Assert.assertEquals(2, results[9]);
Assert.assertEquals(2, results[10]);
Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[11]);
}
@Before
public void before() {
try {
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("use test");
stmt.execute("drop table if exists weather");
stmt.execute("create table weather (ts timestamp, f1 float) tags(t1 int)");
IntStream.range(1, 11).mapToObj(i -> "create table t" + i + " using weather tags(" + i + ")").forEach(sql -> {
try {
stmt.execute(sql);
} catch (SQLException e) {
e.printStackTrace();
}
});
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@BeforeClass
public static void beforeClass() {
try {
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test");
stmt.execute("create database if not exists test");
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@AfterClass
public static void afterClass() {
try {
Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("drop database if exists test");
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -404,6 +404,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find")
#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string")
#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error")
// odbc
#define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory")
#define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"conversion not a valid literal input")

View File

@ -806,6 +806,8 @@ typedef struct SMultiTableMeta {
int32_t numOfVgroup;
int32_t numOfUdf;
int32_t contLen;
uint8_t compressed; // denote if compressed or not
uint32_t rawLen; // size before compress
char meta[];
} SMultiTableMeta;

View File

@ -569,7 +569,7 @@ SArguments g_args = {
0, // test_mode
"127.0.0.1", // host
6030, // port
TAOSC_IFACE, // iface
INTERFACE_BUT, // iface
"root", // user
#ifdef _TD_POWER_
"powerdb", // password
@ -946,7 +946,6 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) {
arguments->datatype[col] = NULL;
}
} else if (strcmp(argv[i], "-b") == 0) {
arguments->demo_mode = false;
if (argc == i+1) {
@ -1430,8 +1429,13 @@ static int printfInsertMeta() {
else
printf("\ntaosdemo is simulating random data as you request..\n\n");
if (g_args.iface != INTERFACE_BUT) {
// first time if no iface specified
printf("interface: \033[33m%s\033[0m\n",
(g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt");
(g_args.iface==TAOSC_IFACE)?"taosc":
(g_args.iface==REST_IFACE)?"rest":"stmt");
}
printf("host: \033[33m%s:%u\033[0m\n",
g_Dbs.host, g_Dbs.port);
printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
@ -3104,7 +3108,7 @@ static int startMultiThreadCreateChildTable(
char* cols, int threads, uint64_t tableFrom, int64_t ntables,
char* db_name, SSuperTable* superTblInfo) {
pthread_t *pids = malloc(threads * sizeof(pthread_t));
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
@ -5039,13 +5043,17 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k)
uint16_t iface;
if (superTblInfo)
iface = superTblInfo->iface;
else {
if (g_args.iface == INTERFACE_BUT)
iface = TAOSC_IFACE;
else
iface = g_args.iface;
}
debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
__func__, __LINE__,
(g_args.iface==TAOSC_IFACE)?
"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt");
(iface==TAOSC_IFACE)?
"taosc":(iface==REST_IFACE)?"rest":"stmt");
switch(iface) {
case TAOSC_IFACE:
@ -5885,7 +5893,7 @@ static void printStatPerThread(threadInfo *pThreadInfo)
pThreadInfo->threadID,
pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows,
(double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0)));
(pThreadInfo->totalDelay)?(double)((pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay)/1000.0)): FLT_MAX);
}
// sync write interlace data
@ -6464,7 +6472,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *
}
static void startMultiThreadInsertData(int threads, char* db_name,
char* precision,SSuperTable* superTblInfo) {
char* precision, SSuperTable* superTblInfo) {
int32_t timePrec = TSDB_TIME_PRECISION_MILLI;
if (0 != precision[0]) {
@ -6602,7 +6610,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
pthread_t *pids = malloc(threads * sizeof(pthread_t));
pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
assert(pids != NULL);
threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
@ -7261,8 +7269,8 @@ static int queryTestProcess() {
if ((nSqlCount > 0) && (nConcurrent > 0)) {
pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t));
infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo));
pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t));
infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo));
if ((NULL == pids) || (NULL == infos)) {
taos_close(taos);
@ -7307,8 +7315,8 @@ static int queryTestProcess() {
//==== create sub threads for query from all sub table of the super table
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
free(infos);
@ -7741,11 +7749,13 @@ static int subscribeTestProcess() {
exit(-1);
}
pids = malloc(
pids = calloc(
1,
g_queryInfo.specifiedQueryInfo.sqlCount *
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(pthread_t));
infos = malloc(
infos = calloc(
1,
g_queryInfo.specifiedQueryInfo.sqlCount *
g_queryInfo.specifiedQueryInfo.concurrent *
sizeof(threadInfo));
@ -7774,11 +7784,13 @@ static int subscribeTestProcess() {
} else {
if ((g_queryInfo.superQueryInfo.sqlCount > 0)
&& (g_queryInfo.superQueryInfo.threadCnt > 0)) {
pidsOfStable = malloc(
pidsOfStable = calloc(
1,
g_queryInfo.superQueryInfo.sqlCount *
g_queryInfo.superQueryInfo.threadCnt *
sizeof(pthread_t));
infosOfStable = malloc(
infosOfStable = calloc(
1,
g_queryInfo.superQueryInfo.sqlCount *
g_queryInfo.superQueryInfo.threadCnt *
sizeof(threadInfo));
@ -7933,7 +7945,12 @@ static void setParaFromArg(){
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
if (g_args.iface == INTERFACE_BUT) {
g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE;
} else {
g_Dbs.db[0].superTbls[0].iface = g_args.iface;
}
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
"2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
@ -8095,7 +8112,7 @@ static void queryResult() {
// query data
pthread_t read_id;
threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo));
assert(pThreadInfo);
pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0;

View File

@ -2893,7 +2893,7 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray
(*totalMallocLen) *= 2;
}
pMultiMeta = rpcReallocCont(pMultiMeta, *totalMallocLen);
pMultiMeta = realloc(pMultiMeta, *totalMallocLen);
if (pMultiMeta == NULL) {
return NULL;
}
@ -2916,8 +2916,8 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
char* str = strndup(pInfo->tableNames, contLen);
char** nameList = strsplit(str, ",", &num);
SArray* pList = taosArrayInit(4, POINTER_BYTES);
SMultiTableMeta *pMultiMeta = NULL;
SMultiTableMeta *pMultiMeta = NULL;
if (num != pInfo->numOfTables + pInfo->numOfVgroups + pInfo->numOfUdfs) {
mError("msg:%p, app:%p, failed to get multi-tableMeta, msg inconsistent", pMsg, pMsg->rpcMsg.ahandle);
code = TSDB_CODE_MND_INVALID_TABLE_NAME;
@ -2925,8 +2925,8 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
}
// first malloc 80KB, subsequent reallocation will expand the size as twice of the original size
int32_t totalMallocLen = sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + (sizeof(SFunctionInfoMsg) + TSDB_FUNC_CODE_LEN) * pInfo->numOfUdfs + 16384;
pMultiMeta = rpcMallocCont(totalMallocLen);
int32_t totalMallocLen = sizeof(SMultiTableMeta) + sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16);
pMultiMeta = calloc(1, totalMallocLen);
if (pMultiMeta == NULL) {
code = TSDB_CODE_MND_OUT_OF_MEMORY;
goto _end;
@ -2959,7 +2959,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
int remain = totalMallocLen - pMultiMeta->contLen;
if (remain <= sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)) {
totalMallocLen *= 2;
pMultiMeta = rpcReallocCont(pMultiMeta, totalMallocLen);
pMultiMeta = realloc(pMultiMeta, totalMallocLen);
if (pMultiMeta == NULL) {
mnodeDecTableRef(pMsg->pTable);
code = TSDB_CODE_MND_OUT_OF_MEMORY;
@ -2994,6 +2994,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
}
int32_t tableNum = pInfo->numOfTables + pInfo->numOfVgroups;
// add the additional super table names that need the vgroup info
for(;t < tableNum; ++t) {
taosArrayPush(pList, &nameList[t]);
@ -3027,6 +3028,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
pMultiMeta->numOfTables = htonl(pMultiMeta->numOfTables);
// add the user-defined-function information
for(int32_t i = 0; i < pInfo->numOfUdfs; ++i, ++t) {
char buf[TSDB_FUNC_NAME_LEN] = {0};
strcpy(buf, nameList[t]);
@ -3053,23 +3055,48 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
}
pMultiMeta->contLen = (int32_t) (msg - (char*) pMultiMeta);
pMultiMeta->numOfUdf = htonl(pInfo->numOfUdfs);
pMsg->rpcRsp.rsp = pMultiMeta;
pMsg->rpcRsp.len = pMultiMeta->contLen;
code = TSDB_CODE_SUCCESS;
char* tmp = rpcMallocCont(pMultiMeta->contLen + 2);
if (tmp == NULL) {
code = TSDB_CODE_MND_OUT_OF_MEMORY;
goto _end;
}
int32_t dataLen = (int32_t)pMultiMeta->contLen - sizeof(SMultiTableMeta);
int32_t len = tsCompressString(pMultiMeta->meta, dataLen, 1, tmp + sizeof(SMultiTableMeta), (int32_t)dataLen + 2,
ONE_STAGE_COMP, NULL, 0);
pMultiMeta->rawLen = pMultiMeta->contLen;
if (len == -1 || len >= dataLen + 2) { // compress failed, do not compress this binary data
pMultiMeta->compressed = 0;
memcpy(tmp, pMultiMeta, sizeof(SMultiTableMeta) + pMultiMeta->contLen);
} else {
pMultiMeta->compressed = 1;
pMultiMeta->contLen = sizeof(SMultiTableMeta) + len;
// copy the header and the compressed payload
memcpy(tmp, pMultiMeta, sizeof(SMultiTableMeta));
}
pMsg->rpcRsp.rsp = tmp;
pMsg->rpcRsp.len = pMultiMeta->contLen;
SMultiTableMeta* p = (SMultiTableMeta*) tmp;
mDebug("multiTable info build completed, original:%d, compressed:%d, comp:%d", p->rawLen, p->contLen, p->compressed);
_end:
tfree(str);
tfree(nameList);
taosArrayDestroy(pList);
pMsg->pTable = NULL;
pMsg->pVgroup = NULL;
if (code != TSDB_CODE_SUCCESS) {
rpcFreeCont(pMultiMeta);
}
tfree(pMultiMeta);
return code;
}

View File

@ -35,4 +35,7 @@ void httpTrimTableName(char *name);
int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name);
char * httpGetCmdsString(HttpContext *pContext, int32_t pos);
int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql);
void httpCheckFreeEscapedSql(char *oldSql, char *newSql);
#endif

View File

@ -176,6 +176,16 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
return false;
}
#define ESCAPE_ERROR_PROC(code, context, root) \
do { \
if (code != TSDB_CODE_SUCCESS) { \
httpSendErrorResp(context, code); \
\
cJSON_Delete(root); \
return false; \
} \
} while (0)
for (int32_t i = 0; i < size; ++i) {
cJSON* query = cJSON_GetArrayItem(root, i);
if (query == NULL) continue;
@ -186,7 +196,14 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
continue;
}
int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, refId->valuestring);
char *newStr = NULL;
int32_t retCode = 0;
retCode = httpCheckAllocEscapeSql(refId->valuestring, &newStr);
ESCAPE_ERROR_PROC(retCode, pContext, root);
int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, newStr);
httpCheckFreeEscapedSql(refId->valuestring, newStr);
if (refIdBuffer == -1) {
httpWarn("context:%p, fd:%d, user:%s, refId buffer is full", pContext, pContext->fd, pContext->user);
break;
@ -195,7 +212,11 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
cJSON* alias = cJSON_GetObjectItem(query, "alias");
int32_t aliasBuffer = -1;
if (!(alias == NULL || alias->valuestring == NULL || strlen(alias->valuestring) == 0)) {
aliasBuffer = httpAddToSqlCmdBuffer(pContext, alias->valuestring);
retCode = httpCheckAllocEscapeSql(alias->valuestring, &newStr);
ESCAPE_ERROR_PROC(retCode, pContext, root);
aliasBuffer = httpAddToSqlCmdBuffer(pContext, newStr);
httpCheckFreeEscapedSql(alias->valuestring, newStr);
if (aliasBuffer == -1) {
httpWarn("context:%p, fd:%d, user:%s, alias buffer is full", pContext, pContext->fd, pContext->user);
break;
@ -211,7 +232,11 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
continue;
}
int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, sql->valuestring);
retCode = httpCheckAllocEscapeSql(sql->valuestring, &newStr);
ESCAPE_ERROR_PROC(retCode, pContext, root);
int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, newStr);
httpCheckFreeEscapedSql(sql->valuestring, newStr);
if (sqlBuffer == -1) {
httpWarn("context:%p, fd:%d, user:%s, sql buffer is full", pContext, pContext->fd, pContext->user);
break;
@ -237,6 +262,8 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
}
}
#undef ESCAPE_ERROR_PROC
pContext->reqType = HTTP_REQTYPE_MULTI_SQL;
pContext->encodeMethod = &gcQueryMethod;
pContext->multiCmds->pos = 0;

View File

@ -423,3 +423,65 @@ void httpProcessRequest(HttpContext *pContext) {
httpExecCmd(pContext);
}
}
int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql)
{
char *pos;
if (oldSql == NULL || newSql == NULL) {
return TSDB_CODE_SUCCESS;
}
/* bad sql clause */
pos = strstr(oldSql, "%%");
if (pos) {
httpError("bad sql:%s", oldSql);
return TSDB_CODE_HTTP_REQUEST_JSON_ERROR;
}
pos = strchr(oldSql, '%');
if (pos == NULL) {
httpDebug("sql:%s", oldSql);
*newSql = oldSql;
return TSDB_CODE_SUCCESS;
}
*newSql = (char *) calloc(1, (strlen(oldSql) << 1) + 1);
if (*newSql == NULL) {
httpError("failed to allocate for new sql, old sql:%s", oldSql);
return TSDB_CODE_HTTP_NO_ENOUGH_MEMORY;
}
char *src = oldSql;
char *dst = *newSql;
size_t sqlLen = strlen(src);
while (1) {
memcpy(dst, src, pos - src + 1);
dst += pos - src + 1;
*dst++ = '%';
if (pos + 1 >= oldSql + sqlLen) {
break;
}
src = ++pos;
pos = strchr(pos, '%');
if (pos == NULL) {
memcpy(dst, src, strlen(src));
break;
}
}
return TSDB_CODE_SUCCESS;
}
void httpCheckFreeEscapedSql(char *oldSql, char *newSql)
{
if (oldSql && newSql) {
if (oldSql != newSql) {
free(newSql);
}
}
}

View File

@ -610,7 +610,18 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) {
// stable tag for detail
for (int32_t i = 0; i < orderTagsLen; ++i) {
cJSON *tag = orderedTags[i];
stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tag->string);
char *tagStr = NULL;
int32_t retCode = httpCheckAllocEscapeSql(tag->string, &tagStr);
if (retCode != TSDB_CODE_SUCCESS) {
httpSendErrorResp(pContext, retCode);
return false;
}
stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tagStr);
httpCheckFreeEscapedSql(tag->string, tagStr);
if (tag->type == cJSON_String)
stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring);

View File

@ -595,7 +595,7 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t nu
SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code);
SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t *qId, SUdfInfo* pUdfInfo);
SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo);
int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start,
int32_t prevResultLen, void* merger);

View File

@ -24,7 +24,18 @@
memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \
} while (0)
#define SET_RES_EXT_WINDOW_KEY(_k, _ori, _len, _uid, _buf) \
do { \
assert(sizeof(_uid) == sizeof(uint64_t)); \
*(void **)(_k) = (_buf); \
*(uint64_t *)((_k) + POINTER_BYTES) = (_uid); \
memcpy((_k) + POINTER_BYTES + sizeof(uint64_t), (_ori), (_len)); \
} while (0)
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t) + POINTER_BYTES)
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)

View File

@ -548,7 +548,7 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) {
if ((ctx)->hasNull && isNull((char *)&(list)[i], tsdbType)) { \
continue; \
} \
TSKEY key = GET_TS_DATA(ctx, i); \
TSKEY key = (ctx)->ptsList != NULL? GET_TS_DATA(ctx, i):0; \
UPDATE_DATA(ctx, val, (list)[i], num, sign, key); \
}
@ -1491,9 +1491,10 @@ static void first_function(SQLFunctionCtx *pCtx) {
}
memcpy(pCtx->pOutput, data, pCtx->inputBytes);
if (pCtx->ptsList != NULL) {
TSKEY k = GET_TS_DATA(pCtx, i);
DO_UPDATE_TAG_COLUMNS(pCtx, k);
}
SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx);
pInfo->hasResult = DATA_SET_FLAG;

View File

@ -434,8 +434,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
existed = (pResultRowInfo->pResult[0] == (*p1));
pResultRowInfo->curPos = 0;
} else { // check if current pResultRowInfo contains the existed pResultRow
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
if (index != NULL) {
pResultRowInfo->curPos = (int32_t) *index;
existed = true;
@ -472,8 +472,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult
pResultRowInfo->pResult[pResultRowInfo->size++] = pResult;
int64_t index = pResultRowInfo->curPos;
SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid);
taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo);
taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES);
}
// too many time window in query
@ -1892,7 +1892,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t));
pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES);
pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv));
pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize);
@ -2372,10 +2372,11 @@ static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) {
return status;
}
static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
SQueryAttr* pQueryAttr = &pQInfo->query;
size_t t = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList);
for(int32_t i = 0; i < t; ++i) {
static void doUpdateLastKey(SQueryAttr* pQueryAttr) {
STimeWindow* win = &pQueryAttr->window;
size_t num = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList);
for(int32_t i = 0; i < num; ++i) {
SArray* p1 = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i);
size_t len = taosArrayGetSize(p1);
@ -2390,7 +2391,7 @@ static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) {
}
}
static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) {
static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) {
SQueryAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
// in case of point-interpolation query, use asc order scan
@ -2407,6 +2408,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
}
pQueryAttr->needReverseScan = false;
return;
}
@ -2416,7 +2418,8 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
}
doExchangeTimeWindow(pQInfo, &pQueryAttr->window);
pQueryAttr->needReverseScan = false;
doUpdateLastKey(pQueryAttr);
return;
}
@ -2437,20 +2440,22 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
doExchangeTimeWindow(pQInfo, &pQueryAttr->window);
doUpdateLastKey(pQueryAttr);
}
pQueryAttr->order.order = TSDB_ORDER_ASC;
pQueryAttr->needReverseScan = false;
} else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
doExchangeTimeWindow(pQInfo, &pQueryAttr->window);
doUpdateLastKey(pQueryAttr);
}
pQueryAttr->order.order = TSDB_ORDER_DESC;
pQueryAttr->needReverseScan = false;
}
} else { // interval query
@ -2461,20 +2466,22 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo
pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
doExchangeTimeWindow(pQInfo, &pQueryAttr->window);
doUpdateLastKey(pQueryAttr);
}
pQueryAttr->order.order = TSDB_ORDER_ASC;
pQueryAttr->needReverseScan = false;
} else if (onlyLastQuery(pQueryAttr)) {
if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
qDebug(msg, pQInfo, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC,
pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY);
doExchangeTimeWindow(pQInfo, &pQueryAttr->window);
doUpdateLastKey(pQueryAttr);
}
pQueryAttr->order.order = TSDB_ORDER_DESC;
pQueryAttr->needReverseScan = false;
}
}
}
@ -2492,9 +2499,6 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i
while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) {
*ps = ((*ps) << 1u);
}
// pRuntimeEnv->numOfRowsPerPage = ((*ps) - sizeof(tFilePage)) / (*rowsize);
// assert(pRuntimeEnv->numOfRowsPerPage <= MAX_ROWS_PER_RESBUF_PAGE);
}
#define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR)
@ -4524,7 +4528,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
break;
}
case OP_DataBlocksOptScan: {
pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), 1);
pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), pQueryAttr->needReverseScan? 1:0);
break;
}
case OP_TableScan: {
@ -4562,8 +4566,10 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
if (pQInfo->summary.queryProfEvents == NULL) {
qDebug("QInfo:0x%"PRIx64" failed to allocate query prof events array", pQInfo->qId);
}
pQInfo->summary.operatorProfResults =
taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK);
if (pQInfo->summary.operatorProfResults == NULL) {
qDebug("QInfo:0x%"PRIx64" failed to allocate operator prof results hash", pQInfo->qId);
}
@ -4956,7 +4962,6 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime
pInfo->reverseTimes = reverseTime;
pInfo->current = 0;
pInfo->order = pRuntimeEnv->pQueryAttr->order.order;
// pInfo->prevGroupId = -1;
SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo));
pOptr->name = "DataBlocksOptimizedScanOperator";
@ -7665,7 +7670,7 @@ FORCE_INLINE bool checkQIdEqual(void *qHandle, uint64_t qId) {
SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, SExprInfo* pExprs,
SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId,
char* sql, uint64_t *qId, SUdfInfo* pUdfInfo) {
char* sql, uint64_t qId, SUdfInfo* pUdfInfo) {
int16_t numOfCols = pQueryMsg->numOfCols;
int16_t numOfOutput = pQueryMsg->numOfOutput;
@ -7674,7 +7679,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
goto _cleanup_qinfo;
}
pQInfo->qId = *qId;
pQInfo->qId = qId;
pQInfo->runtimeEnv.pUdfInfo = pUdfInfo;
@ -7800,7 +7805,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
tsem_init(&pQInfo->ready, 0, 0);
pQueryAttr->window = pQueryMsg->window;
changeExecuteScanOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery);
updateDataCheckOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery);
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
STimeWindow window = pQueryAttr->window;

View File

@ -98,6 +98,8 @@ typedef struct SIOCostSummary {
int64_t blockLoadTime;
int64_t statisInfoLoadTime;
int64_t checkForNextTime;
int64_t headFileLoad;
int64_t headFileLoadTime;
} SIOCostSummary;
typedef struct STsdbQueryHandle {
@ -1045,6 +1047,9 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
int32_t code = TSDB_CODE_SUCCESS;
*numOfBlocks = 0;
pQueryHandle->cost.headFileLoad += 1;
int64_t s = taosGetTimestampUs();
size_t numOfTables = 0;
if (pQueryHandle->loadType == BLOCK_LOAD_TABLE_SEQ_ORDER) {
code = loadBlockInfo(pQueryHandle, pQueryHandle->activeIndex, numOfBlocks);
@ -1054,6 +1059,9 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
for (int32_t i = 0; i < numOfTables; ++i) {
code = loadBlockInfo(pQueryHandle, i, numOfBlocks);
if (code != TSDB_CODE_SUCCESS) {
int64_t e = taosGetTimestampUs();
pQueryHandle->cost.headFileLoadTime += (e - s);
return code;
}
}
@ -1061,6 +1069,8 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
assert(0);
}
int64_t e = taosGetTimestampUs();
pQueryHandle->cost.headFileLoadTime += (e - s);
return code;
}
@ -3731,8 +3741,8 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
SIOCostSummary* pCost = &pQueryHandle->cost;
tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64,
pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId);
tsdbDebug("%p :io-cost summary: head-file read cnt:%"PRIu64", head-file time:%"PRIu64" us, statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64,
pQueryHandle, pCost->headFileLoad, pCost->headFileLoadTime, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId);
tfree(pQueryHandle);
}
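
The head-file instrumentation added above is the usual counter-plus-stopwatch pattern: bump a counter on entry, take a microsecond timestamp, and accumulate the delta on every exit path, including the early return inside the per-table loop, so the summary printed at cleanup reflects failed reads as well as successful ones. A minimal self-contained sketch, assuming a gettimeofday-based clock — now_us, IOCost, and load_with_cost are illustrative stand-ins for taosGetTimestampUs and the SIOCostSummary fields:

#include <stdint.h>
#include <sys/time.h>

typedef struct {
  uint64_t headFileLoad;      /* number of head-file reads */
  uint64_t headFileLoadTime;  /* total elapsed time, in microseconds */
} IOCost;

/* Stand-in for taosGetTimestampUs(): wall-clock microseconds. */
static int64_t now_us(void) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

/* Every return path adds the elapsed time before handing back the code. */
static int32_t load_with_cost(IOCost *cost) {
  cost->headFileLoad += 1;
  int64_t s = now_us();
  int32_t code = 0;  /* ... perform the actual head-file read here ... */
  cost->headFileLoadTime += (uint64_t)(now_us() - s);
  return code;
}
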

View File

@ -412,6 +412,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, "tag value can not mor
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, "value not found")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, "value type should be boolean, number or string")
TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUEST_JSON_ERROR, "http request json error")
// odbc
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, "out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, "conversion not a valid literal input")

View File

@ -47,9 +47,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return terrno;
}
char rootDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
char vnodeDir[TSDB_FILENAME_LEN] = "\0";
snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId);
if (tfsMkdir(vnodeDir) < 0) {
@ -63,23 +60,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) {
return code;
}
// STsdbCfg tsdbCfg = {0};
// tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
// tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
// tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
// tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
// tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
// tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1;
// tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2;
// tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
// tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock;
// tsdbCfg.precision = pVnodeCfg->cfg.precision;
// tsdbCfg.compression = pVnodeCfg->cfg.compression;
// tsdbCfg.update = pVnodeCfg->cfg.update;
// tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow;
// char tsdbDir[TSDB_FILENAME_LEN] = {0};
// sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId);
if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) {
vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno));
return TSDB_CODE_VND_INIT_FAILED;

View File

@ -75,7 +75,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<version>4.13.1</version>
<scope>test</scope>
</dependency>

View File

@ -87,14 +87,14 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>29.0-jre</version>
<version>30.0-jre</version>
</dependency>
<dependency>

View File

@ -40,7 +40,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<version>4.13.1</version>
<scope>test</scope>
</dependency>

View File

@ -4,6 +4,11 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<groupId>com.taosdata.demo</groupId>
<artifactId>connectionPools</artifactId>
<version>1.0-SNAPSHOT</version>
@ -46,9 +51,9 @@
</dependency>
<!-- log4j -->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.14.1</version>
</dependency>
<!-- proxool -->
<dependency>

View File

@ -5,7 +5,8 @@ import com.taosdata.example.pool.C3p0Builder;
import com.taosdata.example.pool.DbcpBuilder;
import com.taosdata.example.pool.DruidPoolBuilder;
import com.taosdata.example.pool.HikariCpBuilder;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.sql.Connection;
@ -17,7 +18,7 @@ import java.util.concurrent.TimeUnit;
public class ConnectionPoolDemo {
private static Logger logger = Logger.getLogger(DruidPoolBuilder.class);
private static Logger logger = LogManager.getLogger(DruidPoolBuilder.class);
private static final String dbName = "pool_test";
private static String poolType = "hikari";

View File

@ -1,6 +1,7 @@
package com.taosdata.example.common;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.sql.Connection;
@ -10,7 +11,7 @@ import java.util.Random;
public class InsertTask implements Runnable {
private final Random random = new Random(System.currentTimeMillis());
private static final Logger logger = Logger.getLogger(InsertTask.class);
private static final Logger logger = LogManager.getLogger(InsertTask.class);
private final DataSource ds;
private final String dbName;

View File

@ -68,7 +68,7 @@
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
</dependencies>

View File

@ -4,7 +4,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata</groupId>
<artifactId>taosdemo</artifactId>
<version>2.0</version>
<version>2.0.1</version>
<name>taosdemo</name>
<packaging>jar</packaging>
<description>Demo project for TDengine</description>
@ -81,20 +81,20 @@
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.47</version>
<version>8.0.16</version>
<scope>test</scope>
</dependency>
<!-- log4j -->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.14.1</version>
</dependency>
<!-- junit -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<!-- lombok -->

View File

@ -8,7 +8,8 @@ import com.taosdata.taosdemo.service.SqlExecuteTask;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.io.IOException;
@ -20,7 +21,7 @@ import java.util.Map;
public class TaosDemoApplication {
private static final Logger logger = Logger.getLogger(TaosDemoApplication.class);
private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class);
public static void main(String[] args) throws IOException {
// read configuration parameters

View File

@ -1,14 +1,15 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.Map;
public class DatabaseMapperImpl implements DatabaseMapper {
private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class);
private static final Logger logger = LogManager.getLogger(DatabaseMapperImpl.class);
private final JdbcTemplate jdbcTemplate;

View File

@ -3,7 +3,8 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
@ -11,7 +12,7 @@ import java.util.List;
public class SubTableMapperImpl implements SubTableMapper {
private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class);
private static final Logger logger = LogManager.getLogger(SubTableMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
public SubTableMapperImpl(DataSource dataSource) {

View File

@ -2,13 +2,14 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
public class SuperTableMapperImpl implements SuperTableMapper {
private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class);
private static final Logger logger = LogManager.getLogger(SuperTableMapperImpl.class);
private JdbcTemplate jdbcTemplate;
public SuperTableMapperImpl(DataSource dataSource) {

View File

@ -3,13 +3,14 @@ package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.jdbc.core.JdbcTemplate;
import java.util.List;
public class TableMapperImpl implements TableMapper {
private static final Logger logger = Logger.getLogger(TableMapperImpl.class);
private static final Logger logger = LogManager.getLogger(TableMapperImpl.class);
private JdbcTemplate template;
@Override

View File

@ -8,7 +8,8 @@ import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
import org.apache.log4j.Logger;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import javax.sql.DataSource;
import java.util.ArrayList;
@ -20,7 +21,7 @@ import java.util.stream.IntStream;
public class SubTableService extends AbstractService {
private SubTableMapper mapper;
private static final Logger logger = Logger.getLogger(SubTableService.class);
private static final Logger logger = LogManager.getLogger(SubTableService.class);
public SubTableService(DataSource datasource) {
this.mapper = new SubTableMapperImpl(datasource);

View File

@ -247,4 +247,25 @@ if $system_content != @[{"refId":"A","target":"{val1:nil, val2:nil}","datapoints
return -1
endi
sql create table tt (ts timestamp ,i int) tags(j binary(20),k binary(20));
sql insert into t1 using tt tags('jnetworki','t1') values('2020-01-01 00:00:00.000',1)('2020-01-01 00:01:00.000',2)('2020-01-01 00:02:00.000',3)('2020-01-01 00:03:00.000',4)('2020-01-01 00:04:00.000',5);
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%network%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027jnetwo%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then
return -1
endi
system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%networki\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query
print step1-> $system_content
if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -334,10 +334,6 @@ sql select top(x, 20) from (select c1 x from nest_tb0);
sql select bottom(x, 20) from (select c1 x from nest_tb0)
print ===================> complex query
print ===================> group by + having
@ -464,6 +460,28 @@ if $data01 != 0.000083333 then
return -1
endi
print ======================>TD-5271
sql select min(val),max(val),first(val),last(val),count(val),sum(val),avg(val) from (select count(*) val from nest_mt0 group by tbname)
if $rows != 1 then
return -1
endi
if $data00 != 10000 then
return -1
endi
if $data01 != 10000 then
return -1
endi
if $data04 != 10 then
return -1
endi
if $data05 != 100000 then
return -1
endi
print =================>us database interval query, TD-5039
sql create database test precision 'us';
sql use test;