Merge branch 'develop' into docs/Update-Latest-Feature

This commit is contained in:
Elias Soong 2021-01-28 09:36:22 +08:00
commit 2dd8333ccb
715 changed files with 14492 additions and 9670 deletions


@ -270,19 +270,20 @@ matrix:
fi
- make > /dev/null
# - os: osx
# language: c
# compiler: clang
# env: DESC="mac/clang build"
# git:
# - depth: 1
# addons:
# homebrew:
# - cmake
#
# script:
# - cd ${TRAVIS_BUILD_DIR}
# - mkdir debug
# - cd debug
# - cmake .. > /dev/null
# - make > /dev/null
- os: osx
osx_image: xcode11.4
language: c
compiler: clang
env: DESC="mac/clang build"
git:
- depth: 1
addons:
homebrew:
- cmake
script:
- cd ${TRAVIS_BUILD_DIR}
- mkdir debug
- cd debug
- cmake .. > /dev/null
- make > /dev/null


@ -13,7 +13,7 @@ ENDIF ()
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
SET(TD_MQTT TRUE)
SET(TD_MQTT FALSE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_COVER FALSE)
@ -29,6 +29,11 @@ MESSAGE(STATUS "Community directory: " ${TD_COMMUNITY_DIR})
INCLUDE(cmake/input.inc)
INCLUDE(cmake/platform.inc)
IF (TD_WINDOWS OR TD_DARWIN)
SET(TD_SOMODE_STATIC TRUE)
ENDIF ()
INCLUDE(cmake/define.inc)
INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)

Jenkinsfile vendored

@ -41,13 +41,10 @@ def pre_test(){
cd ${WKC}
git checkout develop
git reset --hard HEAD~10
git reset --hard HEAD~10 >/dev/null
git pull
git fetch
git checkout ${CHANGE_BRANCH}
git reset --hard HEAD~10
git pull
git merge develop
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
cd ${WK}
git reset --hard HEAD~10
git checkout develop
@ -87,11 +84,14 @@ pipeline {
steps {
pre_test()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
timeout(time: 90, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
}
}
}
stage('python_2') {
@ -112,12 +112,14 @@ pipeline {
}
stage('test_b1') {
agent{label 'b1'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
date'''
steps {
timeout(time: 90, unit: 'MINUTES'){
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
date'''
}
}
}
@ -137,12 +139,14 @@ pipeline {
./handle_crash_gen_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b2fq
date
'''
timeout(time: 90, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
./test-all.sh b2fq
date
'''
}
}
}
@ -157,12 +161,14 @@ pipeline {
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b3fq
date'''
}
timeout(time: 90, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
./test-all.sh b3fq
date'''
}
}
}
@ -170,5 +176,84 @@ pipeline {
}
}
}
post {
success {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>提交信息:${CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body leftmargin="8" marginwidth="0" topmargin="8" marginheight="4" offset="0">
<table width="95%" cellpadding="0" cellspacing="0" style="font-size: 16pt; font-family: Tahoma, Arial, Helvetica, sans-serif">
<tr>
<td><br />
<b><font color="#0B610B"><font size="6">构建信息</font></font></b>
<hr size="2" width="100%" align="center" /></td>
</tr>
<tr>
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>提交信息:${CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>
</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
to: "${env.CHANGE_AUTHOR_EMAIL}",
from: "support@taosdata.com"
)
}
}
}


@ -126,44 +126,57 @@ cmake .. -DCPUTYPE=aarch32 && cmake --build .
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
If you use the Visual Studio 2019, please open a command window by executing "cmd.exe".
If you use Visual Studio 2019 or 2017:
please open a command window by executing "cmd.exe".
Please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
```cmd
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
Or, you can open a command window by clicking Visual Studio 2019 menu "Tools -> Command Line -> Developer Command Prompt" or "Tools -> Command Line -> Developer PowerShell" then execute commands as follows:
```
Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >", depending on the architecture of your Windows, then execute commands as follows:
```cmd
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
### On Mac OS X platform
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
```shell
mkdir debug && cd debug
cmake .. && cmake --build .
```
# Quick Run
# Quick Run
To quickly start a TDengine server after building, run the command below in terminal:
```cmd
```bash
./build/bin/taosd -c test/cfg
```
In another terminal, use the TDengine shell to connect to the server:
```
```bash
./build/bin/taos -c test/cfg
```
option "-c test/cfg" specifies the system configuration file directory.
# Installing
After building successfully, TDengine can be installed by:
```cmd
```bash
make install
```
Users can find more information about directories installed on the system in the [directory and files](https://www.taosdata.com/en/documentation/administrator/#Directory-and-Files) section. It should be noted that installing from source code does not configure service management for TDengine.


@ -128,6 +128,8 @@ IF (TD_DARWIN_64)
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
ENDIF ()
IF (TD_WINDOWS)
@ -139,6 +141,9 @@ IF (TD_WINDOWS)
SET(CMAKE_GENERATOR "NMake Makefiles" CACHE INTERNAL "" FORCE)
IF (NOT TD_GODLL)
SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-")
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()


@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.16-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.18-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

cmake/version.inc Normal file → Executable file

@ -16,13 +16,25 @@ ENDIF ()
IF (DEFINED GITINFO)
SET(TD_VER_GIT ${GITINFO})
ELSE ()
SET(TD_VER_GIT "community")
execute_process(
COMMAND git log -1 --format=%H
WORKING_DIRECTORY ${TD_COMMUNITY_DIR}
OUTPUT_VARIABLE GIT_COMMITID
)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT ${GIT_COMMITID})
ENDIF ()
IF (DEFINED GITINFOI)
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
ELSE ()
SET(TD_VER_GIT_INTERNAL "internal")
execute_process(
COMMAND git log -1 --format=%H
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMITID
)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
ENDIF ()
IF (DEFINED VERDATE)

deps/CMakeLists.txt vendored

@ -12,4 +12,8 @@ ADD_SUBDIRECTORY(MsvcLibX)
IF (TD_LINUX AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
ENDIF ()
IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()


@ -5,6 +5,10 @@
#include <stdint.h>
#include "gzguts.h"
#ifndef O_BINARY
#define O_BINARY 0
#endif
#if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__)
# define LSEEK _lseeki64
#else
@ -240,9 +244,9 @@ local gzFile gz_open(path, fd, mode)
/* open the file with the appropriate flags (or just use fd) */
state->fd = fd > -1 ? fd : (
#ifdef WIDECHAR
fd == -2 ? _wopen(path, oflag, 0666) :
fd == -2 ? _wopen(path, oflag | O_BINARY, 0666) :
#endif
open((const char *)path, oflag, 0666));
open((const char *)path, oflag | O_BINARY, 0666));
if (state->fd == -1) {
free(state->path);
free(state);

Binary image file added (42 KiB); content not shown.


@ -179,7 +179,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
## **支持平台列表**
## 支持平台列表
### TDengine服务器支持的平台列表


@ -9,7 +9,7 @@
TDengine 采用 SQL 作为查询语言。应用程序可以通过 C/C++, Java, Go, Python 连接器发送 SQL 语句,用户可以通过 TDengine 提供的命令行Command Line Interface, CLI工具 TAOS Shell 手动执行 SQL 即席查询Ad-Hoc Query。TDengine 支持如下查询功能:
- 单列、多列数据查询
- 标签和数值的多种过滤条件:\>, \<, =, \<>, like 等
- 标签和数值的多种过滤条件:>, <, =, <>, like 等
- 聚合结果的分组Group by、排序Order by、约束输出Limit/Offset
- 数值列及聚合结果的四则运算
- 时间戳对齐的连接查询Join Query: 隐式连接)操作


@ -58,26 +58,26 @@ TDengine缺省的时间戳是毫秒精度但通过修改配置参数enableMic
- **创建数据库**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
```
说明:
1) KEEP是该数据库的数据保留多长天数缺省是3650天(10年),数据库会自动删除超过时限的数据;
2) UPDATE 标志数据库支持更新相同时间戳数据;
3) 数据库名最大长度为33
4) 一条SQL 语句的最大长度为65480个字符
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
```
说明:
1) KEEP是该数据库的数据保留多长天数缺省是3650天(10年),数据库会自动删除超过时限的数据;
2) UPDATE 标志数据库支持更新相同时间戳数据;
3) 数据库名最大长度为33
4) 一条SQL 语句的最大长度为65480个字符
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
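例如,下面的语句创建一个数据保留 365 天、且支持相同时间戳更新的数据库(库名与参数取值仅为示意):
```mysql
CREATE DATABASE IF NOT EXISTS power KEEP 365 UPDATE 1;
```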
- **显示系统当前参数**
```mysql
SHOW VARIABLES;
```
```mysql
SHOW VARIABLES;
```
- **使用数据库**
@ -701,13 +701,13 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
功能说明:时间加权平均函数。统计表/超级表中某列在一段时间内的时间加权平均。
功能说明:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
返回结果数据类型双精度浮点数Double。
应用字段不能应用在timestamp、binary、nchar、bool类型字段。
适用于:表、超级表
适用于:表。
- **SUM**
```mysql
@ -1128,11 +1128,8 @@ SELECT function_list FROM stb_name
- WHERE语句可以指定查询的起止时间和其他过滤条件
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种
1. 不进行填充NONE(默认填充模式)。
2. VALUE填充固定值填充此时需要指定填充的数值。例如fill(value, 1.23)。
3. NULL填充使用NULL填充数据。例如fill(null)。
4. PREV填充使用前一个非NULL值填充数据。例如fill(prev)。
说明:


@ -124,10 +124,10 @@ taosd -C
对于一个应用场景可能有多种数据特征的数据并存最佳的设计是将具有相同数据特征的表放在一个库里这样一个应用有多个库而每个库可以配置不同的存储参数从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数如果指定该参数就将覆盖对应的系统配置参数。举例有下述SQL
```
create database demo days 10 cache 32 blocks 8 replica 3;
create database demo days 10 cache 32 blocks 8 replica 3 update 1;
```
该SQL创建了一个库demo, 每个数据文件存储10天数据内存块为32兆字节每个VNODE占用8个内存块副本数为3而其他参数与系统配置完全一致。
该SQL创建了一个库demo, 每个数据文件存储10天数据内存块为32兆字节每个VNODE占用8个内存块副本数为3允许更新,而其他参数与系统配置完全一致。
TDengine集群中加入一个新的dnode时涉及集群相关的一些参数必须与已有集群的配置相同否则不能成功加入到集群中。会进行校验的参数如下


@ -197,7 +197,7 @@ select * from meters where ts > now - 1d and current > 10;
且`restart`是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。
`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。
在同步模式下,如前后两次调用`taos_consume`的时间间隔小于此时间,
在同步模式下,如前后两次调用`taos_consume`的时间间隔小于此时间,
`taos_consume`会阻塞,直到间隔超过此时间。
异步模式下,这个时间是两次调用回调函数的最小时间间隔。
@ -414,7 +414,7 @@ TDengine通过查询函数向用户提供毫秒级的数据获取能力。直接
TDengine分配固定大小的内存空间作为缓存空间缓存空间可根据应用的需求和硬件资源配置。通过适当的设置缓存空间TDengine可以提供极高性能的写入和查询的支持。TDengine中每个虚拟节点virtual node创建时分配独立的缓存池。每个虚拟节点管理自己的缓存池不同虚拟节点间不共享缓存池。每个虚拟节点内部所属的全部表共享该虚拟节点的缓存池。
TDengine将内存池按块划分进行管理数据在内存块里按照列式存储。一个vnode的内存池是在vnode创建时按块分配好,而且每个内存块按照先进先出的原则进行管理。一张表所需要的内存块是从vnode的内存池中进行分配的块的大小由系统配置参数cache决定。每张表最大内存块的数目由配置参数tblocks决定每张表平均的内存块的个数由配置参数ablocks决定。因此对于一个vnode, 总的内存大小为: `cache * ablocks * tables`。内存块参数cache不宜过小一个cache block需要能存储至少几十条以上记录,才会有效率。参数ablocks最小为2保证每张表平均至少能分配两个内存块。
TDengine将内存池按块划分进行管理数据在内存块里是以行row的形式存储。一个vnode的内存池是在vnode创建时按块分配好而且每个内存块按照先进先出的原则进行管理。在创建内存池时块的大小由系统配置参数cache决定每个vnode中内存块的数目则由配置参数blocks决定。因此对于一个vnode总的内存大小为`cache * blocks`。一个cache block需要保证每张表能存储至少几十条以上记录,才会有效率。
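举例说明(参数取值仅为示意):若创建库时指定 cache 为 16 MB、blocks 为 6,则该库每个 vnode 的写入缓存约为 16 MB x 6 = 96 MB:
```mysql
-- 每个 vnode 的缓存总量约为 cache * blocks = 96 MB
create database demo cache 16 blocks 6;
```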
你可以通过函数last_row快速获取一张表或一张超级表的最后一条记录这样很便于在大屏显示各设备的实时状态或采集值。例如


@ -225,7 +225,7 @@ SHOW MNODES;
## Arbitrator的使用
如果副本数为偶数当一个vnode group里一半vnode不工作时是无法从中选出master的。同理一半mnode不工作时是无法选出mnode的master的因为存在“split brain”问题。为解决这个问题TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作但只简单的负责网络连接不处理任何数据插入或访问。只要包含arbitrator在内超过半数的vnode或mnode工作那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形如果一个节点A离线但另外一个节点B正常而且能连接到arbitrator, 那么节点B就能正常工作。
如果副本数为偶数当一个vnode group里一半vnode不工作时是无法从中选出master的。同理一半mnode不工作时是无法选出mnode的master的因为存在“split brain”问题。为解决这个问题TDengine引入了Arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作但只简单的负责网络连接不处理任何数据插入或访问。只要包含Arbitrator在内超过半数的vnode或mnode工作那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形如果一个节点A离线但另外一个节点B正常而且能连接到Arbitrator那么节点B就能正常工作。
TDengine提供一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/)在TDengine Arbitrator Linux一节中选择适合的版本下载并安装。该程序对系统资源几乎没有要求只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号缺省是6042。配置每个taosd实例时可以在配置文件taos.cfg里将参数arbitrator设置为arbitrator的End Point。如果该参数配置了当副本数为偶数时系统将自动连接配置的arbitrator。如果副本数为奇数即使配置了arbitrator, 系统也不会去建立连接。
TDengine提供一个执行程序,名为 tarbitrator找任何一台Linux服务器运行它即可。请点击[安装包下载](https://www.taosdata.com/cn/all-downloads/)在TDengine Arbitrator Linux一节中选择适合的版本下载并安装。该程序对系统资源几乎没有要求只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号缺省是6042。配置每个taosd实例时可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了当副本数为偶数时系统将自动连接配置的Arbitrator。如果副本数为奇数即使配置了Arbitrator系统也不会去建立连接。


@ -1,146 +0,0 @@
#集群安装、管理
多个taosd的运行实例可以组成一个集群以保证TDengine的高可靠运行并提供水平扩展能力。要了解TDengine 2.0的集群管理需要对集群的基本概念有所了解请看TDengine 2.0整体架构一章。
集群的每个节点是由End Point来唯一标识的End Point是由FQDN(Fully Qualified Domain Name)外加Port组成比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname可通过Linux命令“hostname"获取。端口是这个节点对外服务的端口号缺省是6030但可以通过taos.cfg里配置参数serverPort进行修改。
TDengine的集群管理极其简单除添加和删除节点需要人工干预之外其他全部是自动完成最大程度的降低了运维的工作量。本章对集群管理的操作做详细的描述。
##安装、创建第一个节点
集群是由一个一个dnode组成的是从一个dnode的创建开始的。创建第一个节点很简单就按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装、启动即可。
启动后请执行taos, 启动taos shell从shell里执行命令"show dnodes;",如下所示:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time |
=====================================================================================
1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)
taos>
```
上述命令里可以看到这个刚启动的这个节点的End Point是h1.taos.com:6030
## 安装、创建后续节点
将新的节点添加到现有集群,具体有以下几步:
1. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法进行安装但不要启动taosd
2. 如果是使用涛思数据的官方安装包进行安装在安装结束时会询问集群的End Port, 输入第一个节点的End Point即可。如果是源码安装请编辑配置文件taos.cfg(缺省是在/etc/taos/目录),增加一行:
```
firstEp h1.taos.com:6030
```
请注意将示例的“h1.taos.com:6030" 替换为你自己第一个节点的End Point
3. 按照["立即开始“](https://www.taosdata.com/cn/getting-started/)一章的方法启动taosd
4. 在Linux shell里执行命令"hostname"找出本机的FQDN, 假设为h2.taos.com。如果无法找到可以查看taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录)fqdn以及port都会打印出来。
5. 在第一个节点使用CLI程序taos, 登录进TDengine系统, 使用命令:
```
CREATE DNODE "h2.taos.com:6030";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**否则出错。请注意将示例的“h2.taos.com:6030" 替换为你自己第一个节点的End Point
6. 使用命令
```
SHOW DNODES;
```
查看新节点是否被成功加入。
按照上述步骤可以源源不断的将新的节点加入到集群。
**提示:**
- firstEp, secondEp这两个参数仅仅在该节点第一次加入集群时有作用加入集群后该节点会保存最新的mnode的End Point列表不再依赖这两个参数。
- 两个没有配置first, second参数的dnode启动后会独立运行起来。这个时候无法将其中一个节点加入到另外一个节点形成集群。**无法将两个独立的集群合并成为新的集群**。
##节点管理
###添加节点
执行CLI程序taos, 使用root账号登录进系统, 执行:
```
CREATE DNODE "fqdn:port";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**否则出错。一个节点对外服务的fqdn和port可以通过配置文件taos.cfg进行配置缺省是自动获取。
###删除节点
执行CLI程序taos, 使用root账号登录进TDengine系统执行
```
DROP DNODE "fqdn:port";
```
其中fqdn是被删除的节点的FQDNport是其对外服务器的端口号
###查看节点
执行CLI程序taos,使用root账号登录进TDengine系统执行
```
SHOW DNODES;
```
它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等vnode数目还未使用的vnode数目等信息。在添加或删除一个节点后可以使用该命令查看。
如果集群配置了Arbitrator那么它也会在这个节点列表中显示出来其role列的值会是“arb”。
###查看虚拟节点组
为充分利用多核技术并提供scalability数据需要分片处理。因此TDengine会将一个DB的数据切分成多份存放在多个vnode里。这些vnode可能分布在多个dnode里这样就实现了水平扩展。一个vnode仅仅属于一个DB但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况自动进行分配的无需任何人工干预。
执行CLI程序taos,使用root账号登录进TDengine系统执行
```
SHOW VGROUPS;
```
##高可用性
TDengine通过多副本的机制来提供系统的高可用性。副本数是与DB关联的一个集群里可以有多个DB根据运营的需求每个DB可以配置不同的副本数。创建数据库时通过参数replica 指定副本数缺省为1。如果副本数为1系统的可靠性无法保证只要数据所在的节点宕机就将无法提供服务。集群的节点数必须大于等于副本数否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo
```
CREATE DATABASE demo replica 3;
```
一个DB里的数据会被切片分到多个vnode groupvnode group里的vnode数目就是DB的副本数同一个vnode group里各vnode的数据是完全一致的。为保证高可用性vnode group里的vnode一定要分布在不同的dnode里实际部署时需要在不同的物理机上只要一个vgroup里超过半数的vnode处于工作状态这个vgroup就能正常的对外服务。
一个dnode里可能有多个DB的数据因此一个dnode离线时可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作那么该vnode group就无法对外服务无法插入或读取数据这样会影响到它所属的DB的一部分表的d读写操作。
因为vnode的引入无法简单的给出结论“集群中过半dnode工作集群就应该工作”。但是对于简单的情形很好下结论。比如副本数为3只有三个dnode那如果仅有一个节点不工作整个集群还是可以正常工作的但如果有两个节点不工作那整个集群就无法正常工作了。
##Mnode的高可用
TDengine集群是由mnode (taosd的一个模块逻辑节点) 负责管理的为保证mnode的高可用可以配置多个mnode副本副本数由系统配置参数numOfMnodes决定有效范围为1-3。为保证元数据的强一致性mnode副本之间是通过同步的方式进行数据复制的。
一个集群有多个dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下哪个dnode可以作为mnode呢这是完全由系统根据整个系统资源情况自动指定的。用户可通过CLI程序taos在TDengine的console里执行如下命令
```
SHOW MNODES;
```
来查看mnode列表该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。
当集群中第一个节点启动时该节点一定会运行一个mnode实例否则该dnode无法正常工作因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2启动第二个dnode时该dnode也将运行一个mnode实例。
为保证mnode服务的高可用性numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的如果numOfMnodes大于2复制参数quorum自动设为2也就是说至少要保证有两个副本写入数据成功才通知客户端应用写入成功。
##负载均衡
有三种情况,将触发负载均衡,而且都无需人工干预。
- 当一个新节点添加进集群时,系统将自动触发负载均衡,一些节点上的数据将被自动转移到新节点上,无需任何人工干预。
- 当一个节点从集群中移除时,系统将自动把该节点上的数据转移到其他节点,无需任何人工干预。
- 如果一个节点过热数据量过大系统将自动进行负载均衡将该节点的一些vnode自动挪到其他节点。
当上述三种情况发生时,系统将启动一各个节点的负载计算,从而决定如何挪动。
##节点离线处理
如果一个节点离线TDengine集群将自动检测到。有如下两种情况
- 改节点离线超过一定时间taos.cfg里配置参数offlineThreshold控制时长),系统将自动把该节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的节点重现上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
- 离线后在offlineThreshold的时长内重新上线系统将自动启动数据恢复流程等数据完全恢复后该节点将开始正常工作。
##Arbitrator的使用
如果副本数为偶数当一个vnode group里一半vnode不工作时是无法从中选出master的。同理一半mnode不工作时是无法选出mnode的master的因为存在“split brain”问题。为解决这个问题TDengine引入了arbitrator的概念。Arbitrator模拟一个vnode或mnode在工作但只简单的负责网络连接不处理任何数据插入或访问。只要包含arbitrator在内超过半数的vnode或mnode工作那么该vnode group或mnode组就可以正常的提供数据插入或查询服务。比如对于副本数为2的情形如果一个节点A离线但另外一个节点B正常而且能连接到arbitrator, 那么节点B就能正常工作。
TDengine安装包里带有一个执行程序tarbitrator, 找任何一台Linux服务器运行它即可。该程序对系统资源几乎没有要求只需要保证有网络连接即可。该应用的命令行参数`-p`可以指定其对外服务的端口号缺省是6030。配置每个taosd实例时可以在配置文件taos.cfg里将参数arbitrator设置为Arbitrator的End Point。如果该参数配置了当副本数为偶数时系统将自动连接配置的Arbitrator。
在配置了Arbitrator的情况下它也会显示在“show dnodes;”指令给出的节点列表中。


@ -1,62 +1,62 @@
# Java Connector
Java连接器支持的系统有
| **CPU类型** | x6464bit | | | ARM64 | ARM32 |
| ------------ | ------------ | -------- | -------- | -------- | -------- |
| **OS类型** | Linux | Win64 | Win32 | Linux | Linux |
| **支持与否** | **支持** | **支持** | **支持** | **支持** | **支持** |
TDengine 提供了遵循 JDBC 标准3.0API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository][1] 搜索下载。
Java连接器的使用请参见<a href=https://www.taosdata.com/blog/2020/11/11/1955.html>视频教程</a>
`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTfultaos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful。 JDBC-JNI 通过调用客户端 libtaos.so或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。
TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。
![tdengine-connector](../assets/tdengine-jdbc-connector.png)
由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式:
* libtaos.so
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* JDBC-JNIJava 应用在物理节点1pnode1上使用 JDBC-JNI 的 API ,直接调用客户端 APIlibtaos.so 或 taos.dll将写入和查询请求发送到位于物理节点2pnode2上的 taosd 实例。
* RESTful应用将 SQL 发送给位于物理节点2pnode2上的 RESTful 连接器,再调用客户端 APIlibtaos.so
* JDBC-RESTfulJava 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求发送给物理节点2的 RESTful 连接器。
* taos.dll
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点:
> 注意:在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点:
* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。
* 由于不支持删除和修改,所以也不支持事务操作。
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query),对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询TSDBJDBCDriver 则会自动关闭上一个 ResultSet。
* 目前不支持嵌套查询nested query
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询taos-jdbcdriver 会自动关闭上一个 ResultSet。
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| --- | --- | --- |
| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
## JDBC-JNI和JDBC-RESTful的对比
## TDengine DataType 和 Java DataType
<table >
<tr align="center"><th>对比项</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
<tr align="center">
<td>支持的操作系统</td>
<td>linux、windows</td>
<td>全平台</td>
</tr>
<tr align="center">
<td>是否需要安装 client</td>
<td>需要</td>
<td>不需要</td>
</tr>
<tr align="center">
<td>server 升级后是否需要升级 client</td>
<td>需要</td>
<td>不需要</td>
</tr>
<tr align="center">
<td>写入性能</td>
<td colspan="2">JDBC-RESTful 是 JDBC-JNI 的 50%90% </td>
</tr>
<tr align="center">
<td>查询性能</td>
<td colspan="2">JDBC-RESTful 与 JDBC-JNI 没有差别</td>
</tr>
</table>
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## 如何获取 TAOS-JDBCDriver
## 如何获取 taos-jdbcdriver
### maven 仓库
目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@ -67,30 +67,63 @@ maven 项目中使用如下 pom.xml 配置即可:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
<version>2.0.18</version>
</dependency>
```
### 源码编译打包
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。
下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。
## 使用说明
## JDBC的使用说明
### 获取连接
#### 通过JdbcUrl获取连接
#### 指定URL获取连接
通过指定URL获取连接如下所示
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例,使用 **JDBC-RESTful** 的 driver建立了到 hostname 为 taosdemo.com端口为 6041数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要:
1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”
2. jdbcUrl 以“jdbc:TAOS-RS://”开头;
3. 使用 6041 作为连接端口。
如果希望获得更好的写入和查询性能Java 应用可以使用 **JDBC-JNI** 的driver如下所示
通过指定的jdbcUrl获取连接如下所示
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
以上示例建立了到hostname为taosdemo.com端口为6030TDengine的默认端口数据库名为test的连接。这个url中指定用户名user为root密码password为taosdata。
以上示例,使用了 JDBC-JNI 的 driver建立了到 hostname 为 taosdemo.com端口为 6030TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名user为 root密码password为 taosdata。
**注意**:使用 JDBC-JNI 的 drivertaos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。
* libtaos.so
在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so该目录包含在 Linux 自动扫描路径上,无需单独指定。
* taos.dll
在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14]Linux 服务器安装完 TDengine 之后默认已安装 client也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。
JDBC-JNI 的使用请参见<a href=https://www.taosdata.com/blog/2020/11/11/1955.html>视频教程</a>
TDengine 的 JDBC URL 规范格式为:
`jdbc:TAOS://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
url中的配置参数如下
* user登录 TDengine 用户名,默认值 root。
* password用户登录密码默认值 taosdata。
@ -99,13 +132,17 @@ url中的配置参数如下
* locale客户端语言环境默认值系统当前 locale。
* timezone客户端使用的时区默认值为系统当前时区。
#### 使用JdbcUrl和Properties获取连接
除了通过指定的jdbcUrl获取连接还可以使用Properties指定建立连接时的参数如下所示
#### 指定URL和Properties获取连接
除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
// Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@ -114,9 +151,10 @@ public Connection getConn() throws Exception{
return conn;
}
```
以上示例建立一个到hostname为taosdemo.com端口为6030数据库名为test的连接。这个连接在url中指定了用户名(user)为root密码password为taosdata并在connProps中指定了使用的字符集、语言环境、时区等信息。
properties中的配置参数如下
以上示例,建立一个到 hostname 为 taosdemo.com端口为 6030数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root密码password为 taosdata并在 connProps 中指定了使用的字符集、语言环境、时区等信息。
properties 中的配置参数如下:
* TSDBDriver.PROPERTY_KEY_USER登录 TDengine 用户名,默认值 root。
* TSDBDriver.PROPERTY_KEY_PASSWORD用户登录密码默认值 taosdata。
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR客户端配置文件目录路径Linux OS 上默认值 /etc/taos Windows OS 上默认值 C:/TDengine/cfg。
@ -124,10 +162,14 @@ properties中的配置参数如下
* TSDBDriver.PROPERTY_KEY_LOCALE客户端语言环境默认值系统当前 locale。
* TSDBDriver.PROPERTY_KEY_TIME_ZONE客户端使用的时区默认值为系统当前时区。
#### 使用客户端配置文件建立连接
当使用JDBC连接TDengine集群时可以使用客户端配置文件在客户端配置文件中指定集群的firstEp、secondEp参数。
当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。
如下所示:
1. 在java中不指定hostname和port
1. 在 Java 应用中不指定 hostname 和 port
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
@ -140,7 +182,7 @@ public Connection getConn() throws Exception{
return conn;
}
```
2. 在配置文件中指定firstEp和secondEp
2. 在配置文件中指定 firstEp secondEp
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
@ -155,17 +197,19 @@ secondEp cluster_node2:6030
# locale en_US.UTF-8
```
以上示例jdbc会使用客户端的配置文件建立到hostname为cluster_node1端口为6030数据库名为test的连接。当集群中firstEp节点失效时JDBC会尝试使用secondEp连接集群。
TDengine中只要保证firstEp和secondEp中一个节点有效就可以正常建立到集群的连接。
以上示例jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时JDBC 会尝试使用 secondEp 连接集群。
TDengine 中,只要保证 firstEp secondEp 中一个节点有效,就可以正常建立到集群的连接。
> 注意这里的配置文件指的是调用JDBC Connector的应用程序所在机器上的配置文件Linux OS 上默认值 /etc/taos/taos.cfg Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件Linux OS 上默认值 /etc/taos/taos.cfg Windows OS 上默认值 C://TDengine/cfg/taos.cfg。
#### 配置参数的优先级
通过以上3种方式获取连接如果配置参数在url、Properties、客户端配置文件中有重复则参数的`优先级由高到低`分别如下:
通过以上 3 种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复则参数的`优先级由高到低`分别如下:
1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。
2. Properties connProps
3. 客户端配置文件 taos.cfg
例如在url中指定了password为taosdata在Properties中指定了password为taosdemo那么JDBC会使用url中的password建立连接。
例如:在 url 中指定了 password 为 taosdata在 Properties 中指定了 password 为 taosdemo那么JDBC 会使用 url 中的 password 建立连接。
> 更多详细配置请参考[客户端配置][13]
@ -183,6 +227,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。
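例如,两种引用方式如下(库名 db、表名 tb 沿用上文示例):
```mysql
-- 未执行 use db 时,需要带库名前缀访问表
select * from db.tb;
-- 执行 use db 之后,可直接使用表名
use db;
select * from tb;
```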
### 插入数据
@ -193,6 +238,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
> now 为系统内部函数,默认为服务器当前时间。
> `now + 1s` 代表服务器当前时间往后加 1 秒数字后面代表时间单位a(毫秒), s(秒), m(分), h(小时), d(天)w(周), n(月), y(年)。
@ -214,6 +260,7 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。
### 订阅
@ -248,7 +295,7 @@ while(true) {
}
```
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的`Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
#### 关闭订阅
@ -265,8 +312,11 @@ resultSet.close();
stmt.close();
conn.close();
```
> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
## 与连接池使用
**HikariCP**
@ -284,16 +334,17 @@ conn.close();
```java
public static void main(String[] args) throws SQLException {
HikariConfig config = new HikariConfig();
// jdbc properties
config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
config.setUsername("root");
config.setPassword("taosdata");
config.setMinimumIdle(3); //minimum number of idle connection
// connection pool configurations
config.setMinimumIdle(10); //minimum number of idle connection
config.setMaximumPoolSize(10); //maximum number of connection in the pool
config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool
config.setIdleTimeout(60000); // max idle time for recycle idle connection
config.setConnectionTestQuery("describe log.dn"); //validation query
config.setValidationTimeout(3000); //validation query timeout
config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool
config.setMaxLifetime(0); // maximum life time for each connection
config.setIdleTimeout(0); // max idle time for recycle idle connection
config.setConnectionTestQuery("select server_status()"); //validation query
HikariDataSource ds = new HikariDataSource(config); //create datasource
@ -306,6 +357,7 @@ conn.close();
connection.close(); // put back to connection pool
}
```
> 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。
> 更多 HikariCP 使用问题请查看[官方说明][5]
@ -324,38 +376,29 @@ conn.close();
* 使用示例如下:
```java
public static void main(String[] args) throws Exception {
Properties properties = new Properties();
properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver");
properties.put("url","jdbc:TAOS://127.0.0.1:6030/log");
properties.put("username","root");
properties.put("password","taosdata");
properties.put("maxActive","10"); //maximum number of connection in the pool
properties.put("initialSize","3");//initial number of connection
properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool
properties.put("minIdle","3");//minimum number of connection in the pool
properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection
properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle
properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle
properties.put("validationQuery","describe log.dn"); //validation query
properties.put("testWhileIdle","true"); // test connection while idle
properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true
properties.put("testOnReturn","false"); // don't need while testWhileIdle is true
//create druid datasource
DataSource ds = DruidDataSourceFactory.createDataSource(properties);
Connection connection = ds.getConnection(); // get connection
DruidDataSource dataSource = new DruidDataSource();
// jdbc properties
dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
dataSource.setUrl(url);
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
// pool configurations
dataSource.setInitialSize(10);
dataSource.setMinIdle(10);
dataSource.setMaxActive(10);
dataSource.setMaxWait(30000);
dataSource.setValidationQuery("select server_status()");
Connection connection = dataSource.getConnection(); // get connection
Statement statement = connection.createStatement(); // get statement
//query or insert
// ...
connection.close(); // put back to connection pool
}
```
> 更多 druid 使用问题请查看[官方说明][6]
**注意事项**
@ -370,18 +413,51 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
## 与框架使用
* Spring JdbcTemplate 中使用 taos-jdbcdriver可参考 [SpringJdbcTemplate][11]
* Springboot + Mybatis 中使用,可参考 [springbootdemo][12]
## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.2 | 1.6.1.x 及以上 | 1.8.x |
| 1.0.1 | 1.6.1.x 及以上 | 1.8.x |
## TDengine DataType 和 Java DataType
TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT | java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## 常见问题
* java.lang.UnsatisfiedLinkError: no taos in java.library.path
**原因**:程序没有找到依赖的本地函数库 taos。
**解决方法**windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
**解决方法**windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@ -406,3 +482,4 @@ Query OK, 1 row(s) in set (0.000141s)
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B


@ -85,7 +85,9 @@ TDengine还没有一组专用的validation queries。然而建议你使用系统
## 9. 我可以删除或更新一条记录吗?
不能。因为TDengine是为联网设备采集的数据设计的不容许修改。但TDengine提供数据保留策略只要数据记录超过保留时长就会被自动删除。
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
从 2.0.8.0 开始TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
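下面给出该流程的一个最简示例(库名、表名与数值仅为示意):
```mysql
-- 创建数据库时必须带 UPDATE 1 参数,更新功能才会生效
CREATE DATABASE demo UPDATE 1;
USE demo;
CREATE TABLE t (ts TIMESTAMP, speed INT);
INSERT INTO t VALUES ('2021-01-28 10:00:00', 1);
-- 再次写入相同时间戳的数据,即可将该行的值更新为 2
INSERT INTO t VALUES ('2021-01-28 10:00:00', 2);
```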
## 10. 我怎么创建超过1024列的表
@ -132,8 +134,3 @@ TDengine是根据hostname唯一标志一台机器的在数据文件从机器A
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下修复dnodeEps.json的dnodeId对应的FQDN重启。确保机器内所有机器的此文件是完全相同的。
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
## 17. TDengine 是否支持删除或更新已经写入的数据?
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
从 2.0.8.0 开始TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。


@ -222,7 +222,7 @@ MQTT是一流行的物联网数据传输协议TDengine 可以很方便的接
## EMQ Broker 直接写入
<a href="https://github.com/emqx/emqx">EMQ</a>是一开源的MQTT Broker软件无需任何代码只需要在EMQ Dashboard里使用“规则”做简单配置即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine也在企业版上提供原生的 TDEngine 驱动实现直接保存。详细使用方法请参考<a href="https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine">EMQ 官方文档</a>
<a href="https://github.com/emqx/emqx">EMQ</a>是一开源的MQTT Broker软件无需任何代码只需要在EMQ Dashboard里使用“规则”做简单配置即可将MQTT的数据直接写入TDengine。EMQ X 支持通过 发送到 Web 服务 的方式保存数据到 TDengine也在企业版上提供原生的 TDengine 驱动实现直接保存。详细使用方法请参考<a href="https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine">EMQ 官方文档</a>
## HiveMQ Broker 直接写入


@ -265,9 +265,6 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1
# enable/disable stream (continuous query)
# stream 1


@ -43,6 +43,7 @@ mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
echo "" > ${pkg_dir}${install_home_path}/email
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script


@ -1,14 +1,16 @@
FROM ubuntu:20.04
FROM ubuntu:18.04
WORKDIR /root
ARG version
RUN echo $version
COPY tdengine.tar.gz /root/
RUN tar -zxf tdengine.tar.gz
WORKDIR /root/TDengine-server-$version/
RUN /bin/bash install.sh -e no
ARG pkgFile
ARG dirName
RUN echo ${pkgFile}
RUN echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/${dirName}/
RUN /bin/bash install.sh -e no
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
ENV LANG=en_US.UTF-8


@ -0,0 +1,44 @@
#!/bin/bash
set -e
#set -x
# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# set parameters by default value
verNumber=""
passWord=""
while getopts "hn:p:" arg
do
case $arg in
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "verNumber=${verNumber}"
docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${verNumber}
# how set latest version ???


@ -1,5 +0,0 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine-aarch64:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine-aarch64:$1


@ -1,5 +1,63 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine:$1
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -f [pkg file]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=amd64
verNumber=""
passWord=""
pkgFile=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
f)
#echo "pkgFile=$OPTARG"
pkgFile=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -f [pkg file] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
dirName=${pkgFile%-Linux*}
#echo "dirName=${dirName}"
docker build --rm -f "Dockerfile" -t tdengine/tdengine-${cpuType}:${verNumber} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${cpuType}:${verNumber}
# set this version to latest version
docker tag tdengine/tdengine-${cpuType}:${verNumber} tdengine/tdengine-${cpuType}:latest
docker push tdengine/tdengine-${cpuType}:latest


@ -0,0 +1,56 @@
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknown option
echo "unknown argument"
exit 1
;;
esac
done
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}


@ -51,6 +51,7 @@ mkdir -p %{buildroot}%{homepath}/include
mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script
echo "" > %{buildroot}%{homepath}/email
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script


@ -21,7 +21,7 @@ else
cd ${script_dir}
script_dir="$(pwd)"
data_dir="/var/lib/taos"
log_dir="~/TDengineLog"
log_dir=~/TDengine/log
fi
log_link_dir="/usr/local/taos/log"


@ -24,7 +24,7 @@ data_dir="/var/lib/taos"
if [ "$osType" != "Darwin" ]; then
log_dir="/var/log/taos"
else
log_dir="~/TDengineLog"
log_dir=~/TDengine/log
fi
data_link_dir="/usr/local/taos/data"
@ -178,7 +178,9 @@ function install_bin() {
function install_lib() {
# Remove links
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${lib64_link_dir}/libtaos.* || :
fi
if [ "$osType" != "Darwin" ]; then
${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
@ -190,12 +192,14 @@ function install_lib() {
${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so
fi
else
${csudo} cp ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.1.dylib
${csudo} cp -Rf ${binary_dir}/build/lib/libtaos.* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib
${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib
fi
${csudo} ldconfig
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
fi
}
function install_header() {


@ -0,0 +1,144 @@
# Usage:
# sudo gdb -x ./taosd-dump-cfg.gdb
define attach_pidof
if $argc != 1
help attach_pidof
else
shell echo -e "\
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
if \$PID > 0\n\
attach "$(pidof -s $arg0)"\n\
else\n\
print \"Process '"$arg0"' not found\"\n\
end" > /tmp/gdb.pidof
source /tmp/gdb.pidof
end
end
document attach_pidof
Attach to process by name
Usage: attach_pidof PROG_NAME
end
set $TAOS_CFG_VTYPE_INT8 = 0
set $TAOS_CFG_VTYPE_INT16 = 1
set $TAOS_CFG_VTYPE_INT32 = 2
set $TAOS_CFG_VTYPE_FLOAT = 3
set $TAOS_CFG_VTYPE_STRING = 4
set $TAOS_CFG_VTYPE_IPSTR = 5
set $TAOS_CFG_VTYPE_DIRECTORY = 6
set $TSDB_CFG_CTYPE_B_CONFIG = 1U
set $TSDB_CFG_CTYPE_B_SHOW = 2U
set $TSDB_CFG_CTYPE_B_LOG = 4U
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
set $TSDB_CFG_CTYPE_B_OPTION = 16U
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U
set $TSDB_CFG_PRINT_LEN = 53
define print_blank
if $argc == 1
set $blank_len = $arg0
while $blank_len > 0
printf "%s", " "
set $blank_len = $blank_len - 1
end
end
end
define dump_cfg
if $argc != 1
help dump_cfg
else
set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
if $blen < 0
set $blen = 0
end
#printf "%s: %d\n", "******blen: ", $blen
printf "%s: ", $arg0.option
print_blank $blen
if $arg0.valType == $TAOS_CFG_VTYPE_INT8
printf "%d\n", *((int8_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT16
printf "%d\n", *((int16_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT32
printf "%d\n", *((int32_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
printf "%f\n", *((float *) $arg0.ptr)
else
printf "%s\n", $arg0.ptr
end
end
end
end
end
end
document dump_cfg
Dump a cfg entry
Usage: dump_cfg cfg
end
set pagination off
attach_pidof taosd
set $idx=0
#print tsGlobalConfigNum
#set $end=$1
set $end=tsGlobalConfigNum
p "*=*=*=*=*=*=*=*=*= taos global config:"
#while ($idx .lt. $end)
while ($idx < $end)
# print tsGlobalConfig[$idx].option
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
# p "1"
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
set $idx=0
p "*=*=*=*=*=*=*=*=*= taos local config:"
while ($idx < $end)
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
detach
quit


@ -8,6 +8,4 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)
ADD_LIBRARY(balance ${SRC})
ENDIF ()
ADD_LIBRARY(balance ${SRC})


@ -24,7 +24,7 @@ extern "C" {
int32_t bnInitThread();
void bnCleanupThread();
void bnNotify();
void bnStartTimer(int64_t mseconds);
void bnStartTimer(int32_t mseconds);
#ifdef __cplusplus
}


@ -30,7 +30,7 @@
#include "mnodeVgroup.h"
extern int64_t tsDnodeRid;
extern int64_t tsSdbRid;
extern int32_t tsSdbRid;
static SBnMgmt tsBnMgmt;
static void bnMonitorDnodeModule();


@ -271,23 +271,23 @@ static int32_t bnRetrieveScores(SShowObj *pShow, char *data, int32_t rows, void
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = systemScore;
*(float *)pWrite = (float)systemScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = pDnode->customScore;
*(float *)pWrite = (float)pDnode->customScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)moduleScore;
*(float *)pWrite = (float)moduleScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)vnodeScore;
*(float *)pWrite = (float)vnodeScore;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(float *)pWrite = (int32_t)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
*(float *)pWrite = (float)(vnodeScore + moduleScore + pDnode->customScore + systemScore);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;


@ -56,7 +56,7 @@ int32_t bnInitThread() {
pthread_attr_destroy(&thattr);
if (ret != 0) {
mError("failed to create balance thread since %s", strerror(errno));
mError("failed to create balance thread since %s", strerror(ret));
return -1;
}
@ -119,13 +119,13 @@ static void bnProcessTimer(void *handle, void *tmrId) {
}
}
void bnStartTimer(int64_t mseconds) {
void bnStartTimer(int32_t mseconds) {
if (tsBnThread.stop) return;
bool updateSoon = (mseconds != -1);
if (updateSoon) {
mTrace("balance function will be called after %" PRId64 " ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)mseconds, tsMnodeTmr, &tsBnThread.timer);
mTrace("balance function will be called after %d ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)(int64_t)mseconds, tsMnodeTmr, &tsBnThread.timer);
} else {
taosTmrReset(bnProcessTimer, tsStatusInterval * 1000, NULL, tsMnodeTmr, &tsBnThread.timer);
}


@ -28,6 +28,28 @@ IF (TD_LINUX)
ADD_SUBDIRECTORY(tests)
ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
# set the static lib name
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static common query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
SET_TARGET_PROPERTIES(taos_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos common query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)
#set version of .dylib
#VERSION dylib version
#SOVERSION dylib version
#MESSAGE(STATUS "build version ${TD_VER_NUMBER}")
SET_TARGET_PROPERTIES(taos PROPERTIES VERSION ${TD_VER_NUMBER} SOVERSION 1)
ADD_SUBDIRECTORY(tests)
ELSEIF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/windows/win32)
@ -49,12 +71,12 @@ ELSEIF (TD_DARWIN)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/jni/linux)
ADD_LIBRARY(taos_static STATIC ${SRC})
TARGET_LINK_LIBRARIES(taos_static trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos_static query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos_static PROPERTIES OUTPUT_NAME "taos_static")
# generate dynamic library (*.dylib)
ADD_LIBRARY(taos SHARED ${SRC})
TARGET_LINK_LIBRARIES(taos trpc tutil pthread m)
TARGET_LINK_LIBRARIES(taos query trpc tutil pthread m)
SET_TARGET_PROPERTIES(taos PROPERTIES CLEAN_DIRECT_OUTPUT 1)


@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
tFilePage filePage;
} SLocalDataSource;
typedef struct SLocalReducer {
typedef struct SLocalMerger {
SLocalDataSource ** pLocalDataSrc;
int32_t numOfBuffer;
int32_t numOfCompleted;
@ -62,7 +62,7 @@ typedef struct SLocalReducer {
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
} SLocalReducer;
} SLocalMerger;
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
@ -89,10 +89,10 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
/*
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);
void tscDestroyLocalMerger(SSqlObj *pSql);
int32_t tscDoLocalMerge(SSqlObj *pSql);


@ -47,7 +47,6 @@ void tscLockByThread(int64_t *lockedBy);
void tscUnlockByThread(int64_t *lockedBy);
#ifdef __cplusplus
}
#endif


@ -98,20 +98,19 @@ static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t sub
return pCmd->pQueryInfo[subClauseIndex];
}
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, int16_t bytes,
uint32_t offset);
void* tscDestroyBlockArrayList(SArray* pDataBlockList);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable);
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
/**
@ -142,10 +141,6 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
/* use for keep current db info temporarily, for handle table with db prefix */
// todo remove it
void tscGetDBInfoFromTableFullName(char* tableId, char* db);
int tscAllocPayload(SSqlCmd* pCmd, int size);
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes);
@ -215,8 +210,8 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
@ -225,7 +220,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
@ -292,7 +287,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
STableMeta* tscTableMetaClone(STableMeta* pTableMeta);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
void* malloc_throw(size_t size);


@ -22,15 +22,15 @@ extern "C" {
#include "os.h"
#include "qAggMain.h"
#include "taos.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tglobal.h"
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "tglobal.h"
#include "tref.h"
#include "tutil.h"
#include "qExecutor.h"
#include "qSqlparser.h"
@ -39,7 +39,7 @@ extern "C" {
// forward declaration
struct SSqlInfo;
struct SLocalReducer;
struct SLocalMerger;
// data source from sql string or from file
enum {
@ -67,7 +67,7 @@ typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name
} CChildTableMeta;
typedef struct STableMeta {
@ -91,7 +91,7 @@ typedef struct STableMetaInfo {
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
char name[TSDB_TABLE_FNAME_LEN]; // (super) table name
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
@ -142,7 +142,7 @@ typedef struct SCond {
} SCond;
typedef struct SJoinNode {
char tableId[TSDB_TABLE_FNAME_LEN];
char tableName[TSDB_TABLE_FNAME_LEN];
uint64_t uid;
int16_t tagColId;
} SJoinNode;
@ -176,7 +176,7 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct STableDataBlocks {
char tableName[TSDB_TABLE_FNAME_LEN];
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgId; // virtual group id
@ -223,6 +223,8 @@ typedef struct SQueryInfo {
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
int16_t resColumnId; // result column id
bool distinctTag; // distinct tag or not
} SQueryInfo;
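
The new distinctTag flag marks SELECT DISTINCT queries on a tag column; it is consulted later in needToMerge() so that such queries still go through the merge step instead of being treated as plain projection output. As a rough sketch only — connection setup and the database/table/tag names below are made-up placeholders, not part of this change — such a query is issued through the public C client API like this:

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      taos_init();
      // placeholder connection parameters; adjust for a real deployment
      TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 0);
      if (taos == NULL) {
        fprintf(stderr, "failed to connect\n");
        return 1;
      }
      // hypothetical super table "meters" with tag column "location"
      TAOS_RES *res = taos_query(taos, "SELECT DISTINCT location FROM meters");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
      }
      taos_free_result(res);
      taos_close(taos);
      return 0;
    }
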
typedef struct {
@ -254,7 +256,7 @@ typedef struct {
int8_t submitSchema; // submit block is built with table schema
STagData tagData; // NOTE: pTagData->data is used as a variable-length array
char **pTableNameList; // all involved tableMeta list of current insert sql statement.
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
int32_t numOfTables;
SHashObj *pTableBlockHashList; // data block for each table
@ -292,7 +294,7 @@ typedef struct {
SColumnIndex* pColumnIndex;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalReducer *pLocalReducer;
struct SLocalMerger *pLocalMerger;
} SSqlRes;
typedef struct STscObj {
@ -317,7 +319,8 @@ typedef struct STscObj {
} STscObj;
typedef struct SSubqueryState {
int32_t numOfRemain; // the number of remain unfinished subquery
pthread_mutex_t mutex;
int8_t *states;
int32_t numOfSub; // the number of total sub-queries
uint64_t numOfRetrievedRows; // total number of points in this query
} SSubqueryState;
@ -410,7 +413,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscResetSqlCmdObj(SSqlCmd *pCmd);
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
/**
* free query result of the sql object
@ -435,7 +438,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql);
void tscImportDataFromFile(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);


@ -351,7 +351,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
pCmd->parseFinished = false;
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, true);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {


@ -569,10 +569,12 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
}
char fullName[TSDB_TABLE_FNAME_LEN * 2] = {0};
extractDBName(pTableMetaInfo->name, fullName);
tNameGetDbName(&pTableMetaInfo->name, fullName);
extractTableName(pMeta->sTableName, param->sTableName);
snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName);
extractTableName(pTableMetaInfo->name, param->buf);
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
@ -602,6 +604,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -675,8 +678,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pTableMetaInfo->pTableMeta != NULL);
char tableName[TSDB_TABLE_NAME_LEN] = {0};
extractTableName(pTableMetaInfo->name, tableName);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@ -712,7 +714,9 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
extractTableName(pTableMetaInfo->name, param->buf);
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
param->fp = tscRebuildCreateDBStatement;
@ -745,7 +749,10 @@ static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
char db[TSDB_DB_NAME_LEN] = {0};
pthread_mutex_lock(&pSql->pTscObj->mutex);
extractDBName(pSql->pTscObj->db, db);
pthread_mutex_unlock(&pSql->pTscObj->mutex);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);


@ -16,7 +16,7 @@
#include "tscLocalMerge.h"
#include "tscSubquery.h"
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "tlosertree.h"
#include "tscLog.h"
#include "tscUtil.h"
@ -56,7 +56,7 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
}
}
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescriptor *pDesc) {
/*
* the fields and offset attributes in pCmd and pModel may be different due to
* merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
@ -166,7 +166,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
return pFillCol;
}
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@ -212,9 +212,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
return;
}
size_t size = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush;
size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;
SLocalReducer *pReducer = (SLocalReducer *) calloc(1, size);
SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
@ -372,7 +372,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->offset = (int32_t)pQueryInfo->limit.offset;
pRes->pLocalReducer = pReducer;
pRes->pLocalMerger = pReducer;
pRes->numOfGroups = 0;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
@ -477,13 +477,13 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
return 0;
}
void tscDestroyLocalReducer(SSqlObj *pSql) {
void tscDestroyLocalMerger(SSqlObj *pSql) {
if (pSql == NULL) {
return;
}
SSqlRes *pRes = &(pSql->res);
if (pRes->pLocalReducer == NULL) {
if (pRes->pLocalMerger == NULL) {
return;
}
@ -491,14 +491,14 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// there is no more result, so we release all allocated resource
SLocalReducer *pLocalReducer = (SLocalReducer *)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
if (pLocalReducer != NULL) {
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
SLocalMerger *pLocalMerge = (SLocalMerger *)atomic_exchange_ptr(&pRes->pLocalMerger, NULL);
if (pLocalMerge != NULL) {
pLocalMerge->pFillInfo = taosDestroyFillInfo(pLocalMerge->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
if (pLocalMerge->pCtx != NULL) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[i];
tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
@ -508,31 +508,31 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
}
}
tfree(pLocalReducer->pCtx);
tfree(pLocalMerge->pCtx);
}
tfree(pLocalReducer->prevRowOfInput);
tfree(pLocalMerge->prevRowOfInput);
tfree(pLocalReducer->pTempBuffer);
tfree(pLocalReducer->pResultBuf);
tfree(pLocalMerge->pTempBuffer);
tfree(pLocalMerge->pResultBuf);
if (pLocalReducer->pLoserTree) {
tfree(pLocalReducer->pLoserTree->param);
tfree(pLocalReducer->pLoserTree);
if (pLocalMerge->pLoserTree) {
tfree(pLocalMerge->pLoserTree->param);
tfree(pLocalMerge->pLoserTree);
}
tfree(pLocalReducer->pFinalRes);
tfree(pLocalReducer->discardData);
tfree(pLocalMerge->pFinalRes);
tfree(pLocalMerge->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
tfree(pLocalReducer->pLocalDataSrc[i]);
tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, pLocalMerge->finalModel,
pLocalMerge->numOfVnode);
for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) {
tfree(pLocalMerge->pLocalDataSrc[i]);
}
pLocalReducer->numOfBuffer = 0;
pLocalReducer->numOfCompleted = 0;
free(pLocalReducer);
pLocalMerge->numOfBuffer = 0;
pLocalMerge->numOfCompleted = 0;
free(pLocalMerge);
} else {
tscDebug("%p already freed or another free function is invoked", pSql);
}
@ -604,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
}
bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
bool isSameGroup(SSqlCmd *pCmd, SLocalMerger *pReducer, char *pPrev, tFilePage *tmpBuffer) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// disable merge procedure for column projection query
@ -795,12 +795,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
/**
*
* @param pLocalReducer
* @param pLocalMerge
* @param pOneInterDataSrc
* @param treeList
* @return the number of remaining input sources; if ret == 0, all data has been handled
*/
int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
bool *needAdjustLoserTree) {
pOneInterDataSrc->rowIdx = 0;
pOneInterDataSrc->pageId += 1;
@ -817,17 +817,17 @@ int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *p
#endif
*needAdjustLoserTree = true;
} else {
pLocalReducer->numOfCompleted += 1;
pLocalMerge->numOfCompleted += 1;
pOneInterDataSrc->rowIdx = -1;
pOneInterDataSrc->pageId = -1;
*needAdjustLoserTree = true;
}
return pLocalReducer->numOfBuffer;
return pLocalMerge->numOfBuffer;
}
void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
SLoserTreeInfo *pTree) {
/*
* load a new data page into memory for intermediate dataset source,
@ -835,7 +835,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
*/
bool needToAdjust = true;
if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
loadNewDataFromDiskFor(pLocalReducer, pOneInterDataSrc, &needToAdjust);
loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
}
/*
@ -843,7 +843,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
* if rebuilding the loser tree has completed, we do not need to adjust
*/
if (needToAdjust) {
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalReducer->numOfBuffer;
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
#ifdef _DEBUG_VIEW
printf("before adjust:\t");
@ -860,7 +860,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
}
}
void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
void savePrevRecordAndSetupFillInfo(SLocalMerger *pLocalMerge, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
// discard following dataset in the same group and reset the interpolation information
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -873,28 +873,28 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
taosResetFillInfo(pFillInfo, revisedSTime);
}
pLocalReducer->discard = true;
pLocalReducer->discardData->num = 0;
pLocalMerge->discard = true;
pLocalMerge->discardData->num = 0;
SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1);
SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pModel, pLocalMerge->discardData, pLocalMerge->prevRowOfInput, 0, 1, 1);
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalMerger *pLocalMerge, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
tFilePage * pBeforeFillData = pLocalMerge->pResultBuf;
pRes->data = pLocalReducer->pFinalRes;
pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) pBeforeFillData->num;
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = (int32_t) pBeforeFillData->num;
tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
tColModelErase(pLocalMerge->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
@ -907,7 +907,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
pRes->numOfRows = 0;
pBeforeFillData->num = 0;
pLocalReducer->discard = true;
pLocalMerge->discard = true;
return;
}
@ -923,29 +923,29 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
pRes->numOfRows -= overflow;
pBeforeFillData->num -= overflow;
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
// set remaining data to be discarded, and reset the interpolation information
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pLocalMerge->pFillInfo);
}
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalModel->rowSize));
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalMerge->finalModel->rowSize));
pRes->numOfClauseTotal += pRes->numOfRows;
pBeforeFillData->num = 0;
}
/*
* Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
* Note: pRes->pLocalMerge may be null, due to the fact that "tscDestroyLocalMerger" is called
* by "interuptHandler" function in shell
*/
static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutput) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
tFilePage *pBeforeFillData = pLocalReducer->pResultBuf;
tFilePage *pBeforeFillData = pLocalMerge->pResultBuf;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
// todo extract function
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@ -953,11 +953,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalMerge->resColModel->capacity);
}
while (1) {
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalMerge->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
@ -970,7 +970,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
}
pRes->data = pLocalReducer->pFinalRes;
pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) newRows;
pQueryInfo->limit.offset = 0;
@ -985,7 +985,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
// all output in current group are completed
int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalMerge->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@ -1003,7 +1003,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
assert(pRes->numOfRows >= 0);
/* set remaining data to be discarded, and reset the interpolation information */
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pFillInfo);
}
int32_t offset = 0;
@ -1025,8 +1025,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tfree(pResPages);
}
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
SColumnModel *pColumnModel = pLocalReducer->pDesc->pColumnModel;
static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
SColumnModel *pColumnModel = pLocalMerge->pDesc->pColumnModel;
assert(pColumnModel->capacity == 1 && tmpBuffer->num == 1);
// copy to previous temp buffer
@ -1034,20 +1034,20 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
SSchema *pSchema = getColumnModelSchema(pColumnModel, i);
int16_t offset = getColumnModelOffset(pColumnModel, i);
memcpy(pLocalReducer->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
memcpy(pLocalMerge->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
}
tmpBuffer->num = 0;
pLocalReducer->hasPrevRow = true;
pLocalMerge->hasPrevRow = true;
}
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[j];
// tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
int32_t functionId = pCtx->functionId;
@ -1074,20 +1074,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
}
for (int32_t j = 0; j < size; ++j) {
int32_t functionId = pLocalReducer->pCtx[j].functionId;
int32_t functionId = pLocalMerge->pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
aAggs[functionId].mergeFunc(&pLocalReducer->pCtx[j]);
aAggs[functionId].mergeFunc(&pLocalMerge->pCtx[j]);
}
}
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
if (pLocalReducer->hasUnprocessedRow) {
pLocalReducer->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
savePreviousRow(pLocalReducer, tmpBuffer);
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
if (pLocalMerge->hasUnprocessedRow) {
pLocalMerge->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
@ -1101,7 +1101,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* the number of output result is decided by main output
*/
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG) {
continue;
}
@ -1120,7 +1120,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* filled with the same result, which is the tags, specified in group by clause
*
*/
static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) {
static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalMerger *pLocalMerge) {
int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@ -1135,7 +1135,7 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
char *buf = malloc((size_t)maxBufSize);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
if (pCtx->functionId != TSDB_FUNC_TAG) {
continue;
}
@ -1153,20 +1153,20 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
free(buf);
}
int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx* pCtx = &pLocalMerge->pCtx[k];
aAggs[pCtx->functionId].xFinalize(pCtx);
}
pLocalReducer->hasPrevRow = false;
pLocalMerge->hasPrevRow = false;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx);
pLocalReducer->pResultBuf->num += numOfRes;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalMerge->pCtx);
pLocalMerge->pResultBuf->num += numOfRes;
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer);
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalMerge);
return numOfRes;
}
@ -1177,22 +1177,22 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
* for results generated by simple aggregation functions, we merge them all into one point
* *Exception*: column projection queries require no merge procedure
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
bool needToMerge(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
int16_t functionId = pLocalReducer->pCtx[0].functionId;
int16_t functionId = pLocalMerge->pCtx[0].functionId;
// todo opt performance
if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0))) { // column projection query
if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0) && pQueryInfo->distinctTag == false)) { // column projection query
ret = 1; // disable merge procedure
} else {
tOrderDescriptor *pDesc = pLocalReducer->pDesc;
tOrderDescriptor *pDesc = pLocalMerge->pDesc;
if (pDesc->orderInfo.numOfCols > 0) {
if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
// todo refactor comparator
ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
ret = compare_a(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
} else { // desc
ret = compare_d(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
ret = compare_d(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
}
}
}
@ -1230,17 +1230,17 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
/**
*
* @param pSql
* @param pLocalReducer
* @param pLocalMerge
* @param noMoreCurrentGroupRes
* @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups
*/
bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) {
bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurrentGroupRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage * pResBuf = pLocalReducer->pResultBuf;
SColumnModel *pModel = pLocalReducer->resColModel;
tFilePage * pResBuf = pLocalMerge->pResultBuf;
SColumnModel *pModel = pLocalMerge->resColModel;
pRes->code = TSDB_CODE_SUCCESS;
@ -1251,11 +1251,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
if (pQueryInfo->slimit.offset > 0) {
pRes->numOfRows = 0;
pQueryInfo->slimit.offset -= 1;
pLocalReducer->discard = !noMoreCurrentGroupRes;
pLocalMerge->discard = !noMoreCurrentGroupRes;
if (pLocalReducer->discard) {
SColumnModel *pInternModel = pLocalReducer->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalReducer->discardData, pLocalReducer->pTempBuffer->data, 0, 1, 1);
if (pLocalMerge->discard) {
SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return false;
@ -1264,19 +1264,14 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalModel->rowSize);
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
#ifdef _DEBUG_VIEW
printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
#endif
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
SFillInfo* pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL) {
TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@ -1284,34 +1279,34 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
doFillResult(pSql, pLocalMerge, noMoreCurrentGroupRes);
}
return true;
}
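
genFinalResults() above takes one of two paths: genFinalResWithoutFill() when there is no interval/fill requirement, and doFillResult() otherwise. A statement that exercises the fill path would look roughly like the following sketch (it reuses the hypothetical connection from the earlier example; the table and column names are made-up placeholders):

    // reuses the TAOS *taos handle from the earlier sketch; "meters"/"current" are placeholders
    TAOS_RES *res = taos_query(taos,
        "SELECT AVG(current) FROM meters WHERE ts > now - 1h INTERVAL(1m) FILL(NULL)");
    if (taos_errno(res) != 0) {
      fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    }
    taos_free_result(res);
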
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset output buffer to the beginning
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
pLocalMerge->pCtx[i].aOutputBuf = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
pLocalReducer->pCtx[i].ptsOutputBuf = pLocalReducer->pCtx[0].aOutputBuf;
pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].aOutputBuf;
}
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
memset(pLocalMerge->pResultBuf, 0, pLocalMerge->nResultBufSize + sizeof(tFilePage));
}
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalMerger *pLocalMerge) {
// when handling data in other groups, we need to reset the interpolation information for the new group's data
pRes->numOfRows = 0;
pRes->numOfRowsGroup = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->limit.offset = pLocalReducer->offset;
pQueryInfo->limit.offset = pLocalMerge->offset;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@ -1320,12 +1315,12 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
taosResetFillInfo(pLocalMerge->pFillInfo, newTime);
}
}
static bool isAllSourcesCompleted(SLocalReducer *pLocalReducer) {
return (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted);
static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
}
static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
@ -1333,19 +1328,19 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
assert(pQueryInfo->fillType != TSDB_FILL_NONE);
tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
tFilePage *pFinalDataBuf = pLocalMerge->pResultBuf;
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column
int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false);
doFillResult(pSql, pLocalMerge, false);
}
return true;
@ -1358,23 +1353,23 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
bool prevGroupCompleted = (!pLocalMerge->discard) && pLocalMerge->hasUnprocessedRow;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL ||
if ((isAllSourcesCompleted(pLocalMerge) && !pLocalMerge->hasPrevRow) || pLocalMerge->pLocalDataSrc[0] == NULL ||
prevGroupCompleted) {
// if fillType == TSDB_FILL_NONE, return directly
if (pQueryInfo->fillType != TSDB_FILL_NONE &&
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) {
doFillResult(pSql, pLocalReducer, true);
doFillResult(pSql, pLocalMerge, true);
}
}
@ -1384,7 +1379,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
*
* No results will be generated and query completed.
*/
if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalReducer) && (!pLocalReducer->hasUnprocessedRow))) {
if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalMerge) && (!pLocalMerge->hasUnprocessedRow))) {
return true;
}
@ -1393,7 +1388,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
return true;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
return false;
@ -1403,12 +1398,12 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
@ -1417,7 +1412,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
}
}
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
}
int32_t tscDoLocalMerge(SSqlObj *pSql) {
@ -1426,14 +1421,18 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tscResetForNextRetrieve(pRes);
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalMerger == NULL) { // all data has been processed
if (pRes->code == TSDB_CODE_SUCCESS) {
return pRes->code;
}
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
return pRes->code;
}
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
tFilePage *tmpBuffer = pLocalMerge->pTempBuffer;
if (doHandleLastRemainData(pSql)) {
return TSDB_CODE_SUCCESS;
@ -1443,24 +1442,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
SLoserTreeInfo *pTree = pLocalReducer->pLoserTree;
SLoserTreeInfo *pTree = pLocalMerge->pLoserTree;
// clear buffer
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
while (1) {
if (isAllSourcesCompleted(pLocalReducer)) {
if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
#ifdef _DEBUG_VIEW
printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
#endif
assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
assert((pTree->pNode[0].index < pLocalMerge->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
// chosen from loser tree
SLocalDataSource *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index];
SLocalDataSource *pOneDataSrc = pLocalMerge->pLocalDataSrc[pTree->pNode[0].index];
tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1,
pOneDataSrc->pMemBuffer->pColumnModel->capacity);
@ -1473,76 +1472,76 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo);
#endif
if (pLocalReducer->discard) {
assert(pLocalReducer->hasUnprocessedRow == false);
if (pLocalMerge->discard) {
assert(pLocalMerge->hasUnprocessedRow == false);
/* current record belongs to the same group as the previous record, need to discard it */
if (isSameGroup(pCmd, pLocalReducer, pLocalReducer->discardData->data, tmpBuffer)) {
if (isSameGroup(pCmd, pLocalMerge, pLocalMerge->discardData->data, tmpBuffer)) {
tmpBuffer->num = 0;
pOneDataSrc->rowIdx += 1;
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
// all inputs are exhausted, abort current process
if (isAllSourcesCompleted(pLocalReducer)) {
if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
// data belonging to the same group needs to be discarded
continue;
} else {
pLocalReducer->discard = false;
pLocalReducer->discardData->num = 0;
pLocalMerge->discard = false;
pLocalMerge->discardData->num = 0;
if (saveGroupResultInfo(pSql)) {
return TSDB_CODE_SUCCESS;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
}
if (pLocalReducer->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) {
if (pLocalMerge->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) {
// belongs to the group of the previous row, continue processing it
doExecuteSecondaryMerge(pCmd, pLocalReducer, false);
doExecuteSecondaryMerge(pCmd, pLocalMerge, false);
// copy to buffer
savePreviousRow(pLocalReducer, tmpBuffer);
savePreviousRow(pLocalMerge, tmpBuffer);
} else {
/*
* current row does not belong to the group of previous row.
* so the processing of previous group is completed.
*/
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalMerge);
bool sameGroup = isSameGroup(pCmd, pLocalMerge, pLocalMerge->prevRowOfInput, tmpBuffer);
tFilePage *pResBuf = pLocalReducer->pResultBuf;
tFilePage *pResBuf = pLocalMerge->pResultBuf;
/*
* if the previous group does NOT generate any result (pResBuf->num == 0),
* continue to process results instead of return results.
*/
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalReducer->resColModel->capacity)) {
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalMerge->resColModel->capacity)) {
// does not belong to the same group
bool notSkipped = genFinalResults(pSql, pLocalReducer, !sameGroup);
bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);
// this row needs to be discarded, since it belongs to the previous group
if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false;
if (pLocalMerge->discard && sameGroup) {
pLocalMerge->hasUnprocessedRow = false;
tmpBuffer->num = 0;
} else { // current row does not belong to the previous group, so it has not been handled yet.
pLocalReducer->hasUnprocessedRow = true;
pLocalMerge->hasUnprocessedRow = true;
}
resetOutputBuf(pQueryInfo, pLocalReducer);
resetOutputBuf(pQueryInfo, pLocalMerge);
pOneDataSrc->rowIdx += 1;
// here we do not check the return value
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
if (pRes->numOfRows == 0) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
if (!sameGroup) {
/*
@ -1553,7 +1552,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
} else {
/*
@ -1561,7 +1560,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* We start the process in a new round.
*/
if (sameGroup) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
}
}
@ -1573,24 +1572,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
}
} else { // result buffer is not full
doProcessResultInNextWindow(pSql, numOfRes);
savePreviousRow(pLocalReducer, tmpBuffer);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
} else {
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
savePreviousRow(pLocalReducer, tmpBuffer); // copy the processed row to buffer
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer
}
pOneDataSrc->rowIdx += 1;
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
}
if (pLocalReducer->hasPrevRow) {
finalizeRes(pQueryInfo, pLocalReducer);
if (pLocalMerge->hasPrevRow) {
finalizeRes(pQueryInfo, pLocalMerge);
}
if (pLocalReducer->pResultBuf->num) {
genFinalResults(pSql, pLocalReducer, true);
if (pLocalMerge->pResultBuf->num) {
genFinalResults(pSql, pLocalMerge, true);
}
return TSDB_CODE_SUCCESS;
@ -1598,8 +1597,8 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) {
SSqlRes *pRes = &pObj->res;
if (pRes->pLocalReducer != NULL) {
tscDestroyLocalReducer(pObj);
if (pRes->pLocalMerger != NULL) {
tscDestroyLocalMerger(pObj);
}
pRes->qhandle = 1; // hack to pass the safety check in fetch_row function
@ -1607,17 +1606,17 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->row = 0;
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
pRes->pLocalReducer = (SLocalReducer *)calloc(1, sizeof(SLocalReducer));
pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
/*
* we need one additional byte of space:
* the sprintf function needs one extra byte to put '\0' at the end of the string
*/
size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1;
pRes->pLocalReducer->pResultBuf = (tFilePage *)calloc(1, allocSize);
pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize);
pRes->pLocalReducer->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalReducer->pResultBuf->data;
pRes->pLocalMerger->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalMerger->pResultBuf->data;
}
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {


@ -703,7 +703,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -813,26 +813,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
tscAddEmptyMetaInfo(pQueryInfo);
}
STableMetaInfo *pSTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
code = tscSetTableFullName(pSTableMeterMetaInfo, &sToken, pSql);
STableMetaInfo *pSTableMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
code = tscSetTableFullName(pSTableMetaInfo, &sToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
tstrncpy(pCmd->tagData.name, pSTableMeterMetaInfo->name, sizeof(pCmd->tagData.name));
tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name);
pCmd->tagData.dataLen = 0;
code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
code = tscGetTableMeta(pSql, pSTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMeterMetaInfo)) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMeterMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMeterMetaInfo->pTableMeta);
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
@ -840,7 +840,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
SParsedDataColInfo spd = {0};
uint8_t numOfTags = tscGetNumOfTags(pSTableMeterMetaInfo->pTableMeta);
uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
spd.numOfCols = numOfTags;
// if some tag columns are specified
@ -905,6 +905,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
if (sToken.type != TK_LP) {
return tscInvalidSQLErrMsg(pCmd->payload, NULL, sToken.z);
}
SKVRowBuilder kvRowBuilder = {0};
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1036,11 +1043,7 @@ static int32_t validateDataSource(SSqlCmd *pCmd, int8_t type, const char *sql) {
}
/**
* usage: insert into table1 values() () table2 values()()
*
* @param str
* @param acct
* @param db
* parse insert sql
* @param pSql
* @return
*/
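
The usage line in the comment above describes the multi-table form of INSERT that tsParseInsertSql() handles. A concrete statement of that shape, issued through the public client API, might look like the sketch below (tables d1001/d1002, timestamps, and values are made-up placeholders):

    // reuses the TAOS *taos handle from the earlier sketch; tables d1001/d1002 are placeholders
    TAOS_RES *res = taos_query(taos,
        "INSERT INTO d1001 VALUES ('2021-01-28 09:00:00.000', 10.3) ('2021-01-28 09:00:01.000', 10.5) "
        "d1002 VALUES ('2021-01-28 09:00:00.000', 11.8)");
    if (taos_errno(res) != 0) {
      fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
    }
    taos_free_result(res);
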
@ -1343,10 +1346,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
// make a backup as tsParseInsertSql may modify the string
char* sqlstr = strdup(pSql->sqlstr);
ret = tsParseInsertSql(pSql);
if (sqlstr == NULL || pSql->parseRetry >= 1 || ret != TSDB_CODE_TSC_INVALID_SQL) {
if ((sqlstr == NULL) || (pSql->parseRetry >= 1) ||
(ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) {
free(sqlstr);
} else {
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, true);
free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
pSql->parseRetry++;
@ -1358,7 +1362,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, true);
pSql->parseRetry++;
ret = tscToSQLCmd(pSql, &SQLInfo);
}
@ -1406,39 +1410,38 @@ typedef struct SImportFileSupport {
FILE *fp;
} SImportFileSupport;
static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRows) {
assert(param != NULL && tres != NULL);
char * tokenBuf = NULL;
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int32_t count = 0;
int32_t maxRows = 0;
FILE * fp = NULL;
SSqlObj *pSql = tres;
SSqlCmd *pCmd = &pSql->cmd;
SImportFileSupport *pSupporter = (SImportFileSupport *) param;
SImportFileSupport *pSupporter = (SImportFileSupport *)param;
SSqlObj *pParentSql = pSupporter->pSql;
FILE *fp = pSupporter->fp;
fp = pSupporter->fp;
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // handle error
assert(taos_errno(pSql) == code);
int32_t code = pSql->res.code;
do {
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t errc = fseek(fp, 0, SEEK_SET);
if (errc < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
} else {
break;
}
}
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
return;
} while (0);
// retry parsing data from the file and import the data from the beginning again
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t ret = fseek(fp, 0, SEEK_SET);
if (ret < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
code = TAOS_SYSTEM_ERROR(errno);
goto _error;
}
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
// accumulate the total submit records
@ -1452,28 +1455,32 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int32_t count = 0;
int32_t maxRows = 0;
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
if (pCmd->pTableBlockHashList == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
}
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) {
// return ret;
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
char *tokenBuf = calloc(1, 4096);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
@ -1501,30 +1508,42 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
}
tfree(tokenBuf);
free(line);
tfree(line);
if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS) {
if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code == TSDB_CODE_SUCCESS) {
return;
} else {
goto _error;
}
} else {
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
int32_t v = (code != TSDB_CODE_SUCCESS) ? code : (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
return;
}
} else {
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
int32_t v = (pParentSql->res.code != TSDB_CODE_SUCCESS) ? pParentSql->res.code : (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
}
_error:
tfree(tokenBuf);
tfree(line);
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
tscAsyncResultOnError(pParentSql);
}
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
void tscImportDataFromFile(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
if (pCmd->command != TSDB_SQL_INSERT) {
return;
@ -1536,19 +1555,19 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
pCmd->count = 1;
FILE *fp = fopen(pCmd->payload, "r");
FILE *fp = fopen(pCmd->payload, "rb");
if (fp == NULL) {
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
tfree(pSupporter);
taos_free_result(pNew);
tscAsyncResultOnError(pSql);
return;
}
pSupporter->pSql = pSql;
pSupporter->fp = fp;
pSupporter->fp = fp;
parseFileSendDataBlock(pSupporter, pNew, 0);
parseFileSendDataBlock(pSupporter, pNew, TSDB_CODE_SUCCESS);
}


@ -707,7 +707,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
if (ret != 0) {
// todo handle error
}
@ -790,7 +790,7 @@ static int insertStmtExecute(STscStmt* stmt) {
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
assert(ret == 0);
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;


@ -13,14 +13,16 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __APPLE__
#define _BSD_SOURCE
#define _XOPEN_SOURCE 500
#define _DEFAULT_SOURCE
#define _GNU_SOURCE
#endif // __APPLE__
#include "os.h"
#include "ttype.h"
#include "qAst.h"
#include "texpr.h"
#include "taos.h"
#include "taosmsg.h"
#include "tcompare.h"
@ -41,7 +43,7 @@
#define COLUMN_INDEX_INITIAL_VAL (-3)
#define COLUMN_INDEX_INITIALIZER \
{ COLUMN_INDEX_INITIAL_VAL, COLUMN_INDEX_INITIAL_VAL }
#define COLUMN_INDEX_VALIDE(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_TBNAME_COLUMN_INDEX))
#define COLUMN_INDEX_VALIDE(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_BLOCK_DIST_COLUMN_INDEX))
#define TBNAME_LIST_SEP ","
typedef struct SColumnList { // todo refactor
@ -60,7 +62,7 @@ static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken);
static char* cloneCurrentDBName(SSqlObj* pSql);
static bool hasSpecifyDB(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@ -100,8 +102,8 @@ static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryI
static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
static int32_t validateEp(char* ep);
static int32_t validateDNodeConfig(tDCLSQL* pOptions);
static int32_t validateLocalConfig(tDCLSQL* pOptions);
static int32_t validateDNodeConfig(SMiscInfo* pOptions);
static int32_t validateLocalConfig(SMiscInfo* pOptions);
static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
@ -110,7 +112,7 @@ static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql);
static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t optrToString(tSQLExpr* pExpr, char** exprString);
@ -239,7 +241,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
if (!pInfo->valid || terrno == TSDB_CODE_TSC_SQL_SYNTAX_ERROR) {
terrno = TSDB_CODE_SUCCESS; // clear the error number
return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->pzErrMsg);
return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->msg);
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
@ -264,36 +266,48 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DROP_DB: {
const char* msg2 = "invalid name";
const char* msg3 = "param name too long";
const char* msg4 = "table is not super table";
SStrToken* pzName = &pInfo->pDCLInfo->a[0];
SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pInfo->type == TSDB_SQL_DROP_DB) {
assert(pInfo->pDCLInfo->nTokens == 1);
code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL);
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(pInfo->pDCLInfo->nTokens == 1);
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
if(code != TSDB_CODE_SUCCESS) {
return code;
}
if (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE) {
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
pzName->n = strdequote(pzName->z);
strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
} else { // drop user
strncpy(pCmd->payload, pzName->z, pzName->n);
} else { // drop user/account
if (pzName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
strncpy(pCmd->payload, pzName->z, pzName->n);
}
break;
@ -301,13 +315,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_USE_DB: {
const char* msg = "invalid db name";
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pToken, NULL, NULL);
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
@ -332,12 +346,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg1 = "invalid db name";
const char* msg2 = "name too long";
SCreateDBInfo* pCreateDB = &(pInfo->pDCLInfo->dbOpt);
SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL);
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname));
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@ -349,15 +363,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
break;
}
case TSDB_SQL_CREATE_DNODE: { // todo hostname
case TSDB_SQL_CREATE_DNODE: {
const char* msg = "invalid host name (ip address)";
if (pInfo->pDCLInfo->nTokens > 1) {
if (taosArrayGetSize(pInfo->pMiscInfo->a) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
SStrToken* pIpAddr = &pInfo->pDCLInfo->a[0];
pIpAddr->n = strdequote(pIpAddr->z);
SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
id->n = strdequote(id->z);
break;
}
@ -367,8 +381,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg2 = "invalid user/account name";
const char* msg3 = "name too long";
SStrToken* pName = &pInfo->pDCLInfo->user.user;
SStrToken* pPwd = &pInfo->pDCLInfo->user.passwd;
SStrToken* pName = &pInfo->pMiscInfo->user.user;
SStrToken* pPwd = &pInfo->pMiscInfo->user.passwd;
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@ -382,7 +396,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt;
SCreateAcctInfo* pAcctOpt = &pInfo->pMiscInfo->acctOpt;
if (pAcctOpt->stat.n > 0) {
if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) {
} else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) {
@ -397,10 +411,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_DESCRIBE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -418,10 +432,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
case TSDB_SQL_SHOW_CREATE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -439,11 +453,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SHOW_CREATE_DATABASE: {
const char* msg1 = "invalid database name";
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pToken->n > TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -455,29 +470,34 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg3 = "invalid dnode ep";
/* validate the ip address */
tDCLSQL* pDCL = pInfo->pDCLInfo;
SMiscInfo* pMiscInfo = pInfo->pMiscInfo;
/* validate the parameter names and options */
if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) {
if (validateDNodeConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char* pMsg = pCmd->payload;
SCfgDnodeMsg* pCfg = (SCfgDnodeMsg*)pMsg;
pDCL->a[0].n = strdequote(pDCL->a[0].z);
strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n);
SStrToken* t0 = taosArrayGet(pMiscInfo->a, 0);
SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
t0->n = strdequote(t0->z);
strncpy(pCfg->ep, t0->z, t0->n);
if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n);
strncpy(pCfg->config, t1->z, t1->n);
if (pDCL->nTokens == 3) {
pCfg->config[pDCL->a[1].n] = ' '; // add sep
strncpy(&pCfg->config[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n);
if (taosArrayGetSize(pMiscInfo->a) == 3) {
SStrToken* t2 = taosArrayGet(pMiscInfo->a, 2);
pCfg->config[t1->n] = ' '; // add sep
strncpy(&pCfg->config[t1->n + 1], t2->z, t2->n);
}
break;
@ -492,7 +512,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pCmd->command = pInfo->type;
SUserInfo* pUser = &pInfo->pDCLInfo->user;
SUserInfo* pUser = &pInfo->pMiscInfo->user;
SStrToken* pName = &pUser->user;
SStrToken* pPwd = &pUser->passwd;
@ -536,18 +556,22 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_CFG_LOCAL: {
tDCLSQL* pDCL = pInfo->pDCLInfo;
const char* msg = "invalid configure options or values";
SMiscInfo *pMiscInfo = pInfo->pMiscInfo;
const char *msg = "invalid configure options or values";
// validate the parameter names and options
if (validateLocalConfig(pDCL) != TSDB_CODE_SUCCESS) {
if (validateLocalConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
strncpy(pCmd->payload, pDCL->a[0].z, pDCL->a[0].n);
if (pDCL->nTokens == 2) {
pCmd->payload[pDCL->a[0].n] = ' '; // add sep
strncpy(&pCmd->payload[pDCL->a[0].n + 1], pDCL->a[1].z, pDCL->a[1].n);
int32_t numOfToken = (int32_t) taosArrayGetSize(pMiscInfo->a);
SStrToken* t = taosArrayGet(pMiscInfo->a, 0);
SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
strncpy(pCmd->payload, t->z, t->n);
if (numOfToken == 2) {
pCmd->payload[t->n] = ' '; // add sep
strncpy(&pCmd->payload[t->n + 1], t1->z, t1->n);
}
break;
@ -878,47 +902,54 @@ int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQu
return TSDB_CODE_SUCCESS;
}
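/*
 * Name resolution overview: when the table token carries a db prefix
 * ("db.table"), the account id is set on the name first and the token is
 * parsed with T_NAME_DB|T_NAME_TABLE; otherwise the connection's current db
 * string (already in "acctId<TS_PATH_DELIMITER>db" form) is cloned under the
 * connection mutex, parsed with T_NAME_ACCT|T_NAME_DB, and the bare table
 * name is appended with T_NAME_TABLE.
 */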
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) {
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
// backup the old name in pTableMetaInfo
char oldName[TSDB_TABLE_FNAME_LEN] = {0};
tstrncpy(oldName, pTableMetaInfo->name, tListLen(oldName));
if (hasSpecifyDB(pzTableName)) { // db has been specified in sql string so we ignore current db path
code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL);
if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
code = tNameSetAcctId(&pTableMetaInfo->name, getAccountId(pSql));
if (code != 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_DB|T_NAME_TABLE);
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else { // get current DB name first, and then set it into path
SStrToken t = {0};
getCurrentDBName(pSql, &t);
if (t.n == 0) { // current database not available or not specified
code = TSDB_CODE_TSC_DB_NOT_SELECTED;
} else {
code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
if (code != 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
char* t = cloneCurrentDBName(pSql);
if (strlen(t) == 0) {
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
code = tNameFromString(&pTableMetaInfo->name, t, T_NAME_ACCT | T_NAME_DB);
if (code != 0) {
free(t);
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
free(t);
if (pTableName->n >= TSDB_TABLE_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_TABLE);
if (code != 0) {
code = invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (code != TSDB_CODE_SUCCESS) {
return code;
}
/*
   * the old name exists and is not equal to the new name. Release the table meta
   * that corresponds to the old name for the new table name.
*/
if (strlen(oldName) > 0 && strncasecmp(oldName, pTableMetaInfo->name, tListLen(pTableMetaInfo->name)) != 0) {
tscClearTableMetaInfo(pTableMetaInfo);
}
return TSDB_CODE_SUCCESS;
return code;
}
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
@ -1218,9 +1249,12 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken) {
pDBToken->z = pSql->pTscObj->db;
pDBToken->n = (uint32_t)strlen(pSql->pTscObj->db);
static char* cloneCurrentDBName(SSqlObj* pSql) {
pthread_mutex_lock(&pSql->pTscObj->mutex);
char *p = strdup(pSql->pTscObj->db);
pthread_mutex_unlock(&pSql->pTscObj->mutex);
return p;
}
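/*
 * cloneCurrentDBName() duplicates pTscObj->db while holding the connection
 * mutex, so a concurrent update of the current db (see tscProcessUseDbRsp and
 * tscProcessConnectRsp below) cannot be observed half-written. The returned
 * string is heap-allocated and owned by the caller, which should free() it on
 * every path. Hypothetical usage sketch:
 *
 *   char* db = cloneCurrentDBName(pSql);
 *   if (db == NULL || strlen(db) == 0) {   // no current database selected
 *     free(db);                            // free(NULL) is a no-op
 *     return TSDB_CODE_TSC_DB_NOT_SELECTED;
 *   }
 *   // ... use db to build the fully qualified name ...
 *   free(db);
 */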
/* length limitation, strstr cannot be applied */
@ -1333,7 +1367,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo, colList, NULL);
if (ret != TSDB_CODE_SUCCESS) {
taosArrayDestroy(colList);
tExprTreeDestroy(&pNode, NULL);
tExprTreeDestroy(pNode, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@ -1342,9 +1376,9 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
for(int32_t k = 0; k < numOfNode; ++k) {
SColIndex* pIndex = taosArrayGet(colList, k);
if (TSDB_COL_IS_TAG(pIndex->flag)) {
tExprTreeDestroy(&pNode, NULL);
tExprTreeDestroy(pNode, NULL);
taosArrayDestroy(colList);
tExprTreeDestroy(&pNode, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@ -1371,7 +1405,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
tbufCloseWriter(&bw);
taosArrayDestroy(colList);
tExprTreeDestroy(&pNode, NULL);
tExprTreeDestroy(pNode, NULL);
} else {
columnList.num = 0;
columnList.ids[0] = (SColumnIndex) {0, 0};
@ -1403,7 +1437,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid);
if (ret != TSDB_CODE_SUCCESS) {
tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
tExprTreeDestroy(pArithExprInfo->pExpr, NULL);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
}
@ -1480,23 +1514,39 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
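/*
 * DISTINCT is accepted only for a super-table query that selects exactly one
 * tag column; parseSelectClause() below rejects anything else with
 * "only support distinct one tag".
 */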
bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
if (pQueryInfo == NULL) {
return false;
}
if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY) {
return false;
}
if (tscQueryTags(pQueryInfo) && tscSqlExprNumOfExprs(pQueryInfo) == 1){
return true;
}
return false;
}
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery) {
assert(pSelection != NULL && pCmd != NULL);
const char* msg2 = "functions can not be mixed up";
const char* msg3 = "not support query expression";
const char* msg5 = "invalid function name";
const char* msg6 = "only support distinct one tag";
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
bool hasDistinct = false;
for (int32_t i = 0; i < pSelection->nExpr; ++i) {
int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
tSqlExprItem* pItem = &pSelection->a[i];
if (hasDistinct == false) {
hasDistinct = (pItem->distinct == true);
}
// project on all fields
int32_t optr = pItem->pNode->nSQLOptr;
@ -1530,6 +1580,13 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
}
}
if (hasDistinct == true) {
if (!isValidDistinctSql(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
pQueryInfo->distinctTag = true;
}
// there is only one user-defined column in the final result field, add the timestamp column.
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
@ -1706,6 +1763,9 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
SSchema colSchema = tGetTableNameColumnSchema();
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, TSDB_COL_TAG);
} else if (index.columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) {
SSchema colSchema = tGetBlockDistColumnSchema();
tscAddSpecialColumnForSelect(pQueryInfo, startPos, TSDB_FUNC_PRJ, &index, &colSchema, TSDB_COL_TAG);
} else {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@ -2173,6 +2233,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
@ -2381,6 +2442,14 @@ static bool isTablenameToken(SStrToken* token) {
return (strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_TBNAME_L));
}
static bool isTableBlockDistToken(SStrToken* token) {
SStrToken tmpToken = *token;
SStrToken tableToken = {0};
extractTableNameFromToken(&tmpToken, &tableToken);
return (strncasecmp(TSQL_BLOCK_DIST, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_BLOCK_DIST_L));
}
static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken* pToken) {
STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, index)->pTableMeta;
@ -2410,6 +2479,8 @@ int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SStrToken* pToken, SQueryInfo* pQu
if (isTablenameToken(pToken)) {
pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX;
} else if (isTableBlockDistToken(pToken)) {
pIndex->columnIndex = TSDB_BLOCK_DIST_COLUMN_INDEX;
} else if (strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) {
pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX;
} else {
@ -2595,10 +2666,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg6 = "pattern string is empty";
/*
* database prefix in pInfo->pDCLInfo->a[0]
* wildcard in like clause in pInfo->pDCLInfo->a[1]
* database prefix in pInfo->pMiscInfo->a[0]
* wildcard in like clause in pInfo->pMiscInfo->a[1]
*/
SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
SShowInfo* pShowInfo = &pInfo->pMiscInfo->showOpt;
int16_t showType = pShowInfo->showType;
if (showType == TSDB_MGMT_TABLE_TABLE || showType == TSDB_MGMT_TABLE_METRIC || showType == TSDB_MGMT_TABLE_VGROUP) {
// db prefix in tagCond, show table conds in payload
@ -2617,7 +2688,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL);
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -2650,8 +2721,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (!validateIpAddress(pDnodeIp->z, pDnodeIp->n)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
}
return TSDB_CODE_SUCCESS;
}
@ -2663,7 +2733,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
SSqlCmd* pCmd = &pSql->cmd;
pCmd->command = pInfo->type;
SStrToken* idStr = &(pInfo->pDCLInfo->ip);
SStrToken* idStr = &(pInfo->pMiscInfo->id);
if (idStr->n > TSDB_KILL_MSG_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -2898,7 +2968,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
STableMeta* pTableMeta = NULL;
SSchema* pSchema = NULL;
SSchema s = tscGetTbnameColumnSchema();
SSchema s = tGetTbnameColumnSchema();
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
@ -3421,6 +3491,7 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQ
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg1 = "invalid join query condition";
const char* msg2 = "invalid table name in join query";
const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition";
@ -3446,7 +3517,11 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pLeft->uid = pTableMetaInfo->pTableMeta->id.uid;
pLeft->tagColId = pTagSchema1->colId;
strcpy(pLeft->tableId, pTableMetaInfo->name);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pLeft->tableName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@ -3458,7 +3533,11 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pRight->uid = pTableMetaInfo->pTableMeta->id.uid;
pRight->tagColId = pTagSchema2->colId;
strcpy(pRight->tableId, pTableMetaInfo->name);
code = tNameExtractFullName(&pTableMetaInfo->name, pRight->tableName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
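    // both join operands now carry the serialized full table name; the tag
    // schemas are compared next so that only tags of identical type can be
    // joined (msg3)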
if (pTagSchema1->type != pTagSchema2->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@ -4035,8 +4114,6 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1));
taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
char db[TSDB_TABLE_FNAME_LEN] = {0};
// remove the duplicated input table names
int32_t num = 0;
char* tableNameString = taosStringBuilderGetResult(sb, NULL);
@ -4052,7 +4129,8 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
}
num = j;
char* name = extractDBName(pTableMetaInfo->name, db);
char name[TSDB_DB_NAME_LEN] = {0};
tNameGetDbName(&pTableMetaInfo->name, name);
SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) };
for (int32_t i = 0; i < num; ++i) {
@ -4231,6 +4309,77 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf
}
}
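/*
 * validateTagCondExpr() rejects arithmetic operators inside a tag condition
 * (msg1) and, for a relational comparison between a tag column and a
 * constant, dry-runs tVariantDump() of the constant into the tag column's
 * type (integers widened to BIGINT, floats to DOUBLE) so that an incompatible
 * filter constant is reported at parse time (msg2) instead of failing later.
 */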
static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
const char *msg1 = "invalid tag operator";
const char* msg2 = "not supported filter condition";
do {
if (p->nodeType != TSQL_NODE_EXPR) {
break;
}
if (!p->_node.pLeft || !p->_node.pRight) {
break;
}
if (IS_ARITHMETIC_OPTR(p->_node.optr)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (!IS_RELATION_OPTR(p->_node.optr)) {
break;
}
tVariant * vVariant = NULL;
int32_t schemaType = -1;
if (p->_node.pLeft->nodeType == TSQL_NODE_VALUE && p->_node.pRight->nodeType == TSQL_NODE_COL) {
if (!p->_node.pRight->pSchema) {
break;
}
vVariant = p->_node.pLeft->pVal;
schemaType = p->_node.pRight->pSchema->type;
} else if (p->_node.pLeft->nodeType == TSQL_NODE_COL && p->_node.pRight->nodeType == TSQL_NODE_VALUE) {
if (!p->_node.pLeft->pSchema) {
break;
}
vVariant = p->_node.pRight->pVal;
schemaType = p->_node.pLeft->pSchema->type;
} else {
break;
}
if (schemaType >= TSDB_DATA_TYPE_TINYINT && schemaType <= TSDB_DATA_TYPE_BIGINT) {
schemaType = TSDB_DATA_TYPE_BIGINT;
} else if (schemaType == TSDB_DATA_TYPE_FLOAT || schemaType == TSDB_DATA_TYPE_DOUBLE) {
schemaType = TSDB_DATA_TYPE_DOUBLE;
}
int32_t retVal = TSDB_CODE_SUCCESS;
if (schemaType == TSDB_DATA_TYPE_BINARY) {
char *tmp = calloc(1, vVariant->nLen + TSDB_NCHAR_SIZE);
retVal = tVariantDump(vVariant, tmp, schemaType, false);
free(tmp);
} else if (schemaType == TSDB_DATA_TYPE_NCHAR) {
      // vVariant->nLen + 1 is larger than the actual nchar string length
char *tmp = calloc(1, (vVariant->nLen + 1) * TSDB_NCHAR_SIZE);
retVal = tVariantDump(vVariant, tmp, schemaType, false);
free(tmp);
} else {
double tmp;
retVal = tVariantDump(vVariant, (char*)&tmp, schemaType, false);
}
if (retVal != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}while (0);
return TSDB_CODE_SUCCESS;
}
static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) {
int32_t ret = TSDB_CODE_SUCCESS;
@ -4273,13 +4422,21 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE
tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &bw);
doCompactQueryExpr(pExpr);
if (ret == TSDB_CODE_SUCCESS) {
ret = validateTagCondExpr(pCmd, p);
}
tSqlExprDestroy(p1);
tExprTreeDestroy(&p, NULL);
tExprTreeDestroy(p, NULL);
taosArrayDestroy(colList);
if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0 && !UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "filter on tag not supported for normal table");
}
if (ret) {
break;
}
}
pCondExpr->pTagCond = NULL;
@ -4495,10 +4652,10 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
size_t size = tscNumOfFields(pQueryInfo);
size_t numOfFields = tscNumOfFields(pQueryInfo);
if (pQueryInfo->fillVal == NULL) {
pQueryInfo->fillVal = calloc(size, sizeof(int64_t));
pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t));
if (pQueryInfo->fillVal == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -4508,7 +4665,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
pQueryInfo->fillType = TSDB_FILL_NONE;
} else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) {
pQueryInfo->fillType = TSDB_FILL_NULL;
for (int32_t i = START_INTERPO_COL_IDX; i < size; ++i) {
for (int32_t i = START_INTERPO_COL_IDX; i < numOfFields; ++i) {
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
setNull((char*)&pQueryInfo->fillVal[i], pField->type, pField->bytes);
}
@ -4522,7 +4679,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
pQueryInfo->fillType = TSDB_FILL_SET_VALUE;
size_t num = taosArrayGetSize(pFillToken);
if (num == 1) {
if (num == 1) { // no actual value, return with error code
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -4533,11 +4690,11 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
if (tscIsPointInterpQuery(pQueryInfo)) {
startPos = 0;
if (numOfFillVal > size) {
numOfFillVal = (int32_t)size;
if (numOfFillVal > numOfFields) {
numOfFillVal = (int32_t)numOfFields;
}
} else {
numOfFillVal = (int16_t)((num > (int32_t)size) ? (int32_t)size : num);
numOfFillVal = (int16_t)((num > (int32_t)numOfFields) ? (int32_t)numOfFields : num);
}
int32_t j = 1;
@ -4557,10 +4714,10 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
}
}
if ((num < size) || ((num - 1 < size) && (tscIsPointInterpQuery(pQueryInfo)))) {
if ((num < numOfFields) || ((num - 1 < numOfFields) && (tscIsPointInterpQuery(pQueryInfo)))) {
tVariantListItem* lastItem = taosArrayGetLast(pFillToken);
for (int32_t i = numOfFillVal; i < size; ++i) {
for (int32_t i = numOfFillVal; i < numOfFields; ++i) {
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) {
@ -4611,6 +4768,12 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
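  // a DISTINCT tag query forces ascending order on the first output column
  // and skips the user-specified ORDER BY clause entirely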
if (pQueryInfo->distinctTag == true) {
pQueryInfo->order.order = TSDB_ORDER_ASC;
pQueryInfo->order.orderColId = 0;
return TSDB_CODE_SUCCESS;
}
if (pQuerySql->pSortOrder == NULL) {
return TSDB_CODE_SUCCESS;
}
@ -4794,11 +4957,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg17 = "invalid column name";
const char* msg18 = "primary timestamp column cannot be dropped";
const char* msg19 = "invalid new tag name";
const char* msg20 = "table is not super table";
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo;
SAlterTableInfo* pAlterSQL = pInfo->pAlterInfo;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
@ -4819,6 +4983,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pAlterSQL->tableType == TSDB_SUPER_TABLE && !(UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg20);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
@ -5133,8 +5301,10 @@ int32_t validateEp(char* ep) {
return TSDB_CODE_SUCCESS;
}
int32_t validateDNodeConfig(tDCLSQL* pOptions) {
if (pOptions->nTokens < 2 || pOptions->nTokens > 3) {
int32_t validateDNodeConfig(SMiscInfo* pOptions) {
int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
if (numOfToken < 2 || numOfToken > 3) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -5152,9 +5322,9 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
{"cqDebugFlag", 11},
};
SStrToken* pOptionToken = &pOptions->a[1];
SStrToken* pOptionToken = taosArrayGet(pOptions->a, 1);
if (pOptions->nTokens == 2) {
if (numOfToken == 2) {
    // reset log and reset query cache do not need a value
for (int32_t i = 0; i < tokenLogEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
@ -5164,7 +5334,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
}
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
SStrToken* pValToken = &pOptions->a[2];
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
@ -5175,14 +5345,14 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenMonitor].len == pOptionToken->n)) {
SStrToken* pValToken = &pOptions->a[2];
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else {
SStrToken* pValToken = &pOptions->a[2];
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 0 || val > 256) {
@ -5193,8 +5363,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
for (int32_t i = tokenDebugFlag; i < tokenDebugFlagEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
// options is valid
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
/* options is valid */
return TSDB_CODE_SUCCESS;
}
}
@ -5203,17 +5373,18 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}
int32_t validateLocalConfig(tDCLSQL* pOptions) {
if (pOptions->nTokens < 1 || pOptions->nTokens > 2) {
int32_t validateLocalConfig(SMiscInfo* pOptions) {
int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
if (numOfToken < 1 || numOfToken > 2) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12},
{"cDebugFlag", 10}, {"uDebugFlag", 10}, {"debugFlag", 9}};
SStrToken* pOptionToken = &pOptions->a[0];
SStrToken* pOptionToken = taosArrayGet(pOptions->a, 0);
if (pOptions->nTokens == 1) {
if (numOfToken == 1) {
// reset log does not need value
for (int32_t i = 0; i < 1; ++i) {
SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i];
@ -5222,7 +5393,7 @@ int32_t validateLocalConfig(tDCLSQL* pOptions) {
}
}
} else {
SStrToken* pValToken = &pOptions->a[1];
SStrToken* pValToken = taosArrayGet(pOptions->a, 1);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 131 || val > 199) {
@ -5374,7 +5545,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
return TSDB_CODE_SUCCESS;
}
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
const char* msg = "invalid number of options";
pMsg->daysToKeep = htonl(-1);
@ -5412,7 +5583,7 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* p
return TSDB_CODE_SUCCESS;
}
static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) {
static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDbInfo) {
const char* msg = "invalid time precision";
pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default
@ -5436,7 +5607,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo
return TSDB_CODE_SUCCESS;
}
static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
pMsg->maxTables = htonl(-1); // max tables can not be set anymore
pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize);
pMsg->totalBlocks = htonl(pCreateDb->numOfBlocks);
@ -5454,7 +5625,7 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
pMsg->cacheLastRow = pCreateDb->cachelast;
}
int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql) {
SCreateDbMsg* pMsg = (SCreateDbMsg *)(pCmd->payload);
setCreateDBOption(pMsg, pCreateDbSql);
@ -6215,9 +6386,9 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// get table meta from mnode
tstrncpy(pCreateTableInfo->tagdata.name, pStableMetaInfo->name, tListLen(pCreateTableInfo->tagdata.name));
SArray* pList = pCreateTableInfo->pTagVals;
code = tNameExtractFullName(&pStableMetaInfo->name, pCreateTableInfo->tagdata.name);
SArray* pList = pCreateTableInfo->pTagVals;
code = tscGetTableMeta(pSql, pStableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -6295,7 +6466,11 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return ret;
}
pCreateTableInfo->fullname = strndup(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN);
pCreateTableInfo->fullname = calloc(1, tNameLen(&pTableMetaInfo->name) + 1);
ret = tNameExtractFullName(&pTableMetaInfo->name, pCreateTableInfo->fullname);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
return TSDB_CODE_SUCCESS;
@ -6542,7 +6717,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
// has no table alias name
if (memcmp(pTableItem->pz, p1->pVar.pz, p1->pVar.nLen) == 0) {
extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName);
strncpy(pTableMetaInfo1->aliasName, tNameGetTableName(&pTableMetaInfo1->name), tListLen(pTableMetaInfo->aliasName));
} else {
tstrncpy(pTableMetaInfo1->aliasName, p1->pVar.pz, sizeof(pTableMetaInfo1->aliasName));
}

View File

@ -106,6 +106,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);

View File

@ -309,7 +309,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
return;
}
if (pEpSet) { // todo update this
if (pEpSet) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
@ -461,16 +461,20 @@ int doProcessSql(SSqlObj *pSql) {
}
int tscProcessSql(SSqlObj *pSql) {
char *name = NULL;
char name[TSDB_TABLE_FNAME_LEN] = {0};
SSqlCmd *pCmd = &pSql->cmd;
uint32_t type = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = NULL;
uint32_t type = 0;
if (pQueryInfo != NULL) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
name = (pTableMetaInfo != NULL)? pTableMetaInfo->name:NULL;
if (pTableMetaInfo != NULL) {
tNameExtractFullName(&pTableMetaInfo->name, name);
}
type = pQueryInfo->type;
// while numOfTables equals to 0, it must be Heartbeat
@ -669,10 +673,11 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
pMsg += sizeof(STableIdInfo);
}
}
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
pTableMeta->id.tid, pTableMeta->id.uid);
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), n, pTableMeta->id.tid, pTableMeta->id.uid);
return pMsg;
}
@ -755,8 +760,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || !isValidDataType(pColSchema->type)) {
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -942,9 +950,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) ||
(!isValidDataType(pColSchema->type))) {
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscError("%p tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, total, numOfTagColumns,
pCol->colIndex.columnIndex, pColSchema->name);
pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, total, numOfTagColumns, pCol->colIndex.columnIndex, pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -1021,7 +1031,8 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pCmd->numOfClause == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pCreateDbMsg->db, pTableMetaInfo->name, sizeof(pCreateDbMsg->db));
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateDbMsg->db);
assert(code == TSDB_CODE_SUCCESS);
return TSDB_CODE_SUCCESS;
}
@ -1035,7 +1046,9 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SCreateDnodeMsg *pCreate = (SCreateDnodeMsg *)pCmd->payload;
strncpy(pCreate->ep, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n);
SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0);
strncpy(pCreate->ep, t0->z, t0->n);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE;
@ -1052,13 +1065,13 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateAcctMsg *pAlterMsg = (SCreateAcctMsg *)pCmd->payload;
SStrToken *pName = &pInfo->pDCLInfo->user.user;
SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
SStrToken *pName = &pInfo->pMiscInfo->user.user;
SStrToken *pPwd = &pInfo->pMiscInfo->user.passwd;
strncpy(pAlterMsg->user, pName->z, pName->n);
strncpy(pAlterMsg->pass, pPwd->z, pPwd->n);
SCreateAcctSQL *pAcctOpt = &pInfo->pDCLInfo->acctOpt;
SCreateAcctInfo *pAcctOpt = &pInfo->pMiscInfo->acctOpt;
pAlterMsg->cfg.maxUsers = htonl(pAcctOpt->maxUsers);
pAlterMsg->cfg.maxDbs = htonl(pAcctOpt->maxDbs);
@ -1098,7 +1111,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateUserMsg *pAlterMsg = (SCreateUserMsg *)pCmd->payload;
SUserInfo *pUser = &pInfo->pDCLInfo->user;
SUserInfo *pUser = &pInfo->pMiscInfo->user;
strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n);
pAlterMsg->flag = (int8_t)pUser->type;
@ -1138,8 +1151,11 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropDbMsg->db, pTableMetaInfo->name, sizeof(pDropDbMsg->db));
pDropDbMsg->ignoreNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pDropDbMsg->db);
assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->name.type == TSDB_DB_NAME_T);
pDropDbMsg->ignoreNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DB;
return TSDB_CODE_SUCCESS;
@ -1156,15 +1172,19 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pDropTableMsg->tableFname, pTableMetaInfo->name);
pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
tNameExtractFullName(&pTableMetaInfo->name, pDropTableMsg->name);
pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
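  // the dnode endpoint was stashed in pCmd->payload by the TSDB_SQL_DROP_DNODE
  // branch of tscToSQLCmd(), so copy it out before the payload is re-allocated
  // for the drop message below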
char dnodeEp[TSDB_EP_LEN] = {0};
tstrncpy(dnodeEp, pCmd->payload, TSDB_EP_LEN);
pCmd->payloadLen = sizeof(SDropDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
@ -1172,43 +1192,28 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep));
tstrncpy(pDrop->ep, dnodeEp, tListLen(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo * UNUSED_PARAM(pInfo)) {
int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
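  // the user/account name was stashed in pCmd->payload by the drop user/account
  // branch of tscToSQLCmd(), so copy it out before the payload is re-allocated
  // for the drop message below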
char user[TSDB_USER_LEN] = {0};
tstrncpy(user, pCmd->payload, TSDB_USER_LEN);
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
SDropUserMsg *pDropMsg = (SDropUserMsg *)pCmd->payload;
tstrncpy(pDropMsg->user, user, tListLen(user));
return TSDB_CODE_SUCCESS;
}
@ -1224,7 +1229,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pUseDbMsg->db, pTableMetaInfo->name);
tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB;
return TSDB_CODE_SUCCESS;
@ -1244,14 +1249,16 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
size_t nameLen = strlen(pTableMetaInfo->name);
if (nameLen > 0) {
tstrncpy(pShowMsg->db, pTableMetaInfo->name, sizeof(pShowMsg->db)); // prefix is set here
if (tNameIsEmpty(&pTableMetaInfo->name)) {
pthread_mutex_lock(&pObj->mutex);
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
pthread_mutex_unlock(&pObj->mutex);
} else {
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt;
SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
@ -1310,12 +1317,12 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int msgLen = 0;
SSchema * pSchema;
int size = 0;
int msgLen = 0;
int size = 0;
SSchema *pSchema;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// Reallocate the payload size
@ -1346,11 +1353,10 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(SCreateTableMsg);
SCreatedTableInfo* p = taosArrayGet(list, i);
strcpy(pCreate->tableFname, p->fullname);
strcpy(pCreate->tableName, p->fullname);
pCreate->igExists = (p->igExist)? 1 : 0;
// use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(p->fullname, pCreate->db);
pMsg = serializeTagData(&p->tagdata, pMsg);
int32_t len = (int32_t)(pMsg - (char*) pCreate);
@ -1359,10 +1365,8 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} else { // create (super) table
pCreateTableMsg->numOfTables = htonl(1); // only one table will be created
strcpy(pCreateMsg->tableFname, pTableMetaInfo->name);
// use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateMsg->tableName);
assert(code == 0);
SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo;
@ -1420,7 +1424,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;
SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
@ -1428,9 +1432,8 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
strcpy(pAlterTableMsg->tableFname, pTableMetaInfo->name);
tNameExtractFullName(&pTableMetaInfo->name, pAlterTableMsg->tableFname);
pAlterTableMsg->type = htons(pAlterInfo->type);
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
@ -1485,7 +1488,7 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db));
tNameExtractFullName(&pTableMetaInfo->name, pAlterDbMsg->db);
return TSDB_CODE_SUCCESS;
}
@ -1610,9 +1613,14 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// TODO refactor full_name
char *db; // ugly code to move the space
pthread_mutex_lock(&pObj->mutex);
db = strstr(pObj->db, TS_PATH_DELIMITER);
db = (db == NULL) ? pObj->db : db + 1;
tstrncpy(pConnect->db, db, sizeof(pConnect->db));
pthread_mutex_unlock(&pObj->mutex);
tstrncpy(pConnect->clientVersion, version, sizeof(pConnect->clientVersion));
tstrncpy(pConnect->msgVersion, "", sizeof(pConnect->msgVersion));
@ -1623,13 +1631,17 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
strcpy(pInfoMsg->tableFname, pTableMetaInfo->name);
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
@ -1686,33 +1698,6 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return 0;
}
//static UNUSED_FUNC int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
//// const int32_t defaultSize =
//// minMsgSize() + sizeof(SSuperTableMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS;
//// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
////
//// int32_t n = 0;
//// size_t size = taosArrayGetSize(pQueryInfo->tagCond.pCond);
//// for (int32_t i = 0; i < size; ++i) {
//// assert(0);
////// n += strlen(pQueryInfo->tagCond.cond[i].cond);
//// }
////
//// int32_t tagLen = n * TSDB_NCHAR_SIZE;
//// if (pQueryInfo->tagCond.tbnameCond.cond != NULL) {
//// tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE;
//// }
////
//// int32_t joinCondLen = (TSDB_TABLE_FNAME_LEN + sizeof(int16_t)) * 2;
//// int32_t elemSize = sizeof(SSuperTableMetaElemMsg) * pQueryInfo->numOfTables;
////
//// int32_t colSize = pQueryInfo->groupbyExpr.numOfGroupCols*sizeof(SColIndex);
////
//// int32_t len = tagLen + joinCondLen + elemSize + colSize + defaultSize;
////
//// return MAX(len, TSDB_DEFAULT_PAYLOAD_SIZE);
//}
int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
@ -1725,9 +1710,10 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
size_t size = sizeof(pTableMetaInfo->name);
tstrncpy(pMsg, pTableMetaInfo->name, size);
pMsg += size;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pMsg);
assert(code == TSDB_CODE_SUCCESS);
pMsg += TSDB_TABLE_FNAME_LEN;
}
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP;
@ -1827,7 +1813,6 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(i == 0);
}
assert(isValidDataType(pSchema->type));
pSchema++;
}
@ -1835,8 +1820,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta == NULL);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (!isValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("%p invalid table meta from mnode, name:%s", pSql, pTableMetaInfo->name);
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -1854,11 +1839,19 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
tfree(pSupTableMeta);
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), cMeta, sizeof(CChildTableMeta));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashPut(tscTableMetaInfo, name, strlen(name), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), pTableMeta, s);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashPut(tscTableMetaInfo, name, strlen(name), pTableMeta, s);
}
// update the vgroupInfo if needed
@ -1877,9 +1870,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
}
}
tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, pTableMetaInfo->name);
tscDebug("%p recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid,
tNameGetTableName(&pTableMetaInfo->name));
free(pTableMeta);
return TSDB_CODE_SUCCESS;
}
@ -2144,10 +2138,13 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
SConnectRsp *pConnect = (SConnectRsp *)pRes->pRsp;
tstrncpy(pObj->acctId, pConnect->acctId, sizeof(pObj->acctId)); // copy acctId from response
pthread_mutex_lock(&pObj->mutex);
int32_t len = sprintf(temp, "%s%s%s", pObj->acctId, TS_PATH_DELIMITER, pObj->db);
assert(len <= sizeof(pObj->db));
tstrncpy(pObj->db, temp, sizeof(pObj->db));
pthread_mutex_unlock(&pObj->mutex);
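  // pObj->db now holds the fully qualified "acctId<TS_PATH_DELIMITER>db" form;
  // it is read under the same mutex by cloneCurrentDBName() and tscBuildShowMsg()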
if (pConnect->epSet.numOfEps > 0) {
tscEpSetHtons(&pConnect->epSet);
@ -2174,13 +2171,18 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
int tscProcessUseDbRsp(SSqlObj *pSql) {
STscObj * pObj = pSql->pTscObj;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
tstrncpy(pObj->db, pTableMetaInfo->name, sizeof(pObj->db));
return 0;
pthread_mutex_lock(&pObj->mutex);
int ret = tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
pthread_mutex_unlock(&pObj->mutex);
return ret;
}
int tscProcessDropDbRsp(SSqlObj *pSql) {
pSql->pTscObj->db[0] = 0;
//TODO LOCK DB WHEN MODIFY IT
//pSql->pTscObj->db[0] = 0;
taosHashEmpty(tscTableMetaInfo);
return 0;
}
@ -2189,18 +2191,22 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
  // The cached tableMeta is expired in this case, so remove it from the hash table
taosHashRemove(tscTableMetaInfo, pTableMetaInfo->name, strnlen(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
(int32_t) taosHashGetSize(tscTableMetaInfo));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
assert(pTableMetaInfo->pTableMeta == NULL);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, name, (int32_t) taosHashGetSize(tscTableMetaInfo));
pTableMetaInfo->pTableMeta = NULL;
return 0;
}
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
tscDebug("%p remove tableMeta in hashMap after alter-table: %s", pSql, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
@ -2239,6 +2245,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
assert(pRes->rspLen >= sizeof(SRetrieveTableRsp));
SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
if (pRetrieve == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -2319,7 +2327,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
STableMetaInfo *pNewMeterMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name));
tNameAssign(&pNewMeterMetaInfo->name, &pTableMetaInfo->name);
if (pSql->cmd.autoCreated) {
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
@ -2350,7 +2358,8 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
}
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(strlen(pTableMetaInfo->name) != 0);
assert(tIsValidName(&pTableMetaInfo->name));
tfree(pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
@ -2358,15 +2367,18 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
int32_t len = (int32_t) strlen(pTableMetaInfo->name);
taosHashGetClone(tscTableMetaInfo, pTableMetaInfo->name, len, NULL, pTableMetaInfo->pTableMeta, -1);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
size_t len = strlen(name);
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
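  // the table meta cache is keyed by the serialized full name produced by
  // tNameExtractFullName(), matching the key used when the meta was stored
  // in tscProcessTableMetaRsp()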
// TODO resize the tableMeta
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, pTableMetaInfo->name);
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2394,16 +2406,24 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
const char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
if (code != TSDB_CODE_SUCCESS) {
tscError("%p failed to generate the table full name", pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMeta) {
tscDebug("%p update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql, name,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
size_t len = strlen(name);
taosHashRemove(tscTableMetaInfo, name, len);
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2444,8 +2464,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta* pTableMeta = tscTableMetaClone(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, &pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
}
if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
@ -2485,8 +2505,8 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg;
tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg;
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;

View File

@ -15,7 +15,7 @@
#include "hash.h"
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "tkey.h"
#include "tcache.h"
#include "tnote.h"
@ -110,6 +110,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
rpcClose(pDnodeConn);
free(pObj->tscCorMgmtEpSet);
free(pObj);
return NULL;
}
memcpy(pObj->tscCorMgmtEpSet, &corMgmtEpSet, sizeof(SRpcCorEpSet));
@ -294,6 +295,10 @@ void taos_close(TAOS *taos) {
tscDebug("%p HB is freed", pHb);
taosReleaseRef(tscObjRef, pHb->self);
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result
tsem_init(&pHb->rspSem, 0, 0);
#endif // __APPLE__
taos_free_result(pHb);
}
}
@ -936,7 +941,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must before clean the sqlcmd object
tscResetSqlCmdObj(&pSql->cmd);
tscResetSqlCmd(&pSql->cmd, false);
SSqlCmd *pCmd = &pSql->cmd;
@ -995,7 +1000,8 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
return code;
}
if (payloadLen + strlen(pTableMetaInfo->name) + 128 >= pCmd->allocSize) {
int32_t xlen = tNameLen(&pTableMetaInfo->name);
if (payloadLen + xlen + 128 >= pCmd->allocSize) {
char *pNewMem = realloc(pCmd->payload, pCmd->allocSize + tblListLen);
if (pNewMem == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1008,7 +1014,9 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
pMsg = pCmd->payload;
}
payloadLen += sprintf(pMsg + payloadLen, "%s,", pTableMetaInfo->name);
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
payloadLen += sprintf(pMsg + payloadLen, "%s,", n);
}
*(pMsg + payloadLen) = '\0';

View File

@ -104,7 +104,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, pTableMetaInfo->name);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name));
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
@ -191,7 +191,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
@ -291,7 +293,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pStream->stime += 1;
}
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, tNameGetTableName(&pTableMetaInfo->name),
pStream->numOfRes);
tfree(pTableMetaInfo->pTableMeta);
@ -556,7 +558,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
pStream, tNameGetTableName(&pTableMetaInfo->name), pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) {

View File

@ -313,7 +313,7 @@ static int tscLoadSubscriptionProgress(SSub* pSub) {
char buf[TSDB_MAX_SQL_LEN];
sprintf(buf, "%s/subscribe/%s", tsDataDir, pSub->topic);
FILE* fp = fopen(buf, "r");
FILE* fp = fopen(buf, "rb");
if (fp == NULL) {
tscDebug("subscription progress file does not exist: %s", pSub->topic);
return 1;
@ -368,7 +368,7 @@ void tscSaveSubscriptionProgress(void* sub) {
}
sprintf(path, "%s/subscribe/%s", tsDataDir, pSub->topic);
FILE* fp = fopen(path, "w+");
FILE* fp = fopen(path, "wb+");
if (fp == NULL) {
tscError("failed to create progress file for subscription: %s", pSub->topic);
return;

View File

@ -16,7 +16,7 @@
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "qTsbuf.h"
#include "tcompare.h"
#include "tscLog.h"
@ -55,6 +55,58 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
}
static void subquerySetState(SSqlObj *pSql, SSubqueryState *subState, int idx, int8_t state) {
assert(idx < subState->numOfSub);
assert(subState->states);
pthread_mutex_lock(&subState->mutex);
tscDebug("subquery:%p,%d state set to %d", pSql, idx, state);
subState->states[idx] = state;
pthread_mutex_unlock(&subState->mutex);
}
static bool allSubqueryDone(SSqlObj *pParentSql) {
bool done = true;
SSubqueryState *subState = &pParentSql->subState;
//lock in caller
for (int i = 0; i < subState->numOfSub; i++) {
if (0 == subState->states[i]) {
tscDebug("%p subquery:%p,%d is NOT finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
done = false;
break;
} else {
tscDebug("%p subquery:%p,%d is finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
}
}
return done;
}
static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) {
SSubqueryState *subState = &pParentSql->subState;
assert(idx < subState->numOfSub);
pthread_mutex_lock(&subState->mutex);
tscDebug("%p subquery:%p,%d state set to 1", pParentSql, pSql, idx);
subState->states[idx] = 1;
bool done = allSubqueryDone(pParentSql);
pthread_mutex_unlock(&subState->mutex);
return done;
}
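// Illustrative sketch (not part of this change): the three helpers above replace the
// old atomic numOfRemain counter with a mutex-protected per-subquery state array. A
// caller allocates subState.states and initializes subState.mutex once, marks a
// subquery as pending with subquerySetState(..., 0) before launching it, and lets each
// completion callback report through subAndCheckDone, roughly:
//
//   pParentSql->subState.numOfSub = numOfSub;
//   pParentSql->subState.states   = calloc(numOfSub, sizeof(*pParentSql->subState.states));
//   pthread_mutex_init(&pParentSql->subState.mutex, NULL);
//   ...
//   // inside each subquery callback:
//   if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
//     return;   // other subqueries are still running
//   }
//   // all subqueries finished: merge results / resume the parent query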
static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
@ -367,10 +419,6 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
// scan all subquery, if one sub query has only ts, ignore it
tscDebug("%p start to launch secondary subqueries, %d out of %d needs to query", pSql, numOfSub, pSql->subState.numOfSub);
//the subqueries that do not actually launch the secondary query to virtual node is set as completed.
SSubqueryState* pState = &pSql->subState;
pState->numOfRemain = numOfSub;
bool success = true;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
@ -403,6 +451,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
@ -480,10 +529,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
}
}
subquerySetState(pPrevSub, &pSql->subState, i, 0);
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
}
//prepare the subqueries object failed, abort
@ -517,22 +568,28 @@ void freeJoinSubqueryObj(SSqlObj* pSql) {
SJoinSupporter* p = pSub->param;
tscDestroyJoinSupporter(p);
if (pSub->res.code == TSDB_CODE_SUCCESS) {
taos_free_result(pSub);
}
taos_free_result(pSub);
pSql->pSubs[i] = NULL;
}
if (pSql->subState.states) {
pthread_mutex_destroy(&pSql->subState.mutex);
}
tfree(pSql->subState.states);
pSql->subState.numOfSub = 0;
}
static void quitAllSubquery(SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
assert(pSqlObj->subState.numOfRemain > 0);
if (atomic_sub_fetch_32(&pSqlObj->subState.numOfRemain, 1) <= 0) {
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
static int32_t quitAllSubquery(SSqlObj* pSqlSub, SSqlObj* pSqlObj, SJoinSupporter* pSupporter) {
if (subAndCheckDone(pSqlSub, pSqlObj, pSupporter->subqueryIndex)) {
tscError("%p all subquery return and query failed, global code:%s", pSqlObj, tstrerror(pSqlObj->res.code));
freeJoinSubqueryObj(pSqlObj);
return 0;
}
return 1;
//tscDestroyJoinSupporter(pSupporter);
}
@ -674,7 +731,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscProcessSql(pSql);
}
@ -777,6 +834,17 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
// check for the error code firstly
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed
@ -785,7 +853,9 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -802,7 +872,9 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p failed to malloc memory", pSql);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -844,9 +916,10 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// no data exists in next vnode, mark the <tid, tags> query completed
// only when no subquery exists any more, proceed to get the intersect of the <tid, tags> tuple sets.
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
tscDebug("%p tagRetrieve:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub);
return;
}
}
SArray *s1 = NULL, *s2 = NULL;
int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2);
@ -885,14 +958,16 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2);
SSqlObj* psub1 = pParentSql->pSubs[0];
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo1->pVgroupTables);
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables);
SSqlObj* psub2 = pParentSql->pSubs[1];
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo2->pVgroupTables);
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables);
pParentSql->subState.numOfSub = 2;
pParentSql->subState.numOfRemain = pParentSql->subState.numOfSub;
memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pParentSql);
for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) {
SSqlObj* sub = pParentSql->pSubs[m];
issueTSCompQuery(sub, sub->param, pParentSql);
@ -915,6 +990,17 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
assert(!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
// check for the error code firstly
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
// todo retry if other subqueries are not failed yet
@ -922,7 +1008,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p sub query failed, code:%s, index:%d", pSql, tstrerror(numOfRows), pSupporter->subqueryIndex);
pParentSql->res.code = numOfRows;
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -930,14 +1018,16 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (numOfRows > 0) { // write the compressed timestamp to disk file
if(pSupporter->f == NULL) {
pSupporter->f = fopen(pSupporter->path, "w");
pSupporter->f = fopen(pSupporter->path, "wb");
if (pSupporter->f == NULL) {
tscError("%p failed to create tmp file:%s, reason:%s", pSql, pSupporter->path, strerror(errno));
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
@ -955,7 +1045,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
tscAsyncResultOnError(pParentSql);
@ -974,7 +1066,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
// continue to retrieve ts-comp data from vnode
if (!pRes->completed) {
taosGetTmpfilePath("ts-join", pSupporter->path);
pSupporter->f = fopen(pSupporter->path, "w");
pSupporter->f = fopen(pSupporter->path, "wb");
pRes->row = pRes->numOfRows;
taos_fetch_rows_a(tres, tsCompRetrieveCallback, param);
@ -1000,7 +1092,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
taosGetTmpfilePath("ts-join", pSupporter->path);
// TODO check for failure
pSupporter->f = fopen(pSupporter->path, "w");
pSupporter->f = fopen(pSupporter->path, "wb");
pRes->row = pRes->numOfRows;
// set the callback function
@ -1009,9 +1101,9 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
return;
}
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
return;
}
}
tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql);
@ -1049,6 +1141,19 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
SSqlRes* pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, numOfRows, pParentSql->res.code);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
}
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(numOfRows == taos_errno(pSql));
@ -1088,9 +1193,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
}
}
assert(pState->numOfRemain > 0);
if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
tscDebug("%p sub:%p completed, remain:%d, total:%d", pParentSql, tres, pState->numOfRemain, pState->numOfSub);
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
tscDebug("%p sub:%p,%d completed, total:%d", pParentSql, tres, pSupporter->subqueryIndex, pState->numOfSub);
return;
}
@ -1205,15 +1309,16 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
}
}
// get the number of subquery that need to retrieve the next vnode.
if (orderedPrjQuery) {
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if (pSub != NULL && pSub->res.row >= pSub->res.numOfRows && pSub->res.completed) {
pSql->subState.numOfRemain++;
subquerySetState(pSub, &pSql->subState, i, 0);
}
}
}
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
@ -1270,7 +1375,19 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
// retrieve data from current vnode.
tscDebug("%p retrieve data from %d subqueries", pSql, numOfFetch);
SJoinSupporter* pSupporter = NULL;
pSql->subState.numOfRemain = numOfFetch;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
if (pSql1 == NULL) {
continue;
}
SSqlRes* pRes1 = &pSql1->res;
if (pRes1->row >= pRes1->numOfRows) {
subquerySetState(pSql1, &pSql->subState, i, 0);
}
}
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSql1 = pSql->pSubs[i];
@ -1372,7 +1489,10 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// retrieve actual query results from vnode during the second stage join subquery
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -1384,7 +1504,11 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
tscError("%p abort query, code:%s, global code:%s", pSql, tstrerror(code), tstrerror(pParentSql->res.code));
pParentSql->res.code = code;
quitAllSubquery(pParentSql, pSupporter);
if (quitAllSubquery(pSql, pParentSql, pSupporter)) {
return;
}
tscAsyncResultOnError(pParentSql);
return;
@ -1408,9 +1532,9 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
// In case of consequence query from other vnode, do not wait for other query response here.
if (!(pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0))) {
if (atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) {
return;
}
}
}
tscSetupOutputColumnIndex(pParentSql);
@ -1422,6 +1546,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (pTableMetaInfo->vgroupIndex > 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
pSql->fp = joinRetrieveFinalResCallback; // continue retrieve data
pSql->cmd.command = TSDB_SQL_FETCH;
tscProcessSql(pSql);
} else { // first retrieve from vnode during the secondary stage sub-query
// the command flag must be set after the semaphore has been correctly set.
@ -1457,8 +1582,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pSql->pSubs[pSql->subState.numOfRemain++] = pNew;
assert(pSql->subState.numOfRemain <= pSql->subState.numOfSub);
pSql->pSubs[tableIndex] = pNew;
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
addGroupInfoForSubquery(pSql, pNew, 0, tableIndex);
@ -1534,7 +1658,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name);
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
} else {
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
@ -1569,7 +1693,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name);
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
}
} else {
assert(0);
@ -1590,6 +1714,19 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
int32_t code = TSDB_CODE_SUCCESS;
pSql->subState.numOfSub = pQueryInfo->numOfTables;
if (pSql->subState.states == NULL) {
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
if (pSql->subState.states == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pthread_mutex_init(&pSql->subState.mutex, NULL);
}
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pSql);
bool hasEmptySub = false;
tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables);
@ -1622,12 +1759,23 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
pSql->cmd.command = TSDB_SQL_RETRIEVE_EMPTY_RESULT;
(*pSql->fp)(pSql->param, pSql, 0);
} else {
int fail = 0;
for (int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
SSqlObj* pSub = pSql->pSubs[i];
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
pSql->subState.numOfRemain = i - 1; // the already sent request will continue and do not go to the error process routine
break;
if (fail) {
(*pSub->fp)(pSub->param, pSub, 0);
continue;
}
if ((code = tscProcessSql(pSub)) != TSDB_CODE_SUCCESS) {
pRes->code = code;
(*pSub->fp)(pSub->param, pSub, 0);
fail = 1;
}
}
if(fail) {
return;
}
pSql->cmd.command = TSDB_SQL_TABLE_JOIN_RETRIEVE;
@ -1728,7 +1876,21 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
return ret;
}
pState->numOfRemain = pState->numOfSub;
if (pState->states == NULL) {
pState->states = calloc(pState->numOfSub, sizeof(*pState->states));
if (pState->states == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscAsyncResultOnError(pSql);
tfree(pMemoryBuf);
return ret;
}
pthread_mutex_init(&pState->mutex, NULL);
}
memset(pState->states, 0, sizeof(*pState->states) * pState->numOfSub);
tscDebug("%p reset all sub states to 0", pSql);
pRes->code = TSDB_CODE_SUCCESS;
int32_t i = 0;
@ -1829,7 +1991,22 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES
* current query failed, and the retry count is less than the available
* count, retry query clear previous retrieved data, then launch a new sub query
*/
static int32_t tscReissueSubquery(SRetrieveSupport *trsupport, SSqlObj *pSql, int32_t code) {
static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32_t code) {
SRetrieveSupport *trsupport = malloc(sizeof(SRetrieveSupport));
if (trsupport == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memcpy(trsupport, oriTrs, sizeof(*trsupport));
const uint32_t nBufferSize = (1u << 16u); // 64KB
trsupport->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage));
if (trsupport->localBuffer == NULL) {
tscError("%p failed to malloc buffer for local buffer, reason:%s", pSql, strerror(errno));
tfree(trsupport);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SSqlObj *pParentSql = trsupport->pParentSql;
int32_t subqueryIndex = trsupport->subqueryIndex;
@ -1877,7 +2054,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
assert(pSql != NULL);
SSubqueryState* pState = &pParentSql->subState;
assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
// retrieved in subquery failed. OR query cancelled in retrieve phase.
if (taos_errno(pSql) == TSDB_CODE_SUCCESS && pParentSql->res.code != TSDB_CODE_SUCCESS) {
@ -1908,14 +2084,12 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
}
}
int32_t remain = -1;
if ((remain = atomic_sub_fetch_32(&pState->numOfRemain, 1)) > 0) {
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
pState->numOfSub - remain);
if (!subAndCheckDone(pSql, pParentSql, subqueryIndex)) {
tscDebug("%p sub:%p,%d freed, not finished, total:%d", pParentSql, pSql, trsupport->subqueryIndex, pState->numOfSub);
tscFreeRetrieveSup(pSql);
return;
}
}
// all subqueries are failed
tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pParentSql, pState->numOfSub,
@ -1980,14 +2154,12 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
return;
}
int32_t remain = -1;
if ((remain = atomic_sub_fetch_32(&pParentSql->subState.numOfRemain, 1)) > 0) {
tscDebug("%p sub:%p orderOfSub:%d freed, finished subqueries:%d", pParentSql, pSql, trsupport->subqueryIndex,
pState->numOfSub - remain);
if (!subAndCheckDone(pSql, pParentSql, idx)) {
tscDebug("%p sub:%p orderOfSub:%d freed, not finished", pParentSql, pSql, trsupport->subqueryIndex);
tscFreeRetrieveSup(pSql);
return;
}
}
// all sub-queries are returned, start to local merge process
pDesc->pColumnModel->capacity = trsupport->pExtMemBuffer[idx]->numOfElemsPerPage;
@ -1998,7 +2170,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
tscClearInterpInfo(pPQueryInfo);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscDebug("%p build loser tree completed", pParentSql);
pParentSql->res.precision = pSql->res.precision;
@ -2033,7 +2205,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
SSqlObj * pParentSql = trsupport->pParentSql;
SSubqueryState* pState = &pParentSql->subState;
assert(pState->numOfRemain <= pState->numOfSub && pState->numOfRemain >= 0);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0];
@ -2254,7 +2425,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
}
}
if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) {
if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) {
tscDebug("%p insert:%p,%d completed, total:%d", pParentObj, tres, pSupporter->index, pParentObj->subState.numOfSub);
return;
}
@ -2286,7 +2458,9 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscFreeQueryInfo(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
subquerySetState(pSql, &pParentObj->subState, i, 0);
tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql);
}
@ -2297,14 +2471,14 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscDebug("%p cleanup %d tableMeta in hashTable", pParentObj, pParentObj->cmd.numOfTables);
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
char* name = pParentObj->cmd.pTableNameList[i];
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
pParentObj->cmd.parseFinished = false;
pParentObj->subState.numOfRemain = numOfFailed;
tscResetSqlCmdObj(&pParentObj->cmd);
tscResetSqlCmd(&pParentObj->cmd, false);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
@ -2378,7 +2552,19 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
// the number of already initialized subqueries
int32_t numOfSub = 0;
pSql->subState.numOfRemain = pSql->subState.numOfSub;
if (pSql->subState.states == NULL) {
pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(*pSql->subState.states));
if (pSql->subState.states == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pthread_mutex_init(&pSql->subState.mutex, NULL);
}
memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub);
tscDebug("%p reset all sub states to 0", pSql);
pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES);
if (pSql->pSubs == NULL) {
goto _error;

View File

@ -16,7 +16,7 @@
#include "tscUtil.h"
#include "hash.h"
#include "os.h"
#include "qAst.h"
#include "texpr.h"
#include "taosmsg.h"
#include "tkey.h"
#include "tmd5.h"
@ -89,21 +89,6 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
return true;
}
// todo refactor, extract methods and move the common module
void tscGetDBInfoFromTableFullName(char* tableId, char* db) {
char* st = strstr(tableId, TS_PATH_DELIMITER);
if (st != NULL) {
char* end = strstr(st + 1, TS_PATH_DELIMITER);
if (end != NULL) {
memcpy(db, tableId, (end - tableId));
db[end - tableId] = 0;
return;
}
}
db[0] = 0;
}
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
@ -396,7 +381,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd) {
tfree(pCmd->pQueryInfo);
}
void tscResetSqlCmdObj(SSqlCmd* pCmd) {
void tscResetSqlCmd(SSqlCmd* pCmd, bool removeMeta) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@ -414,13 +399,13 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd) {
pCmd->numOfTables = 0;
tfree(pCmd->pTableNameList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, removeMeta);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
tscFreeQueryInfo(pCmd);
}
void tscFreeSqlResult(SSqlObj* pSql) {
tscDestroyLocalReducer(pSql);
tscDestroyLocalMerger(pSql);
SSqlRes* pRes = &pSql->res;
tscDestroyResPointerInfo(pRes);
@ -441,6 +426,12 @@ static void tscFreeSubobj(SSqlObj* pSql) {
pSql->pSubs[i] = NULL;
}
if (pSql->subState.states) {
pthread_mutex_destroy(&pSql->subState.mutex);
}
tfree(pSql->subState.states);
pSql->subState.numOfSub = 0;
}
@ -510,7 +501,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->self = 0;
tscFreeSqlResult(pSql);
tscResetSqlCmdObj(pCmd);
tscResetSqlCmd(pCmd, false);
tfree(pCmd->tagData.data);
pCmd->tagData.dataLen = 0;
@ -524,7 +515,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
free(pSql);
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta) {
if (pDataBlock == NULL) {
return;
}
@ -537,6 +528,13 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
tfree(pDataBlock->pTableMeta);
}
if (removeMeta) {
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pDataBlock->tableName, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
tfree(pDataBlock);
}
@ -572,21 +570,21 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) {
size_t size = taosArrayGetSize(pDataBlockList);
for (int32_t i = 0; i < size; i++) {
void* d = taosArrayGetP(pDataBlockList, i);
tscDestroyDataBlock(d);
tscDestroyDataBlock(d, false);
}
taosArrayDestroy(pDataBlockList);
return NULL;
}
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable) {
void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable, bool removeMeta) {
if (pBlockHashTable == NULL) {
return NULL;
}
STableDataBlocks** p = taosHashIterate(pBlockHashTable, NULL);
while(p) {
tscDestroyDataBlock(*p);
tscDestroyDataBlock(*p, removeMeta);
p = taosHashIterate(pBlockHashTable, p);
}
@ -606,15 +604,13 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// todo refactor
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
tNameAssign(&pTableMetaInfo->name, &pDataBlock->tableName);
if (pTableMetaInfo->pTableMeta != NULL) {
tfree(pTableMetaInfo->pTableMeta);
}
pTableMetaInfo->pTableMeta = tscTableMetaClone(pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableName, tListLen(pDataBlock->tableName)) == 0);
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
}
/*
@ -649,7 +645,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
* @param dataBlocks
* @return
*/
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
@ -677,18 +673,18 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
tstrncpy(dataBuf->tableName, name, sizeof(dataBuf->tableName));
tNameAssign(&dataBuf->tableName, name);
// Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaClone(pTableMeta);
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList) {
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) {
@ -696,7 +692,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
}
if (*dataBlocks == NULL) {
int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId, pTableMeta, dataBlocks);
int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, name, pTableMeta, dataBlocks);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -797,12 +793,12 @@ static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
pCmd->pTableNameList[i++] = strndup(pBlocks->tableName, TSDB_TABLE_FNAME_LEN);
pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
if (freeBlockMap) {
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList, false);
}
}
@ -822,7 +818,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
taosHashCleanup(pVnodeDataBlockHashList);
@ -855,8 +851,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("%p name:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableName,
tscDebug("%p name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@ -1058,7 +1054,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
SInternalField* pInfo = taosArrayGet(pFieldInfo->internalField, i);
if (pInfo->pArithExprInfo != NULL) {
tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL);
tExprTreeDestroy(pInfo->pArithExprInfo->pExpr, NULL);
SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) {
@ -1091,6 +1087,8 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
// set the correct columnIndex index
if (pColIndex->columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
pExpr->colInfo.colId = TSDB_TBNAME_COLUMN_INDEX;
} else if (pColIndex->columnIndex == TSDB_BLOCK_DIST_COLUMN_INDEX) {
pExpr->colInfo.colId = TSDB_BLOCK_DIST_COLUMN_INDEX;
} else if (pColIndex->columnIndex <= TSDB_UD_COLUMN_INDEX) {
pExpr->colInfo.colId = pColIndex->columnIndex;
} else {
@ -1304,7 +1302,7 @@ SColumn* tscColumnClone(const SColumn* src) {
dst->colIndex = src->colIndex;
dst->numOfFilters = src->numOfFilters;
dst->filterInfo = tscFilterInfoClone(src->filterInfo, src->numOfFilters);
dst->filterInfo = tFilterInfoDup(src->filterInfo, src->numOfFilters);
return dst;
}
@ -1507,7 +1505,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t
return false;
}
if (colId == TSDB_TBNAME_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
if (colId == TSDB_TBNAME_COLUMN_INDEX || colId == TSDB_BLOCK_DIST_COLUMN_INDEX || (colId <= TSDB_UD_COLUMN_INDEX && numOfParams == 2)) {
return true;
}
@ -1810,10 +1808,10 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
info->itemList = taosArrayClone(pInfo->itemList);
info->itemList = taosArrayDup(pInfo->itemList);
}
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) {
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
if (pVgroupTables == NULL) {
return NULL;
}
@ -1844,7 +1842,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) {
tfree(pQueryInfo->pTableMetaInfo);
}
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) {
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
if (pAlloc == NULL) {
@ -1862,7 +1860,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo;
if (name != NULL) {
tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
tNameAssign(&pTableMetaInfo->name, name);
}
pTableMetaInfo->pTableMeta = pTableMeta;
@ -1881,7 +1879,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoClone(pVgroupTables);
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables);
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
@ -1944,6 +1942,10 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
}
if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
#ifdef __APPLE__
// to satisfy later tsem_destroy in taos_free_result
tsem_init(&pNew->rspSem, 0, 0);
#endif // __APPLE__
tscFreeSqlObj(pNew);
return NULL;
}
@ -1959,7 +1961,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
@ -2064,7 +2066,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayClone(pQueryInfo->groupbyExpr.columnInfo);
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@ -2115,27 +2117,30 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA;
char* name = pTableMetaInfo->name;
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = tscTableMetaClone(pTableMetaInfo->pTableMeta);
STableMeta* pTableMeta = tscTableMetaDup(pTableMetaInfo->pTableMeta);
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = tscTableMetaClone(pPrevInfo->pTableMeta);
if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) {
terrno = TSDB_CODE_TSC_APP_ERROR;
goto _error;
}
STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pTableMetaInfo->pVgroupTables);
}
// this case cannot happen
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, name);
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
@ -2160,7 +2165,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
"%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
size, pNewQueryInfo->fieldsInfo.numOfOutput, pFinalInfo->name, pNewQueryInfo->window.skey,
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
tscPrintSelectClause(pNew, 0);
@ -2197,7 +2202,7 @@ void tscDoQuery(SSqlObj* pSql) {
}
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
tscProcessMultiVnodesImportFromFile(pSql);
tscImportDataFromFile(pSql);
} else {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
uint16_t type = pQueryInfo->type;
@ -2297,7 +2302,6 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
}
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
@ -2512,7 +2516,11 @@ bool tscSetSqlOwner(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
// set the sql object owner
#ifdef __APPLE__
pthread_t threadId = (pthread_t)taosGetSelfPthreadId();
#else // __APPLE__
uint64_t threadId = taosGetSelfPthreadId();
#endif // __APPLE__
if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) {
pRes->code = TSDB_CODE_QRY_IN_EXEC;
return false;
@ -2695,7 +2703,7 @@ uint32_t tscGetTableMetaMaxSize() {
return sizeof(STableMeta) + TSDB_MAX_COLUMNS * sizeof(SSchema);
}
STableMeta* tscTableMetaClone(STableMeta* pTableMeta) {
STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
uint32_t size = tscGetTableMetaSize(pTableMeta);
STableMeta* p = calloc(1, size);

View File

@ -27,23 +27,23 @@
extern "C" {
#endif
#define STR_TO_VARSTR(x, str) \
do { \
VarDataLenT __len = strlen(str); \
*(VarDataLenT *)(x) = __len; \
memcpy(varDataVal(x), (str), __len); \
#define STR_TO_VARSTR(x, str) \
do { \
VarDataLenT __len = (VarDataLenT)strlen(str); \
*(VarDataLenT *)(x) = __len; \
memcpy(varDataVal(x), (str), __len); \
} while (0);
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
do { \
#define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \
do { \
char *_e = stpncpy(varDataVal(x), (str), (_maxs)-VARSTR_HEADER_SIZE); \
varDataSetLen(x, (_e - (x)-VARSTR_HEADER_SIZE)); \
varDataSetLen(x, (_e - (x)-VARSTR_HEADER_SIZE)); \
} while (0)
#define STR_WITH_SIZE_TO_VARSTR(x, str, _size) \
do { \
*(VarDataLenT *)(x) = (_size); \
memcpy(varDataVal(x), (str), (_size)); \
#define STR_WITH_SIZE_TO_VARSTR(x, str, _size) \
do { \
*(VarDataLenT *)(x) = (VarDataLenT)(_size); \
memcpy(varDataVal(x), (str), (_size)); \
} while (0);
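// Illustrative usage (not part of this change): a varstr is a VarDataLenT length
// header followed by the raw payload, so the macros above are applied to a buffer
// that reserves VARSTR_HEADER_SIZE extra bytes, e.g.
//
//   char buf[VARSTR_HEADER_SIZE + 32];
//   STR_TO_VARSTR(buf, "hello");                            // length taken from strlen()
//   STR_WITH_SIZE_TO_VARSTR(buf, "hello", 5);               // caller supplies the length
//   STR_WITH_MAXSIZE_TO_VARSTR(buf, "hello", sizeof(buf));  // bounded copy via stpncpy
//
// afterwards varDataVal(buf) points at the payload; the varDataLen()/varDataSetLen()
// accessors are assumed from the surrounding headers.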
// ----------------- TSDB COLUMN DEFINITION
@ -156,7 +156,7 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) {
* +----------+----------+---------------------------------+---------------------------------+
* | len | sversion | First part | Second part |
* +----------+----------+---------------------------------+---------------------------------+
*
*
* NOTE: timestamp in this row structure is TKEY instead of TSKEY
*/
typedef void *SDataRow;

View File

@ -31,6 +31,15 @@ extern "C" {
struct tExprNode;
struct SSchema;
#define QUERY_COND_REL_PREFIX_IN "IN|"
#define QUERY_COND_REL_PREFIX_LIKE "LIKE|"
#define QUERY_COND_REL_PREFIX_IN_LEN 3
#define QUERY_COND_REL_PREFIX_LIKE_LEN 5
typedef bool (*__result_filter_fn_t)(const void *, void *);
typedef void (*__do_filter_suppl_fn_t)(void *, void *);
enum {
TSQL_NODE_DUMMY = 0x0,
TSQL_NODE_EXPR = 0x1,
@ -38,9 +47,6 @@ enum {
TSQL_NODE_VALUE = 0x4,
};
typedef bool (*__result_filter_fn_t)(const void *, void *);
typedef void (*__do_filter_suppl_fn_t)(void *, void *);
/**
* this structure is used to filter data in tags, so the offset of filtered tag column in tagdata string is required
*/
@ -52,12 +58,6 @@ typedef struct tQueryInfo {
bool indexed; // indexed columns
} tQueryInfo;
typedef struct SExprTraverseSupp {
__result_filter_fn_t nodeFilterFn;
__do_filter_suppl_fn_t setupInfoFn;
void * pExtInfo;
} SExprTraverseSupp;
typedef struct tExprNode {
uint8_t nodeType;
union {
@ -65,7 +65,7 @@ typedef struct tExprNode {
uint8_t optr; // filter operator
uint8_t hasPK; // 0: do not contain primary filter, 1: contain
void * info; // support filter operation on this expression only available for leaf node
struct tExprNode *pLeft; // left child pointer
struct tExprNode *pRight; // right child pointer
} _node;
@ -74,19 +74,27 @@ typedef struct tExprNode {
};
} tExprNode;
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
typedef struct SExprTraverseSupp {
__result_filter_fn_t nodeFilterFn;
__do_filter_suppl_fn_t setupInfoFn;
void * pExtInfo;
} SExprTraverseSupp;
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *));
void tExprTreeDestroy(tExprNode **pExprs, void (*fp)(void*));
bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
int32_t rightType, void *output, int32_t order);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
char *(*cb)(void *, const char*, int32_t));
#ifdef __cplusplus
}
#endif

View File

@ -88,8 +88,8 @@ extern int32_t tsMinRowsInFileBlock;
extern int32_t tsMaxRowsInFileBlock;
extern int16_t tsCommitTime; // seconds
extern int32_t tsTimePrecision;
extern int16_t tsCompression;
extern int16_t tsWAL;
extern int8_t tsCompression;
extern int8_t tsWAL;
extern int32_t tsFsyncPeriod;
extern int32_t tsReplications;
extern int32_t tsQuorum;

View File

@ -1,3 +1,18 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_NAME_H
#define TDENGINE_NAME_H
@ -21,6 +36,20 @@ typedef struct SColumnInfoData {
void* pData; // the corresponding block data in memory
} SColumnInfoData;
#define TSDB_DB_NAME_T 1
#define TSDB_TABLE_NAME_T 2
#define T_NAME_ACCT 0x1u
#define T_NAME_DB 0x2u
#define T_NAME_TABLE 0x4u
typedef struct SName {
uint8_t type; //db_name_t, table_name_t
char acctId[TSDB_ACCT_ID_LEN];
char dbname[TSDB_DB_NAME_LEN];
char tname[TSDB_TABLE_NAME_LEN];
} SName;
void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
@ -31,13 +60,15 @@ void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable);
SSchema tGetTableNameColumnSchema();
SSchema tGetBlockDistColumnSchema();
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name);
bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters);
SSchema tscGetTbnameColumnSchema();
SSchema tGetTbnameColumnSchema();
/**
* check if the schema is valid or not, including following aspects:
@ -51,6 +82,28 @@ SSchema tscGetTbnameColumnSchema();
* @param numOfCols
* @return
*/
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
int32_t tNameExtractFullName(const SName* name, char* dst);
int32_t tNameLen(const SName* name);
SName* tNameDup(const SName* name);
bool tIsValidName(const SName* name);
const char* tNameGetTableName(const SName* name);
int32_t tNameGetDbName(const SName* name, char* dst);
int32_t tNameGetFullDbName(const SName* name, char* dst);
bool tNameIsEmpty(const SName* name);
void tNameAssign(SName* dst, const SName* src);
int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
int32_t tNameSetAcctId(SName* dst, const char* acct);
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken);
#endif // TDENGINE_NAME_H
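A minimal usage sketch of the SName helpers declared above (based only on the
signatures in this diff; the literal name string and the 0-on-success return of
tNameFromString are assumptions):

SName name = {0};
if (tNameFromString(&name, "root.db1.tb1", T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE) == 0) {
  char fname[TSDB_TABLE_FNAME_LEN] = {0};
  tNameExtractFullName(&name, fname);           // rebuilds "root.db1.tb1"
  const char *tbl = tNameGetTableName(&name);   // points at "tb1"
}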

View File

@ -15,9 +15,9 @@
#include "os.h"
#include "qArithmeticOperator.h"
#include "ttype.h"
#include "tutil.h"
#include "tarithoperator.h"
#define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \
{ \

View File

@ -16,18 +16,15 @@
#include "os.h"
#include "exception.h"
#include "qArithmeticOperator.h"
#include "qAst.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "tarray.h"
#include "tbuffer.h"
#include "tcompare.h"
#include "tname.h"
#include "tschemautil.h"
#include "tsdb.h"
#include "tskiplist.h"
#include "tsqlfunction.h"
#include "texpr.h"
#include "tarithoperator.h"
static uint8_t UNUSED_FUNC isQueryOnPrimaryKey(const char *primaryColumnName, const tExprNode *pLeft, const tExprNode *pRight) {
if (pLeft->nodeType == TSQL_NODE_COL) {
@ -102,13 +99,15 @@ static void reverseCopy(char* dest, const char* src, int16_t type, int32_t numOf
}
}
void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *));
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *)) {
if (pNode == NULL) {
return;
}
if (pNode->nodeType == TSQL_NODE_EXPR) {
tExprTreeDestroy(&pNode, fp);
doExprTreeDestroy(&pNode, fp);
} else if (pNode->nodeType == TSQL_NODE_VALUE) {
tVariantDestroy(pNode->pVal);
} else if (pNode->nodeType == TSQL_NODE_COL) {
@ -118,14 +117,14 @@ void tExprNodeDestroy(tExprNode *pNode, void (*fp)(void *)) {
free(pNode);
}
void tExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
if (*pExpr == NULL) {
return;
}
if ((*pExpr)->nodeType == TSQL_NODE_EXPR) {
tExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
tExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp);
doExprTreeDestroy(&(*pExpr)->_node.pRight, fp);
if (fp != NULL) {
fp((*pExpr)->_node.info);
@ -270,8 +269,9 @@ void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput,
}
}
free(pLeftOutput);
free(pRightOutput);
tfree(pdata);
tfree(pLeftOutput);
tfree(pRightOutput);
}
static void exprTreeToBinaryImpl(SBufferWriter* bw, tExprNode* expr) {
@ -342,7 +342,7 @@ static tExprNode* exprTreeFromBinaryImpl(SBufferReader* br) {
}
tExprNode* pExpr = exception_calloc(1, sizeof(tExprNode));
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, pExpr, NULL);
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, pExpr, NULL);
pExpr->nodeType = tbufReadUint8(br);
if (pExpr->nodeType == TSQL_NODE_VALUE) {
@ -396,7 +396,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
int32_t anchor = CLEANUP_GET_ANCHOR();
tExprNode* expr = exception_calloc(1, sizeof(tExprNode));
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprNodeDestroy, expr, NULL);
CLEANUP_PUSH_VOID_PTR_PTR(true, tExprTreeDestroy, expr, NULL);
expr->nodeType = TSQL_NODE_EXPR;
@ -407,7 +407,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
left->pSchema = pSchema;
*pSchema = tscGetTbnameColumnSchema();
*pSchema = tGetTbnameColumnSchema();
tExprNode* right = exception_calloc(1, sizeof(tExprNode));
expr->_node.pRight = right;

View File

@ -121,8 +121,8 @@ int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
int16_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
@ -137,7 +137,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 100; // seconds 10days
int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1;
@ -550,7 +550,7 @@ static void doInitGlobalConfig(void) {
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 3;
cfg.maxValue = 7200000;
cfg.maxValue = 86400 * 365;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
taosInitConfigOption(cfg);
@ -758,7 +758,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "comp";
cfg.ptr = &tsCompression;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_COMP_LEVEL;
cfg.maxValue = TSDB_MAX_COMP_LEVEL;
@ -768,7 +768,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "walLevel";
cfg.ptr = &tsWAL;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_WAL_LEVEL;
cfg.maxValue = TSDB_MAX_WAL_LEVEL;

View File

@ -3,31 +3,12 @@
#include "tname.h"
#include "tstoken.h"
#include "ttokendef.h"
#include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
// todo refactor
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delim) {
};
}
return input;
}
UNUSED_FUNC static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
size_t len = 0;
while (*src != delimiter && *src != 0) {
*dst++ = *src++;
len++;
}
return len;
}
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
void extractTableName(const char* tableId, char* name) {
size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]);
@ -58,6 +39,14 @@ SSchema tGetTableNameColumnSchema() {
tstrncpy(s.name, TSQL_TBNAME_L, TSDB_COL_NAME_LEN);
return s;
}
SSchema tGetBlockDistColumnSchema() {
SSchema s = {0};
s.bytes = TSDB_MAX_BINARY_LEN;
s.type = TSDB_DATA_TYPE_BINARY;
s.colId = TSDB_BLOCK_DIST_COLUMN_INDEX;
tstrncpy(s.name, TSQL_BLOCK_DIST_L, TSDB_COL_NAME_LEN);
return s;
}
SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const char* name) {
SSchema s = {0};
@ -85,7 +74,7 @@ bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
}
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
if (numOfFilters == 0) {
assert(src == NULL);
return NULL;
@ -200,7 +189,7 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
}
}
SSchema tscGetTbnameColumnSchema() {
SSchema tGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
@ -248,7 +237,7 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
return rowLen <= maxLen;
}
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
@ -272,3 +261,184 @@ bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags
return true;
}
int32_t tNameExtractFullName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
// invalid full name format, abort
if (!tIsValidName(name)) {
return -1;
}
int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
assert(name->type == TSDB_TABLE_NAME_T);
dst[len] = TS_PATH_DELIMITER[0];
memcpy(dst + len + 1, name->tname, tnameLen);
dst[len + tnameLen + 1] = 0;
}
return 0;
}
int32_t tNameLen(const SName* name) {
assert(name != NULL);
int32_t len = (int32_t) strlen(name->acctId);
int32_t len1 = (int32_t) strlen(name->dbname);
int32_t len2 = (int32_t) strlen(name->tname);
if (name->type == TSDB_DB_NAME_T) {
assert(len2 == 0);
return len + len1 + TS_PATH_DELIMITER_LEN;
} else {
assert(len2 > 0);
return len + len1 + len2 + TS_PATH_DELIMITER_LEN * 2;
}
}
bool tIsValidName(const SName* name) {
assert(name != NULL);
if (!VALID_NAME_TYPE(name->type)) {
return false;
}
if (strlen(name->acctId) <= 0) {
return false;
}
if (name->type == TSDB_DB_NAME_T) {
return strlen(name->dbname) > 0;
} else {
return strlen(name->dbname) > 0 && strlen(name->tname) > 0;
}
}
SName* tNameDup(const SName* name) {
assert(name != NULL);
SName* p = calloc(1, sizeof(SName));
memcpy(p, name, sizeof(SName));
return p;
}
int32_t tNameGetDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
strncpy(dst, name->dbname, tListLen(name->dbname));
return 0;
}
int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,
"%s.%s", name->acctId, name->dbname);
return 0;
}
bool tNameIsEmpty(const SName* name) {
assert(name != NULL);
return name->type == 0 || strlen(name->acctId) <= 0;
}
const char* tNameGetTableName(const SName* name) {
assert(name != NULL && name->type == TSDB_TABLE_NAME_T);
return &name->tname[0];
}
void tNameAssign(SName* dst, const SName* src) {
memcpy(dst, src, sizeof(SName));
}
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) {
assert(dst != NULL && dbToken != NULL && acct != NULL);
// too long account id or too long db name
if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
return -1;
}
dst->type = TSDB_DB_NAME_T;
tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1);
return 0;
}
int32_t tNameSetAcctId(SName* dst, const char* acct) {
assert(dst != NULL && acct != NULL);
// too long account id
if (strlen(acct) >= tListLen(dst->acctId)) {
return -1;
}
tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
assert(strlen(dst->acctId) > 0);
return 0;
}
int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
assert(dst != NULL && str != NULL && strlen(str) > 0);
char* p = NULL;
if ((type & T_NAME_ACCT) == T_NAME_ACCT) {
p = strstr(str, TS_PATH_DELIMITER);
if (p == NULL) {
return -1;
}
int32_t len = (int32_t)(p - str);
// too long account id
if ((len >= tListLen(dst->acctId)) || (len <= 0)) {
return -1;
}
memcpy (dst->acctId, str, len);
dst->acctId[len] = 0;
assert(strlen(dst->acctId) > 0);
}
if ((type & T_NAME_DB) == T_NAME_DB) {
dst->type = TSDB_DB_NAME_T;
char* start = (char*)((p == NULL)? str:(p+1));
int32_t len = 0;
p = strstr(start, TS_PATH_DELIMITER);
if (p == NULL) {
len = (int32_t) strlen(start);
} else {
len = (int32_t) (p - start);
}
// too long db name
if ((len >= tListLen(dst->dbname)) || (len <= 0)) {
return -1;
}
memcpy (dst->dbname, start, len);
dst->dbname[len] = 0;
}
if ((type & T_NAME_TABLE) == T_NAME_TABLE) {
dst->type = TSDB_TABLE_NAME_T;
char* start = (char*) ((p == NULL)? str: (p+1));
int32_t len = (int32_t) strlen(start);
// too long table name
if ((len >= tListLen(dst->tname)) || (len <= 0)) {
return -1;
}
memcpy (dst->tname, start, len);
dst->tname[len] = 0;
}
return 0;
}

View File

@ -430,7 +430,7 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
}
errno = 0;
if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType)) {
if (IS_SIGNED_NUMERIC_TYPE(pVariant->nType) || (pVariant->nType == TSDB_DATA_TYPE_BOOL)) {
*result = pVariant->i64;
} else if (IS_UNSIGNED_NUMERIC_TYPE(pVariant->nType)) {
*result = pVariant->u64;
@ -775,7 +775,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
return -1;
}
} else {
wcsncpy((wchar_t *)p, pVariant->wpz, pVariant->nLen);
memcpy(p, pVariant->wpz, pVariant->nLen);
newlen = pVariant->nLen;
}
@ -867,4 +867,4 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) {
}
return 0;
}
}

View File

@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.16-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.18-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

View File

@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.16</version>
<version>2.0.18</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.16</version>
<version>2.0.18</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -74,6 +74,14 @@
<version>1.2.58</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-dbcp2 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-dbcp2</artifactId>
<version>2.7.0</version>
</dependency>
</dependencies>
<build>
@ -118,7 +126,7 @@
<include>**/*Test.java</include>
</includes>
<excludes>
<exclude>**/BatchInsertTest.java</exclude>
<exclude>**/AppMemoryLeakTest.java</exclude>
<exclude>**/FailOverTest.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
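The commons-dbcp2 dependency added above enables connection pooling on top of the TDengine JDBC driver. The following is an editorial sketch rather than project-supplied code; the URL, port, database name and credentials are illustrative defaults and should be adjusted to the actual deployment.
import org.apache.commons.dbcp2.BasicDataSource;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
public class Dbcp2PoolSketch {
    public static void main(String[] args) throws Exception {
        // Configure a DBCP2 pool backed by the TDengine JDBC driver.
        BasicDataSource ds = new BasicDataSource();
        ds.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
        ds.setUrl("jdbc:TAOS://127.0.0.1:6030/log"); // illustrative host, port and database
        ds.setUsername("root");                      // illustrative credentials
        ds.setPassword("taosdata");
        ds.setInitialSize(5);
        ds.setMaxTotal(10);
        // Borrow a pooled connection and run a simple query.
        try (Connection conn = ds.getConnection();
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("show databases")) {
            while (rs.next()) {
                System.out.println(rs.getString("name"));
            }
        }
        ds.close();
    }
}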

View File

@ -1,28 +1,13 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrapper {
private final static String PRODUCT_NAME = "TDengine";
private final static String PRODUCT_VESION = "2.0.x.x";
private final static String DRIVER_NAME = "taos-jdbcdriver";
private final static String DRIVER_VERSION = "2.0.x";
private final static int DRIVER_MAJAR_VERSION = 2;
private final static int DRIVER_MINOR_VERSION = 0;
@ -67,9 +52,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return PRODUCT_VESION;
}
public String getDriverName() throws SQLException {
return DRIVER_NAME;
}
public abstract String getDriverName() throws SQLException;
public String getDriverVersion() throws SQLException {
return DRIVER_VERSION;
@ -92,6 +75,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
// whether identifiers such as database and table names are stored in mixed case
return false;
}
@ -100,7 +84,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean storesLowerCaseIdentifiers() throws SQLException {
return false;
return true;
}
public boolean storesMixedCaseIdentifiers() throws SQLException {
@ -168,10 +152,12 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean nullPlusNonNullIsNull() throws SQLException {
// null + non-null != null
return false;
}
public boolean supportsConvert() throws SQLException {
// whether the CONVERT function is supported
return false;
}
@ -196,7 +182,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsGroupBy() throws SQLException {
return false;
return true;
}
public boolean supportsGroupByUnrelated() throws SQLException {
@ -468,7 +454,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public int getDefaultTransactionIsolation() throws SQLException {
return 0;
return Connection.TRANSACTION_NONE;
}
public boolean supportsTransactions() throws SQLException {
@ -476,6 +462,8 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
if (level == Connection.TRANSACTION_NONE)
return true;
return false;
}
@ -495,18 +483,113 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return false;
}
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException {
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException {
return null;
}
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException {
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException {
return null;
}
public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
throws SQLException;
public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException;
protected ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types, Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(1);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(2);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(3);
col3.setColName("TABLE_NAME");
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(4);
col4.setColName("TABLE_TYPE");
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(5);
col5.setColName("REMARKS");
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col5);
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(6);
col6.setColName("TYPE_CAT");
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col6);
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(7);
col7.setColName("TYPE_SCHEM");
col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col7);
ColumnMetaData col8 = new ColumnMetaData();
col8.setColIndex(8);
col8.setColName("TYPE_NAME");
col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col8);
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(9);
col9.setColName("SELF_REFERENCING_COL_NAME");
col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col9);
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(10);
col10.setColName("REF_GENERATION");
col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col10);
resultSet.setColumnMetaDataList(columnMetaDataList);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
ResultSet tables = stmt.executeQuery("show tables");
while (tables.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(10);
rowData.setString(0, dbname); //table_cat
rowData.setString(1, null); //TABLE_SCHEM
rowData.setString(2, tables.getString("table_name")); //TABLE_NAME
rowData.setString(3, "TABLE"); //TABLE_TYPE
rowData.setString(4, ""); //REMARKS
rowDataList.add(rowData);
}
ResultSet stables = stmt.executeQuery("show stables");
while (stables.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(10);
rowData.setString(0, dbname); //TABLE_CAT
rowData.setString(1, null); //TABLE_SCHEM
rowData.setString(2, stables.getString("name")); //TABLE_NAME
rowData.setString(3, "TABLE"); //TABLE_TYPE
rowData.setString(4, "STABLE"); //REMARKS
rowDataList.add(rowData);
}
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
public ResultSet getSchemas() throws SQLException {
return getEmptyResultSet();
@ -516,32 +599,239 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
public ResultSet getTableTypes() throws SQLException {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>(1);
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE");
colMetaData.setColSize(10);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(colMetaData);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
List<TSDBResultSetRowData> rowDataList = new ArrayList<TSDBResultSetRowData>(2);
TSDBResultSetRowData rowData = new TSDBResultSetRowData();
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
TSDBResultSetRowData rowData = new TSDBResultSetRowData(1);
rowData.setString(0, "TABLE");
rowDataList.add(rowData);
rowData = new TSDBResultSetRowData();
rowData = new TSDBResultSetRowData(1);
rowData.setString(0, "STABLE");
rowDataList.add(rowData);
resultSet.setColumnMetaDataList(columnMetaDataList);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
public abstract ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException;
protected ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern, Connection conn) {
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(1);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
// TABLE_SCHEM
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(2);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
// TABLE_NAME
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(3);
col3.setColName("TABLE_NAME");
col3.setColSize(193);
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
// COLUMN_NAME
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(4);
col4.setColName("COLUMN_NAME");
col4.setColSize(65);
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
// DATA_TYPE
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(5);
col5.setColName("DATA_TYPE");
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col5);
// TYPE_NAME
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(6);
col6.setColName("TYPE_NAME");
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col6);
// COLUMN_SIZE
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(7);
col7.setColName("COLUMN_SIZE");
col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col7);
// BUFFER_LENGTH, not used
ColumnMetaData col8 = new ColumnMetaData();
col8.setColIndex(8);
col8.setColName("BUFFER_LENGTH");
columnMetaDataList.add(col8);
// DECIMAL_DIGITS
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(9);
col9.setColName("DECIMAL_DIGITS");
col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col9);
// add NUM_PREC_RADIX
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(10);
col10.setColName("NUM_PREC_RADIX");
col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col10);
// NULLABLE
ColumnMetaData col11 = new ColumnMetaData();
col11.setColIndex(11);
col11.setColName("NULLABLE");
col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col11);
// REMARKS
ColumnMetaData col12 = new ColumnMetaData();
col12.setColIndex(12);
col12.setColName("REMARKS");
col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col12);
// COLUMN_DEF
ColumnMetaData col13 = new ColumnMetaData();
col13.setColIndex(13);
col13.setColName("COLUMN_DEF");
col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col13);
//SQL_DATA_TYPE
ColumnMetaData col14 = new ColumnMetaData();
col14.setColIndex(14);
col14.setColName("SQL_DATA_TYPE");
col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col14);
//SQL_DATETIME_SUB
ColumnMetaData col15 = new ColumnMetaData();
col15.setColIndex(15);
col15.setColName("SQL_DATETIME_SUB");
col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col15);
//CHAR_OCTET_LENGTH
ColumnMetaData col16 = new ColumnMetaData();
col16.setColIndex(16);
col16.setColName("CHAR_OCTET_LENGTH");
col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col16);
//ORDINAL_POSITION
ColumnMetaData col17 = new ColumnMetaData();
col17.setColIndex(17);
col17.setColName("ORDINAL_POSITION");
col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col17);
// IS_NULLABLE
ColumnMetaData col18 = new ColumnMetaData();
col18.setColIndex(18);
col18.setColName("IS_NULLABLE");
col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col18);
//SCOPE_CATALOG
ColumnMetaData col19 = new ColumnMetaData();
col19.setColIndex(19);
col19.setColName("SCOPE_CATALOG");
col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col19);
//SCOPE_SCHEMA
ColumnMetaData col20 = new ColumnMetaData();
col20.setColIndex(20);
col20.setColName("SCOPE_SCHEMA");
col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col20);
//SCOPE_TABLE
ColumnMetaData col21 = new ColumnMetaData();
col21.setColIndex(21);
col21.setColName("SCOPE_TABLE");
col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col21);
//SOURCE_DATA_TYPE
ColumnMetaData col22 = new ColumnMetaData();
col22.setColIndex(22);
col22.setColName("SOURCE_DATA_TYPE");
col22.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
columnMetaDataList.add(col22);
//IS_AUTOINCREMENT
ColumnMetaData col23 = new ColumnMetaData();
col23.setColIndex(23);
col23.setColName("IS_AUTOINCREMENT");
col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col23);
//IS_GENERATEDCOLUMN
ColumnMetaData col24 = new ColumnMetaData();
col24.setColIndex(24);
col24.setColName("IS_GENERATEDCOLUMN");
col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col24);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
ResultSet rs = stmt.executeQuery("describe " + dbname + "." + tableNamePattern);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
int index = 0;
while (rs.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
// set TABLE_CAT
rowData.setString(0, dbname);
// set TABLE_NAME
rowData.setString(2, tableNamePattern);
// set COLUMN_NAME
rowData.setString(3, rs.getString("Field"));
// set DATA_TYPE
String typeName = rs.getString("Type");
rowData.setInt(4, getDataType(typeName));
// set TYPE_NAME
rowData.setString(5, typeName);
// set COLUMN_SIZE
int length = rs.getInt("Length");
rowData.setInt(6, getColumnSize(typeName, length));
// set DECIMAL_DIGITS
rowData.setInt(8, getDecimalDigits(typeName));
// set NUM_PREC_RADIX
rowData.setInt(9, 10);
// set NULLABLE
rowData.setInt(10, getNullable(index, typeName));
// set REMARKS
rowData.setString(11, rs.getString("Note"));
rowDataList.add(rowData);
index++;
}
resultSet.setRowDataList(rowDataList);
return resultSet;
} catch (SQLException e) {
e.printStackTrace();
}
return null;
}
protected int getNullable(int index, String typeName) {
if (index == 0 && "TIMESTAMP".equals(typeName))
return DatabaseMetaData.columnNoNulls;
@ -552,7 +842,6 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
switch (typeName) {
case "TIMESTAMP":
return 23;
default:
return 0;
}
@ -615,9 +904,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return getEmptyResultSet();
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public abstract ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException;
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
@ -718,9 +1005,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return getEmptyResultSet();
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return getEmptyResultSet();
}
public abstract ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException;
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
String attributeNamePattern) throws SQLException {
@ -728,15 +1013,17 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
return true;
return false;
}
public int getResultSetHoldability() throws SQLException {
return 0;
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
public int getDatabaseMajorVersion() throws SQLException {
return 0;
return 2;
}
public int getDatabaseMinorVersion() throws SQLException {
@ -744,7 +1031,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public int getJDBCMajorVersion() throws SQLException {
return 0;
return 2;
}
public int getJDBCMinorVersion() throws SQLException {
@ -768,7 +1055,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
return getEmptyResultSet();
}
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
@ -805,4 +1092,180 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
private ResultSet getEmptyResultSet() {
return new EmptyResultSet();
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface.isInstance(this);
}
protected ResultSet getCatalogs(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(1);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
resultSet.setColumnMetaDataList(columnMetaDataList);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
ResultSet rs = stmt.executeQuery("show databases");
while (rs.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(1);
rowData.setString(0, rs.getString("name"));
rowDataList.add(rowData);
}
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
protected ResultSet getPrimaryKeys(String catalog, String schema, String table, Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(0);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
// TABLE_SCHEM
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(1);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
// TABLE_NAME
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(2);
col3.setColName("TABLE_NAME");
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
// COLUMN_NAME
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(3);
col4.setColName("COLUMN_NAME");
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
// KEY_SEQ
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(4);
col5.setColName("KEY_SEQ");
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col5);
// PK_NAME
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(5);
col6.setColName("PK_NAME");
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col6);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set rowData
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
ResultSet rs = stmt.executeQuery("describe " + dbname + "." + table);
rs.next();
TSDBResultSetRowData rowData = new TSDBResultSetRowData(6);
rowData.setString(0, null);
rowData.setString(1, null);
rowData.setString(2, table);
String pkName = rs.getString(1);
rowData.setString(3, pkName);
rowData.setInt(4, 1);
rowData.setString(5, pkName);
rowDataList.add(rowData);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
protected ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern, Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(0);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
// TABLE_SCHEM
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(1);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
// TABLE_NAME
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(2);
col3.setColName("TABLE_NAME");
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
// SUPERTABLE_NAME
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(3);
col4.setColName("SUPERTABLE_NAME");
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
resultSet.setColumnMetaDataList(columnMetaDataList);
ResultSet rs = stmt.executeQuery("show tables like '" + tableNamePattern + "'");
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
while (rs.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(4);
rowData.setString(2, rs.getString(1));
rowData.setString(3, rs.getString(4));
rowDataList.add(rowData);
}
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
}
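The getCatalogs/getTables/getColumns implementations above back the standard JDBC metadata API, so an application can browse TDengine databases, tables and columns without issuing show/describe statements itself. Below is a consumer-side editorial sketch using plain JDBC calls; the catalog name passed in is illustrative, and the column positions assume the metadata layout constructed above.
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
public class MetaDataSketch {
    // Lists the tables and columns of one catalog (TDengine database) via DatabaseMetaData.
    static void dumpSchema(Connection conn, String catalog) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        // Catalogs map to TDengine databases ("show databases" behind the scenes).
        try (ResultSet cats = meta.getCatalogs()) {
            while (cats.next()) {
                System.out.println("catalog: " + cats.getString(1)); // TABLE_CAT
            }
        }
        // Tables and super tables of the requested database.
        try (ResultSet tables = meta.getTables(catalog, null, null, null)) {
            while (tables.next()) {
                String tableName = tables.getString(3); // TABLE_NAME
                System.out.println("table: " + tableName);
                // Column metadata is produced from "describe <db>.<table>".
                try (ResultSet cols = meta.getColumns(catalog, null, tableName, null)) {
                    while (cols.next()) {
                        System.out.println("  " + cols.getString(4)  // COLUMN_NAME
                                + " " + cols.getString(6));          // TYPE_NAME
                    }
                }
            }
        }
    }
}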

View File

@ -1,68 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
/*
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains some adjustments customized for certain
* compatibility needs.
*/
public class CatalogResultSet extends TSDBResultSetWrapper {
public CatalogResultSet(ResultSet resultSet) {
super.setOriginalResultSet(resultSet);
}
@Override
public String getString(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getString(columnIndex);
} else {
return null;
}
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getBoolean(columnIndex);
} else {
return false;
}
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getBytes(columnIndex);
} else {
return null;
}
}
@Override
public Object getObject(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getObject(columnIndex);
} else {
return null;
}
}
}

View File

@ -16,40 +16,40 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
public int getColSize() {
return colSize;
}
public int getColSize() {
return colSize;
}
public void setColSize(int colSize) {
this.colSize = colSize;
}
public void setColSize(int colSize) {
this.colSize = colSize;
}
public int getColType() {
return colType;
}
public int getColType() {
return colType;
}
public void setColType(int colType) {
this.colType = colType;
}
public void setColType(int colType) {
this.colType = colType;
}
public String getColName() {
return colName;
}
public String getColName() {
return colName;
}
public void setColName(String colName) {
this.colName = colName;
}
public void setColName(String colName) {
this.colName = colName;
}
public int getColIndex() {
return colIndex;
}
public int getColIndex() {
return colIndex;
}
public void setColIndex(int colIndex) {
this.colIndex = colIndex;
}
public void setColIndex(int colIndex) {
this.colIndex = colIndex;
}
}
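ColumnMetaData instances like the one above are combined with TSDBResultSetRowData rows into a DatabaseMetaDataResultSet throughout the metadata methods. The following is a condensed editorial illustration of that driver-internal pattern (a single-column catalog result with one illustrative row); it assumes the driver classes from com.taosdata.jdbc are available in the same package.
import java.util.ArrayList;
import java.util.List;
class MetaDataResultSetSketch {
    // Builds an in-memory result set with one NCHAR column and one row, mirroring the pattern above.
    static DatabaseMetaDataResultSet singleCatalogResultSet(String dbName) {
        DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
        List<ColumnMetaData> columns = new ArrayList<>();
        ColumnMetaData col = new ColumnMetaData();
        col.setColIndex(1);
        col.setColName("TABLE_CAT");
        col.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
        columns.add(col);
        resultSet.setColumnMetaDataList(columns);
        List<TSDBResultSetRowData> rows = new ArrayList<>();
        TSDBResultSetRowData row = new TSDBResultSetRowData(1); // one column per row
        row.setString(0, dbName);                               // row values are 0-indexed
        rows.add(row);
        resultSet.setRowDataList(rows);
        return resultSet;
    }
}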

View File

@ -1,51 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
/*
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains some adjustments customized for certain
* compatibility needs.
*/
public class GetColumnsResultSet extends TSDBResultSetWrapper {
private String catalog;
private String schemaPattern;
private String tableNamePattern;
private String columnNamePattern;
public GetColumnsResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) {
super.setOriginalResultSet(resultSet);
this.catalog = catalog;
this.schemaPattern = schemaPattern;
this.tableNamePattern = tableNamePattern;
this.columnNamePattern = columnNamePattern;
}
@Override
public String getString(int columnIndex) {
switch (columnIndex) {
case 1:
return catalog;
case 2:
return null;
case 3:
return tableNamePattern;
default:
return null;
}
}
}

View File

@ -1,53 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
/*
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains some adjustments customized for certain
* compatibility needs.
*/
public class GetTablesResultSet extends TSDBResultSetWrapper {
private String catalog;
private String schemaPattern;
private String tableNamePattern;
private String[] types;
public GetTablesResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String[] types) {
super.setOriginalResultSet(resultSet);
this.catalog = catalog;
this.schemaPattern = schemaPattern;
this.tableNamePattern = tableNamePattern;
this.types = types;
}
@Override
public String getString(int columnIndex) throws SQLException {
String ret = null;
switch (columnIndex) {
case 3:
return super.getString(1);
case 4:
return "table";
default:
return null;
}
}
}

View File

@ -19,9 +19,12 @@ import java.util.Map;
public abstract class TSDBConstants {
public static final String STATEMENT_CLOSED = "Statement already closed.";
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static final String RESULT_SET_IS_CLOSED = "resultSet is closed.";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final long JNI_NULL_POINTER = 0L;
@ -74,7 +77,7 @@ public abstract class TSDBConstants {
}
static {
DATATYPE_MAP = new HashMap<Integer, String>();
DATATYPE_MAP = new HashMap<>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");

View File

@ -14,11 +14,11 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData {
private String url;
private String userName;
@ -29,31 +29,14 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
this.userName = userName;
}
public Connection getConnection() throws SQLException {
return this.conn;
}
public void setConnection(Connection conn) {
this.conn = conn;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface.isInstance(this);
}
public boolean allProceduresAreCallable() throws SQLException {
return false;
}
public boolean allTablesAreSelectable() throws SQLException {
return false;
}
public String getURL() throws SQLException {
return this.url;
}
@ -62,911 +45,52 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
return this.userName;
}
public boolean isReadOnly() throws SQLException {
return false;
}
public boolean nullsAreSortedHigh() throws SQLException {
return false;
}
public boolean nullsAreSortedLow() throws SQLException {
return !nullsAreSortedHigh();
}
public boolean nullsAreSortedAtStart() throws SQLException {
return true;
}
public boolean nullsAreSortedAtEnd() throws SQLException {
return !nullsAreSortedAtStart();
}
public String getDatabaseProductName() throws SQLException {
return "TDengine";
}
public String getDatabaseProductVersion() throws SQLException {
return "2.0.x.x";
}
public String getDriverName() throws SQLException {
return TSDBDriver.class.getName();
}
public String getDriverVersion() throws SQLException {
return "2.0.x";
}
public int getDriverMajorVersion() {
return 2;
}
public int getDriverMinorVersion() {
return 0;
}
public boolean usesLocalFiles() throws SQLException {
return false;
}
public boolean usesLocalFilePerTable() throws SQLException {
return false;
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
// whether identifiers such as database and table names are stored in mixed case
return false;
}
public boolean storesUpperCaseIdentifiers() throws SQLException {
return false;
}
public boolean storesLowerCaseIdentifiers() throws SQLException {
return true;
}
public boolean storesMixedCaseIdentifiers() throws SQLException {
return false;
}
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
// whether quoted identifiers such as database and table names are stored in mixed case
return false;
}
public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
public String getIdentifierQuoteString() throws SQLException {
return " ";
}
public String getSQLKeywords() throws SQLException {
return null;
}
public String getNumericFunctions() throws SQLException {
return null;
}
public String getStringFunctions() throws SQLException {
return null;
}
public String getSystemFunctions() throws SQLException {
return null;
}
public String getTimeDateFunctions() throws SQLException {
return null;
}
public String getSearchStringEscape() throws SQLException {
return null;
}
public String getExtraNameCharacters() throws SQLException {
return null;
}
public boolean supportsAlterTableWithAddColumn() throws SQLException {
return true;
}
public boolean supportsAlterTableWithDropColumn() throws SQLException {
return true;
}
public boolean supportsColumnAliasing() throws SQLException {
return true;
}
public boolean nullPlusNonNullIsNull() throws SQLException {
// null + non-null != null
return false;
}
public boolean supportsConvert() throws SQLException {
// whether the CONVERT function is supported
return false;
}
public boolean supportsConvert(int fromType, int toType) throws SQLException {
return false;
}
public boolean supportsTableCorrelationNames() throws SQLException {
return false;
}
public boolean supportsDifferentTableCorrelationNames() throws SQLException {
return false;
}
public boolean supportsExpressionsInOrderBy() throws SQLException {
return false;
}
public boolean supportsOrderByUnrelated() throws SQLException {
return false;
}
public boolean supportsGroupBy() throws SQLException {
return true;
}
public boolean supportsGroupByUnrelated() throws SQLException {
return false;
}
public boolean supportsGroupByBeyondSelect() throws SQLException {
return false;
}
public boolean supportsLikeEscapeClause() throws SQLException {
return false;
}
public boolean supportsMultipleResultSets() throws SQLException {
return false;
}
public boolean supportsMultipleTransactions() throws SQLException {
return false;
}
public boolean supportsNonNullableColumns() throws SQLException {
return false;
}
public boolean supportsMinimumSQLGrammar() throws SQLException {
return false;
}
public boolean supportsCoreSQLGrammar() throws SQLException {
return false;
}
public boolean supportsExtendedSQLGrammar() throws SQLException {
return false;
}
public boolean supportsANSI92EntryLevelSQL() throws SQLException {
return false;
}
public boolean supportsANSI92IntermediateSQL() throws SQLException {
return false;
}
public boolean supportsANSI92FullSQL() throws SQLException {
return false;
}
public boolean supportsIntegrityEnhancementFacility() throws SQLException {
return false;
}
public boolean supportsOuterJoins() throws SQLException {
return false;
}
public boolean supportsFullOuterJoins() throws SQLException {
return false;
}
public boolean supportsLimitedOuterJoins() throws SQLException {
return false;
}
public String getSchemaTerm() throws SQLException {
return null;
}
public String getProcedureTerm() throws SQLException {
return null;
}
public String getCatalogTerm() throws SQLException {
return "database";
}
public boolean isCatalogAtStart() throws SQLException {
return true;
}
public String getCatalogSeparator() throws SQLException {
return ".";
}
public boolean supportsSchemasInDataManipulation() throws SQLException {
return false;
}
public boolean supportsSchemasInProcedureCalls() throws SQLException {
return false;
}
public boolean supportsSchemasInTableDefinitions() throws SQLException {
return false;
}
public boolean supportsSchemasInIndexDefinitions() throws SQLException {
return false;
}
public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInDataManipulation() throws SQLException {
return true;
}
public boolean supportsCatalogsInProcedureCalls() throws SQLException {
return false;
}
public boolean supportsCatalogsInTableDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
return false;
}
public boolean supportsPositionedDelete() throws SQLException {
return false;
}
public boolean supportsPositionedUpdate() throws SQLException {
return false;
}
public boolean supportsSelectForUpdate() throws SQLException {
return false;
}
public boolean supportsStoredProcedures() throws SQLException {
return false;
}
public boolean supportsSubqueriesInComparisons() throws SQLException {
return false;
}
public boolean supportsSubqueriesInExists() throws SQLException {
return false;
}
public boolean supportsSubqueriesInIns() throws SQLException {
return false;
}
public boolean supportsSubqueriesInQuantifieds() throws SQLException {
return false;
}
public boolean supportsCorrelatedSubqueries() throws SQLException {
return false;
}
public boolean supportsUnion() throws SQLException {
return false;
}
public boolean supportsUnionAll() throws SQLException {
return false;
}
public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
return false;
}
public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
return false;
}
public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
return false;
}
public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
return false;
}
public int getMaxBinaryLiteralLength() throws SQLException {
return 0;
}
public int getMaxCharLiteralLength() throws SQLException {
return 0;
}
public int getMaxColumnNameLength() throws SQLException {
return 0;
}
public int getMaxColumnsInGroupBy() throws SQLException {
return 0;
}
public int getMaxColumnsInIndex() throws SQLException {
return 0;
}
public int getMaxColumnsInOrderBy() throws SQLException {
return 0;
}
public int getMaxColumnsInSelect() throws SQLException {
return 0;
}
public int getMaxColumnsInTable() throws SQLException {
return 0;
}
public int getMaxConnections() throws SQLException {
return 0;
}
public int getMaxCursorNameLength() throws SQLException {
return 0;
}
public int getMaxIndexLength() throws SQLException {
return 0;
}
public int getMaxSchemaNameLength() throws SQLException {
return 0;
}
public int getMaxProcedureNameLength() throws SQLException {
return 0;
}
public int getMaxCatalogNameLength() throws SQLException {
return 0;
}
public int getMaxRowSize() throws SQLException {
return 0;
}
public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
return false;
}
public int getMaxStatementLength() throws SQLException {
return 0;
}
public int getMaxStatements() throws SQLException {
return 0;
}
public int getMaxTableNameLength() throws SQLException {
return 0;
}
public int getMaxTablesInSelect() throws SQLException {
return 0;
}
public int getMaxUserNameLength() throws SQLException {
return 0;
}
public int getDefaultTransactionIsolation() throws SQLException {
return Connection.TRANSACTION_NONE;
}
public boolean supportsTransactions() throws SQLException {
return false;
}
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
if (level == Connection.TRANSACTION_NONE)
return true;
return false;
}
public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
return false;
}
public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
return false;
}
public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
return false;
}
public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
return false;
}
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException {
return null;
}
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException {
return null;
}
/**
* @Param catalog : database name; "" matches tables that do not belong to any database, null means the database is not used to narrow the search
* @Param schemaPattern : schema name; "" matches tables without a schema
* @Param tableNamePattern : only tables whose names match tableNamePattern are returned, null returns all tables
* @Param types : table types, null returns all types
*/
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
if (conn == null || conn.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
stmt.executeUpdate("use " + catalog);
ResultSet resultSet0 = stmt.executeQuery("show tables");
GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
return getTablesResultSet;
}
return super.getTables(catalog, schemaPattern, tableNamePattern, types, conn);
}
public ResultSet getSchemas() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getCatalogs() throws SQLException {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("show databases");
return new CatalogResultSet(rs);
}
return super.getCatalogs(conn);
}
public ResultSet getTableTypes() throws SQLException {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(1);
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE");
colMetaData.setColSize(10);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// set up rowDataList
List<TSDBResultSetRowData> rowDataList = new ArrayList<>(2);
TSDBResultSetRowData rowData = new TSDBResultSetRowData();
rowData.setString(0, "TABLE");
rowDataList.add(rowData);
rowData = new TSDBResultSetRowData();
rowData.setString(0, "STABLE");
rowDataList.add(rowData);
resultSet.setColumnMetaDataList(columnMetaDataList);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException {
/** add by zyyang **********/
Statement stmt = null;
if (null != conn && !conn.isClosed()) {
stmt = conn.createStatement();
if (catalog == null || catalog.isEmpty())
return null;
stmt.executeUpdate("use " + catalog);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24);
columnMetaDataList.add(null);
columnMetaDataList.add(null);
// add TABLE_NAME
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(3);
colMetaData.setColName("TABLE_NAME");
colMetaData.setColSize(193);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(4);
colMetaData.setColName("COLUMN_NAME");
colMetaData.setColSize(65);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add DATA_TYPE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(5);
colMetaData.setColName("DATA_TYPE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add TYPE_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(6);
colMetaData.setColName("TYPE_NAME");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_SIZE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(7);
colMetaData.setColName("COLUMN_SIZE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add BUFFER_LENGTH ,not used
columnMetaDataList.add(null);
// add DECIMAL_DIGITS
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(9);
colMetaData.setColName("DECIMAL_DIGITS");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NUM_PREC_RADIX
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(10);
colMetaData.setColName("NUM_PREC_RADIX");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NULLABLE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(11);
colMetaData.setColName("NULLABLE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
int index = 0;
while (resultSet0.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
// set TABLE_NAME
rowData.setString(2, tableNamePattern);
// set COLUMN_NAME
rowData.setString(3, resultSet0.getString(1));
// set DATA_TYPE
String typeName = resultSet0.getString(2);
rowData.setInt(4, getDataType(typeName));
// set TYPE_NAME
rowData.setString(5, typeName);
// set COLUMN_SIZE
int length = resultSet0.getInt(3);
rowData.setInt(6, getColumnSize(typeName, length));
// set DECIMAL_DIGITS
rowData.setInt(8, getDecimalDigits(typeName));
// set NUM_PREC_RADIX
rowData.setInt(9, 10);
// set NULLABLE
rowData.setInt(10, getNullable(index, typeName));
rowDataList.add(rowData);
index++;
}
resultSet.setRowDataList(rowDataList);
// GetColumnsResultSet getColumnsResultSet = new GetColumnsResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, columnNamePattern);
// return getColumnsResultSet;
// DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
return resultSet;
} else {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
/*************************/
// return getEmptyResultSet();
return super.getTableTypes();
}
private int getNullable(int index, String typeName) {
if (index == 0 && "TIMESTAMP".equals(typeName))
return DatabaseMetaData.columnNoNulls;
return DatabaseMetaData.columnNullable;
}
private int getColumnSize(String typeName, int length) {
switch (typeName) {
case "TIMESTAMP":
return 23;
default:
return 0;
}
}
private int getDecimalDigits(String typeName) {
switch (typeName) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
default:
return 0;
}
}
private int getDataType(String typeName) {
switch (typeName) {
case "TIMESTAMP":
return Types.TIMESTAMP;
case "INT":
return Types.INTEGER;
case "BIGINT":
return Types.BIGINT;
case "FLOAT":
return Types.FLOAT;
case "DOUBLE":
return Types.DOUBLE;
case "BINARY":
return Types.BINARY;
case "SMALLINT":
return Types.SMALLINT;
case "TINYINT":
return Types.TINYINT;
case "BOOL":
return Types.BOOLEAN;
case "NCHAR":
return Types.NCHAR;
default:
return Types.NULL;
}
}
public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, conn);
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getTypeInfo() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate)
throws SQLException {
return getEmptyResultSet();
}
public boolean supportsResultSetType(int type) throws SQLException {
return false;
}
public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
return false;
}
public boolean ownUpdatesAreVisible(int type) throws SQLException {
return false;
}
public boolean ownDeletesAreVisible(int type) throws SQLException {
return false;
}
public boolean ownInsertsAreVisible(int type) throws SQLException {
return false;
}
public boolean othersUpdatesAreVisible(int type) throws SQLException {
return false;
}
public boolean othersDeletesAreVisible(int type) throws SQLException {
return false;
}
public boolean othersInsertsAreVisible(int type) throws SQLException {
return false;
}
public boolean updatesAreDetected(int type) throws SQLException {
return false;
}
public boolean deletesAreDetected(int type) throws SQLException {
return false;
}
public boolean insertsAreDetected(int type) throws SQLException {
return false;
}
public boolean supportsBatchUpdates() throws SQLException {
return false;
}
public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types)
throws SQLException {
return getEmptyResultSet();
}
public Connection getConnection() throws SQLException {
return this.conn;
}
public boolean supportsSavepoints() throws SQLException {
return false;
}
public boolean supportsNamedParameters() throws SQLException {
return false;
}
public boolean supportsMultipleOpenResults() throws SQLException {
return false;
}
public boolean supportsGetGeneratedKeys() throws SQLException {
return false;
}
public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
return getEmptyResultSet();
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getPrimaryKeys(catalog, schema, table, conn);
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return getEmptyResultSet();
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getSuperTables(catalog, schemaPattern, tableNamePattern, conn);
}
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
String attributeNamePattern) throws SQLException {
return getEmptyResultSet();
}
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
return true;
return false;
}
public int getResultSetHoldability() throws SQLException {
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
public int getDatabaseMajorVersion() throws SQLException {
return 2;
}
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
public int getJDBCMajorVersion() throws SQLException {
return 2;
}
public int getJDBCMinorVersion() throws SQLException {
return 0;
}
public int getSQLStateType() throws SQLException {
return 0;
}
public boolean locatorsUpdateCopy() throws SQLException {
return false;
}
public boolean supportsStatementPooling() throws SQLException {
return false;
}
public RowIdLifetime getRowIdLifetime() throws SQLException {
return null;
}
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
}
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
return false;
}
public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
return false;
}
public ResultSet getClientInfoProperties() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
String columnNamePattern) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
String columnNamePattern) throws SQLException {
return getEmptyResultSet();
}
public boolean generatedKeyAlwaysReturned() throws SQLException {
return false;
}
private ResultSet getEmptyResultSet() {
return new EmptyResultSet();
}
}
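
For readers following the metadata changes above: getColumns() runs "describe <tableNamePattern>" and converts each returned row with the getDataType()/getColumnSize()/getDecimalDigits() helpers. A minimal, self-contained sketch of that type mapping (the class name is hypothetical and not part of the driver):

    import java.sql.Types;

    public class TypeMappingSketch {
        // mirrors getDataType() above: TDengine type name from "describe" -> java.sql.Types constant
        static int toSqlType(String typeName) {
            switch (typeName) {
                case "TIMESTAMP": return Types.TIMESTAMP;
                case "INT":       return Types.INTEGER;
                case "BIGINT":    return Types.BIGINT;
                case "FLOAT":     return Types.FLOAT;
                case "DOUBLE":    return Types.DOUBLE;
                case "BINARY":    return Types.BINARY;
                case "SMALLINT":  return Types.SMALLINT;
                case "TINYINT":   return Types.TINYINT;
                case "BOOL":      return Types.BOOLEAN;
                case "NCHAR":     return Types.NCHAR;
                default:          return Types.NULL;
            }
        }

        public static void main(String[] args) {
            System.out.println(toSqlType("TIMESTAMP")); // 93
            System.out.println(toSqlType("NCHAR"));     // -15
        }
    }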

View File

@ -1,75 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
public class TSDBParameterMetaData implements ParameterMetaData {
@Override
public int getParameterCount() throws SQLException {
return 0;
}
@Override
public int isNullable(int param) throws SQLException {
return 0;
}
@Override
public boolean isSigned(int param) throws SQLException {
return false;
}
@Override
public int getPrecision(int param) throws SQLException {
return 0;
}
@Override
public int getScale(int param) throws SQLException {
return 0;
}
@Override
public int getParameterType(int param) throws SQLException {
return 0;
}
@Override
public String getParameterTypeName(int param) throws SQLException {
return null;
}
@Override
public String getParameterClassName(int param) throws SQLException {
return null;
}
@Override
public int getParameterMode(int param) throws SQLException {
return 0;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -100,7 +100,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
* order to process those supported SQLs.
*/
private void preprocessSql() {
/***** For processing some of Spark SQLs*****/
// should replace it first
this.rawSql = this.rawSql.replaceAll("or (.*) is null", "");
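// Illustration (hypothetical SQL, not from this commit): the replaceAll above strips
// Spark-style null guards, e.g.
//     "select * from weather where ts > ? or ts is null"
// becomes
//     "select * from weather where ts > ? "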
@ -149,7 +148,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
rawSql = rawSql.replace(matcher.group(1), tableFullName);
}
/***** for inner queries *****/
}
/**
@ -196,7 +194,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
setObject(parameterIndex, new String("NULL"));
setObject(parameterIndex, "NULL");
}
@Override

View File

@ -26,9 +26,9 @@ public class TSDBResultSetRowData {
public TSDBResultSetRowData(int colSize) {
this.setColSize(colSize);
}
public TSDBResultSetRowData() {
this.data = new ArrayList<Object>();
this.data = new ArrayList<>();
this.setColSize(0);
}
@ -39,7 +39,7 @@ public class TSDBResultSetRowData {
if (this.colSize == 0) {
return;
}
this.data = new ArrayList<Object>(colSize);
this.data = new ArrayList<>(colSize);
this.data.addAll(Collections.nCopies(this.colSize, null));
}
@ -53,7 +53,7 @@ public class TSDBResultSetRowData {
public boolean getBoolean(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return (Boolean) obj;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj) == 1.0? Boolean.TRUE:Boolean.FALSE;
@ -64,10 +64,10 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj) == 1L? Boolean.TRUE:Boolean.FALSE;
}
return Boolean.TRUE;
}
public void setByte(int col, byte value) {
data.set(col, value);
}
@ -82,20 +82,20 @@ public class TSDBResultSetRowData {
public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return ((Double)obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return (Byte) obj;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:return (Short) obj;
case TSDBConstants.TSDB_DATA_TYPE_INT: return (Integer) obj;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Integer.parseInt((String) obj);
}
return 0;
}
@ -105,7 +105,7 @@ public class TSDBResultSetRowData {
public long getLong(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).longValue();
@ -116,9 +116,9 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Long.parseLong((String) obj);
}
return 0;
}
@ -128,7 +128,7 @@ public class TSDBResultSetRowData {
public float getFloat(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return (Float) obj;
@ -139,7 +139,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj;
}
return 0;
}
@ -149,7 +149,7 @@ public class TSDBResultSetRowData {
public double getDouble(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return (Float) obj;
@ -160,7 +160,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj;
}
return 0;
}
@ -180,7 +180,7 @@ public class TSDBResultSetRowData {
* The original type may not be a string type, but will be converted to by calling this method
* @param col column index
* @return
* @throws SQLException
*/
public String getString(int col, int srcType) throws SQLException {
if (srcType == TSDBConstants.TSDB_DATA_TYPE_BINARY || srcType == TSDBConstants.TSDB_DATA_TYPE_NCHAR) {

View File

@ -52,12 +52,18 @@ public class TSDBStatement implements Statement {
this.isClosed = false;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return iface.isInstance(this);
}
public ResultSet executeQuery(String sql) throws SQLException {
@ -130,10 +136,15 @@ public class TSDBStatement implements Statement {
}
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getMaxRows() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
// always set maxRows to zero, meaning unlimited rows in a resultSet
return 0;
}
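
Usage sketch for the unwrap()/isWrapperFor() behaviour added above in TSDBStatement (the connection URL, port and credentials are illustrative assumptions, not part of this commit):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UnwrapSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/log", "root", "taosdata");
                 Statement stmt = conn.createStatement()) {
                // with the change above, unwrap() casts to the requested interface instead of throwing
                if (stmt.isWrapperFor(com.taosdata.jdbc.TSDBStatement.class)) {
                    com.taosdata.jdbc.TSDBStatement tsdbStmt = stmt.unwrap(com.taosdata.jdbc.TSDBStatement.class);
                    System.out.println("unwrapped to " + tsdbStmt.getClass().getName());
                }
            }
        }
    }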

View File

@ -29,177 +29,53 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData {
}
@Override
public String getSchemaTerm() throws SQLException {
return null;
}
public String getDriverName() throws SQLException {
return RestfulDriver.class.getName();
}
@Override
public String getProcedureTerm() throws SQLException {
return null;
}
@Override
public String getCatalogTerm() throws SQLException {
return null;
}
@Override
public boolean isCatalogAtStart() throws SQLException {
return false;
}
@Override
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
Statement stmt = null;
if (null != connection && !connection.isClosed()) {
stmt = connection.createStatement();
if (catalog == null || catalog.length() < 1) {
catalog = connection.getCatalog();
}
stmt.executeUpdate("use " + catalog);
ResultSet resultSet0 = stmt.executeQuery("show tables");
GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
return getTablesResultSet;
} else {
if (connection == null || connection.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
return super.getTables(catalog, schemaPattern, tableNamePattern, types, connection);
}
@Override
public ResultSet getCatalogs() throws SQLException {
if (connection != null && !connection.isClosed()) {
Statement stmt = connection.createStatement();
ResultSet resultSet0 = stmt.executeQuery("show databases");
CatalogResultSet resultSet = new CatalogResultSet(resultSet0);
return resultSet;
} else {
return new EmptyResultSet();
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getCatalogs(connection);
}
@Override
public ResultSet getTableTypes() throws SQLException {
if (connection == null || connection.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
return super.getTableTypes();
}
@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
Statement stmt = null;
if (null != connection && !connection.isClosed()) {
stmt = connection.createStatement();
if (catalog == null || catalog.length() < 1) {
catalog = connection.getCatalog();
}
stmt.execute("use " + catalog);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24);
columnMetaDataList.add(null);
columnMetaDataList.add(null);
// add TABLE_NAME
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(3);
colMetaData.setColName("TABLE_NAME");
colMetaData.setColSize(193);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(4);
colMetaData.setColName("COLUMN_NAME");
colMetaData.setColSize(65);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add DATA_TYPE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(5);
colMetaData.setColName("DATA_TYPE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add TYPE_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(6);
colMetaData.setColName("TYPE_NAME");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_SIZE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(7);
colMetaData.setColName("COLUMN_SIZE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add BUFFER_LENGTH, not used
columnMetaDataList.add(null);
// add DECIMAL_DIGITS
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(9);
colMetaData.setColName("DECIMAL_DIGITS");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NUM_PREC_RADIX
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(10);
colMetaData.setColName("NUM_PREC_RADIX");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NULLABLE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(11);
colMetaData.setColName("NULLABLE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
int index = 0;
while (resultSet0.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
// set TABLE_NAME
rowData.setString(2, tableNamePattern);
// set COLUMN_NAME
rowData.setString(3, resultSet0.getString(1));
// set DATA_TYPE
String typeName = resultSet0.getString(2);
rowData.setInt(4, getDataType(typeName));
// set TYPE_NAME
rowData.setString(5, typeName);
// set COLUMN_SIZE
int length = resultSet0.getInt(3);
rowData.setInt(6, getColumnSize(typeName, length));
// set DECIMAL_DIGITS
rowData.setInt(8, getDecimalDigits(typeName));
// set NUM_PREC_RADIX
rowData.setInt(9, 10);
// set NULLABLE
rowData.setInt(10, getNullable(index, typeName));
rowDataList.add(rowData);
index++;
}
resultSet.setRowDataList(rowDataList);
return resultSet;
} else {
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, connection);
}
@Override
public long getMaxLogicalLobSize() throws SQLException {
return 0;
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getPrimaryKeys(catalog, schema, table, connection);
}
@Override
public boolean supportsRefCursors() throws SQLException {
return false;
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getSuperTables(catalog, schemaPattern, tableNamePattern, connection);
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -44,6 +44,8 @@ public class RestfulDriver extends AbstractTaosDriver {
String result = HttpClientPoolUtil.execute(loginUrl);
JSONObject jsonResult = JSON.parseObject(result);
String status = jsonResult.getString("status");
String token = jsonResult.getString("desc");
HttpClientPoolUtil.token = token;
if (!status.equals("succ")) {
throw new SQLException(jsonResult.getString("desc"));
}
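
The change above stores the login token that the RESTful endpoint returns in the "desc" field. A minimal parsing sketch with the same fastjson calls (the response body below is a made-up example, and the usual login URL form http://<host>:6041/rest/login/<user>/<password> is an assumption, not shown in this commit):

    import com.alibaba.fastjson.JSON;
    import com.alibaba.fastjson.JSONObject;

    public class RestLoginSketch {
        public static void main(String[] args) {
            String result = "{\"status\":\"succ\",\"code\":0,\"desc\":\"example-token\"}";
            JSONObject jsonResult = JSON.parseObject(result);
            if (!"succ".equals(jsonResult.getString("status"))) {
                throw new RuntimeException(jsonResult.getString("desc")); // on failure, "desc" carries the error message
            }
            String token = jsonResult.getString("desc");                  // on success, "desc" carries the token
            System.out.println("token = " + token);
        }
    }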

View File

@ -15,28 +15,28 @@ import java.util.List;
import java.util.Map;
public class RestfulResultSet implements ResultSet {
private static final String RESULT_SET_IS_CLOSED = "resultSet is closed.";
private volatile boolean isClosed;
private int pos = -1;
private final String database;
private final Statement statement;
// data
private ArrayList<ArrayList<Object>> resultSet;
private ArrayList<ArrayList<Object>> resultSet = new ArrayList<>();
// meta
private ArrayList<String> columnNames;
private ArrayList<Field> columns;
private ArrayList<String> columnNames = new ArrayList<>();
private ArrayList<Field> columns = new ArrayList<>();
private RestfulResultSetMetaData metaData;
/**
* Constructs a result set from a result JSON. Statements such as "show databases" and "show tables" return a result set whose meta information cannot be obtained, so every value is uniformly treated as a String.
*
* @param resultJson: the JSON that contains the "data" section, i.e. the result set returned by the SQL
***/
public RestfulResultSet(String database, Statement statement, JSONObject resultJson) {
this.database = database;
this.statement = statement;
// row data
JSONArray data = resultJson.getJSONArray("data");
resultSet = new ArrayList<>();
int columnIndex = 0;
for (; columnIndex < data.size(); columnIndex++) {
ArrayList oneRow = new ArrayList<>();
@ -48,15 +48,13 @@ public class RestfulResultSet implements ResultSet {
}
// column only names
columnNames = new ArrayList<>();
columns = new ArrayList<>();
JSONArray head = resultJson.getJSONArray("head");
for (int i = 0; i < head.size(); i++) {
String name = head.getString(i);
columnNames.add(name);
columns.add(new Field(name, "", 0, ""));
}
this.metaData = new RestfulResultSetMetaData(this.database, columns);
this.metaData = new RestfulResultSetMetaData(this.database, columns, this);
}
/**
@ -78,7 +76,7 @@ public class RestfulResultSet implements ResultSet {
}
}
this.columns = newColumns;
this.metaData = new RestfulResultSetMetaData(this.database, this.columns);
this.metaData = new RestfulResultSetMetaData(this.database, this.columns, this);
}
public Field findField(String columnName, List<JSONObject> fieldJsonList) {
@ -91,7 +89,6 @@ public class RestfulResultSet implements ResultSet {
}
}
}
return null;
}
@ -112,10 +109,9 @@ public class RestfulResultSet implements ResultSet {
@Override
public boolean next() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
if (pos < resultSet.size() - 1) {
pos++;
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
pos++;
if (pos <= resultSet.size() - 1) {
return true;
}
return false;
@ -131,37 +127,38 @@ public class RestfulResultSet implements ResultSet {
@Override
public boolean wasNull() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return resultSet.isEmpty();
}
@Override
public String getString(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
if (columnIndex > resultSet.get(pos).size()) {
throw new SQLException(TSDBConstants.WrapErrMsg("Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()));
}
return resultSet.get(pos).get(columnIndex - 1).toString();
columnIndex = getTrueColumnIndex(columnIndex);
return resultSet.get(pos).get(columnIndex).toString();
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
String result = getString(columnIndex);
if (!(result.equals("true") || result.equals("false"))) {
throw new SQLException("not boolean value");
}
return result.equals("true");
columnIndex = getTrueColumnIndex(columnIndex);
int result = getInt(columnIndex);
return result == 0 ? false : true;
}
@Override
public byte getByte(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -169,40 +166,55 @@ public class RestfulResultSet implements ResultSet {
@Override
public short getShort(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
return Short.parseShort(getString(columnIndex));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
columnIndex = getTrueColumnIndex(columnIndex);
return Short.parseShort(resultSet.get(pos).get(columnIndex).toString());
}
@Override
public int getInt(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
return Integer.parseInt(getString(columnIndex));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
columnIndex = getTrueColumnIndex(columnIndex);
return Integer.parseInt(resultSet.get(pos).get(columnIndex).toString());
}
@Override
public long getLong(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
return Long.parseLong(getString(columnIndex));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
columnIndex = getTrueColumnIndex(columnIndex);
return Long.parseLong(resultSet.get(pos).get(columnIndex).toString());
}
@Override
public float getFloat(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
return Float.parseFloat(getString(columnIndex));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
columnIndex = getTrueColumnIndex(columnIndex);
return Float.parseFloat(resultSet.get(pos).get(columnIndex).toString());
}
@Override
public double getDouble(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return Double.parseDouble(getString(columnIndex));
columnIndex = getTrueColumnIndex(columnIndex);
return Double.parseDouble(resultSet.get(pos).get(columnIndex).toString());
}
private int getTrueColumnIndex(int columnIndex) throws SQLException {
if (columnIndex < 1) {
throw new SQLException("Column Index out of range, " + columnIndex + " < 1");
}
int numOfCols = resultSet.get(pos).size();
if (columnIndex > numOfCols) {
throw new SQLException("Column Index out of range, " + columnIndex + " > " + numOfCols);
}
return columnIndex - 1;
}
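// Illustration (hypothetical values): for a row with 3 columns, getTrueColumnIndex(1) -> 0 and
// getTrueColumnIndex(3) -> 2, while getTrueColumnIndex(0) and getTrueColumnIndex(4) throw
// SQLException("Column Index out of range ..."), matching the range checks above.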
/*******************************************************************************************************************/
@ -210,7 +222,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -218,7 +230,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -226,7 +238,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public Date getDate(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -234,7 +246,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public Time getTime(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -242,17 +254,18 @@ public class RestfulResultSet implements ResultSet {
@Override
public Timestamp getTimestamp(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
String strDate = getString(columnIndex);
strDate = strDate.substring(1, strDate.length() - 1);
columnIndex = getTrueColumnIndex(columnIndex);
String strDate = resultSet.get(pos).get(columnIndex).toString();
// strDate = strDate.substring(1, strDate.length() - 1);
return Timestamp.valueOf(strDate);
}
@Override
public InputStream getAsciiStream(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -260,7 +273,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -268,17 +281,16 @@ public class RestfulResultSet implements ResultSet {
@Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
/*************************************************************************************************************/
@Override
public String getString(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
return getString(findColumn(columnLabel) + 1);
return getString(findColumn(columnLabel));
}
@Override
@ -338,53 +350,44 @@ public class RestfulResultSet implements ResultSet {
@Override
public Timestamp getTimestamp(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
return Timestamp.valueOf(getString(findColumn(columnLabel)));
return getTimestamp(findColumn(columnLabel));
}
@Override
public InputStream getAsciiStream(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return getAsciiStream(findColumn(columnLabel));
}
@Override
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return getUnicodeStream(findColumn(columnLabel));
}
@Override
public InputStream getBinaryStream(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return getBinaryStream(findColumn(columnLabel));
}
/*************************************************************************************************************/
@Override
public SQLWarning getWarnings() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return null;
}
@Override
public void clearWarnings() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return;
}
@Override
public String getCursorName() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -392,7 +395,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public ResultSetMetaData getMetaData() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return this.metaData;
}
@ -400,7 +403,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public Object getObject(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -413,15 +416,18 @@ public class RestfulResultSet implements ResultSet {
@Override
public int findColumn(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return columnNames.indexOf(columnLabel);
int columnIndex = columnNames.indexOf(columnLabel);
if (columnIndex == -1)
throw new SQLException("cannot find Column in resultSet");
return columnIndex + 1;
}
@Override
public Reader getCharacterStream(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -429,7 +435,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public Reader getCharacterStream(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -437,7 +443,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -445,7 +451,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public BigDecimal getBigDecimal(String columnLabel) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -453,78 +459,132 @@ public class RestfulResultSet implements ResultSet {
@Override
public boolean isBeforeFirst() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return this.pos == -1 && this.resultSet.size() != 0;
}
@Override
public boolean isAfterLast() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return this.pos >= resultSet.size() && this.resultSet.size() != 0;
}
@Override
public boolean isFirst() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return this.pos == 0;
}
@Override
public boolean isLast() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
if (this.resultSet.size() == 0)
return false;
return this.pos == (this.resultSet.size() - 1);
}
@Override
public void beforeFirst() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
synchronized (this) {
if (this.resultSet.size() > 0) {
this.pos = -1;
}
}
}
@Override
public void afterLast() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
synchronized (this) {
if (this.resultSet.size() > 0) {
this.pos = this.resultSet.size();
}
}
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean first() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (this.resultSet.size() == 0)
return false;
synchronized (this) {
this.pos = 0;
}
return true;
}
@Override
public boolean last() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
if (this.resultSet.size() == 0)
return false;
synchronized (this) {
this.pos = this.resultSet.size() - 1;
}
return true;
}
@Override
public int getRow() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
int row;
synchronized (this) {
if (this.pos < 0 || this.pos >= this.resultSet.size())
return 0;
row = this.pos + 1;
}
return row;
}
@Override
public boolean absolute(int row) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
// if (this.resultSet.size() == 0)
// return false;
//
// if (row == 0) {
// beforeFirst();
// return false;
// } else if (row == 1) {
// return first();
// } else if (row == -1) {
// return last();
// } else if (row > this.resultSet.size()) {
// afterLast();
// return false;
// } else {
// if (row < 0) {
// // adjust to reflect after end of result set
// int newRowPosition = this.resultSet.size() + row + 1;
// if (newRowPosition <= 0) {
// beforeFirst();
// return false;
// } else {
// return absolute(newRowPosition);
// }
// } else {
// row--; // adjust for index difference
// this.pos = row;
// return true;
// }
// }
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -532,7 +592,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public boolean relative(int rows) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -540,7 +600,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public boolean previous() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@ -548,17 +608,18 @@ public class RestfulResultSet implements ResultSet {
@Override
public void setFetchDirection(int direction) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
if (direction != ResultSet.FETCH_REVERSE || direction != ResultSet.FETCH_REVERSE || direction != ResultSet.FETCH_UNKNOWN)
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
if ((direction != ResultSet.FETCH_FORWARD) && (direction != ResultSet.FETCH_REVERSE) && (direction != ResultSet.FETCH_UNKNOWN))
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (!(getType() == ResultSet.TYPE_FORWARD_ONLY && direction == ResultSet.FETCH_FORWARD))
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getFetchDirection() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return ResultSet.FETCH_FORWARD;
}
@ -566,17 +627,17 @@ public class RestfulResultSet implements ResultSet {
@Override
public void setFetchSize(int rows) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
if (rows < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getFetchSize() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return this.resultSet.size();
}
@ -834,7 +895,7 @@ public class RestfulResultSet implements ResultSet {
@Override
public Statement getStatement() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(RESULT_SET_IS_CLOSED));
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return this.statement;
}
@ -992,14 +1053,15 @@ public class RestfulResultSet implements ResultSet {
@Override
public int getHoldability() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
@Override
public boolean isClosed() throws SQLException {
return false;
//TODO: SQLFeature Not Supported
// throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return isClosed;
}
@Override
@ -1224,11 +1286,21 @@ public class RestfulResultSet implements ResultSet {
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.RESULT_SET_IS_CLOSED));
return iface.isInstance(this);
}
}
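
A minimal, self-contained sketch of the JSON shape the RestfulResultSet constructor above consumes: "head" holds the column names and "data" holds one array per row (the body below is a made-up example of a typical /rest/sql response, not output from this commit):

    import com.alibaba.fastjson.JSON;
    import com.alibaba.fastjson.JSONArray;
    import com.alibaba.fastjson.JSONObject;

    public class RestResultSketch {
        public static void main(String[] args) {
            String body = "{\"status\":\"succ\",\"head\":[\"ts\",\"value\"],"
                    + "\"data\":[[\"2021-01-28 09:36:22.000\",1],[\"2021-01-28 09:36:23.000\",2]],\"rows\":2}";
            JSONObject resultJson = JSON.parseObject(body);
            JSONArray head = resultJson.getJSONArray("head"); // column names
            JSONArray data = resultJson.getJSONArray("data"); // row data
            for (int r = 0; r < data.size(); r++) {
                JSONArray row = data.getJSONArray(r);
                for (int c = 0; c < head.size(); c++) {
                    System.out.println(head.getString(c) + " = " + row.get(c));
                }
            }
        }
    }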

View File

@ -1,17 +1,22 @@
package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBConstants;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
public class RestfulResultSetMetaData implements ResultSetMetaData {
private final String database;
private ArrayList<RestfulResultSet.Field> fields;
private final RestfulResultSet resultSet;
public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields) {
public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields, RestfulResultSet resultSet) {
this.database = database;
this.fields = fields;
this.resultSet = resultSet;
}
@Override
@ -26,13 +31,12 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public boolean isCaseSensitive(int column) throws SQLException {
//TODO
return false;
}
@Override
public boolean isSearchable(int column) throws SQLException {
return false;
return true;
}
@Override
@ -42,17 +46,30 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int isNullable(int column) throws SQLException {
if (column == 1)
return ResultSetMetaData.columnNoNulls;
return ResultSetMetaData.columnNullable;
}
@Override
public boolean isSigned(int column) throws SQLException {
return false;
String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "TINYINT":
case "SMALLINT":
case "INT":
case "BIGINT":
case "FLOAT":
case "DOUBLE":
return true;
default:
return false;
}
}
@Override
public int getColumnDisplaySize(int column) throws SQLException {
return 0;
return this.fields.get(column - 1).length;
}
@Override
@ -62,27 +79,46 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnName(int column) throws SQLException {
return null;
return fields.get(column - 1).name;
}
@Override
public String getSchemaName(int column) throws SQLException {
return this.database;
return "";
}
@Override
public int getPrecision(int column) throws SQLException {
return 0;
String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
case "BINARY":
case "NCHAR":
return this.fields.get(column - 1).length;
default:
return 0;
}
}
@Override
public int getScale(int column) throws SQLException {
return 0;
String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
default:
return 0;
}
}
@Override
public String getTableName(int column) throws SQLException {
return null;
return "";
}
@Override
@ -92,17 +128,41 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int getColumnType(int column) throws SQLException {
return 0;
String type = this.fields.get(column - 1).type.toUpperCase();
switch (type) {
case "BOOL":
return java.sql.Types.BOOLEAN;
case "TINYINT":
return java.sql.Types.TINYINT;
case "SMALLINT":
return java.sql.Types.SMALLINT;
case "INT":
return java.sql.Types.INTEGER;
case "BIGINT":
return java.sql.Types.BIGINT;
case "FLOAT":
return java.sql.Types.FLOAT;
case "DOUBLE":
return java.sql.Types.DOUBLE;
case "BINARY":
return java.sql.Types.BINARY;
case "TIMESTAMP":
return java.sql.Types.TIMESTAMP;
case "NCHAR":
return java.sql.Types.NCHAR;
}
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@Override
public String getColumnTypeName(int column) throws SQLException {
return null;
String type = fields.get(column - 1).type;
return type.toUpperCase();
}
@Override
public boolean isReadOnly(int column) throws SQLException {
return false;
return true;
}
@Override
@ -117,16 +177,43 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnClassName(int column) throws SQLException {
return null;
String type = this.fields.get(column - 1).type;
String columnClassName = "";
switch (type) {
case "BOOL":
return Boolean.class.getName();
case "TINYINT":
case "SMALLINT":
return Short.class.getName();
case "INT":
return Integer.class.getName();
case "BIGINT":
return Long.class.getName();
case "FLOAT":
return Float.class.getName();
case "DOUBLE":
return Double.class.getName();
case "TIMESTAMP":
return Timestamp.class.getName();
case "BINARY":
case "NCHAR":
return String.class.getName();
}
return columnClassName;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
return iface.isInstance(this);
}
}
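
Usage sketch for the metadata now populated above (the connection URL scheme, database and table names are illustrative assumptions, not taken from this commit):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.Statement;

    public class MetaDataUsageSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://localhost:6041/log", "root", "taosdata");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("select * from log.dn limit 1")) {
                ResultSetMetaData meta = rs.getMetaData();
                for (int i = 1; i <= meta.getColumnCount(); i++) { // JDBC column indexes are 1-based
                    System.out.println(meta.getColumnName(i) + " : " + meta.getColumnTypeName(i)
                            + " (signed=" + meta.isSigned(i) + ")");
                }
            }
        }
    }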

View File

@ -14,7 +14,6 @@ import java.util.stream.Collectors;
public class RestfulStatement implements Statement {
private static final String STATEMENT_CLOSED = "Statement already closed.";
private boolean closed;
private String database;
private final RestfulConnection conn;
@ -65,17 +64,163 @@ public class RestfulStatement implements Statement {
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed())
throw new SQLException("statement already closed");
if (!SqlSyntaxValidator.isSelectSql(sql))
throw new SQLException("not a select sql for executeQuery: " + sql);
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
throw new SQLException("not a valid sql for executeQuery: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
return executeOneQuery(url, sql);
}
if (this.database == null || this.database.isEmpty())
throw new SQLException("Database not specified or available");
HttpClientPoolUtil.execute(url, "use " + this.database);
return executeOneQuery(url, sql);
}
@Override
public int executeUpdate(String sql) throws SQLException {
if (isClosed())
throw new SQLException("statement already closed");
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw new SQLException("not a valid sql for executeUpdate: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
return executeOneUpdate(url, sql);
}
if (this.database == null || this.database.isEmpty())
throw new SQLException("Database not specified or available");
HttpClientPoolUtil.execute(url, "use " + this.database);
return executeOneUpdate(url, sql);
}
@Override
public void close() throws SQLException {
synchronized (RestfulStatement.class) {
if (!isClosed())
this.closed = true;
}
}
@Override
public int getMaxFieldSize() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return TSDBConstants.maxFieldSize;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public int getMaxRows() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
}
@Override
public int getQueryTimeout() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (seconds < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@Override
public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public SQLWarning getWarnings() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return null;
}
@Override
public void clearWarnings() throws SQLException {
// nothing to do
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
}
@Override
public void setCursorName(String name) throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql) throws SQLException {
if (isClosed())
throw new SQLException("Invalid method call on a closed statement.");
if (!SqlSyntaxValidator.isValidForExecute(sql))
throw new SQLException("not a valid sql for execute: " + sql);
        //if a "use" statement was executed, set this Statement's catalog to the new database
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
if (SqlSyntaxValidator.isUseSql(sql)) {
HttpClientPoolUtil.execute(url, sql);
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
executeOneQuery(url, sql);
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
executeOneUpdate(url, sql);
} else {
if (SqlSyntaxValidator.isValidForExecuteQuery(sql)) {
executeQuery(sql);
} else {
executeUpdate(sql);
}
}
return true;
}
private ResultSet executeOneQuery(String url, String sql) throws SQLException {
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
throw new SQLException("not a select sql for executeQuery: " + sql);
// row data
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
// parse table name from sql
String[] tableIdentifiers = parseTableIdentifier(sql);
if (tableIdentifiers != null) {
@ -93,23 +238,14 @@ public class RestfulStatement implements Statement {
} else {
this.resultSet = new RestfulResultSet(database, this, resultJson);
}
this.affectedRows = 0;
return resultSet;
}
@Override
public int executeUpdate(String sql) throws SQLException {
if (isClosed())
throw new SQLException("statement already closed");
private int executeOneUpdate(String url, String sql) throws SQLException {
if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
throw new SQLException("not a valid sql for executeUpdate: " + sql);
if (this.database == null)
throw new SQLException("Database not specified or available");
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
// HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
@ -120,130 +256,10 @@ public class RestfulStatement implements Statement {
return this.affectedRows;
}
@Override
public void close() throws SQLException {
synchronized (RestfulStatement.class) {
if (!isClosed())
this.closed = true;
}
}
@Override
public int getMaxFieldSize() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return TSDBConstants.maxFieldSize;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public int getMaxRows() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
if (isClosed())
throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
}
@Override
public int getQueryTimeout() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (seconds < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@Override
public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public SQLWarning getWarnings() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return null;
}
@Override
public void clearWarnings() throws SQLException {
// nothing to do
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
}
@Override
public void setCursorName(String name) throws SQLException {
if (isClosed())
throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql) throws SQLException {
if (isClosed()) {
throw new SQLException("Invalid method call on a closed statement.");
}
        //if a "use" statement was executed, set this Statement's catalog to the new database
if (SqlSyntaxValidator.isUseSql(sql)) {
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
}
if (this.database == null)
throw new SQLException("Database not specified or available");
if (SqlSyntaxValidator.isSelectSql(sql)) {
executeQuery(sql);
} else if (SqlSyntaxValidator.isShowSql(sql) || SqlSyntaxValidator.isDescribeSql(sql)) {
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
if (!SqlSyntaxValidator.isShowDatabaseSql(sql)) {
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
}
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
this.resultSet = new RestfulResultSet(database, this, resultJson);
} else {
executeUpdate(sql);
}
return true;
}
@Override
public ResultSet getResultSet() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return resultSet;
}
@ -275,7 +291,7 @@ public class RestfulStatement implements Statement {
@Override
public void setFetchSize(int rows) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (rows < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
@ -284,28 +300,28 @@ public class RestfulStatement implements Statement {
@Override
public int getFetchSize() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return 0;
}
@Override
public int getResultSetConcurrency() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.resultSet.getConcurrency();
}
@Override
public int getResultSetType() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.resultSet.getType();
}
@Override
public void addBatch(String sql) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
//TODO:
}
@ -323,14 +339,14 @@ public class RestfulStatement implements Statement {
@Override
public Connection getConnection() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.conn;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
if (resultSet == null)
return false;
@ -388,7 +404,7 @@ public class RestfulStatement implements Statement {
@Override
public int getResultSetHoldability() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.resultSet.getHoldability();
}
@ -400,28 +416,28 @@ public class RestfulStatement implements Statement {
@Override
public void setPoolable(boolean poolable) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
//nothing to do
}
@Override
public boolean isPoolable() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
this.closeOnCompletion = true;
}
@Override
public boolean isCloseOnCompletion() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.STATEMENT_CLOSED);
return this.closeOnCompletion;
}
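
For orientation, a minimal hypothetical sketch (outside the driver) of the HTTP round trip that the new executeQuery/executeUpdate paths perform. The /rest/sql path, the Basic token and the text/plain content type are taken from the code in this diff; the host, port, class name and sample query are placeholder assumptions, not part of the change.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RestSqlSketch {
    public static void main(String[] args) throws Exception {
        // host and port are assumed; RestfulStatement builds the same URL from conn.getHost()/getPort()
        URL url = new URL("http://127.0.0.1:6041/rest/sql");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Authorization", "Basic cm9vdDp0YW9zZGF0YQ==");
        conn.setRequestProperty("Content-Type", "text/plain");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            // sample SQL; the statement body is sent as plain text, just like HttpClientPoolUtil.execute
            out.write("select * from log.dn".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
        // The response is JSON with at least a "status" field (checked in executeOneQuery above);
        // RestfulResultSet is what turns the payload into a JDBC result set.
    }
}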

View File

@ -23,6 +23,7 @@ import java.nio.charset.Charset;
public class HttpClientPoolUtil {
public static PoolingHttpClientConnectionManager cm = null;
public static CloseableHttpClient httpClient = null;
public static String token = "cm9vdDp0YW9zZGF0YQ==";
/**
     * default content type
*/
@ -61,9 +62,7 @@ public class HttpClientPoolUtil {
try {
return Long.parseLong(value) * 1000;
} catch (Exception e) {
new Exception(
"format KeepAlive timeout exception, exception:" + e.toString())
.printStackTrace();
new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace();
}
}
}
@ -96,7 +95,7 @@ public class HttpClientPoolUtil {
initPools();
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setHeader("Authorization", "Basic cm9vdDp0YW9zZGF0YQ==");
method.setHeader("Authorization", "Taosd " + token);
method.setHeader("Content-Type", "text/plain");
method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
HttpContext context = HttpClientContext.create();
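
The hard-coded token above is simply the Base64 encoding of the default credentials root:taosdata (this diff only changes the header scheme from "Basic" to "Taosd " + token). A small standalone check, using nothing but java.util.Base64; the class name is illustrative:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class TokenSketch {
    public static void main(String[] args) {
        String token = Base64.getEncoder()
                .encodeToString("root:taosdata".getBytes(StandardCharsets.UTF_8));
        System.out.println(token); // prints cm9vdDp0YW9zZGF0YQ==
    }
}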

View File

@ -20,8 +20,11 @@ import java.sql.Connection;
public class SqlSyntaxValidator {
private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use", "import"};
private static final String[] querySQL = {"select"};
private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe"};
private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"};
private static final String[] querySQL = {"select", "show", "describe"};
private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"};
private TSDBConnection tsdbConnection;
@ -37,8 +40,38 @@ public class SqlSyntaxValidator {
return false;
}
public static boolean isValidForExecuteQuery(String sql) {
for (String prefix : querySQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
return true;
}
return false;
}
public static boolean isValidForExecute(String sql) {
for (String prefix : SQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
return true;
}
return false;
}
public static boolean isDatabaseUnspecifiedQuery(String sql) {
for (String databaseObj : databaseUnspecifiedShow) {
if (sql.trim().toLowerCase().matches("show\\s+" + databaseObj + ".*"))
return true;
}
return false;
}
public static boolean isDatabaseUnspecifiedUpdate(String sql) {
sql = sql.trim().toLowerCase();
return sql.matches("create\\s+database.*") || sql.startsWith("set") || sql.matches("drop\\s+database.*");
}
public static boolean isUseSql(String sql) {
return sql.trim().toLowerCase().startsWith("use") || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
return sql.trim().toLowerCase().startsWith("use");
// || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
}
public static boolean isShowSql(String sql) {
@ -58,8 +91,9 @@ public class SqlSyntaxValidator {
return sql.trim().toLowerCase().startsWith("select");
}
public static boolean isShowDatabaseSql(String sql) {
return sql.trim().toLowerCase().matches("show\\s*databases");
}
}
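
A few illustrative inputs for the new classification helpers above; the expected results follow directly from the prefix lists and regular expressions added in this diff, and the wrapper class name is hypothetical:

public class SqlSyntaxValidatorSketch {
    public static void main(String[] args) {
        // isDatabaseUnspecifiedQuery: "show" followed by databases/dnodes/mnodes/variables
        System.out.println(SqlSyntaxValidator.isDatabaseUnspecifiedQuery("show databases"));     // true
        System.out.println(SqlSyntaxValidator.isDatabaseUnspecifiedQuery("show tables"));        // false
        // isDatabaseUnspecifiedUpdate: create/drop database, or a "set" statement
        System.out.println(SqlSyntaxValidator.isDatabaseUnspecifiedUpdate("create database d")); // true
        System.out.println(SqlSyntaxValidator.isDatabaseUnspecifiedUpdate("insert into t values(now, 1)")); // false
        // isValidForExecuteQuery: select / show / describe prefixes
        System.out.println(SqlSyntaxValidator.isValidForExecuteQuery("describe d.t"));           // true
    }
}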

View File

@ -1,36 +0,0 @@
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBPreparedStatement;
import java.sql.*;
import java.util.Properties;
public class TestPreparedStatement {
public static void main(String[] args) throws SQLException {
Connection connection = null;
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
String rawSql = "select * from test.log0601";
// String[] params = new String[]{"ts", "c1"};
PreparedStatement pstmt = (TSDBPreparedStatement) connection.prepareStatement(rawSql);
ResultSet resSet = pstmt.executeQuery();
while(resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s \n", i, resSet.getString(i));
}
}
resSet.close();
pstmt.close();
connection.close();
} catch (Exception e) {
e.printStackTrace();
if (null != connection) {
connection.close();
}
}
}
}

View File

@ -1,33 +0,0 @@
import com.taosdata.jdbc.TSDBDriver;
import java.sql.*;
import java.util.Properties;
public class TestTSDBDatabaseMetaData {
public static void main(String[] args) throws SQLException {
Connection connection = null;
DatabaseMetaData dbMetaData = null;
ResultSet resSet = null;
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
dbMetaData = connection.getMetaData();
resSet = dbMetaData.getCatalogs();
while(resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf("dbMetaData.getCatalogs(%d) = %s\n", i, resSet.getString(i));
}
}
resSet.close();
} catch (Exception e) {
e.printStackTrace();
if (null != connection) {
connection.close();
}
}
}
}

View File

@ -1,33 +0,0 @@
package com.taosdata.jdbc;
import com.taosdata.jdbc.utils.TDNodes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public abstract class BaseTest {
private static boolean testCluster = false;
private static TDNodes nodes = new TDNodes();
@BeforeClass
public static void setupEnv() {
try {
if (nodes.getTDNode(1).getTaosdPid() != null) {
System.out.println("Kill taosd before running JDBC test");
nodes.getTDNode(1).setRunning(1);
nodes.stop(1);
}
nodes.setTestCluster(testCluster);
nodes.deploy(1);
nodes.start(1);
} catch (Exception e) {
e.printStackTrace();
}
}
@AfterClass
public static void cleanUpEnv() {
nodes.stop(1);
}
}

View File

@ -1,120 +0,0 @@
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class BatchInsertTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000l;
final static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
statement.executeUpdate(createTableSql);
for(int i = 0; i < numOfTables; i++) {
String loc = i % 2 == 0 ? "beijing" : "shanghai";
String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
statement.executeUpdate(createSubTalbesSql);
}
}
@Test
public void testBatchInsert() throws SQLException{
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(new Runnable() {
@Override
public void run() {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
}
}
});
}
executorService.shutdown();
try {
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from meters");
int num = 0;
while (rs.next()) {
num++;
}
assertEquals(num, numOfTables * numOfRecordsPerTable);
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
}
}

View File

@ -1,5 +1,6 @@
package com.taosdata.jdbc;
import org.junit.Assert;
import org.junit.Test;
import java.sql.Connection;
@ -8,53 +9,31 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import static org.junit.Assert.assertTrue;
public class ConnectionTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "st";
static String host = "localhost";
public class ConnectionTest {
private Connection connection;
private Statement statement;
private static String host = "127.0.0.1";
@Test
public void testConnection() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
public void testConnection() {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
assertTrue(null != connection);
statement = connection.createStatement();
assertTrue(null != statement);
// try reconnect
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
try {
statement.execute("create database if not exists " + dbName);
Class.forName("com.taosdata.jdbc.TSDBDriver");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Assert.assertTrue(null != connection);
statement = connection.createStatement();
Assert.assertTrue(null != statement);
statement.close();
connection.close();
} catch (ClassNotFoundException e) {
return;
} catch (SQLException e) {
assert false : "create database error: " + e.getMessage();
}
try {
if (!connection.isClosed()) {
if (!statement.isClosed()) {
statement.executeUpdate("drop database " + dbName);
statement.close();
}
connection.close();
Thread.sleep(10);
}
} catch (Exception e) {
assert false : "close connection error: " + e.getMessage();
e.printStackTrace();
}
}
}

View File

@ -1,92 +1,112 @@
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.*;
import org.junit.runners.MethodSorters;
import java.sql.*;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
public class ImportTest extends BaseTest {
Connection connection = null;
Statement statement = null;
String dbName = "test";
String tName = "t0";
String host = "localhost";
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ImportTest {
private static Connection connection;
static String dbName = "test";
static String tName = "t0";
static String host = "127.0.0.1";
private static long ts;
@Before
public void createDatabase() throws SQLException {
@BeforeClass
public static void before() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement stmt = connection.createStatement();
stmt.execute("create database if not exists " + dbName);
stmt.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
stmt.close();
ts = System.currentTimeMillis();
} catch (ClassNotFoundException e) {
return;
e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
public void insertData() throws Exception {
long ts = 1496732686000l;
for (int i = 0; i < 50; i++) {
ts++;
int row = statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
}
@Test
public void selectData() throws Exception {
insertData();
String sql = "select * from test.t0";
ResultSet resSet = statement.executeQuery(sql);
while (resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf(i + ": " + resSet.getString(i) + "\t");
public void case001_insertData() throws Exception {
try (Statement stmt = connection.createStatement()) {
for (int i = 0; i < 50; i++) {
ts++;
int row = stmt.executeUpdate("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
}
resSet.close();
}
@Test
public void importData() throws Exception {
        // avoid duplicate timestamps
long ts = 1496732686000l;
StringBuilder sqlBuilder = new StringBuilder("insert into ").append(dbName).append(".").append(tName).append(" values ");
for (int i = 0; i < 50; i++) {
int a = i / 5;
long t = ts + a;
sqlBuilder.append("(").append(t).append(",").append((100 + i)).append(",").append(i).append(") ");
}
System.out.println(sqlBuilder.toString());
int rows = statement.executeUpdate(sqlBuilder.toString());
System.out.println(rows);
assertEquals(10, rows);
public void case002_checkSum() {
Assert.assertEquals(50, select());
}
@After
public void close() throws Exception {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
private int select() {
int count = 0;
try (Statement stmt = connection.createStatement()) {
String sql = "select * from " + dbName + "." + tName;
ResultSet rs = stmt.executeQuery(sql);
while (rs.next()) {
count++;
}
rs.close();
} catch (SQLException e) {
e.printStackTrace();
}
return count;
}
@Test
public void case003_importData() {
        // avoid duplicate timestamps
try (Statement stmt = connection.createStatement()) {
StringBuilder sqlBuilder = new StringBuilder("import into ").append(dbName).append(".").append(tName).append(" values ");
for (int i = 0; i < 50; i++) {
int a = i / 5;
long t = (++ts) + a;
sqlBuilder.append("(").append(t).append(",").append((100 + i)).append(",").append(i).append(") ");
}
System.out.println(sqlBuilder.toString());
int rows = stmt.executeUpdate(sqlBuilder.toString());
assertEquals(50, rows);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void case004_checkSum() {
Assert.assertEquals(100, select());
}
@AfterClass
public static void close() {
try {
if (connection != null) {
Statement statement = connection.createStatement();
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -1,9 +1,6 @@
package com.taosdata.jdbc;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.*;
import org.junit.runners.MethodSorters;
import java.sql.*;
@ -12,14 +9,13 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@FixMethodOrder(MethodSorters.DEFAULT)
public class PreparedStatementTest extends BaseTest {
static Connection connection = null;
static PreparedStatement statement = null;
@FixMethodOrder(value = MethodSorters.NAME_ASCENDING)
public class PreparedStatementTest {
static Connection connection;
static TSDBPreparedStatement statement;
static String dbName = "test";
static String tName = "t0";
static String host = "localhost";
static ResultSet resSet = null;
@BeforeClass
public static void createConnection() throws SQLException {
@ -29,19 +25,16 @@ public class PreparedStatementTest extends BaseTest {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
String sql = "drop database if exists " + dbName;
statement = (TSDBPreparedStatement) connection.prepareStatement(sql);
}
@Test
public void createTableAndQuery() throws SQLException {
public void case001_createTableAndQuery() throws SQLException {
long ts = System.currentTimeMillis();
statement.executeUpdate("create database if not exists " + dbName);
@ -49,141 +42,132 @@ public class PreparedStatementTest extends BaseTest {
statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)");
PreparedStatement selectStatement = connection.prepareStatement("select * from " + dbName + "." + tName);
ResultSet resultSet = selectStatement.executeQuery();
assertTrue(null != resultSet);
boolean isClosed = statement.isClosed();
assertEquals(false, isClosed);
selectStatement.close();
}
@Test
public void testPreparedStatement() throws SQLException {
public void case002_testPreparedStatement() throws SQLException {
long ts = System.currentTimeMillis() + 20000;
PreparedStatement saveStatement = connection
.prepareStatement("insert into " + dbName + "." + tName + " values (" + ts + ", 1)");
PreparedStatement saveStatement = connection.prepareStatement("insert into " + dbName + "." + tName + " values (" + ts + ", 1)");
int affectedRows = saveStatement.executeUpdate();
assertTrue(1 == affectedRows);
saveStatement.close();
}
@Test
public void testSavedPreparedStatement() throws SQLException {
public void case003_testSavedPreparedStatement() throws SQLException {
long ts = System.currentTimeMillis();
TSDBPreparedStatement saveStatement = (TSDBPreparedStatement) connection
.prepareStatement("insert into " + dbName + "." + tName + " values (?, ?)");
TSDBPreparedStatement saveStatement = (TSDBPreparedStatement) connection.prepareStatement("insert into " + dbName + "." + tName + " values (?, ?)");
saveStatement.setObject(1, ts + 10000);
saveStatement.setObject(2, 3);
int rows = saveStatement.executeUpdate();
assertEquals(1, rows);
saveStatement.close();
}
@Test
public void testUnsupport() {
// if(null == resSet) {
// return;
// }
TSDBPreparedStatement tsdbStatement = (TSDBPreparedStatement) statement;
public void case004_testUnsupport() throws SQLException {
Assert.assertNotNull(statement.unwrap(TSDBPreparedStatement.class));
Assert.assertTrue(statement.isWrapperFor(TSDBPreparedStatement.class));
try {
tsdbStatement.unwrap(null);
statement.getMaxFieldSize();
} catch (SQLException e) {
}
try {
tsdbStatement.isWrapperFor(null);
statement.setMaxFieldSize(0);
} catch (SQLException e) {
}
try {
tsdbStatement.getMaxFieldSize();
statement.setEscapeProcessing(true);
} catch (SQLException e) {
}
try {
tsdbStatement.setMaxFieldSize(0);
statement.cancel();
} catch (SQLException e) {
}
try {
tsdbStatement.setEscapeProcessing(true);
statement.getWarnings();
} catch (SQLException e) {
}
try {
tsdbStatement.cancel();
statement.clearWarnings();
} catch (SQLException e) {
}
try {
tsdbStatement.getWarnings();
statement.setCursorName(null);
} catch (SQLException e) {
}
try {
tsdbStatement.clearWarnings();
statement.getMoreResults();
} catch (SQLException e) {
}
try {
tsdbStatement.setCursorName(null);
statement.setFetchDirection(0);
} catch (SQLException e) {
}
try {
tsdbStatement.getMoreResults();
statement.getFetchDirection();
} catch (SQLException e) {
}
try {
tsdbStatement.setFetchDirection(0);
statement.getResultSetConcurrency();
} catch (SQLException e) {
}
try {
tsdbStatement.getFetchDirection();
statement.getResultSetType();
} catch (SQLException e) {
}
try {
tsdbStatement.getResultSetConcurrency();
statement.getConnection();
} catch (SQLException e) {
}
try {
tsdbStatement.getResultSetType();
statement.getMoreResults();
} catch (SQLException e) {
}
try {
tsdbStatement.getConnection();
statement.getGeneratedKeys();
} catch (SQLException e) {
}
try {
tsdbStatement.getMoreResults();
statement.executeUpdate(null, 0);
} catch (SQLException e) {
}
try {
tsdbStatement.getGeneratedKeys();
statement.executeUpdate(null, new int[]{0});
} catch (SQLException e) {
}
try {
tsdbStatement.executeUpdate(null, 0);
statement.executeUpdate(null, new String[]{"str1", "str2"});
} catch (SQLException e) {
}
try {
tsdbStatement.executeUpdate(null, new int[]{0});
statement.getResultSetHoldability();
} catch (SQLException e) {
}
try {
tsdbStatement.executeUpdate(null, new String[]{"str1", "str2"});
statement.setPoolable(true);
} catch (SQLException e) {
}
try {
tsdbStatement.getResultSetHoldability();
statement.isPoolable();
} catch (SQLException e) {
}
try {
tsdbStatement.setPoolable(true);
statement.closeOnCompletion();
} catch (SQLException e) {
}
try {
tsdbStatement.isPoolable();
} catch (SQLException e) {
}
try {
tsdbStatement.closeOnCompletion();
} catch (SQLException e) {
}
try {
tsdbStatement.isCloseOnCompletion();
statement.isCloseOnCompletion();
} catch (SQLException e) {
}
}

View File

@ -6,76 +6,67 @@ import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
public class QueryDataTest {
public class QueryDataTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static Connection connection;
static Statement statement;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000l;
final static String tablePrefix = "t";
static String host = "127.0.0.1";
@Before
public void createDatabase() throws SQLException {
public void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))";
statement.executeUpdate(createTableSql);
} catch (ClassNotFoundException | SQLException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))";
statement.executeUpdate(createTableSql);
}
@Test
public void testQueryBinaryData() throws SQLException{
String insertSql = "insert into " + stbName + " values(now, 'taosda')";
System.out.println(insertSql);
@Test
public void testQueryBinaryData() throws SQLException {
String insertSql = "insert into " + stbName + " values(now, 'taosdata')";
System.out.println(insertSql);
statement.executeUpdate(insertSql);
String querySql = "select * from " + stbName;
ResultSet rs = statement.executeQuery(querySql);
ResultSet rs = statement.executeQuery(querySql);
while(rs.next()) {
String name = rs.getString(2) + "001";
while (rs.next()) {
String name = rs.getString(2);
System.out.println("name = " + name);
assertEquals(name, "taosda001");
assertEquals("taosdata", name);
}
rs.close();
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
public void close() {
try {
if (statement != null)
statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -1,6 +1,7 @@
package com.taosdata.jdbc;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@ -13,42 +14,37 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class ResultSetTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
public class ResultSetTest {
static Connection connection;
static Statement statement;
static String dbName = "test";
static String tName = "t0";
static String host = "localhost";
static ResultSet resSet = null;
static ResultSet resSet;
@BeforeClass
public static void createDatabaseAndTable() throws SQLException {
public static void createDatabaseAndTable() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.execute("use " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k1 int, k2 bigint, k3 float, k4 double, k5 binary(30), k6 smallint, k7 bool, k8 nchar(20))");
} catch (ClassNotFoundException | SQLException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName +
" (ts timestamp, k1 int, k2 bigint, k3 float, k4 double, k5 binary(30), k6 smallint, k7 bool, k8 nchar(20))");
statement.executeQuery("use " + dbName);
}
@Test
public void testResultSet() {
String sql = null;
String sql;
long ts = 1496732686000l;
int v1 = 2147483600;
long v2 = ts + 1000;
@ -119,16 +115,8 @@ public class ResultSetTest extends BaseTest {
public void testUnsupport() throws SQLException {
statement.executeQuery("show databases");
resSet = statement.getResultSet();
try {
resSet.unwrap(null);
} catch (SQLException e) {
assertTrue(e.getMessage().contains("this operation is NOT supported currently!"));
}
try {
resSet.isWrapperFor(null);
} catch (SQLException e) {
assertTrue(e.getMessage().contains("this operation is NOT supported currently!"));
}
Assert.assertNotNull(resSet.unwrap(TSDBResultSet.class));
Assert.assertTrue(resSet.isWrapperFor(TSDBResultSet.class));
try {
resSet.getAsciiStream(0);
} catch (SQLException e) {
@ -815,13 +803,18 @@ public class ResultSetTest extends BaseTest {
assertEquals(res.length, 2);
statement.clearBatch();
}
@AfterClass
public static void close() throws Exception {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
@AfterClass
public static void close() {
try {
statement.executeUpdate("drop database " + dbName);
if (statement != null)
statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -9,61 +9,73 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
public class SelectTest extends BaseTest {
Connection connection = null;
Statement statement = null;
public class SelectTest {
Connection connection;
String dbName = "test";
String tName = "t0";
String host = "localhost";
String host = "127.0.0.1";
@Before
public void createDatabaseAndTable() throws SQLException {
public void createDatabaseAndTable() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName);
stmt.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
stmt.close();
} catch (ClassNotFoundException e) {
return;
} catch (SQLException e) {
e.printStackTrace();
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
public void selectData() throws SQLException {
public void selectData() {
long ts = 1496732686000l;
for (int i = 0; i < 50; i++) {
ts++;
int row = statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
try (Statement stmt = connection.createStatement()) {
for (int i = 0; i < 50; i++) {
ts++;
int row = stmt.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
String sql = "select * from " + dbName + "." + tName;
ResultSet resSet = stmt.executeQuery(sql);
int num = 0;
while (resSet.next()) {
num++;
}
resSet.close();
assertEquals(num, 50);
} catch (SQLException e) {
e.printStackTrace();
}
String sql = "select * from " + dbName + "." + tName;
ResultSet resSet = statement.executeQuery(sql);
int num = 0;
while (resSet.next()) {
num++;
}
resSet.close();
assertEquals(num, 50);
}
@After
public void close() throws Exception {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
public void close() {
try {
if (connection != null) {
Statement stmt = connection.createStatement();
stmt.executeUpdate("drop database " + dbName);
stmt.close();
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -12,69 +12,67 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class StableTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "st";
static String host = "localhost";
public class StableTest {
private static Connection connection;
private static String dbName = "test";
private static String stbName = "st";
private static String host = "127.0.0.1";
@BeforeClass
public static void createDatabase() throws SQLException {
public static void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement statement = connection.createStatement();
statement.execute("drop database if exists " + dbName);
statement.execute("create database if not exists " + dbName);
statement.execute("use " + dbName);
statement.close();
} catch (ClassNotFoundException e) {
return;
} catch (SQLException e) {
e.printStackTrace();
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
statement.executeQuery("use " + dbName);
}
// @Test
public void createStable() {
String sql = "create table " + stbName + " (ts timestamp, v1 int, v2 int) tags (tg nchar(20)) ";
try {
statement.executeUpdate(sql);
@Test
public void case001_createSuperTable() {
try (Statement stmt = connection.createStatement()) {
final String sql = "create table " + stbName + " (ts timestamp, v1 int, v2 int) tags (tg nchar(20)) ";
stmt.execute(sql);
} catch (SQLException e) {
assert false : "error create stable" + e.getMessage();
}
}
// @Test
public void createTable() {
String sql = "create table t1 using " + stbName + " tags (\"beijing\")";
try {
statement.executeUpdate(sql);
@Test
public void case002_createTable() {
try (Statement stmt = connection.createStatement()) {
final String sql = "create table t1 using " + stbName + " tags (\"beijing\")";
stmt.execute(sql);
} catch (SQLException e) {
assert false : "error create table" + e.getMessage();
}
}
@Test
public void describeSTable() {
createStable();
String sql = "describe " + stbName;
public void case003_describeSTable() {
int num = 0;
System.out.println("describe stable");
try {
ResultSet res = statement.executeQuery(sql);
while (res.next()) {
for (int i = 1; i <= res.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, res.getString(i));
try (Statement stmt = connection.createStatement()) {
String sql = "describe " + stbName;
ResultSet rs = stmt.executeQuery(sql);
while (rs.next()) {
for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
System.out.println(i + ":" + rs.getString(i));
}
num++;
}
res.close();
rs.close();
assertEquals(4, num);
} catch (SQLException e) {
assert false : "error describe stable" + e.getMessage();
@ -82,41 +80,31 @@ public class StableTest extends BaseTest {
}
@Test
public void describeTable() {
createTable();
String sql = "describe t1";
public void case004_describeTable() {
int num = 0;
System.out.println("describe table");
try {
ResultSet res = statement.executeQuery(sql);
while (res.next()) {
for (int i = 1; i <= res.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, res.getString(i));
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("describe t1");
while (rs.next()) {
for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, rs.getString(i));
}
num++;
}
res.close();
rs.close();
assertEquals(4, num);
} catch (SQLException e) {
assert false : "error describe stable" + e.getMessage();
}
}
// @Test
public void validCreateSql() {
String sql = "create table t2 using " + stbName + " tags (\"beijing\")";
boolean valid = ((TSDBConnection) connection).getConnection().validateCreateTableSql(sql);
assertEquals(true, valid);
}
@AfterClass
public static void close() throws Exception {
if (!statement.isClosed()) {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
public static void close() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -1,6 +1,7 @@
package com.taosdata.jdbc;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@ -10,31 +11,44 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class StatementTest extends BaseTest {
public class StatementTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String tName = "t0";
static String host = "localhost";
static ResultSet resSet = null;
@BeforeClass
public static void createConnection() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
}
@Test
public void testCase() {
try {
ResultSet rs = statement.executeQuery("show databases");
ResultSetMetaData metaData = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= metaData.getColumnCount(); i++) {
System.out.print(metaData.getColumnLabel(i) + ":" + rs.getString(i) + "\t");
}
System.out.println();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
@ -52,122 +66,46 @@ public class StatementTest extends BaseTest {
assertEquals(false, isClosed);
}
@Test
public void testUnsupport() {
// if(null == resSet) {
// return;
// }
TSDBStatement tsdbStatement = (TSDBStatement) statement;
try {
tsdbStatement.unwrap(null);
} catch (SQLException e) {
}
try {
tsdbStatement.isWrapperFor(null);
} catch (SQLException e) {
}
try {
tsdbStatement.getMaxFieldSize();
} catch (SQLException e) {
}
try {
tsdbStatement.setMaxFieldSize(0);
} catch (SQLException e) {
}
try {
tsdbStatement.setEscapeProcessing(true);
} catch (SQLException e) {
}
try {
tsdbStatement.cancel();
} catch (SQLException e) {
}
try {
tsdbStatement.getWarnings();
} catch (SQLException e) {
}
try {
tsdbStatement.clearWarnings();
} catch (SQLException e) {
}
try {
tsdbStatement.setCursorName(null);
} catch (SQLException e) {
}
try {
tsdbStatement.getMoreResults();
} catch (SQLException e) {
}
try {
tsdbStatement.setFetchDirection(0);
} catch (SQLException e) {
}
try {
tsdbStatement.getFetchDirection();
} catch (SQLException e) {
}
try {
tsdbStatement.getResultSetConcurrency();
} catch (SQLException e) {
}
try {
tsdbStatement.getResultSetType();
} catch (SQLException e) {
}
try {
tsdbStatement.getConnection();
} catch (SQLException e) {
}
try {
tsdbStatement.getMoreResults();
} catch (SQLException e) {
}
try {
tsdbStatement.getGeneratedKeys();
} catch (SQLException e) {
}
try {
tsdbStatement.executeUpdate(null, 0);
} catch (SQLException e) {
}
try {
tsdbStatement.executeUpdate(null, new int[]{0});
} catch (SQLException e) {
}
try {
tsdbStatement.executeUpdate(null, new String[]{"str1", "str2"});
} catch (SQLException e) {
}
try {
tsdbStatement.getResultSetHoldability();
} catch (SQLException e) {
}
try {
tsdbStatement.setPoolable(true);
} catch (SQLException e) {
}
try {
tsdbStatement.isPoolable();
} catch (SQLException e) {
}
try {
tsdbStatement.closeOnCompletion();
} catch (SQLException e) {
}
try {
tsdbStatement.isCloseOnCompletion();
} catch (SQLException e) {
}
@Test(expected = SQLException.class)
public void testUnsupport() throws SQLException {
Assert.assertNotNull(statement.unwrap(TSDBStatement.class));
Assert.assertTrue(statement.isWrapperFor(TSDBStatement.class));
statement.getMaxFieldSize();
statement.setMaxFieldSize(0);
statement.setEscapeProcessing(true);
statement.cancel();
statement.getWarnings();
statement.clearWarnings();
statement.setCursorName(null);
statement.getMoreResults();
statement.setFetchDirection(0);
statement.getFetchDirection();
statement.getResultSetConcurrency();
statement.getResultSetType();
statement.getConnection();
statement.getMoreResults();
statement.getGeneratedKeys();
statement.executeUpdate(null, 0);
statement.executeUpdate(null, new int[]{0});
statement.executeUpdate(null, new String[]{"str1", "str2"});
statement.getResultSetHoldability();
statement.setPoolable(true);
statement.isPoolable();
statement.closeOnCompletion();
statement.isCloseOnCompletion();
}
@AfterClass
public static void close() throws Exception {
if (!statement.isClosed()) {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
public static void close() {
try {
statement.execute("drop database if exists " + dbName);
if (statement != null)
statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -10,36 +10,37 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
public class SubscribeTest extends BaseTest {
Connection connection = null;
Statement statement = null;
public class SubscribeTest {
Connection connection;
Statement statement;
String dbName = "test";
String tName = "t0";
String host = "localhost";
String topic = "test";
@Before
public void createDatabase() throws SQLException {
public void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
for (int i = 0; i < 2; i++) {
ts += i;
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
statement.executeUpdate(sql);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
long ts = System.currentTimeMillis();
for (int i = 0; i < 2; i++) {
ts += i;
String sql = "insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")";
statement.executeUpdate(sql);
}
} catch (ClassNotFoundException | SQLException e) {
return;
}
}
@ -79,10 +80,16 @@ public class SubscribeTest extends BaseTest {
}
@After
public void close() throws Exception {
statement.executeQuery("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
public void close() {
try {
statement.executeQuery("drop database " + dbName);
if (statement != null)
statement.close();
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -1,29 +1,14 @@
package com.taosdata.jdbc;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.*;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.*;
import java.util.Properties;
public class TSDBDatabaseMetaDataTest {
private static final String host = "127.0.0.1";
private static Connection connection;
private static TSDBDatabaseMetaData metaData;
@Test
public void unwrap() throws SQLException {
@ -48,7 +33,7 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getURL() throws SQLException {
Assert.assertEquals("jdbc:TAOS://localhost:6030/?user=root&password=taosdata", metaData.getURL());
Assert.assertEquals("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", metaData.getURL());
}
@Test
@ -642,7 +627,17 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getTables() throws SQLException {
System.out.println("****************************************************");
ResultSet tables = metaData.getTables("log", "", null, null);
ResultSetMetaData metaData = tables.getMetaData();
while (tables.next()) {
System.out.print(metaData.getColumnLabel(1) + ":" + tables.getString(1) + "\t");
System.out.print(metaData.getColumnLabel(3) + ":" + tables.getString(3) + "\t");
System.out.print(metaData.getColumnLabel(4) + ":" + tables.getString(4) + "\t");
System.out.print(metaData.getColumnLabel(5) + ":" + tables.getString(5) + "\n");
}
System.out.println();
Assert.assertNotNull(tables);
}
@Test
@ -652,17 +647,47 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getCatalogs() throws SQLException {
Assert.assertNotNull(metaData.getCatalogs());
System.out.println("****************************************************");
ResultSet catalogs = metaData.getCatalogs();
ResultSetMetaData meta = catalogs.getMetaData();
while (catalogs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + catalogs.getString(i));
}
System.out.println();
}
}
@Test
public void getTableTypes() throws SQLException {
System.out.println("****************************************************");
ResultSet tableTypes = metaData.getTableTypes();
while (tableTypes.next()) {
System.out.println(tableTypes.getString("TABLE_TYPE"));
}
Assert.assertNotNull(metaData.getTableTypes());
}
@Test
public void getColumns() throws SQLException {
Assert.assertNotNull(metaData.getColumns("", "", "", ""));
System.out.println("****************************************************");
ResultSet columns = metaData.getColumns("log", "", "dn", "");
ResultSetMetaData meta = columns.getMetaData();
while (columns.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + columns.getString(1) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + columns.getString(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + columns.getString(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + columns.getString(5) + "\t");
System.out.print(meta.getColumnLabel(6) + ": " + columns.getString(6) + "\t");
System.out.print(meta.getColumnLabel(7) + ": " + columns.getString(7) + "\t");
System.out.print(meta.getColumnLabel(9) + ": " + columns.getString(9) + "\t");
System.out.print(meta.getColumnLabel(10) + ": " + columns.getString(10) + "\t");
System.out.print(meta.getColumnLabel(11) + ": " + columns.getString(11) + "\n");
System.out.print(meta.getColumnLabel(12) + ": " + columns.getString(12) + "\n");
}
}
@Test
@ -687,7 +712,17 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getPrimaryKeys() throws SQLException {
Assert.assertNotNull(metaData.getPrimaryKeys("", "", ""));
System.out.println("****************************************************");
ResultSet rs = metaData.getPrimaryKeys("log", "", "dn1");
while (rs.next()) {
System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
System.out.println("COLUMN_NAME: " + rs.getString("COLUMN_NAME"));
System.out.println("KEY_SEQ: " + rs.getString("KEY_SEQ"));
System.out.println("PK_NAME: " + rs.getString("PK_NAME"));
}
Assert.assertNotNull(rs);
}
@Test
@ -812,7 +847,14 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getSuperTables() throws SQLException {
Assert.assertNotNull(metaData.getSuperTables("", "", ""));
System.out.println("****************************************************");
ResultSet rs = metaData.getSuperTables("log", "", "dn1");
while (rs.next()) {
System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
System.out.println("SUPERTABLE_NAME: " + rs.getString("SUPERTABLE_NAME"));
}
Assert.assertNotNull(rs);
}
@Test
@ -871,11 +913,6 @@ public class TSDBDatabaseMetaDataTest {
Assert.assertNull(metaData.getRowIdLifetime());
}
@Test
public void testGetSchemas() throws SQLException {
Assert.assertNull(metaData.getSchemas());
}
@Test
public void supportsStoredFunctionsUsingCallSyntax() throws SQLException {
Assert.assertFalse(metaData.supportsStoredFunctionsUsingCallSyntax());
@ -910,4 +947,32 @@ public class TSDBDatabaseMetaDataTest {
public void generatedKeyAlwaysReturned() throws SQLException {
Assert.assertFalse(metaData.generatedKeyAlwaysReturned());
}
@BeforeClass
public static void beforeClass() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties);
metaData = connection.getMetaData().unwrap(TSDBDatabaseMetaData.class);
} catch (ClassNotFoundException e) {
e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
}
@AfterClass
public static void afterClass() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
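Each metadata test above repeats the same traversal: obtain a ResultSet from a DatabaseMetaData call, then walk it column by column through its ResultSetMetaData. The helper below is a sketch of that shared pattern; the name printResultSet is hypothetical and not part of the driver or this test class, and it uses only standard java.sql calls already exercised above.

    // Hypothetical helper (not in TSDBDatabaseMetaDataTest): dump any ResultSet returned by
    // DatabaseMetaData calls such as metaData.getTables("log", "", null, null) or
    // metaData.getColumns("log", "", "dn", ""), label by label, as the tests above do inline.
    private static void printResultSet(ResultSet rs) throws SQLException {
        ResultSetMetaData meta = rs.getMetaData();
        while (rs.next()) {
            StringBuilder row = new StringBuilder();
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                row.append(meta.getColumnLabel(i)).append(": ").append(rs.getString(i)).append("\t");
            }
            System.out.println(row.toString().trim());
        }
    }

With such a helper, a test like getTables() reduces to printResultSet(metaData.getTables("log", "", null, null)) followed by its existing assertion.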
Some files were not shown because too many files have changed in this diff.