Merge remote-tracking branch 'upstream/develop' into mac

freemine 2021-01-23 08:47:00 +08:00
commit 0765b2363c
521 changed files with 4870 additions and 5585 deletions

Jenkinsfile (vendored), 64 lines changed

@ -41,13 +41,10 @@ def pre_test(){
cd ${WKC}
git checkout develop
git reset --hard HEAD~10
git reset --hard HEAD~10 >/dev/null
git pull
git fetch
git checkout ${CHANGE_BRANCH}
git reset --hard HEAD~10
git pull
git merge develop
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
cd ${WK}
git reset --hard HEAD~10
git checkout develop
@ -87,11 +84,14 @@ pipeline {
steps {
pre_test()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
timeout(time: 90, unit: 'MINUTES'){
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
}
}
}
stage('python_2') {
@ -112,12 +112,14 @@ pipeline {
}
stage('test_b1') {
agent{label 'b1'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
date'''
steps {
timeout(time: 90, unit: 'MINUTES'){
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh b1fq
date'''
}
}
}
@ -137,12 +139,14 @@ pipeline {
./handle_crash_gen_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b2fq
date
'''
timeout(time: 90, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
./test-all.sh b2fq
date
'''
}
}
}
@ -157,12 +161,14 @@ pipeline {
./valgrind-test.sh 2>&1 > mem-error-out.log
./handle_val_log.sh
'''
}
sh '''
date
cd ${WKC}/tests
./test-all.sh b3fq
date'''
}
timeout(time: 90, unit: 'MINUTES'){
sh '''
date
cd ${WKC}/tests
./test-all.sh b3fq
date'''
}
}
}


@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.16-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.17-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")

cmake/version.inc (changed from normal file to executable), 16 lines changed

@ -16,13 +16,25 @@ ENDIF ()
IF (DEFINED GITINFO)
SET(TD_VER_GIT ${GITINFO})
ELSE ()
SET(TD_VER_GIT "community")
execute_process(
COMMAND git log -1 --format=%H
WORKING_DIRECTORY ${TD_COMMUNITY_DIR}
OUTPUT_VARIABLE GIT_COMMITID
)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT ${GIT_COMMITID})
ENDIF ()
IF (DEFINED GITINFOI)
SET(TD_VER_GIT_INTERNAL ${GITINFOI})
ELSE ()
SET(TD_VER_GIT_INTERNAL "internal")
execute_process(
COMMAND git log -1 --format=%H
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMITID
)
string (REGEX REPLACE "[\n\t\r]" "" GIT_COMMITID ${GIT_COMMITID})
SET(TD_VER_GIT_INTERNAL ${GIT_COMMITID})
ENDIF ()
IF (DEFINED VERDATE)

(Binary image file added: 42 KiB)


@ -179,7 +179,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s);
## **List of Supported Platforms**
## List of Supported Platforms
### Platforms supported by the TDengine server


@ -9,7 +9,7 @@
TDengine uses SQL as its query language. Applications can send SQL statements through the C/C++, Java, Go, and Python connectors, and users can run ad-hoc SQL queries manually through TAOS Shell, the command-line interface (CLI) tool provided by TDengine. TDengine supports the following query features (a brief sketch in Java follows the list):
- Queries on single or multiple columns
- Various filter conditions on tags and values: \>, \<, =, \<>, like, etc.
- Various filter conditions on tags and values: >, <, =, <>, like, etc.
- Grouping (GROUP BY), sorting (ORDER BY), and constrained output (LIMIT/OFFSET) of aggregation results
- Arithmetic operations on numeric columns and aggregation results
- Timestamp-aligned join queries (implicit joins)
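A minimal Java sketch of such a query, added here for illustration only: it assumes a hypothetical table meters in a database test, with a numeric column current and a tag location, and it uses the JDBC-JNI driver described in the Java Connector section of this commit.

```java
import java.sql.*;

public class FilterQueryDemo {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        String url = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             // One statement combining a value filter, a tag filter (like),
             // grouping of the aggregation result, and a LIMIT on the output.
             ResultSet rs = stmt.executeQuery(
                 "select avg(current) from meters " +
                 "where current > 10 and location like '%beijing%' " +
                 "group by location limit 10")) {
            while (rs.next()) {
                System.out.println(rs.getDouble(1));
            }
        }
    }
}
```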


@ -58,26 +58,26 @@ The default timestamp precision in TDengine is milliseconds, but by modifying the configuration parameter enableMic
- **Create a database**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
```
Notes:
1) KEEP specifies how many days the database retains data (default 3650 days, i.e. 10 years); data older than the limit is deleted automatically;
2) UPDATE marks the database as supporting updates to data with identical timestamps;
3) the maximum length of a database name is 33;
4) the maximum length of a single SQL statement is 65480 characters;
5) the database has more storage-related configuration parameters; see System Administration.
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
```
Notes:
1) KEEP specifies how many days the database retains data (default 3650 days, i.e. 10 years); data older than the limit is deleted automatically;
2) UPDATE marks the database as supporting updates to data with identical timestamps;
3) the maximum length of a database name is 33;
4) the maximum length of a single SQL statement is 65480 characters;
5) the database has more storage-related configuration parameters; see System Administration.
- **Show current system parameters**
```mysql
SHOW VARIABLES;
```
```mysql
SHOW VARIABLES;
```
- **Use a database**
@ -698,13 +698,13 @@ TDengine supports aggregation queries on data. The supported aggregation and selection functions
```mysql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
Function: time-weighted average (TWA). Computes the time-weighted average of a column of a table/STable over a period of time.
Function: time-weighted average (TWA). Computes the time-weighted average of a column of a table over a period of time.
Return type: double-precision floating point (Double).
Applicable fields: all fields except those of type timestamp, binary, nchar, and bool.
Applies to: tables and STables.
Applies to: tables.
- **SUM**
```mysql
@ -1125,11 +1125,8 @@ SELECT function_list FROM stb_name
- The WHERE clause can specify the start and end time of the query as well as other filter conditions.
- The FILL clause specifies the filling mode used when data is missing in a time interval. The filling modes are:
1. No filling: NONE (the default filling mode).
2. VALUE filling: fill with a fixed value, which must be specified, e.g. fill(value, 1.23).
3. NULL filling: fill the data with NULL, e.g. fill(null).
4. PREV filling: fill with the previous non-NULL value, e.g. fill(prev).
Notes:


@ -124,10 +124,10 @@ taosd -C
For one application scenario, data with several different characteristics may coexist. The best design is to put tables with the same data characteristics into one database, so an application may use several databases, each configured with different storage parameters, which keeps the system performing optimally. TDengine allows the application to specify the storage parameters above when creating a database; if specified, they override the corresponding system configuration parameters. For example, the following SQL:
```
create database demo days 10 cache 32 blocks 8 replica 3;
create database demo days 10 cache 32 blocks 8 replica 3 update 1;
```
This SQL creates a database demo in which each data file stores 10 days of data, each memory block is 32 MB, each vnode uses 8 memory blocks, and the replica count is 3, while all other parameters remain identical to the system configuration.
This SQL creates a database demo in which each data file stores 10 days of data, each memory block is 32 MB, each vnode uses 8 memory blocks, the replica count is 3, and updates are allowed, while all other parameters remain identical to the system configuration.
When a new dnode joins a TDengine cluster, a number of cluster-related parameters must match the configuration of the existing cluster, otherwise it cannot join successfully. The parameters that are checked are as follows:


@ -197,7 +197,7 @@ select * from meters where ts > now - 1d and current > 10;
and `restart` is **false** (**0**), the user program will not read data it has already read.
The last parameter of `taos_subscribe` is the polling period in milliseconds.
In synchronous mode, if the interval between two successive calls to `taos_consume` is shorter than this period,
In synchronous mode, if the interval between two successive calls to `taos_consume` is shorter than this period,
`taos_consume` blocks until the interval exceeds it.
In asynchronous mode, this period is the minimum interval between two invocations of the callback function (see the sketch below).
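For intuition only, here is a hypothetical sketch (plain Java, not the TDengine API) of the synchronous-mode rule described above: the consumer is never invoked more often than the configured polling period.

```java
public class PollingPeriodSketch {
    public static void main(String[] args) throws InterruptedException {
        final long pollingPeriodMs = 1000;  // plays the role of the last taos_subscribe argument
        long lastPoll = 0;
        while (true) {
            long elapsed = System.currentTimeMillis() - lastPoll;
            if (elapsed < pollingPeriodMs) {
                // In synchronous mode taos_consume would block for the remaining time;
                // the sleep below models that blocking.
                Thread.sleep(pollingPeriodMs - elapsed);
            }
            lastPoll = System.currentTimeMillis();
            // ... fetch and process newly arrived rows here ...
        }
    }
}
```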
@ -414,7 +414,7 @@ TDengine provides millisecond-level data retrieval through its query functions. Directly
TDengine allocates a fixed amount of memory as cache space, which can be sized according to application needs and hardware resources. With an appropriately sized cache, TDengine can deliver very high write and query performance. Each virtual node (vnode) in TDengine is given its own cache pool when it is created; vnodes do not share cache pools, and all tables belonging to a vnode share that vnode's pool.
TDengine manages the memory pool in blocks, and data inside a block is stored in columnar format. A vnode's memory pool is allocated in blocks when the vnode is created, and each block is managed on a first-in, first-out basis. The memory blocks a table needs are allocated from the vnode's pool; the block size is set by the system configuration parameter cache. The maximum number of blocks per table is set by the parameter tblocks, and the average number of blocks per table by the parameter ablocks. Therefore, for one vnode the total memory is: `cache * ablocks * tables`. The cache parameter should not be too small: a cache block must be able to hold at least several dozen records to be efficient. The minimum value of ablocks is 2, which guarantees that each table can be allocated at least two memory blocks on average.
TDengine manages the memory pool in blocks, and data inside a block is stored row by row. A vnode's memory pool is allocated in blocks when the vnode is created, and each block is managed on a first-in, first-out basis. When the pool is created, the block size is set by the system configuration parameter cache, and the number of blocks per vnode by the parameter blocks. Therefore, for one vnode the total memory is `cache * blocks`. A cache block must be able to hold at least several dozen records per table to be efficient.
You can use the function last_row to quickly retrieve the last record of a table or STable, which is convenient for showing the real-time status or latest readings of devices on a dashboard. For example:


@ -225,7 +225,7 @@ SHOW MNODES;
## Using an Arbitrator
If the number of replicas is even, then when half of the vnodes in a vnode group are not working, a master cannot be elected from them. Likewise, when half of the mnodes are not working, an mnode master cannot be elected, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator simulates a working vnode or mnode, but it is only responsible for network connections and does not process any data insertion or access. As long as more than half of the vnodes or mnodes, counting the arbitrator, are working, the vnode group or mnode group can provide data insertion and query services normally. For example, with 2 replicas, if node A is offline but node B is working and can connect to the arbitrator, node B can work normally.
If the number of replicas is even, then when half of the vnodes in a vnode group are not working, a master cannot be elected from them. Likewise, when half of the mnodes are not working, an mnode master cannot be elected, because of the "split brain" problem. To solve this, TDengine introduces the concept of an Arbitrator. The Arbitrator simulates a working vnode or mnode, but it is only responsible for network connections and does not process any data insertion or access. As long as more than half of the vnodes or mnodes, counting the Arbitrator, are working, the vnode group or mnode group can provide data insertion and query services normally. For example, with 2 replicas, if node A is offline but node B is working and can connect to the Arbitrator, node B can work normally.
TDengine provides an executable, tarbitrator; just run it on any Linux server. Go to [download packages](https://www.taosdata.com/cn/all-downloads/), pick a suitable version in the "TDengine Arbitrator Linux" section, and download and install it. The program has almost no system-resource requirements; it only needs a network connection. Its command-line parameter `-p` specifies the port it serves on, 6042 by default. When configuring each taosd instance, set the parameter arbitrator in taos.cfg to the arbitrator's End Point. If this parameter is configured and the number of replicas is even, the system automatically connects to the configured arbitrator. If the number of replicas is odd, the system does not establish the connection even if an arbitrator is configured.
TDengine provides an executable named tarbitrator; just run it on any Linux server. Go to [download packages](https://www.taosdata.com/cn/all-downloads/), pick a suitable version in the "TDengine Arbitrator Linux" section, and download and install it. The program has almost no system-resource requirements; it only needs a network connection. Its command-line parameter `-p` specifies the port it serves on, 6042 by default. When configuring each taosd instance, set the parameter arbitrator in taos.cfg to the Arbitrator's End Point. If this parameter is configured and the number of replicas is even, the system automatically connects to the configured Arbitrator. If the number of replicas is odd, the system does not establish the connection even if an Arbitrator is configured.


@ -1,146 +0,0 @@
# Cluster Installation and Management
Multiple running taosd instances can form a cluster to guarantee highly reliable operation of TDengine and to provide horizontal scalability. To understand cluster management in TDengine 2.0, you need some familiarity with basic cluster concepts; please read the chapter on the overall architecture of TDengine 2.0.
Each node of the cluster is uniquely identified by its End Point, which consists of its FQDN (Fully Qualified Domain Name) plus a port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname and can be obtained with the Linux command "hostname". The port is the port this node serves on, 6030 by default, and can be changed via the configuration parameter serverPort in taos.cfg.
Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is fully automatic, minimizing the operations workload. This chapter describes cluster-management operations in detail.
## Install and Create the First Node
A cluster is made up of dnodes and starts from the creation of a single dnode. Creating the first node is simple: just install and start it following the ["Getting Started"](https://www.taosdata.com/cn/getting-started/) chapter.
After startup, run taos to start the taos shell and execute the command "show dnodes;" in the shell, as shown below:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time |
=====================================================================================
1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
Query OK, 1 row(s) in set (0.006385s)
taos>
```
The output above shows that the End Point of this freshly started node is h1.taos.com:6030.
## Install and Create Subsequent Nodes
To add a new node to an existing cluster, follow these steps:
1. Install following the ["Getting Started"](https://www.taosdata.com/cn/getting-started/) chapter, but do not start taosd.
2. If you installed from the official TAOS Data package, you will be asked for the cluster's End Point at the end of installation; enter the End Point of the first node. If you installed from source, edit the configuration file taos.cfg (by default in /etc/taos/) and add one line:
```
firstEp h1.taos.com:6030
```
Note: replace the example "h1.taos.com:6030" with the End Point of your own first node.
3. Start taosd following the ["Getting Started"](https://www.taosdata.com/cn/getting-started/) chapter.
4. Run the command "hostname" in the Linux shell to find the FQDN of this machine, say h2.taos.com. If it cannot be determined, check the first few lines of the taosd log file taosdlog.0 (usually in /var/log/taos); the fqdn and port are printed there.
5. On the first node, use the CLI program taos to log in to TDengine and run:
```
CREATE DNODE "h2.taos.com:6030";
```
This adds the new node's End Point to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. Note: replace the example "h2.taos.com:6030" with the End Point of your own new node.
6. Run the command:
```
SHOW DNODES;
```
to check whether the new node has joined successfully.
Following the steps above, you can keep adding new nodes to the cluster.
**Tips:**
- The parameters firstEp and secondEp only take effect the first time a node joins the cluster; after joining, the node saves the latest list of mnode End Points and no longer relies on these two parameters.
- Two dnodes started without the firstEp/secondEp parameters each run independently; in that case one node cannot be joined to the other to form a cluster. **Two independent clusters cannot be merged into a new cluster.**
## Node Management
### Adding a node
Run the CLI program taos, log in to the system with the root account, and execute:
```
CREATE DNODE "fqdn:port";
```
This adds the new node's End Point to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. The fqdn and port a node serves on can be configured in taos.cfg; by default they are obtained automatically.
### Removing a node
Run the CLI program taos, log in to TDengine with the root account, and execute:
```
DROP DNODE "fqdn:port";
```
where fqdn is the FQDN of the node being removed and port is the port it serves on.
### Listing nodes
Run the CLI program taos, log in to TDengine with the root account, and execute:
```
SHOW DNODES;
```
It lists all dnodes in the cluster together with each dnode's fqdn:port, status (ready, offline, etc.), number of vnodes, number of unused vnodes, and other information. Use this command to check after adding or removing a node.
If an Arbitrator is configured for the cluster, it also appears in this node list, with the value "arb" in its role column.
### Listing virtual node groups
To fully exploit multi-core hardware and provide scalability, data needs to be sharded. TDengine therefore splits a DB's data into multiple shards stored in multiple vnodes. These vnodes may be distributed over multiple dnodes, which is how horizontal scaling is achieved. A vnode belongs to exactly one DB, but a DB may have multiple vnodes. vnodes are allocated automatically by the mnode according to current system resources, without any manual intervention.
Run the CLI program taos, log in to TDengine with the root account, and execute:
```
SHOW VGROUPS;
```
## High Availability
TDengine provides high availability through a multi-replica mechanism. The replica count is associated with a DB; a cluster can hold multiple DBs, and each DB can be configured with a different replica count according to operational needs. When creating a database, the parameter replica specifies the number of replicas, 1 by default. With a single replica, system reliability cannot be guaranteed: as soon as the node holding the data goes down, the service becomes unavailable. The number of nodes in the cluster must be greater than or equal to the replica count, otherwise creating a table returns the error "more dnodes are needed". For example, the following command creates the database demo with 3 replicas:
```
CREATE DATABASE demo replica 3;
```
The data of a DB is sharded across multiple vnode groups. The number of vnodes in a vnode group equals the DB's replica count, and the vnodes of one vnode group hold exactly the same data. To guarantee high availability, the vnodes of a vnode group must be located on different dnodes (in real deployments, on different physical machines). As long as more than half of the vnodes in a vgroup are working, the vgroup can serve requests normally.
A dnode may hold data from multiple DBs, so when a dnode goes offline several DBs may be affected. If half or more of the vnodes in a vnode group are not working, that vnode group cannot serve requests (it can neither insert nor read data), which affects the read and write operations of some tables in the DB it belongs to.
Because of vnodes, one cannot simply conclude that "the cluster works as long as more than half of the dnodes are working". For simple cases, however, the conclusion is easy: with 3 replicas and only three dnodes, the whole cluster still works if just one node is down, but it cannot work normally if two nodes are down.
## High Availability of Mnodes
A TDengine cluster is managed by the mnode (a taosd module, a logical node). To keep the mnode highly available, multiple mnode replicas can be configured; the replica count is set by the system configuration parameter numOfMnodes, with a valid range of 1-3. To guarantee strong consistency of metadata, mnode replicas replicate data synchronously.
A cluster has multiple dnodes, but a dnode runs at most one mnode instance. With multiple dnodes, which dnode becomes an mnode? This is decided entirely automatically by the system according to overall resource usage. Through the CLI program taos, users can run the following command in the TDengine console:
```
SHOW MNODES;
```
to view the mnode list, which shows the End Point of the dnode each mnode resides on and its role (master, slave, unsynced, or offline).
When the first node of a cluster starts, it always runs an mnode instance; otherwise that dnode cannot work, because a system must have at least one mnode. If numOfMnodes is set to 2, the second dnode to start also runs an mnode instance.
To keep the mnode service highly available, numOfMnodes must be set to 2 or more. Because the metadata kept by the mnode must be strongly consistent, if numOfMnodes is greater than 2 the replication parameter quorum is automatically set to 2, meaning a write is acknowledged to the client application only after at least two replicas have written the data successfully.
## Load Balancing
Three situations trigger load balancing, and none of them requires manual intervention:
- When a new node is added to the cluster, load balancing is triggered automatically and some data on existing nodes is moved to the new node, with no manual intervention.
- When a node is removed from the cluster, the system automatically moves that node's data to other nodes, with no manual intervention.
- If a node becomes too hot (its data volume is too large), the system automatically rebalances the load by moving some of that node's vnodes to other nodes.
When any of these three situations occurs, the system computes the load of each node in order to decide how to move data.
## Handling Offline Nodes
If a node goes offline, the TDengine cluster detects it automatically. There are two cases:
- If the node stays offline longer than a certain period (controlled by the configuration parameter offlineThreshold in taos.cfg), the system automatically removes the node, raises a system alarm, and triggers the load-balancing process. If the removed node later comes back online, it cannot rejoin the cluster; the system administrator must add it to the cluster again before it starts working.
- If the node comes back online within offlineThreshold, the system automatically starts the data-recovery process, and the node resumes normal work once its data is fully recovered.
## Using an Arbitrator
If the number of replicas is even, then when half of the vnodes in a vnode group are not working, a master cannot be elected from them. Likewise, when half of the mnodes are not working, an mnode master cannot be elected, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator simulates a working vnode or mnode, but it is only responsible for network connections and does not process any data insertion or access. As long as more than half of the vnodes or mnodes, counting the arbitrator, are working, the vnode group or mnode group can provide data insertion and query services normally. For example, with 2 replicas, if node A is offline but node B is working and can connect to the arbitrator, node B can work normally.
The TDengine installation package ships with an executable, tarbitrator; just run it on any Linux server. The program has almost no system-resource requirements; it only needs a network connection. Its command-line parameter `-p` specifies the port it serves on, 6030 by default. When configuring each taosd instance, set the parameter arbitrator in taos.cfg to the Arbitrator's End Point. If this parameter is configured and the number of replicas is even, the system automatically connects to the configured Arbitrator.
When an Arbitrator is configured, it also appears in the node list returned by the "show dnodes;" command.


@ -1,62 +1,62 @@
# Java Connector
The Java connector supports the following systems:
| **CPU type** | x64 (64-bit) | | | ARM64 | ARM32 |
| ------------ | ------------ | -------- | -------- | -------- | -------- |
| **OS type** | Linux | Win64 | Win32 | Linux | Linux |
| **Supported** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |
TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC 3.0 API standard; it can be searched for and downloaded from Maven's central repository, the [Sonatype Repository][1].
For how to use the Java connector, see the <a href=https://www.taosdata.com/blog/2020/11/11/1955.html>video tutorial</a>.
`taos-jdbcdriver` comes in two flavors: JDBC-JNI and JDBC-RESTful (JDBC-RESTful is supported starting from taos-jdbcdriver-2.0.17). JDBC-JNI is implemented by calling native methods of the client library (libtaos.so or taos.dll), while JDBC-RESTful internally wraps the RESTful interface.
For the convenience of Java applications, TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC 3.0 API standard. It can currently be searched for and downloaded from the [Sonatype Repository][1].
![tdengine-connector](../assets/tdengine-jdbc-connector.png)
Because TDengine is developed in C, the taos-jdbcdriver package depends on the corresponding native library on the system.
The figure above shows three ways a Java application can access TDengine through the connector:
* libtaos.so
After TDengine is successfully installed on a Linux system, the required native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on Linux's default library search path, so no extra configuration is needed.
* JDBC-JNI: the Java application on physical node 1 (pnode1) uses the JDBC-JNI API to call the client API (libtaos.so or taos.dll) directly and sends write and query requests to the taosd instance on physical node 2 (pnode2).
* RESTful: the application sends SQL to the RESTful connector on physical node 2 (pnode2), which then calls the client API (libtaos.so).
* JDBC-RESTful: the Java application uses the JDBC-RESTful API to wrap SQL into a RESTful request and sends it to the RESTful connector on physical node 2.
* taos.dll
After the client is installed on a Windows system, the taos.dll file the driver depends on is automatically copied to the default system search path C:/Windows/System32, so again no extra configuration is needed.
TDengine's JDBC driver implementation stays as close as possible to relational database drivers, but the objects served and the technical characteristics of a time-series database differ from those of relational/object databases, so `taos-jdbcdriver` also differs somewhat from traditional JDBC drivers. Keep the following points in mind:
> Note: when developing on Windows you need to install the matching TDengine [Windows client][14]; a Linux server already has the client installed once TDengine is installed, or you can install the [Linux client][15] separately to connect to a remote TDengine Server.
TDengine's JDBC driver implementation stays as close as possible to relational database drivers, but because the objects served and the technical characteristics of a time-series database differ from those of relational/object databases, taos-jdbcdriver does not fully implement the JDBC standard. Keep the following points in mind:
* TDengine does not provide delete or update operations on individual records, and the driver has no methods for them.
* Since delete and update are not supported, transactions are not supported either.
* TDengine currently does not support deleting individual records.
* Transactions are currently not supported.
* UNION operations between tables are currently not supported.
* Nested queries are currently not supported; each Connection instance can have at most one open ResultSet, and if a new query is executed while a ResultSet is still open, TSDBJDBCDriver automatically closes the previous ResultSet.
* Nested queries are currently not supported.
* Each Connection instance can have at most one open ResultSet; if a new query is executed while a ResultSet is still open, taos-jdbcdriver automatically closes the previous ResultSet.
## TAOS-JDBCDriver versions and supported TDengine and JDK versions
| taos-jdbcdriver version | TDengine version | JDK version |
| --- | --- | --- |
| 2.0.12 and above | 2.0.8.0 and above | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
## Comparison of JDBC-JNI and JDBC-RESTful
## TDengine DataType vs. Java DataType
<table >
<tr align="center"><th>Item</th><th>JDBC-JNI</th><th>JDBC-RESTful</th></tr>
<tr align="center">
<td>Supported operating systems</td>
<td>Linux, Windows</td>
<td>All platforms</td>
</tr>
<tr align="center">
<td>Requires client installation</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr align="center">
<td>Client upgrade required after server upgrade</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr align="center">
<td>Write performance</td>
<td colspan="2">JDBC-RESTful is 50%-90% of JDBC-JNI</td>
</tr>
<tr align="center">
<td>Query performance</td>
<td colspan="2">No difference between JDBC-RESTful and JDBC-JNI</td>
</tr>
</table>
TDengine currently supports timestamp, numeric, character, and boolean types; the corresponding Java type conversions are as follows:
| TDengine DataType | Java DataType |
| --- | --- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT |java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## How to obtain TAOS-JDBCDriver
## How to obtain taos-jdbcdriver
### Maven repository
taos-jdbcdriver has been published to the [Sonatype Repository][1], and the major mirror repositories are all synchronized.
* [sonatype][8]
* [mvnrepository][9]
* [maven.aliyun][10]
@ -67,30 +67,63 @@ In a Maven project, just use the following pom.xml configuration:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
<version>2.0.17</version>
</dependency>
```
### Building from source
After downloading the [TDengine][3] source code, go to the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package` to produce the jar.
After downloading the [TDengine][3] source code, go to the taos-jdbcdriver source directory `src/connector/jdbc` and run `mvn clean package -Dmaven.test.skip=true` to produce the jar.
## Usage
## Using JDBC
### Obtaining a connection
#### Obtaining a connection via the JDBC URL
#### Obtaining a connection by specifying the URL
Obtain a connection by specifying the URL, as shown below:
```java
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
The example above uses the **JDBC-RESTful** driver to establish a connection to hostname taosdemo.com, port 6041, database test. The URL specifies the user name (user) root and the password (password) taosdata.
The JDBC-RESTful interface does not depend on the native library. Compared with JDBC-JNI, you only need to:
1. specify driverClass as "com.taosdata.jdbc.rs.RestfulDriver";
2. start jdbcUrl with "jdbc:TAOS-RS://";
3. use 6041 as the connection port.
For better write and query performance, a Java application can use the **JDBC-JNI** driver, as shown below:
Obtain a connection through the specified jdbcUrl, as shown below:
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
```
The example above establishes a connection to hostname taosdemo.com, port 6030 (TDengine's default port), database test. The URL specifies the user name (user) root and the password (password) taosdata.
The example above uses the JDBC-JNI driver to establish a connection to hostname taosdemo.com, port 6030 (TDengine's default port), database test. The URL specifies the user name (user) root and the password (password) taosdata.
**Note**: with the JDBC-JNI driver (the taos-jdbcdriver package), the corresponding native library must be present on the system.
* libtaos.so
After TDengine is successfully installed on a Linux system, the required native library libtaos.so is automatically copied to /usr/lib/libtaos.so, which is on Linux's default library search path, so no extra configuration is needed.
* taos.dll
After the client is installed on a Windows system, the taos.dll file the driver depends on is automatically copied to the default system search path C:/Windows/System32, so again no extra configuration is needed.
> When developing on Windows you need to install the matching TDengine [Windows client][14]; a Linux server already has the client installed once TDengine is installed, or you can install the [Linux client][15] separately to connect to a remote TDengine Server.
For how to use JDBC-JNI, see the <a href=https://www.taosdata.com/blog/2020/11/11/1955.html>video tutorial</a>.
The JDBC URL specification for TDengine is:
`jdbc:TAOS://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
The configuration parameters in the URL are as follows:
* user: TDengine login user name, default root.
* password: login password, default taosdata.
@ -99,13 +132,17 @@ The configuration parameters in the URL are as follows:
* locale: client locale, defaults to the system's current locale.
* timezone: time zone used by the client, defaults to the system's current time zone.
#### Obtaining a connection with the JDBC URL and Properties
Besides obtaining a connection through the specified jdbcUrl, you can use Properties to specify parameters when establishing the connection, as shown below:
#### Obtaining a connection by specifying the URL and Properties
Besides obtaining a connection through the specified URL, you can use Properties to specify parameters when establishing the connection, as shown below:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
// Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
// String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
@ -114,9 +151,10 @@ public Connection getConn() throws Exception{
return conn;
}
```
The example above establishes a connection to hostname taosdemo.com, port 6030, database test. The connection specifies the user name (user) root and the password (password) taosdata in the URL, and specifies the character set, locale, time zone, and so on in connProps.
The configuration parameters in properties are as follows:
The example above establishes a connection to hostname taosdemo.com, port 6030, database test. The commented lines show the JDBC-RESTful variant. The connection specifies the user name (user) root and the password (password) taosdata in the URL, and specifies the character set, locale, time zone, and so on in connProps.
The configuration parameters in properties are as follows:
* TSDBDriver.PROPERTY_KEY_USER: TDengine login user name, default root.
* TSDBDriver.PROPERTY_KEY_PASSWORD: login password, default taosdata.
* TSDBDriver.PROPERTY_KEY_CONFIG_DIR: client configuration file directory, default /etc/taos on Linux and C:/TDengine/cfg on Windows.
@ -124,10 +162,14 @@ The configuration parameters in properties are as follows:
* TSDBDriver.PROPERTY_KEY_LOCALE: client locale, defaults to the system's current locale.
* TSDBDriver.PROPERTY_KEY_TIME_ZONE: time zone used by the client, defaults to the system's current time zone.
#### Establishing a connection using the client configuration file
When using JDBC to connect to a TDengine cluster, you can use the client configuration file and specify the cluster's firstEp and secondEp parameters in it.
When using JDBC-JNI to connect to a TDengine cluster, you can use the client configuration file and specify the cluster's firstEp and secondEp parameters in it.
As shown below:
1. Do not specify the hostname and port in Java:
1. Do not specify the hostname and port in the Java application:
```java
public Connection getConn() throws Exception{
Class.forName("com.taosdata.jdbc.TSDBDriver");
@ -140,7 +182,7 @@ public Connection getConn() throws Exception{
return conn;
}
```
2. Specify firstEp and secondEp in the configuration file:
2. Specify firstEp and secondEp in the configuration file:
```
# first fully qualified domain name (FQDN) for TDengine system
firstEp cluster_node1:6030
@ -155,17 +197,19 @@ secondEp cluster_node2:6030
# locale en_US.UTF-8
```
In the example above, JDBC uses the client configuration file to establish a connection to hostname cluster_node1, port 6030, database test. When the firstEp node of the cluster fails, JDBC tries to connect to the cluster using secondEp.
In TDengine, as long as one of firstEp and secondEp is valid, the connection to the cluster can be established normally.
In the example above, JDBC uses the client configuration file to establish a connection to hostname cluster_node1, port 6030, database test. When the firstEp node of the cluster fails, JDBC tries to connect to the cluster using secondEp.
In TDengine, as long as one of firstEp and secondEp is valid, the connection to the cluster can be established normally.
> Note: the configuration file here is the one on the machine running the application that calls the JDBC Connector, by default /etc/taos/taos.cfg on Linux and C://TDengine/cfg/taos.cfg on Windows.
> Note: the configuration file here is the one on the machine running the application that calls the JDBC Connector, by default /etc/taos/taos.cfg on Linux and C://TDengine/cfg/taos.cfg on Windows.
#### Priority of configuration parameters
With the three ways of obtaining a connection above, if a configuration parameter is duplicated in the URL, Properties, and the client configuration file, the `priority from high to low` is:
With the three ways of obtaining a connection above, if a configuration parameter is duplicated in the URL, Properties, and the client configuration file, the `priority from high to low` is:
1. JDBC URL parameters, which, as described above, can be specified as parameters of the JDBC URL.
2. Properties connProps
3. The client configuration file taos.cfg
For example: if password is specified as taosdata in the URL and as taosdemo in Properties, JDBC uses the password from the URL to establish the connection.
For example: if password is specified as taosdata in the URL and as taosdemo in Properties, JDBC uses the password from the URL to establish the connection (see the sketch below).
> For more detailed configuration, see [Client Configuration][13]
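A minimal sketch of this priority rule (it reuses the hostname taosdemo.com and the property key from the sections above; the conflicting passwords are made up purely for illustration):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;

public class ParamPriorityDemo {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        // Password supplied in the URL ...
        String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
        Properties connProps = new Properties();
        // ... and a different password supplied in Properties.
        connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdemo");
        // The URL parameter has the higher priority, so "taosdata" is used.
        Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
        conn.close();
    }
}
```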
@ -183,6 +227,7 @@ stmt.executeUpdate("use db");
// create table
stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
```
> Note: if you do not select a database with `use db`, all subsequent table operations must prefix the table name with the database name, e.g. db.tb.
### Inserting data
@ -193,6 +238,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now
System.out.println("insert " + affectedRows + " rows.");
```
> now is an internal function that defaults to the current time of the server.
> `now + 1s` means the current server time plus 1 second; the letter after the number denotes the time unit: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years).
@ -214,6 +260,7 @@ while(resultSet.next()){
System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
}
```
> Querying works the same as with a relational database; when retrieving returned fields by index, indexing starts at 1, but retrieving by field name is recommended (see the sketch below).
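For example (a sketch only; it reuses the table tb with columns ts, temperature and humidity created earlier in this document, and assumes the database is named db and the server runs locally):

```java
import java.sql.*;

public class ReadByNameDemo {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:TAOS://localhost:6030/db?user=root&password=taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from tb")) {
            while (rs.next()) {
                Timestamp ts = rs.getTimestamp(1);           // by 1-based column index
                int temperature = rs.getInt("temperature");  // by field name (recommended)
                float humidity = rs.getFloat("humidity");
                System.out.printf("%s, %d, %f%n", ts, temperature, humidity);
            }
        }
    }
}
```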
### Subscriptions
@ -248,7 +295,7 @@ while(true) {
}
```
The `consume` method returns a result set containing all new data since the previous `consume`. Be sure to choose a reasonable `consume` polling frequency as needed (such as the `Thread.sleep(1000)` in the example); otherwise you put unnecessary pressure on the server.
The `consume` method returns a result set containing all new data since the previous `consume`. Be sure to choose a reasonable `consume` polling frequency as needed (such as the `Thread.sleep(1000)` in the example); otherwise you put unnecessary pressure on the server.
#### Closing a subscription
@ -265,8 +312,11 @@ resultSet.close();
stmt.close();
conn.close();
```
> `Note: be sure to close the connection`, otherwise connections will leak.
## Using with connection pools
**HikariCP**
@ -306,6 +356,7 @@ conn.close();
connection.close(); // put back to conneciton pool
}
```
> After obtaining a connection via HikariDataSource.getConnection(), call close() when you are done with it; this does not actually close the connection but returns it to the pool.
> For more HikariCP usage questions, see the [official documentation][5]
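A minimal HikariCP sketch (it assumes HikariCP is on the classpath and reuses the demo host and credentials from the sections above; the database name test is a placeholder):

```java
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import java.sql.Connection;
import java.sql.Statement;

public class HikariPoolDemo {
    public static void main(String[] args) throws Exception {
        HikariConfig config = new HikariConfig();
        config.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
        config.setJdbcUrl("jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata");
        config.setMaximumPoolSize(10);
        config.setMinimumIdle(2);
        try (HikariDataSource ds = new HikariDataSource(config)) {
            Connection connection = ds.getConnection();  // borrow a connection from the pool
            try (Statement stmt = connection.createStatement()) {
                stmt.execute("select server_status()");
            }
            connection.close();  // returns the connection to the pool, it is not really closed
        }
    }
}
```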
@ -356,6 +407,7 @@ public static void main(String[] args) throws Exception {
connection.close(); // put back to conneciton pool
}
```
> For more druid usage questions, see the [official documentation][6]
**Notes**
@ -370,10 +422,43 @@ server_status()|
Query OK, 1 row(s) in set (0.000141s)
```
## Using with frameworks
* For using taos-jdbcdriver with Spring JdbcTemplate, see [SpringJdbcTemplate][11]
* For using it with Spring Boot + MyBatis, see [springbootdemo][12]
* For using it with Spring Boot + MyBatis, see [springbootdemo
## TAOS-JDBCDriver versions and supported TDengine and JDK versions
| taos-jdbcdriver version | TDengine version | JDK version |
| -------------------- | ----------------- | -------- |
| 2.0.12 and above | 2.0.8.0 and above | 1.8.x |
| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x |
| 1.0.3 | 1.6.1.x and above | 1.8.x |
| 1.0.2 | 1.6.1.x and above | 1.8.x |
| 1.0.1 | 1.6.1.x and above | 1.8.x |
## TDengine DataType vs. Java DataType
TDengine currently supports timestamp, numeric, character, and boolean types; the corresponding Java type conversions are as follows:
| TDengine DataType | Java DataType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT, TINYINT | java.lang.Short |
| BOOL | java.lang.Boolean |
| BINARY, NCHAR | java.lang.String |
## FAQ
@ -381,7 +466,7 @@ Query OK, 1 row(s) in set (0.000141s)
**Cause**: the program cannot find the native library taos that it depends on.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on Linux, create the following symlink: ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll to the C:\Windows\System32\ directory; on Linux, create the following symlink: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.
* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@ -406,3 +491,4 @@ Query OK, 1 row(s) in set (0.000141s)
[13]: https://www.taosdata.com/cn/documentation20/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE
[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client
[15]: https://www.taosdata.com/cn/getting-started/#%E5%BF%AB%E9%80%9F%E4%B8%8A%E6%89%8B


@ -85,7 +85,9 @@ TDengine does not yet have a dedicated set of validation queries. However, it is recommended that you use the system
## 9. Can I delete or update a record?
No. Because TDengine is designed for data collected from connected devices, modification is not allowed. However, TDengine provides a data-retention policy: once a record is older than the retention period, it is deleted automatically.
TDengine does not yet support deletion; it may be supported in the future depending on user demand.
Starting from 2.0.8.0, TDengine supports updating data that has already been written. To use the update feature, create the database with the UPDATE 1 parameter; afterwards you can use INSERT INTO to update already-written data with the same timestamp. The UPDATE parameter cannot be changed via ALTER DATABASE. For a database created without UPDATE 1, writing data with an existing timestamp neither modifies the earlier data nor raises an error (a sketch follows below).
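A minimal Java sketch of this behaviour (it assumes a locally running TDengine of version 2.0.8.0 or later and the JDBC driver described in the connector documentation; the database and table names are made up):

```java
import java.sql.*;

public class UpdateDemo {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:TAOS://localhost:6030/?user=root&password=taosdata");
             Statement stmt = conn.createStatement()) {
            // The database must be created with UPDATE 1, otherwise same-timestamp
            // writes leave the originally stored row unchanged.
            stmt.executeUpdate("create database if not exists upd_demo update 1");
            stmt.executeUpdate("create table if not exists upd_demo.t1 (ts timestamp, val int)");
            stmt.executeUpdate("insert into upd_demo.t1 values ('2021-01-23 08:47:00.000', 1)");
            // Writing the same timestamp again updates the stored value to 2.
            stmt.executeUpdate("insert into upd_demo.t1 values ('2021-01-23 08:47:00.000', 2)");
        }
    }
}
```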
## 10. How do I create a table with more than 1024 columns?
@ -132,8 +134,3 @@ TDengine uniquely identifies a machine by its hostname; when data files are moved from machine A
- For 2.0.7.0 and later, go to /var/lib/taos/dnode, fix the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is exactly the same on all machines in the cluster.
- The storage formats of 1.x and 2.x are incompatible; you need to use a migration tool or write your own application to export and re-import the data.
## 17. Does TDengine support deleting or updating data that has already been written?
TDengine does not yet support deletion; it may be supported in the future depending on user demand.
Starting from 2.0.8.0, TDengine supports updating data that has already been written. To use the update feature, create the database with the UPDATE 1 parameter; afterwards you can use INSERT INTO to update already-written data with the same timestamp. The UPDATE parameter cannot be changed via ALTER DATABASE. For a database created without UPDATE 1, writing data with an existing timestamp neither modifies the earlier data nor raises an error.


@ -222,7 +222,7 @@ MQTT is a popular IoT data-transfer protocol, and TDengine can easily
## Direct writes from the EMQ Broker
<a href="https://github.com/emqx/emqx">EMQ</a> is an open-source MQTT Broker. Without writing any code, you only need to configure a "rule" in the EMQ Dashboard to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a Web service, and the enterprise edition also provides a native TDEngine driver for direct writes. For details see the <a href="https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine">EMQ official documentation</a>.
<a href="https://github.com/emqx/emqx">EMQ</a> is an open-source MQTT Broker. Without writing any code, you only need to configure a "rule" in the EMQ Dashboard to write MQTT data directly into TDengine. EMQ X supports saving data to TDengine by sending it to a Web service, and the enterprise edition also provides a native TDengine driver for direct writes. For details see the <a href="https://docs.emqx.io/broker/latest/cn/rule/rule-example.html#%E4%BF%9D%E5%AD%98%E6%95%B0%E6%8D%AE%E5%88%B0-tdengine">EMQ official documentation</a>.
## Direct writes from the HiveMQ Broker


@ -265,9 +265,6 @@
# maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
# maxBinaryDisplayWidth 30
# enable/disable telemetry reporting
# telemetryReporting 1
# enable/disable stream (continuous query)
# stream 1


@ -1,14 +1,16 @@
FROM ubuntu:20.04
FROM ubuntu:18.04
WORKDIR /root
ARG version
RUN echo $version
COPY tdengine.tar.gz /root/
RUN tar -zxf tdengine.tar.gz
WORKDIR /root/TDengine-server-$version/
RUN /bin/bash install.sh -e no
ARG pkgFile
ARG dirName
RUN echo ${pkgFile}
RUN echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/${dirName}/
RUN /bin/bash install.sh -e no
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
ENV LANG=en_US.UTF-8


@ -0,0 +1,44 @@
#!/bin/bash
set -e
#set -x
# dockerbuild.sh
# -n [version number]
# -p [xxxx]
# set parameters by default value
verNumber=""
passWord=""
while getopts "hn:p:" arg
do
case $arg in
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknow option
echo "unkonw argument"
exit 1
;;
esac
done
echo "verNumber=${verNumber}"
docker manifest create -a tdengine/tdengine:${verNumber} tdengine/tdengine-amd64:${verNumber} tdengine/tdengine-aarch64:${verNumber} tdengine/tdengine-aarch32:${verNumber}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker manifest push tdengine/tdengine:${verNumber}
# how set latest version ???


@ -1,5 +0,0 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine-aarch64:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine-aarch64:$1


@ -1,5 +1,63 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine:$1
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -f [pkg file]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=amd64
verNumber=""
passWord=""
pkgFile=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
f)
#echo "pkgFile=$OPTARG"
pkgFile=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -f [pkg file] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknow option
echo "unkonw argument"
exit 1
;;
esac
done
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
echo "$(pwd)"
echo "====NOTES: ${pkgFile} must be in the same directory as dockerbuild.sh===="
dirName=${pkgFile%-Linux*}
#echo "dirName=${dirName}"
docker build --rm -f "Dockerfile" -t tdengine/tdengine-${cpuType}:${verNumber} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName}
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
docker push tdengine/tdengine-${cpuType}:${verNumber}
# set this version to latest version
docker tag tdengine/tdengine-${cpuType}:${verNumber} tdengine/tdengine-${cpuType}:latest
docker push tdengine/tdengine-${cpuType}:latest


@ -0,0 +1,56 @@
#!/bin/bash
#
set -e
#set -x
# dockerbuild.sh
# -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...]
# -n [version number]
# -p [password for docker hub]
# set parameters by default value
cpuType=aarch64
verNumber=""
passWord=""
while getopts "hc:n:p:f:" arg
do
case $arg in
c)
#echo "cpuType=$OPTARG"
cpuType=$(echo $OPTARG)
;;
n)
#echo "verNumber=$OPTARG"
verNumber=$(echo $OPTARG)
;;
p)
#echo "passWord=$OPTARG"
passWord=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [aarch32 | aarch64 | amd64 | x86 | mips64 ...] "
echo " -n [version number] "
echo " -p [password for docker hub] "
exit 0
;;
?) #unknow option
echo "unkonw argument"
exit 1
;;
esac
done
pkgFile=TDengine-server-${verNumber}-Linux-${cpuType}.tar.gz
echo "cpuType=${cpuType} verNumber=${verNumber} pkgFile=${pkgFile} "
scriptDir=`pwd`
pkgDir=$scriptDir/../../release/
cp -f ${pkgDir}/${pkgFile} .
./dockerbuild.sh -c ${cpuType} -f ${pkgFile} -n ${verNumber} -p ${passWord}
rm -f ${pkgFile}


@ -0,0 +1,144 @@
# Usage:
# sudo gdb -x ./taosd-dump-cfg.gdb
define attach_pidof
if $argc != 1
help attach_pidof
else
shell echo -e "\
set \$PID = "$(echo $(pidof $arg0) 0 | cut -d " " -f 1)"\n\
if \$PID > 0\n\
attach "$(pidof -s $arg0)"\n\
else\n\
print \"Process '"$arg0"' not found\"\n\
end" > /tmp/gdb.pidof
source /tmp/gdb.pidof
end
end
document attach_pidof
Attach to process by name
Usage: attach_pidof PROG_NAME
end
set $TAOS_CFG_VTYPE_INT8 = 0
set $TAOS_CFG_VTYPE_INT16 = 1
set $TAOS_CFG_VTYPE_INT32 = 2
set $TAOS_CFG_VTYPE_FLOAT = 3
set $TAOS_CFG_VTYPE_STRING = 4
set $TAOS_CFG_VTYPE_IPSTR = 5
set $TAOS_CFG_VTYPE_DIRECTORY = 6
set $TSDB_CFG_CTYPE_B_CONFIG = 1U
set $TSDB_CFG_CTYPE_B_SHOW = 2U
set $TSDB_CFG_CTYPE_B_LOG = 4U
set $TSDB_CFG_CTYPE_B_CLIENT = 8U
set $TSDB_CFG_CTYPE_B_OPTION = 16U
set $TSDB_CFG_CTYPE_B_NOT_PRINT = 32U
set $TSDB_CFG_PRINT_LEN = 53
define print_blank
if $argc == 1
set $blank_len = $arg0
while $blank_len > 0
printf "%s", " "
set $blank_len = $blank_len - 1
end
end
end
define dump_cfg
if $argc != 1
help dump_cfg
else
set $blen = $TSDB_CFG_PRINT_LEN - (int)strlen($arg0.option)
if $blen < 0
$blen = 0
end
#printf "%s: %d\n", "******blen: ", $blen
printf "%s: ", $arg0.option
print_blank $blen
if $arg0.valType == $TAOS_CFG_VTYPE_INT8
printf "%d\n", *((int8_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT16
printf "%d\n", *((int16_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_INT32
printf "%d\n", *((int32_t *) $arg0.ptr)
else
if $arg0.valType == $TAOS_CFG_VTYPE_FLOAT
printf "%f\n", *((float *) $arg0.ptr)
else
printf "%s\n", $arg0.ptr
end
end
end
end
end
end
document dump_cfg
Dump a cfg entry
Usage: dump_cfg cfg
end
set pagination off
attach_pidof taosd
set $idx=0
#print tsGlobalConfigNum
#set $end=$1
set $end=tsGlobalConfigNum
p "*=*=*=*=*=*=*=*=*= taos global config:"
#while ($idx .lt. $end)
while ($idx < $end)
# print tsGlobalConfig[$idx].option
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
# p "1"
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
set $idx=0
p "*=*=*=*=*=*=*=*=*= taos local config:"
while ($idx < $end)
set $cfg = tsGlobalConfig[$idx]
set $tsce = tscEmbedded
if ($tsce == 0)
if !($cfg.cfgType & $TSDB_CFG_CTYPE_B_CLIENT)
end
else
if $cfg.cfgType & $TSDB_CFG_CTYPE_B_NOT_PRINT
else
if ($cfg.cfgType & $TSDB_CFG_CTYPE_B_SHOW)
else
dump_cfg $cfg
end
end
end
set $idx=$idx+1
end
detach
quit


@ -56,7 +56,7 @@ int32_t bnInitThread() {
pthread_attr_destroy(&thattr);
if (ret != 0) {
mError("failed to create balance thread since %s", strerror(errno));
mError("failed to create balance thread since %s", strerror(ret));
return -1;
}


@ -38,7 +38,7 @@ typedef struct SLocalDataSource {
tFilePage filePage;
} SLocalDataSource;
typedef struct SLocalReducer {
typedef struct SLocalMerger {
SLocalDataSource ** pLocalDataSrc;
int32_t numOfBuffer;
int32_t numOfCompleted;
@ -62,7 +62,7 @@ typedef struct SLocalReducer {
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
} SLocalReducer;
} SLocalMerger;
typedef struct SRetrieveSupport {
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
@ -89,10 +89,10 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
/*
* create local reducer to launch the second-stage reduce process at client site
*/
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
void tscDestroyLocalReducer(SSqlObj *pSql);
void tscDestroyLocalMerger(SSqlObj *pSql);
int32_t tscDoLocalMerge(SSqlObj *pSql);


@ -98,8 +98,7 @@ static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t sub
return pCmd->pQueryInfo[subClauseIndex];
}
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
@ -111,7 +110,7 @@ void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap);
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, SName* pName, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList);
/**
@ -142,10 +141,6 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo);
bool tscIsInsertData(char* sqlstr);
/* use for keep current db info temporarily, for handle table with db prefix */
// todo remove it
void tscGetDBInfoFromTableFullName(char* tableId, char* db);
int tscAllocPayload(SSqlCmd* pCmd, int size);
TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes);
@ -215,8 +210,8 @@ SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
@ -225,7 +220,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo);
void tscClearSubqueryInfo(SSqlCmd* pCmd);
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables);
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables);
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo);
@ -292,7 +287,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
STableMeta* tscTableMetaClone(STableMeta* pTableMeta);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
void* malloc_throw(size_t size);


@ -39,7 +39,7 @@ extern "C" {
// forward declaration
struct SSqlInfo;
struct SLocalReducer;
struct SLocalMerger;
// data source from sql string or from file
enum {
@ -67,7 +67,7 @@ typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name
} CChildTableMeta;
typedef struct STableMeta {
@ -91,7 +91,7 @@ typedef struct STableMetaInfo {
* 2. keep the vgroup index for multi-vnode insertion
*/
int32_t vgroupIndex;
char name[TSDB_TABLE_FNAME_LEN]; // (super) table name
SName name;
char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql
SArray *tagColList; // SArray<SColumn*>, involved tag columns
} STableMetaInfo;
@ -142,7 +142,7 @@ typedef struct SCond {
} SCond;
typedef struct SJoinNode {
char tableId[TSDB_TABLE_FNAME_LEN];
char tableName[TSDB_TABLE_FNAME_LEN];
uint64_t uid;
int16_t tagColId;
} SJoinNode;
@ -176,7 +176,7 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct STableDataBlocks {
char tableName[TSDB_TABLE_FNAME_LEN];
SName tableName;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgId; // virtual group id
@ -254,7 +254,7 @@ typedef struct {
int8_t submitSchema; // submit block is built with table schema
STagData tagData; // NOTE: pTagData->data is used as a variant length array
char **pTableNameList; // all involved tableMeta list of current insert sql statement.
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
int32_t numOfTables;
SHashObj *pTableBlockHashList; // data block for each table
@ -292,7 +292,7 @@ typedef struct {
SColumnIndex* pColumnIndex;
SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions
struct SLocalReducer *pLocalReducer;
struct SLocalMerger *pLocalMerger;
} SSqlRes;
typedef struct STscObj {
@ -436,7 +436,7 @@ void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql);
void tscImportDataFromFile(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);


@ -569,10 +569,12 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
}
char fullName[TSDB_TABLE_FNAME_LEN * 2] = {0};
extractDBName(pTableMetaInfo->name, fullName);
tNameGetDbName(&pTableMetaInfo->name, fullName);
extractTableName(pMeta->sTableName, param->sTableName);
snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName);
extractTableName(pTableMetaInfo->name, param->buf);
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
@ -602,6 +604,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -675,8 +678,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pTableMetaInfo->pTableMeta != NULL);
char tableName[TSDB_TABLE_NAME_LEN] = {0};
extractTableName(pTableMetaInfo->name, tableName);
const char* tableName = tNameGetTableName(&pTableMetaInfo->name);
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
@ -712,7 +714,9 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
extractTableName(pTableMetaInfo->name, param->buf);
strncpy(param->buf, tNameGetTableName(&pTableMetaInfo->name), TSDB_TABLE_NAME_LEN);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
param->fp = tscRebuildCreateDBStatement;


@ -56,7 +56,7 @@ int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
}
}
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDescriptor *pDesc) {
static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescriptor *pDesc) {
/*
* the fields and offset attributes in pCmd and pModel may be different due to
* merge requirement. So, the final result in pRes structure is formatted in accordance with the pCmd object.
@ -166,7 +166,7 @@ static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
return pFillCol;
}
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
@ -212,9 +212,9 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
return;
}
size_t size = sizeof(SLocalReducer) + POINTER_BYTES * numOfFlush;
size_t size = sizeof(SLocalMerger) + POINTER_BYTES * numOfFlush;
SLocalReducer *pReducer = (SLocalReducer *) calloc(1, size);
SLocalMerger *pReducer = (SLocalMerger *) calloc(1, size);
if (pReducer == NULL) {
tscError("%p failed to create local merge structure, out of memory", pSql);
@ -372,7 +372,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->offset = (int32_t)pQueryInfo->limit.offset;
pRes->pLocalReducer = pReducer;
pRes->pLocalMerger = pReducer;
pRes->numOfGroups = 0;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
@ -477,13 +477,13 @@ int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePa
return 0;
}
void tscDestroyLocalReducer(SSqlObj *pSql) {
void tscDestroyLocalMerger(SSqlObj *pSql) {
if (pSql == NULL) {
return;
}
SSqlRes *pRes = &(pSql->res);
if (pRes->pLocalReducer == NULL) {
if (pRes->pLocalMerger == NULL) {
return;
}
@ -491,14 +491,14 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// there is no more result, so we release all allocated resource
SLocalReducer *pLocalReducer = (SLocalReducer *)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
if (pLocalReducer != NULL) {
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
SLocalMerger *pLocalMerge = (SLocalMerger *)atomic_exchange_ptr(&pRes->pLocalMerger, NULL);
if (pLocalMerge != NULL) {
pLocalMerge->pFillInfo = taosDestroyFillInfo(pLocalMerge->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
if (pLocalMerge->pCtx != NULL) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[i];
tVariantDestroy(&pCtx->tag);
tfree(pCtx->resultInfo);
@ -508,31 +508,31 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
}
}
tfree(pLocalReducer->pCtx);
tfree(pLocalMerge->pCtx);
}
tfree(pLocalReducer->prevRowOfInput);
tfree(pLocalMerge->prevRowOfInput);
tfree(pLocalReducer->pTempBuffer);
tfree(pLocalReducer->pResultBuf);
tfree(pLocalMerge->pTempBuffer);
tfree(pLocalMerge->pResultBuf);
if (pLocalReducer->pLoserTree) {
tfree(pLocalReducer->pLoserTree->param);
tfree(pLocalReducer->pLoserTree);
if (pLocalMerge->pLoserTree) {
tfree(pLocalMerge->pLoserTree->param);
tfree(pLocalMerge->pLoserTree);
}
tfree(pLocalReducer->pFinalRes);
tfree(pLocalReducer->discardData);
tfree(pLocalMerge->pFinalRes);
tfree(pLocalMerge->discardData);
tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
pLocalReducer->numOfVnode);
for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
tfree(pLocalReducer->pLocalDataSrc[i]);
tscLocalReducerEnvDestroy(pLocalMerge->pExtMemBuffer, pLocalMerge->pDesc, pLocalMerge->resColModel, pLocalMerge->finalModel,
pLocalMerge->numOfVnode);
for (int32_t i = 0; i < pLocalMerge->numOfBuffer; ++i) {
tfree(pLocalMerge->pLocalDataSrc[i]);
}
pLocalReducer->numOfBuffer = 0;
pLocalReducer->numOfCompleted = 0;
free(pLocalReducer);
pLocalMerge->numOfBuffer = 0;
pLocalMerge->numOfCompleted = 0;
free(pLocalMerge);
} else {
tscDebug("%p already freed or another free function is invoked", pSql);
}
@ -604,7 +604,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
}
bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
bool isSameGroup(SSqlCmd *pCmd, SLocalMerger *pReducer, char *pPrev, tFilePage *tmpBuffer) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// disable merge procedure for column projection query
@ -795,12 +795,12 @@ void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDe
/**
*
* @param pLocalReducer
* @param pLocalMerge
* @param pOneInterDataSrc
* @param treeList
* @return the number of remain input source. if ret == 0, all data has been handled
*/
int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
int32_t loadNewDataFromDiskFor(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
bool *needAdjustLoserTree) {
pOneInterDataSrc->rowIdx = 0;
pOneInterDataSrc->pageId += 1;
@ -817,17 +817,17 @@ int32_t loadNewDataFromDiskFor(SLocalReducer *pLocalReducer, SLocalDataSource *p
#endif
*needAdjustLoserTree = true;
} else {
pLocalReducer->numOfCompleted += 1;
pLocalMerge->numOfCompleted += 1;
pOneInterDataSrc->rowIdx = -1;
pOneInterDataSrc->pageId = -1;
*needAdjustLoserTree = true;
}
return pLocalReducer->numOfBuffer;
return pLocalMerge->numOfBuffer;
}
void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *pOneInterDataSrc,
void adjustLoserTreeFromNewData(SLocalMerger *pLocalMerge, SLocalDataSource *pOneInterDataSrc,
SLoserTreeInfo *pTree) {
/*
* load a new data page into memory for intermediate dataset source,
@ -835,7 +835,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
*/
bool needToAdjust = true;
if (pOneInterDataSrc->filePage.num <= pOneInterDataSrc->rowIdx) {
loadNewDataFromDiskFor(pLocalReducer, pOneInterDataSrc, &needToAdjust);
loadNewDataFromDiskFor(pLocalMerge, pOneInterDataSrc, &needToAdjust);
}
/*
@ -843,7 +843,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
* if the loser tree is rebuild completed, we do not need to adjust
*/
if (needToAdjust) {
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalReducer->numOfBuffer;
int32_t leafNodeIdx = pTree->pNode[0].index + pLocalMerge->numOfBuffer;
#ifdef _DEBUG_VIEW
printf("before adjust:\t");
@ -860,7 +860,7 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
}
}
void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
void savePrevRecordAndSetupFillInfo(SLocalMerger *pLocalMerge, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
// discard following dataset in the same group and reset the interpolation information
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -873,28 +873,28 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
taosResetFillInfo(pFillInfo, revisedSTime);
}
pLocalReducer->discard = true;
pLocalReducer->discardData->num = 0;
pLocalMerge->discard = true;
pLocalMerge->discardData->num = 0;
SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
tColModelAppend(pModel, pLocalReducer->discardData, pLocalReducer->prevRowOfInput, 0, 1, 1);
SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pModel, pLocalMerge->discardData, pLocalMerge->prevRowOfInput, 0, 1, 1);
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalMerger *pLocalMerge, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
tFilePage * pBeforeFillData = pLocalMerge->pResultBuf;
pRes->data = pLocalReducer->pFinalRes;
pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) pBeforeFillData->num;
if (pQueryInfo->limit.offset > 0) {
if (pQueryInfo->limit.offset < pRes->numOfRows) {
int32_t prevSize = (int32_t) pBeforeFillData->num;
tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
tColModelErase(pLocalMerge->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
/* remove the hole in column model */
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
pRes->numOfRows -= (int32_t) pQueryInfo->limit.offset;
pQueryInfo->limit.offset = 0;
@ -907,7 +907,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
pRes->numOfRows = 0;
pBeforeFillData->num = 0;
pLocalReducer->discard = true;
pLocalMerge->discard = true;
return;
}
@ -923,29 +923,29 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
pRes->numOfRows -= overflow;
pBeforeFillData->num -= overflow;
tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);
tColModelCompact(pLocalMerge->finalModel, pBeforeFillData, prevSize);
// set remain data to be discarded, and reset the interpolation information
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pLocalMerge->pFillInfo);
}
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalReducer->finalModel->rowSize));
memcpy(pRes->data, pBeforeFillData->data, (size_t)(pRes->numOfRows * pLocalMerge->finalModel->rowSize));
pRes->numOfClauseTotal += pRes->numOfRows;
pBeforeFillData->num = 0;
}
/*
* Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
* Note: pRes->pLocalMerge may be null, due to the fact that "tscDestroyLocalMerger" is called
* by "interuptHandler" function in shell
*/
static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutput) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
tFilePage *pBeforeFillData = pLocalReducer->pResultBuf;
tFilePage *pBeforeFillData = pLocalMerge->pResultBuf;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
// todo extract function
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@ -953,11 +953,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalReducer->resColModel->capacity);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalMerge->resColModel->capacity);
}
while (1) {
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalReducer->resColModel->capacity);
int64_t newRows = taosFillResultDataBlock(pFillInfo, pResPages, pLocalMerge->resColModel->capacity);
if (pQueryInfo->limit.offset < newRows) {
newRows -= pQueryInfo->limit.offset;
@ -970,7 +970,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
}
pRes->data = pLocalReducer->pFinalRes;
pRes->data = pLocalMerge->pFinalRes;
pRes->numOfRows = (int32_t) newRows;
pQueryInfo->limit.offset = 0;
@ -985,7 +985,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
}
// all output in current group are completed
int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalReducer->resColModel->capacity);
int32_t totalRemainRows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, actualETime, pLocalMerge->resColModel->capacity);
if (totalRemainRows <= 0) {
break;
}
@ -1003,7 +1003,7 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
assert(pRes->numOfRows >= 0);
/* set remain data to be discarded, and reset the interpolation information */
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
savePrevRecordAndSetupFillInfo(pLocalMerge, pQueryInfo, pFillInfo);
}
int32_t offset = 0;
@ -1025,8 +1025,8 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
tfree(pResPages);
}
static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
SColumnModel *pColumnModel = pLocalReducer->pDesc->pColumnModel;
static void savePreviousRow(SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
SColumnModel *pColumnModel = pLocalMerge->pDesc->pColumnModel;
assert(pColumnModel->capacity == 1 && tmpBuffer->num == 1);
// copy to previous temp buffer
@ -1034,20 +1034,20 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
SSchema *pSchema = getColumnModelSchema(pColumnModel, i);
int16_t offset = getColumnModelOffset(pColumnModel, i);
memcpy(pLocalReducer->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
memcpy(pLocalMerge->prevRowOfInput + offset, tmpBuffer->data + offset, pSchema->bytes);
}
tmpBuffer->num = 0;
pLocalReducer->hasPrevRow = true;
pLocalMerge->hasPrevRow = true;
}
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[j];
// tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
int32_t functionId = pCtx->functionId;
@ -1074,20 +1074,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
}
for (int32_t j = 0; j < size; ++j) {
int32_t functionId = pLocalReducer->pCtx[j].functionId;
int32_t functionId = pLocalMerge->pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
aAggs[functionId].mergeFunc(&pLocalReducer->pCtx[j]);
aAggs[functionId].mergeFunc(&pLocalMerge->pCtx[j]);
}
}
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
if (pLocalReducer->hasUnprocessedRow) {
pLocalReducer->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
savePreviousRow(pLocalReducer, tmpBuffer);
static void handleUnprocessedRow(SSqlCmd *pCmd, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
if (pLocalMerge->hasUnprocessedRow) {
pLocalMerge->hasUnprocessedRow = false;
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
@ -1120,7 +1120,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* filled with the same result, which is the tags, specified in group by clause
*
*/
static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalReducer *pLocalReducer) {
static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLocalMerger *pLocalMerge) {
int32_t maxBufSize = 0; // find the max tags column length to prepare the buffer
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@ -1135,7 +1135,7 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
char *buf = malloc((size_t)maxBufSize);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
if (pCtx->functionId != TSDB_FUNC_TAG) {
continue;
}
@ -1153,20 +1153,20 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
free(buf);
}
int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx* pCtx = &pLocalMerge->pCtx[k];
aAggs[pCtx->functionId].xFinalize(pCtx);
}
pLocalReducer->hasPrevRow = false;
pLocalMerge->hasPrevRow = false;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalReducer->pCtx);
pLocalReducer->pResultBuf->num += numOfRes;
int32_t numOfRes = (int32_t)getNumOfResultLocal(pQueryInfo, pLocalMerge->pCtx);
pLocalMerge->pResultBuf->num += numOfRes;
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalReducer);
fillMultiRowsOfTagsVal(pQueryInfo, numOfRes, pLocalMerge);
return numOfRes;
}
@ -1177,22 +1177,22 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
* results generated by simple aggregation function, we merge them all into one points
* *Exception*: column projection query, required no merge procedure
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
bool needToMerge(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
int16_t functionId = pLocalReducer->pCtx[0].functionId;
int16_t functionId = pLocalMerge->pCtx[0].functionId;
// todo opt performance
if ((/*functionId == TSDB_FUNC_PRJ || */functionId == TSDB_FUNC_ARITHM) || (tscIsProjectionQueryOnSTable(pQueryInfo, 0))) { // column projection query
ret = 1; // disable merge procedure
} else {
tOrderDescriptor *pDesc = pLocalReducer->pDesc;
tOrderDescriptor *pDesc = pLocalMerge->pDesc;
if (pDesc->orderInfo.numOfCols > 0) {
if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
// todo refactor comparator
ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
ret = compare_a(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
} else { // desc
ret = compare_d(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
ret = compare_d(pLocalMerge->pDesc, 1, 0, pLocalMerge->prevRowOfInput, 1, 0, tmpBuffer->data);
}
}
}
@ -1230,17 +1230,17 @@ static bool saveGroupResultInfo(SSqlObj *pSql) {
/**
*
* @param pSql
* @param pLocalReducer
* @param pLocalMerge
* @param noMoreCurrentGroupRes
* @return if current group is skipped, return false, and do NOT record it into pRes->numOfGroups
*/
bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCurrentGroupRes) {
bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurrentGroupRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage * pResBuf = pLocalReducer->pResultBuf;
SColumnModel *pModel = pLocalReducer->resColModel;
tFilePage * pResBuf = pLocalMerge->pResultBuf;
SColumnModel *pModel = pLocalMerge->resColModel;
pRes->code = TSDB_CODE_SUCCESS;
@ -1251,11 +1251,11 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
if (pQueryInfo->slimit.offset > 0) {
pRes->numOfRows = 0;
pQueryInfo->slimit.offset -= 1;
pLocalReducer->discard = !noMoreCurrentGroupRes;
pLocalMerge->discard = !noMoreCurrentGroupRes;
if (pLocalReducer->discard) {
SColumnModel *pInternModel = pLocalReducer->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalReducer->discardData, pLocalReducer->pTempBuffer->data, 0, 1, 1);
if (pLocalMerge->discard) {
SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel;
tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1);
}
return false;
@ -1264,19 +1264,14 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
tColModelCompact(pModel, pResBuf, pModel->capacity);
if (tscIsSecondStageQuery(pQueryInfo)) {
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalModel->rowSize);
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize);
}
#ifdef _DEBUG_VIEW
printf("final result before interpo:\n");
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
#endif
// no interval query, no fill operation
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
SFillInfo* pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL) {
TSKEY ekey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
@ -1284,34 +1279,34 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
doFillResult(pSql, pLocalMerge, noMoreCurrentGroupRes);
}
return true;
}
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// reset output buffer to the beginning
void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalMerger *pLocalMerge) {// reset output buffer to the beginning
size_t t = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
pLocalMerge->pCtx[i].aOutputBuf = pLocalMerge->pResultBuf->data + pExpr->offset * pLocalMerge->resColModel->capacity;
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
pLocalReducer->pCtx[i].ptsOutputBuf = pLocalReducer->pCtx[0].aOutputBuf;
pLocalMerge->pCtx[i].ptsOutputBuf = pLocalMerge->pCtx[0].aOutputBuf;
}
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
memset(pLocalMerge->pResultBuf, 0, pLocalMerge->nResultBufSize + sizeof(tFilePage));
}
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer *pLocalReducer) {
static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalMerger *pLocalMerge) {
// In handling data in other groups, we need to reset the interpolation information for a new group data
pRes->numOfRows = 0;
pRes->numOfRowsGroup = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->limit.offset = pLocalReducer->offset;
pQueryInfo->limit.offset = pLocalMerge->offset;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@ -1320,12 +1315,12 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
taosResetFillInfo(pLocalMerge->pFillInfo, newTime);
}
}
static bool isAllSourcesCompleted(SLocalReducer *pLocalReducer) {
return (pLocalReducer->numOfBuffer == pLocalReducer->numOfCompleted);
static bool isAllSourcesCompleted(SLocalMerger *pLocalMerge) {
return (pLocalMerge->numOfBuffer == pLocalMerge->numOfCompleted);
}
static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
@ -1333,19 +1328,19 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
if (pFillInfo != NULL && taosFillHasMoreResults(pFillInfo)) {
assert(pQueryInfo->fillType != TSDB_FILL_NONE);
tFilePage *pFinalDataBuf = pLocalReducer->pResultBuf;
tFilePage *pFinalDataBuf = pLocalMerge->pResultBuf;
int64_t etime = *(int64_t *)(pFinalDataBuf->data + TSDB_KEYSIZE * (pFillInfo->numOfRows - 1));
// the first column must be the timestamp column
int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t) getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) { // do fill gap
doFillResult(pSql, pLocalReducer, false);
doFillResult(pSql, pLocalMerge, false);
}
return true;
@ -1358,23 +1353,23 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SFillInfo *pFillInfo = pLocalReducer->pFillInfo;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SFillInfo *pFillInfo = pLocalMerge->pFillInfo;
bool prevGroupCompleted = (!pLocalReducer->discard) && pLocalReducer->hasUnprocessedRow;
bool prevGroupCompleted = (!pLocalMerge->discard) && pLocalMerge->hasUnprocessedRow;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if ((isAllSourcesCompleted(pLocalReducer) && !pLocalReducer->hasPrevRow) || pLocalReducer->pLocalDataSrc[0] == NULL ||
if ((isAllSourcesCompleted(pLocalMerge) && !pLocalMerge->hasPrevRow) || pLocalMerge->pLocalDataSrc[0] == NULL ||
prevGroupCompleted) {
// if fillType == TSDB_FILL_NONE, return directly
if (pQueryInfo->fillType != TSDB_FILL_NONE &&
((pRes->numOfRowsGroup < pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) || (pQueryInfo->limit.limit < 0))) {
int64_t etime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey : pQueryInfo->window.skey;
int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalReducer->resColModel->capacity);
int32_t rows = (int32_t)getNumOfResultsAfterFillGap(pFillInfo, etime, pLocalMerge->resColModel->capacity);
if (rows > 0) {
doFillResult(pSql, pLocalReducer, true);
doFillResult(pSql, pLocalMerge, true);
}
}
@ -1384,7 +1379,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
*
* No results will be generated and query completed.
*/
if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalReducer) && (!pLocalReducer->hasUnprocessedRow))) {
if (pRes->numOfRows > 0 || (isAllSourcesCompleted(pLocalMerge) && (!pLocalMerge->hasUnprocessedRow))) {
return true;
}
@ -1393,7 +1388,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
return true;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
return false;
@ -1403,12 +1398,12 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
SQLFunctionCtx *pCtx = &pLocalMerge->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
@ -1417,7 +1412,7 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
}
}
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
}
int32_t tscDoLocalMerge(SSqlObj *pSql) {
@ -1426,18 +1421,18 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tscResetForNextRetrieve(pRes);
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalReducer == NULL) { // all data has been processed
if (pSql->signature != pSql || pRes == NULL || pRes->pLocalMerger == NULL) { // all data has been processed
if (pRes->code == TSDB_CODE_SUCCESS) {
return pRes->code;
}
tscError("%p local merge abort due to error occurs, code:%s", pSql, tstrerror(pRes->code));
return pRes->code;
}
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SLocalMerger *pLocalMerge = pRes->pLocalMerger;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
tFilePage *tmpBuffer = pLocalMerge->pTempBuffer;
if (doHandleLastRemainData(pSql)) {
return TSDB_CODE_SUCCESS;
@ -1447,24 +1442,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
SLoserTreeInfo *pTree = pLocalReducer->pLoserTree;
SLoserTreeInfo *pTree = pLocalMerge->pLoserTree;
// clear buffer
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
SColumnModel *pModel = pLocalReducer->pDesc->pColumnModel;
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
SColumnModel *pModel = pLocalMerge->pDesc->pColumnModel;
while (1) {
if (isAllSourcesCompleted(pLocalReducer)) {
if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
#ifdef _DEBUG_VIEW
printf("chosen data in pTree[0] = %d\n", pTree->pNode[0].index);
#endif
assert((pTree->pNode[0].index < pLocalReducer->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
assert((pTree->pNode[0].index < pLocalMerge->numOfBuffer) && (pTree->pNode[0].index >= 0) && tmpBuffer->num == 0);
// chosen from loser tree
SLocalDataSource *pOneDataSrc = pLocalReducer->pLocalDataSrc[pTree->pNode[0].index];
SLocalDataSource *pOneDataSrc = pLocalMerge->pLocalDataSrc[pTree->pNode[0].index];
tColModelAppend(pModel, tmpBuffer, pOneDataSrc->filePage.data, pOneDataSrc->rowIdx, 1,
pOneDataSrc->pMemBuffer->pColumnModel->capacity);
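
The loop above always consumes the row referenced by pTree->pNode[0].index, i.e. the current winner of a loser tree built over the per-vnode result buffers. As a rough, self-contained stand-in for that idea (a simplification, not the real SLoserTreeInfo code), the sketch below performs a k-way merge of sorted arrays with a linear scan for the smallest head; the actual loser tree makes each pick O(log k) instead of O(k).

```c
/* Stand-in for the loser-tree pick: k-way merge of sorted int arrays via linear scan.
 * Types and names are illustrative, not the TDengine structures. */
#include <stdio.h>

typedef struct DemoSource {
  const int *rows;
  int        num;
  int        rowIdx;   /* next unread row, like SLocalDataSource.rowIdx */
} DemoSource;

static int pickWinner(DemoSource *src, int k) {
  int winner = -1;
  for (int i = 0; i < k; ++i) {
    if (src[i].rowIdx >= src[i].num) continue;            /* source exhausted */
    if (winner < 0 || src[i].rows[src[i].rowIdx] < src[winner].rows[src[winner].rowIdx]) {
      winner = i;
    }
  }
  return winner;        /* -1 once all sources are completed */
}

int main(void) {
  const int a[] = {1, 4, 9}, b[] = {2, 3, 10}, c[] = {5};
  DemoSource src[] = {{a, 3, 0}, {b, 3, 0}, {c, 1, 0}};

  for (int w = pickWinner(src, 3); w >= 0; w = pickWinner(src, 3)) {
    printf("%d ", src[w].rows[src[w].rowIdx]);   /* emit the winning row ... */
    src[w].rowIdx += 1;                          /* ... then advance that source */
  }
  printf("\n");                                  /* 1 2 3 4 5 9 10 */
  return 0;
}
```
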
@ -1477,76 +1472,76 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
tColModelDisplayEx(pModel, tmpBuffer->data, tmpBuffer->num, pModel->capacity, colInfo);
#endif
if (pLocalReducer->discard) {
assert(pLocalReducer->hasUnprocessedRow == false);
if (pLocalMerge->discard) {
assert(pLocalMerge->hasUnprocessedRow == false);
/* current record belongs to the same group of previous record, need to discard it */
if (isSameGroup(pCmd, pLocalReducer, pLocalReducer->discardData->data, tmpBuffer)) {
if (isSameGroup(pCmd, pLocalMerge, pLocalMerge->discardData->data, tmpBuffer)) {
tmpBuffer->num = 0;
pOneDataSrc->rowIdx += 1;
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
// all inputs are exhausted, abort current process
if (isAllSourcesCompleted(pLocalReducer)) {
if (isAllSourcesCompleted(pLocalMerge)) {
break;
}
// data belongs to the same group needs to be discarded
continue;
} else {
pLocalReducer->discard = false;
pLocalReducer->discardData->num = 0;
pLocalMerge->discard = false;
pLocalMerge->discardData->num = 0;
if (saveGroupResultInfo(pSql)) {
return TSDB_CODE_SUCCESS;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
}
if (pLocalReducer->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalReducer, tmpBuffer)) {
if (pLocalMerge->hasPrevRow) {
if (needToMerge(pQueryInfo, pLocalMerge, tmpBuffer)) {
// belong to the group of the previous row, continue process it
doExecuteSecondaryMerge(pCmd, pLocalReducer, false);
doExecuteSecondaryMerge(pCmd, pLocalMerge, false);
// copy to buffer
savePreviousRow(pLocalReducer, tmpBuffer);
savePreviousRow(pLocalMerge, tmpBuffer);
} else {
/*
* current row does not belong to the group of previous row.
* so the processing of previous group is completed.
*/
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalReducer);
bool sameGroup = isSameGroup(pCmd, pLocalReducer, pLocalReducer->prevRowOfInput, tmpBuffer);
int32_t numOfRes = finalizeRes(pQueryInfo, pLocalMerge);
bool sameGroup = isSameGroup(pCmd, pLocalMerge, pLocalMerge->prevRowOfInput, tmpBuffer);
tFilePage *pResBuf = pLocalReducer->pResultBuf;
tFilePage *pResBuf = pLocalMerge->pResultBuf;
/*
* if the previous group does NOT generate any result (pResBuf->num == 0),
* continue to process results instead of return results.
*/
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalReducer->resColModel->capacity)) {
if ((!sameGroup && pResBuf->num > 0) || (pResBuf->num == pLocalMerge->resColModel->capacity)) {
// does not belong to the same group
bool notSkipped = genFinalResults(pSql, pLocalReducer, !sameGroup);
bool notSkipped = genFinalResults(pSql, pLocalMerge, !sameGroup);
// this row needs to discard, since it belongs to the group of previous
if (pLocalReducer->discard && sameGroup) {
pLocalReducer->hasUnprocessedRow = false;
if (pLocalMerge->discard && sameGroup) {
pLocalMerge->hasUnprocessedRow = false;
tmpBuffer->num = 0;
} else { // current row does not belongs to the previous group, so it is not be handled yet.
pLocalReducer->hasUnprocessedRow = true;
pLocalMerge->hasUnprocessedRow = true;
}
resetOutputBuf(pQueryInfo, pLocalReducer);
resetOutputBuf(pQueryInfo, pLocalMerge);
pOneDataSrc->rowIdx += 1;
// here we do not check the return value
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
if (pRes->numOfRows == 0) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
if (!sameGroup) {
/*
@ -1557,7 +1552,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
}
resetEnvForNewResultset(pRes, pCmd, pLocalReducer);
resetEnvForNewResultset(pRes, pCmd, pLocalMerge);
}
} else {
/*
@ -1565,7 +1560,7 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* We start the process in a new round.
*/
if (sameGroup) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
handleUnprocessedRow(pCmd, pLocalMerge, tmpBuffer);
}
}
@ -1577,24 +1572,24 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
}
} else { // result buffer is not full
doProcessResultInNextWindow(pSql, numOfRes);
savePreviousRow(pLocalReducer, tmpBuffer);
savePreviousRow(pLocalMerge, tmpBuffer);
}
}
} else {
doExecuteSecondaryMerge(pCmd, pLocalReducer, true);
savePreviousRow(pLocalReducer, tmpBuffer); // copy the processed row to buffer
doExecuteSecondaryMerge(pCmd, pLocalMerge, true);
savePreviousRow(pLocalMerge, tmpBuffer); // copy the processed row to buffer
}
pOneDataSrc->rowIdx += 1;
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
adjustLoserTreeFromNewData(pLocalMerge, pOneDataSrc, pTree);
}
if (pLocalReducer->hasPrevRow) {
finalizeRes(pQueryInfo, pLocalReducer);
if (pLocalMerge->hasPrevRow) {
finalizeRes(pQueryInfo, pLocalMerge);
}
if (pLocalReducer->pResultBuf->num) {
genFinalResults(pSql, pLocalReducer, true);
if (pLocalMerge->pResultBuf->num) {
genFinalResults(pSql, pLocalMerge, true);
}
return TSDB_CODE_SUCCESS;
@ -1602,8 +1597,8 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) {
SSqlRes *pRes = &pObj->res;
if (pRes->pLocalReducer != NULL) {
tscDestroyLocalReducer(pObj);
if (pRes->pLocalMerger != NULL) {
tscDestroyLocalMerger(pObj);
}
pRes->qhandle = 1; // hack to pass the safety check in fetch_row function
@ -1611,17 +1606,17 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
pRes->row = 0;
pRes->rspType = 0; // used as a flag to denote if taos_retrieved() has been called yet
pRes->pLocalReducer = (SLocalReducer *)calloc(1, sizeof(SLocalReducer));
pRes->pLocalMerger = (SLocalMerger *)calloc(1, sizeof(SLocalMerger));
/*
* we need one additional byte space
* the sprintf function needs one additional space to put '\0' at the end of string
*/
size_t allocSize = numOfRes * rowLen + sizeof(tFilePage) + 1;
pRes->pLocalReducer->pResultBuf = (tFilePage *)calloc(1, allocSize);
pRes->pLocalMerger->pResultBuf = (tFilePage *)calloc(1, allocSize);
pRes->pLocalReducer->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalReducer->pResultBuf->data;
pRes->pLocalMerger->pResultBuf->num = numOfRes;
pRes->data = pRes->pLocalMerger->pResultBuf->data;
}
int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
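
Beyond the mechanical SLocalReducer/pLocalReducer to SLocalMerger/pLocalMerge rename in the hunks above, the result object still points its data field into a tFilePage owned by the merger, sized as numOfRes * rowLen + sizeof(tFilePage) + 1. The sketch below mirrors that allocation pattern from tscInitResObjForLocalQuery() with simplified stand-in types; the struct layouts and names are assumptions for illustration, not the real TDengine definitions.

```c
/* Standalone sketch of the merger result-buffer allocation; simplified types. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct tFilePage {    /* simplified: the real struct carries more bookkeeping */
  int64_t num;                /* number of rows stored in this page */
  char    data[];             /* flexible array member holding row data */
} tFilePage;

typedef struct SLocalMerger { /* stand-in for the renamed merger context */
  tFilePage *pResultBuf;
} SLocalMerger;

int main(void) {
  int32_t numOfRes = 4, rowLen = 16;

  SLocalMerger *pMerger = calloc(1, sizeof(SLocalMerger));

  /* one extra byte so sprintf-style writers can place a trailing '\0' */
  size_t allocSize = (size_t)numOfRes * rowLen + sizeof(tFilePage) + 1;
  pMerger->pResultBuf = calloc(1, allocSize);
  pMerger->pResultBuf->num = numOfRes;

  char *resultRows = pMerger->pResultBuf->data;  /* what pRes->data would point at */
  memset(resultRows, 0, (size_t)numOfRes * rowLen);
  printf("allocated %zu bytes for %d rows\n", allocSize, numOfRes);

  free(pMerger->pResultBuf);
  free(pMerger);
  return 0;
}
```
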


@ -703,7 +703,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
STableDataBlocks *dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
sizeof(SSubmitBlk), tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &dataBuf, NULL);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -813,26 +813,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
tscAddEmptyMetaInfo(pQueryInfo);
}
STableMetaInfo *pSTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
code = tscSetTableFullName(pSTableMeterMetaInfo, &sToken, pSql);
STableMetaInfo *pSTableMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
code = tscSetTableFullName(pSTableMetaInfo, &sToken, pSql);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
tstrncpy(pCmd->tagData.name, pSTableMeterMetaInfo->name, sizeof(pCmd->tagData.name));
tNameExtractFullName(&pSTableMetaInfo->name, pCmd->tagData.name);
pCmd->tagData.dataLen = 0;
code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
code = tscGetTableMeta(pSql, pSTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMeterMetaInfo)) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pSTableMetaInfo)) {
return tscInvalidSQLErrMsg(pCmd->payload, "create table only from super table is allowed", sToken.z);
}
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMeterMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMeterMetaInfo->pTableMeta);
SSchema *pTagSchema = tscGetTableTagSchema(pSTableMetaInfo->pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pSTableMetaInfo->pTableMeta);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
@ -840,7 +840,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
SParsedDataColInfo spd = {0};
uint8_t numOfTags = tscGetNumOfTags(pSTableMeterMetaInfo->pTableMeta);
uint8_t numOfTags = tscGetNumOfTags(pSTableMetaInfo->pTableMeta);
spd.numOfCols = numOfTags;
// if specify some tags column
@ -1406,39 +1406,38 @@ typedef struct SImportFileSupport {
FILE *fp;
} SImportFileSupport;
static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRows) {
assert(param != NULL && tres != NULL);
char * tokenBuf = NULL;
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int32_t count = 0;
int32_t maxRows = 0;
FILE * fp = NULL;
SSqlObj *pSql = tres;
SSqlCmd *pCmd = &pSql->cmd;
SImportFileSupport *pSupporter = (SImportFileSupport *) param;
SImportFileSupport *pSupporter = (SImportFileSupport *)param;
SSqlObj *pParentSql = pSupporter->pSql;
FILE *fp = pSupporter->fp;
fp = pSupporter->fp;
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // handle error
assert(taos_errno(pSql) == code);
int32_t code = pSql->res.code;
do {
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t errc = fseek(fp, 0, SEEK_SET);
if (errc < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
} else {
break;
}
}
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
return;
} while (0);
// retry parse data from file and import data from the begining again
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t ret = fseek(fp, 0, SEEK_SET);
if (ret < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
code = TAOS_SYSTEM_ERROR(errno);
goto _error;
}
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
// accumulate the total submit records
@ -1452,28 +1451,32 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
SParsedDataColInfo spd = {.numOfCols = tinfo.numOfColumns};
tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns);
size_t n = 0;
ssize_t readLen = 0;
char * line = NULL;
int32_t count = 0;
int32_t maxRows = 0;
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (pCmd->pTableBlockHashList == NULL) {
pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
if (pCmd->pTableBlockHashList == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
}
STableDataBlocks *pTableDataBlock = NULL;
int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE,
sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
tinfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL);
if (ret != TSDB_CODE_SUCCESS) {
// return ret;
pParentSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows);
char *tokenBuf = calloc(1, 4096);
tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW);
if (tokenBuf == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
while ((readLen = tgetline(&line, &n, fp)) != -1) {
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) {
@ -1501,30 +1504,42 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
}
tfree(tokenBuf);
free(line);
tfree(line);
if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
pParentSql->res.code = code;
if (code == TSDB_CODE_SUCCESS) {
if (count > 0) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code == TSDB_CODE_SUCCESS) {
return;
} else {
goto _error;
}
} else {
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
int32_t v = (code != TSDB_CODE_SUCCESS) ? code : (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
return;
}
} else {
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->fp = pParentSql->fetchFp;
// all data has been sent to vnode, call user function
int32_t v = (pParentSql->res.code != TSDB_CODE_SUCCESS) ? pParentSql->res.code : (int32_t)pParentSql->res.numOfRows;
(*pParentSql->fp)(pParentSql->param, pParentSql, v);
}
_error:
tfree(tokenBuf);
tfree(line);
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
tscAsyncResultOnError(pParentSql);
}
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
void tscImportDataFromFile(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
if (pCmd->command != TSDB_SQL_INSERT) {
return;
@ -1543,12 +1558,11 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
tfree(pSupporter);
tscAsyncResultOnError(pSql);
return;
}
pSupporter->pSql = pSql;
pSupporter->fp = fp;
pSupporter->fp = fp;
parseFileSendDataBlock(pSupporter, pNew, 0);
parseFileSendDataBlock(pSupporter, pNew, TSDB_CODE_SUCCESS);
}
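
The rewritten parseFileSendDataBlock() above funnels every failure through one _error label that frees the token and line buffers, releases the child SSqlObj, closes the file and reports the error once. A minimal sketch of that single-exit cleanup idiom follows; the names are illustrative and POSIX getline() stands in for tgetline().

```c
/* Single-exit cleanup: every failure path sets `code` and jumps to one label that
 * releases whatever has been acquired so far. Illustrative sketch only. */
#define _POSIX_C_SOURCE 200809L   /* for getline() */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_ERR_SYSTEM (-1)
#define DEMO_ERR_NOMEM  (-2)

static int importLines(const char *path) {
  int    code = 0;
  char  *tokenBuf = NULL;
  char  *line = NULL;
  size_t cap = 0;
  FILE  *fp = fopen(path, "r");

  if (fp == NULL) {
    return DEMO_ERR_SYSTEM;       /* nothing acquired yet, return directly */
  }

  tokenBuf = calloc(1, 4096);     /* per-row parse buffer */
  if (tokenBuf == NULL) {
    code = DEMO_ERR_NOMEM;
    goto _error;
  }

  while (getline(&line, &cap, fp) != -1) {
    /* parse one row here; on a parse failure set `code` and goto _error */
  }

_error:                           /* shared cleanup for success and failure */
  free(tokenBuf);
  free(line);
  fclose(fp);
  return code;
}

int main(void) {
  printf("importLines returned %d\n", importLines("/tmp/does-not-exist.csv"));
  return 0;
}
```
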


@ -707,7 +707,7 @@ static int insertStmtBindParam(STscStmt* stmt, TAOS_BIND* bind) {
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
if (ret != 0) {
// todo handle error
}
@ -790,7 +790,7 @@ static int insertStmtExecute(STscStmt* stmt) {
int32_t ret =
tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, sizeof(SSubmitBlk),
pTableMeta->tableInfo.rowSize, pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
pTableMeta->tableInfo.rowSize, &pTableMetaInfo->name, pTableMeta, &pBlock, NULL);
assert(ret == 0);
pBlock->size = sizeof(SSubmitBlk) + pCmd->batchSize * pBlock->rowSize;
SSubmitBlk* pBlk = (SSubmitBlk*) pBlock->pData;


@ -62,7 +62,7 @@ static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
static char* getAccountId(SSqlObj* pSql);
static bool has(SArray* pFieldList, int32_t startIdx, const char* name);
static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken);
static char* getCurrentDBName(SSqlObj* pSql);
static bool hasSpecifyDB(SStrToken* pTableName);
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd);
static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd);
@ -102,8 +102,8 @@ static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryI
static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
static int32_t validateEp(char* ep);
static int32_t validateDNodeConfig(tDCLSQL* pOptions);
static int32_t validateLocalConfig(tDCLSQL* pOptions);
static int32_t validateDNodeConfig(SMiscInfo* pOptions);
static int32_t validateLocalConfig(SMiscInfo* pOptions);
static int32_t validateColumnName(char* name);
static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType);
@ -112,7 +112,7 @@ static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
static bool hasNormalColumnFilter(SQueryInfo* pQueryInfo);
static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql);
static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t optrToString(tSQLExpr* pExpr, char** exprString);
@ -241,7 +241,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
if (!pInfo->valid || terrno == TSDB_CODE_TSC_SQL_SYNTAX_ERROR) {
terrno = TSDB_CODE_SUCCESS; // clear the error number
return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->pzErrMsg);
return tscSQLSyntaxErrMsg(tscGetErrorMsgPayload(pCmd), NULL, pInfo->msg);
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
@ -268,28 +268,27 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg3 = "param name too long";
const char* msg4 = "table is not super table";
SStrToken* pzName = &pInfo->pDCLInfo->a[0];
SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
if ((pInfo->type != TSDB_SQL_DROP_DNODE) && (tscValidateName(pzName) != TSDB_CODE_SUCCESS)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pInfo->type == TSDB_SQL_DROP_DB) {
assert(pInfo->pDCLInfo->nTokens == 1);
code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pzName, NULL, NULL);
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(pInfo->pDCLInfo->nTokens == 1);
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tscSetTableFullName(pTableMetaInfo, pzName, pSql);
if(code != TSDB_CODE_SUCCESS) {
return code;
}
if (pInfo->pDCLInfo->tableType == TSDB_SUPER_TABLE) {
if (pInfo->pMiscInfo->tableType == TSDB_SUPER_TABLE) {
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -302,13 +301,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
pzName->n = strdequote(pzName->z);
strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
} else { // drop user
strncpy(pCmd->payload, pzName->z, pzName->n);
} else { // drop user/account
if (pzName->n >= TSDB_USER_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strncpy(pTableMetaInfo->name, pzName->z, pzName->n);
strncpy(pCmd->payload, pzName->z, pzName->n);
}
break;
@ -316,13 +315,13 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_USE_DB: {
const char* msg = "invalid db name";
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pToken, NULL, NULL);
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pToken);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
@ -347,12 +346,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg1 = "invalid db name";
const char* msg2 = "name too long";
SCreateDBInfo* pCreateDB = &(pInfo->pDCLInfo->dbOpt);
SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname), NULL, NULL);
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname));
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@ -364,15 +363,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
break;
}
case TSDB_SQL_CREATE_DNODE: { // todo hostname
case TSDB_SQL_CREATE_DNODE: {
const char* msg = "invalid host name (ip address)";
if (pInfo->pDCLInfo->nTokens > 1) {
if (taosArrayGetSize(pInfo->pMiscInfo->a) > 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
SStrToken* pIpAddr = &pInfo->pDCLInfo->a[0];
pIpAddr->n = strdequote(pIpAddr->z);
SStrToken* id = taosArrayGet(pInfo->pMiscInfo->a, 0);
id->n = strdequote(id->z);
break;
}
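
Both the DROP DNODE and CREATE DNODE branches above call strdequote() on the user-supplied token before copying it into the command payload. A rough sketch of what such an in-place dequote helper does, assuming one pair of matching single or double quotes; this is an illustrative helper, not the real util function.

```c
/* Strip one pair of matching surrounding quotes in place, return the new length. */
#include <stdio.h>
#include <string.h>

static int demoDequote(char *z) {
  size_t n = strlen(z);
  if (n >= 2 && (z[0] == '\'' || z[0] == '"') && z[n - 1] == z[0]) {
    memmove(z, z + 1, n - 2);   /* drop the opening quote */
    z[n - 2] = '\0';            /* and the closing one */
    return (int)(n - 2);
  }
  return (int)n;                /* not quoted: leave unchanged */
}

int main(void) {
  char ep[] = "'192.168.0.1:6030'";
  int  len = demoDequote(ep);
  printf("%s (%d)\n", ep, len);  /* 192.168.0.1:6030 (16) */
  return 0;
}
```
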
@ -382,8 +381,8 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg2 = "invalid user/account name";
const char* msg3 = "name too long";
SStrToken* pName = &pInfo->pDCLInfo->user.user;
SStrToken* pPwd = &pInfo->pDCLInfo->user.passwd;
SStrToken* pName = &pInfo->pMiscInfo->user.user;
SStrToken* pPwd = &pInfo->pMiscInfo->user.passwd;
if (handlePassword(pCmd, pPwd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
@ -397,7 +396,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SCreateAcctSQL* pAcctOpt = &pInfo->pDCLInfo->acctOpt;
SCreateAcctInfo* pAcctOpt = &pInfo->pMiscInfo->acctOpt;
if (pAcctOpt->stat.n > 0) {
if (pAcctOpt->stat.z[0] == 'r' && pAcctOpt->stat.n == 1) {
} else if (pAcctOpt->stat.z[0] == 'w' && pAcctOpt->stat.n == 1) {
@ -412,10 +411,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_DESCRIBE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -433,10 +432,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return tscGetTableMeta(pSql, pTableMetaInfo);
}
case TSDB_SQL_SHOW_CREATE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -454,11 +453,12 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_SHOW_CREATE_DATABASE: {
const char* msg1 = "invalid database name";
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
SStrToken* pToken = taosArrayGet(pInfo->pMiscInfo->a, 0);
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pToken->n > TSDB_DB_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -470,29 +470,34 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg3 = "invalid dnode ep";
/* validate the ip address */
tDCLSQL* pDCL = pInfo->pDCLInfo;
SMiscInfo* pMiscInfo = pInfo->pMiscInfo;
/* validate the parameter names and options */
if (validateDNodeConfig(pDCL) != TSDB_CODE_SUCCESS) {
if (validateDNodeConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char* pMsg = pCmd->payload;
SCfgDnodeMsg* pCfg = (SCfgDnodeMsg*)pMsg;
pDCL->a[0].n = strdequote(pDCL->a[0].z);
strncpy(pCfg->ep, pDCL->a[0].z, pDCL->a[0].n);
SStrToken* t0 = taosArrayGet(pMiscInfo->a, 0);
SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
t0->n = strdequote(t0->z);
strncpy(pCfg->ep, t0->z, t0->n);
if (validateEp(pCfg->ep) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
strncpy(pCfg->config, pDCL->a[1].z, pDCL->a[1].n);
strncpy(pCfg->config, t1->z, t1->n);
if (pDCL->nTokens == 3) {
pCfg->config[pDCL->a[1].n] = ' '; // add sep
strncpy(&pCfg->config[pDCL->a[1].n + 1], pDCL->a[2].z, pDCL->a[2].n);
if (taosArrayGetSize(pMiscInfo->a) == 3) {
SStrToken* t2 = taosArrayGet(pMiscInfo->a, 2);
pCfg->config[t1->n] = ' '; // add sep
strncpy(&pCfg->config[t1->n + 1], t2->z, t2->n);
}
break;
@ -507,7 +512,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pCmd->command = pInfo->type;
SUserInfo* pUser = &pInfo->pDCLInfo->user;
SUserInfo* pUser = &pInfo->pMiscInfo->user;
SStrToken* pName = &pUser->user;
SStrToken* pPwd = &pUser->passwd;
@ -551,18 +556,22 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_CFG_LOCAL: {
tDCLSQL* pDCL = pInfo->pDCLInfo;
const char* msg = "invalid configure options or values";
SMiscInfo *pMiscInfo = pInfo->pMiscInfo;
const char *msg = "invalid configure options or values";
// validate the parameter names and options
if (validateLocalConfig(pDCL) != TSDB_CODE_SUCCESS) {
if (validateLocalConfig(pMiscInfo) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
strncpy(pCmd->payload, pDCL->a[0].z, pDCL->a[0].n);
if (pDCL->nTokens == 2) {
pCmd->payload[pDCL->a[0].n] = ' '; // add sep
strncpy(&pCmd->payload[pDCL->a[0].n + 1], pDCL->a[1].z, pDCL->a[1].n);
int32_t numOfToken = (int32_t) taosArrayGetSize(pMiscInfo->a);
SStrToken* t = taosArrayGet(pMiscInfo->a, 0);
SStrToken* t1 = taosArrayGet(pMiscInfo->a, 1);
strncpy(pCmd->payload, t->z, t->n);
if (numOfToken == 2) {
pCmd->payload[t->n] = ' '; // add sep
strncpy(&pCmd->payload[t->n + 1], t1->z, t1->n);
}
break;
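
Both the CFG_DNODE and CFG_LOCAL branches above serialize the option and its optional value into a fixed payload as "<option> <value>", inserting a single space separator only when a value token is present. A small self-contained sketch of that assembly; the helper name and buffer size are illustrative.

```c
/* Build "<option>" or "<option> <value>" into a fixed-size payload buffer. */
#include <stdio.h>
#include <string.h>

static void demoBuildCfgPayload(char *payload, size_t cap,
                                const char *option, const char *value) {
  size_t n = strlen(option);
  if (n >= cap) n = cap - 1;            /* truncate option if it cannot fit */
  memcpy(payload, option, n);
  payload[n] = '\0';
  if (value != NULL && value[0] != '\0' && n + 1 + strlen(value) < cap) {
    payload[n] = ' ';                   /* separator between option and value */
    strcpy(&payload[n + 1], value);
  }
}

int main(void) {
  char payload[128] = {0};
  demoBuildCfgPayload(payload, sizeof(payload), "debugFlag", "135");
  printf("%s\n", payload);              /* debugFlag 135 */
  return 0;
}
```
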
@ -893,47 +902,51 @@ int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQu
return TSDB_CODE_SUCCESS;
}
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) {
const char* msg1 = "name too long";
const char* msg2 = "acctId too long";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
// backup the old name in pTableMetaInfo
char oldName[TSDB_TABLE_FNAME_LEN] = {0};
tstrncpy(oldName, pTableMetaInfo->name, tListLen(oldName));
if (hasSpecifyDB(pzTableName)) { // db has been specified in sql string so we ignore current db path
code = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), NULL, pzTableName, NULL);
if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path
code = tNameSetAcctId(&pTableMetaInfo->name, getAccountId(pSql));
if (code != 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_DB|T_NAME_TABLE);
if (code != 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else { // get current DB name first, and then set it into path
SStrToken t = {0};
getCurrentDBName(pSql, &t);
if (t.n == 0) { // current database not available or not specified
code = TSDB_CODE_TSC_DB_NOT_SELECTED;
} else {
code = setObjFullName(pTableMetaInfo->name, NULL, &t, pzTableName, NULL);
if (code != 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
char* t = getCurrentDBName(pSql);
if (strlen(t) == 0) {
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
code = tNameFromString(&pTableMetaInfo->name, t, T_NAME_ACCT | T_NAME_DB);
if (code != 0) {
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
if (pTableName->n >= TSDB_TABLE_NAME_LEN) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
char name[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(name, pTableName->z, pTableName->n);
code = tNameFromString(&pTableMetaInfo->name, name, T_NAME_TABLE);
if (code != 0) {
code = invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (code != TSDB_CODE_SUCCESS) {
return code;
}
/*
* the old name exists and is not equalled to the new name. Release the table meta
* that are corresponding to the old name for the new table name.
*/
if (strlen(oldName) > 0 && strncasecmp(oldName, pTableMetaInfo->name, tListLen(pTableMetaInfo->name)) != 0) {
tscClearTableMetaInfo(pTableMetaInfo);
}
return TSDB_CODE_SUCCESS;
return code;
}
static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) {
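
tscSetTableFullName() now builds the table reference through the structured name helpers (tNameSetAcctId, tNameFromString) instead of concatenating into one char array. The sketch below illustrates the general idea with a made-up DemoName type and parser; it is not the real SName API, only the account/db/table split it implements.

```c
/* Structured name: account + db + table kept as separate fields; flatten on demand. */
#include <stdio.h>
#include <string.h>

typedef struct DemoName {
  char acct[32];
  char db[33];
  char table[193];
} DemoName;

/* Parse either "db.table" or "table"; currentDb is used when no db prefix is given. */
static int demoNameFromString(DemoName *name, const char *str, const char *acctId,
                              const char *currentDb) {
  const char *dot = strchr(str, '.');
  snprintf(name->acct, sizeof(name->acct), "%s", acctId);
  if (dot != NULL) {                      /* db specified in the SQL string */
    snprintf(name->db, sizeof(name->db), "%.*s", (int)(dot - str), str);
    snprintf(name->table, sizeof(name->table), "%s", dot + 1);
  } else {                                /* fall back to the connection's current db */
    if (currentDb == NULL || currentDb[0] == '\0') return -1;  /* db not selected */
    snprintf(name->db, sizeof(name->db), "%s", currentDb);
    snprintf(name->table, sizeof(name->table), "%s", str);
  }
  return 0;
}

/* Flatten back to "acct.db.table" only where a full name is needed. */
static void demoNameExtractFullName(const DemoName *name, char *buf, size_t len) {
  snprintf(buf, len, "%s.%s.%s", name->acct, name->db, name->table);
}

int main(void) {
  DemoName n;
  char full[300];
  if (demoNameFromString(&n, "power.meters", "root", "log") == 0) {
    demoNameExtractFullName(&n, full, sizeof(full));
    printf("%s\n", full);                 /* prints: root.power.meters */
  }
  return 0;
}
```
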
@ -1233,9 +1246,8 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) {
static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; }
static void getCurrentDBName(SSqlObj* pSql, SStrToken* pDBToken) {
pDBToken->z = pSql->pTscObj->db;
pDBToken->n = (uint32_t)strlen(pSql->pTscObj->db);
static char* getCurrentDBName(SSqlObj* pSql) {
return pSql->pTscObj->db;
}
/* length limitation, strstr cannot be applied */
@ -2610,10 +2622,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg6 = "pattern string is empty";
/*
* database prefix in pInfo->pDCLInfo->a[0]
* wildcard in like clause in pInfo->pDCLInfo->a[1]
* database prefix in pInfo->pMiscInfo->a[0]
* wildcard in like clause in pInfo->pMiscInfo->a[1]
*/
SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
SShowInfo* pShowInfo = &pInfo->pMiscInfo->showOpt;
int16_t showType = pShowInfo->showType;
if (showType == TSDB_MGMT_TABLE_TABLE || showType == TSDB_MGMT_TABLE_METRIC || showType == TSDB_MGMT_TABLE_VGROUP) {
// db prefix in tagCond, show table conds in payload
@ -2632,7 +2644,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t ret = setObjFullName(pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken, NULL, NULL);
int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pDbPrefixToken);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
@ -2678,7 +2690,7 @@ int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killType) {
SSqlCmd* pCmd = &pSql->cmd;
pCmd->command = pInfo->type;
SStrToken* idStr = &(pInfo->pDCLInfo->ip);
SStrToken* idStr = &(pInfo->pMiscInfo->id);
if (idStr->n > TSDB_KILL_MSG_LEN) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -2913,7 +2925,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
STableMeta* pTableMeta = NULL;
SSchema* pSchema = NULL;
SSchema s = tscGetTbnameColumnSchema();
SSchema s = tGetTbnameColumnSchema();
int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL;
@ -3436,6 +3448,7 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQ
static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg1 = "invalid join query condition";
const char* msg2 = "invalid table name in join query";
const char* msg3 = "type of join columns must be identical";
const char* msg4 = "invalid column name in join condition";
@ -3461,7 +3474,11 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pLeft->uid = pTableMetaInfo->pTableMeta->id.uid;
pLeft->tagColId = pTagSchema1->colId;
strcpy(pLeft->tableId, pTableMetaInfo->name);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pLeft->tableName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
@ -3473,7 +3490,11 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr*
pRight->uid = pTableMetaInfo->pTableMeta->id.uid;
pRight->tagColId = pTagSchema2->colId;
strcpy(pRight->tableId, pTableMetaInfo->name);
code = tNameExtractFullName(&pTableMetaInfo->name, pRight->tableName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pTagSchema1->type != pTagSchema2->type) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
@ -4050,8 +4071,6 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
SStringBuilder sb1; memset(&sb1, 0, sizeof(sb1));
taosStringBuilderAppendStringLen(&sb1, QUERY_COND_REL_PREFIX_IN, QUERY_COND_REL_PREFIX_IN_LEN);
char db[TSDB_TABLE_FNAME_LEN] = {0};
// remove the duplicated input table names
int32_t num = 0;
char* tableNameString = taosStringBuilderGetResult(sb, NULL);
@ -4067,7 +4086,8 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
}
num = j;
char* name = extractDBName(pTableMetaInfo->name, db);
char name[TSDB_DB_NAME_LEN] = {0};
tNameGetDbName(&pTableMetaInfo->name, name);
SStrToken dbToken = { .type = TK_STRING, .z = name, .n = (uint32_t)strlen(name) };
for (int32_t i = 0; i < num; ++i) {
@ -4814,7 +4834,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
int32_t code = TSDB_CODE_SUCCESS;
SSqlCmd* pCmd = &pSql->cmd;
SAlterTableSQL* pAlterSQL = pInfo->pAlterInfo;
SAlterTableInfo* pAlterSQL = pInfo->pAlterInfo;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
@ -5153,8 +5173,10 @@ int32_t validateEp(char* ep) {
return TSDB_CODE_SUCCESS;
}
int32_t validateDNodeConfig(tDCLSQL* pOptions) {
if (pOptions->nTokens < 2 || pOptions->nTokens > 3) {
int32_t validateDNodeConfig(SMiscInfo* pOptions) {
int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
if (numOfToken < 2 || numOfToken > 3) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -5172,9 +5194,9 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
{"cqDebugFlag", 11},
};
SStrToken* pOptionToken = &pOptions->a[1];
SStrToken* pOptionToken = taosArrayGet(pOptions->a, 1);
if (pOptions->nTokens == 2) {
if (numOfToken == 2) {
// reset log and reset query cache does not need value
for (int32_t i = 0; i < tokenLogEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
@ -5184,7 +5206,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
}
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
SStrToken* pValToken = &pOptions->a[2];
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
@ -5195,14 +5217,14 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_SUCCESS;
} else if ((strncasecmp(cfgOptions[tokenMonitor].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenMonitor].len == pOptionToken->n)) {
SStrToken* pValToken = &pOptions->a[2];
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}
return TSDB_CODE_SUCCESS;
} else {
SStrToken* pValToken = &pOptions->a[2];
SStrToken* pValToken = taosArrayGet(pOptions->a, 2);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 0 || val > 256) {
@ -5213,8 +5235,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
for (int32_t i = tokenDebugFlag; i < tokenDebugFlagEnd; ++i) {
const SDNodeDynConfOption* pOption = &cfgOptions[i];
// options is valid
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
/* options is valid */
return TSDB_CODE_SUCCESS;
}
}
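
validateDNodeConfig() and validateLocalConfig() now read their tokens through taosArrayGetSize()/taosArrayGet(), while the matching rule itself stays the same: an option matches only when strncasecmp() succeeds and the token length equals the registered length, so a prefix such as "monitor" cannot match a longer option name. A self-contained sketch of that rule; the option table and names are illustrative.

```c
/* Match a config token against {name, len} entries: case-insensitive compare plus
 * exact-length check, mirroring the condition used in the hunks above. */
#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strncasecmp (POSIX) */

typedef struct DemoCfgOption {
  const char *name;
  int         len;
} DemoCfgOption;

static int demoMatchOption(const DemoCfgOption *opts, int numOfOpts,
                           const char *tok, int tokLen) {
  for (int i = 0; i < numOfOpts; ++i) {
    if (opts[i].len == tokLen && strncasecmp(opts[i].name, tok, (size_t)tokLen) == 0) {
      return i;          /* index of the matched option */
    }
  }
  return -1;             /* no such option */
}

int main(void) {
  const DemoCfgOption opts[] = {{"resetLog", 8}, {"debugFlag", 9}, {"monitor", 7}};
  printf("%d\n", demoMatchOption(opts, 3, "DEBUGFLAG", 9));  /* 1  */
  printf("%d\n", demoMatchOption(opts, 3, "debug", 5));      /* -1 */
  return 0;
}
```
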
@ -5223,17 +5245,18 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_TSC_INVALID_SQL;
}
int32_t validateLocalConfig(tDCLSQL* pOptions) {
if (pOptions->nTokens < 1 || pOptions->nTokens > 2) {
int32_t validateLocalConfig(SMiscInfo* pOptions) {
int32_t numOfToken = (int32_t) taosArrayGetSize(pOptions->a);
if (numOfToken < 1 || numOfToken > 2) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SDNodeDynConfOption LOCAL_DYNAMIC_CFG_OPTIONS[6] = {{"resetLog", 8}, {"rpcDebugFlag", 12}, {"tmrDebugFlag", 12},
{"cDebugFlag", 10}, {"uDebugFlag", 10}, {"debugFlag", 9}};
SStrToken* pOptionToken = &pOptions->a[0];
SStrToken* pOptionToken = taosArrayGet(pOptions->a, 0);
if (pOptions->nTokens == 1) {
if (numOfToken == 1) {
// reset log does not need value
for (int32_t i = 0; i < 1; ++i) {
SDNodeDynConfOption* pOption = &LOCAL_DYNAMIC_CFG_OPTIONS[i];
@ -5242,7 +5265,7 @@ int32_t validateLocalConfig(tDCLSQL* pOptions) {
}
}
} else {
SStrToken* pValToken = &pOptions->a[1];
SStrToken* pValToken = taosArrayGet(pOptions->a, 1);
int32_t val = strtol(pValToken->z, NULL, 10);
if (val < 131 || val > 199) {
@ -5394,7 +5417,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn
return TSDB_CODE_SUCCESS;
}
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
const char* msg = "invalid number of options";
pMsg->daysToKeep = htonl(-1);
@ -5432,7 +5455,7 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* p
return TSDB_CODE_SUCCESS;
}
static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDbInfo) {
static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDbInfo) {
const char* msg = "invalid time precision";
pMsg->precision = TSDB_TIME_PRECISION_MILLI; // millisecond by default
@ -5456,7 +5479,7 @@ static int32_t setTimePrecision(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDBInfo
return TSDB_CODE_SUCCESS;
}
static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDbInfo* pCreateDb) {
pMsg->maxTables = htonl(-1); // max tables can not be set anymore
pMsg->cacheBlockSize = htonl(pCreateDb->cacheBlockSize);
pMsg->totalBlocks = htonl(pCreateDb->numOfBlocks);
@ -5474,7 +5497,7 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
pMsg->cacheLastRow = pCreateDb->cachelast;
}
int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDbInfo* pCreateDbSql) {
SCreateDbMsg* pMsg = (SCreateDbMsg *)(pCmd->payload);
setCreateDBOption(pMsg, pCreateDbSql);
@ -6235,9 +6258,9 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// get table meta from mnode
tstrncpy(pCreateTableInfo->tagdata.name, pStableMetaInfo->name, tListLen(pCreateTableInfo->tagdata.name));
SArray* pList = pCreateTableInfo->pTagVals;
code = tNameExtractFullName(&pStableMetaInfo->name, pCreateTableInfo->tagdata.name);
SArray* pList = pCreateTableInfo->pTagVals;
code = tscGetTableMeta(pSql, pStableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -6315,7 +6338,11 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return ret;
}
pCreateTableInfo->fullname = strndup(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN);
pCreateTableInfo->fullname = calloc(1, tNameLen(&pTableMetaInfo->name) + 1);
ret = tNameExtractFullName(&pTableMetaInfo->name, pCreateTableInfo->fullname);
if (ret != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
return TSDB_CODE_SUCCESS;
@ -6562,7 +6589,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
// has no table alias name
if (memcmp(pTableItem->pz, p1->pVar.pz, p1->pVar.nLen) == 0) {
extractTableName(pTableMetaInfo1->name, pTableMetaInfo1->aliasName);
strncpy(pTableMetaInfo1->aliasName, tNameGetTableName(&pTableMetaInfo1->name), tListLen(pTableMetaInfo->aliasName));
} else {
tstrncpy(pTableMetaInfo1->aliasName, p1->pVar.pz, sizeof(pTableMetaInfo1->aliasName));
}


@ -106,6 +106,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);


@ -309,7 +309,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
return;
}
if (pEpSet) { // todo update this
if (pEpSet) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
@ -461,16 +461,20 @@ int doProcessSql(SSqlObj *pSql) {
}
int tscProcessSql(SSqlObj *pSql) {
char *name = NULL;
char name[TSDB_TABLE_FNAME_LEN] = {0};
SSqlCmd *pCmd = &pSql->cmd;
uint32_t type = 0;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
STableMetaInfo *pTableMetaInfo = NULL;
uint32_t type = 0;
if (pQueryInfo != NULL) {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
name = (pTableMetaInfo != NULL)? pTableMetaInfo->name:NULL;
if (pTableMetaInfo != NULL) {
tNameExtractFullName(&pTableMetaInfo->name, name);
}
type = pQueryInfo->type;
// while numOfTables equals to 0, it must be Heartbeat
@ -669,10 +673,11 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
pMsg += sizeof(STableIdInfo);
}
}
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), pTableMetaInfo->name,
pTableMeta->id.tid, pTableMeta->id.uid);
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscDebug("%p vgId:%d, query on table:%s, tid:%d, uid:%" PRIu64, pSql, htonl(pQueryMsg->head.vgId), n, pTableMeta->id.tid, pTableMeta->id.uid);
return pMsg;
}
@ -755,8 +760,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSchema *pColSchema = &pSchema[pCol->colIndex.columnIndex];
if (pCol->colIndex.columnIndex >= tscGetNumOfColumns(pTableMeta) || !isValidDataType(pColSchema->type)) {
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscError("%p tid:%d uid:%" PRIu64" id:%s, column index out of range, numOfColumns:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, tscGetNumOfColumns(pTableMeta), pCol->colIndex.columnIndex,
pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -942,9 +950,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if ((pCol->colIndex.columnIndex >= numOfTagColumns || pCol->colIndex.columnIndex < -1) ||
(!isValidDataType(pColSchema->type))) {
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
tscError("%p tid:%d uid:%" PRIu64 " id:%s, tag index out of range, totalCols:%d, numOfTags:%d, index:%d, column name:%s",
pSql, pTableMeta->id.tid, pTableMeta->id.uid, pTableMetaInfo->name, total, numOfTagColumns,
pCol->colIndex.columnIndex, pColSchema->name);
pSql, pTableMeta->id.tid, pTableMeta->id.uid, n, total, numOfTagColumns, pCol->colIndex.columnIndex, pColSchema->name);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -1021,7 +1031,8 @@ int32_t tscBuildCreateDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
assert(pCmd->numOfClause == 1);
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pCreateDbMsg->db, pTableMetaInfo->name, sizeof(pCreateDbMsg->db));
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateDbMsg->db);
assert(code == TSDB_CODE_SUCCESS);
return TSDB_CODE_SUCCESS;
}
@ -1035,7 +1046,9 @@ int32_t tscBuildCreateDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SCreateDnodeMsg *pCreate = (SCreateDnodeMsg *)pCmd->payload;
strncpy(pCreate->ep, pInfo->pDCLInfo->a[0].z, pInfo->pDCLInfo->a[0].n);
SStrToken* t0 = taosArrayGet(pInfo->pMiscInfo->a, 0);
strncpy(pCreate->ep, t0->z, t0->n);
pCmd->msgType = TSDB_MSG_TYPE_CM_CREATE_DNODE;
@ -1052,13 +1065,13 @@ int32_t tscBuildAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateAcctMsg *pAlterMsg = (SCreateAcctMsg *)pCmd->payload;
SStrToken *pName = &pInfo->pDCLInfo->user.user;
SStrToken *pPwd = &pInfo->pDCLInfo->user.passwd;
SStrToken *pName = &pInfo->pMiscInfo->user.user;
SStrToken *pPwd = &pInfo->pMiscInfo->user.passwd;
strncpy(pAlterMsg->user, pName->z, pName->n);
strncpy(pAlterMsg->pass, pPwd->z, pPwd->n);
SCreateAcctSQL *pAcctOpt = &pInfo->pDCLInfo->acctOpt;
SCreateAcctInfo *pAcctOpt = &pInfo->pMiscInfo->acctOpt;
pAlterMsg->cfg.maxUsers = htonl(pAcctOpt->maxUsers);
pAlterMsg->cfg.maxDbs = htonl(pAcctOpt->maxDbs);
@ -1098,7 +1111,7 @@ int32_t tscBuildUserMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCreateUserMsg *pAlterMsg = (SCreateUserMsg *)pCmd->payload;
SUserInfo *pUser = &pInfo->pDCLInfo->user;
SUserInfo *pUser = &pInfo->pMiscInfo->user;
strncpy(pAlterMsg->user, pUser->user.z, pUser->user.n);
pAlterMsg->flag = (int8_t)pUser->type;
@ -1138,8 +1151,11 @@ int32_t tscBuildDropDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SDropDbMsg *pDropDbMsg = (SDropDbMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropDbMsg->db, pTableMetaInfo->name, sizeof(pDropDbMsg->db));
pDropDbMsg->ignoreNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pDropDbMsg->db);
assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->name.type == TSDB_DB_NAME_T);
pDropDbMsg->ignoreNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DB;
return TSDB_CODE_SUCCESS;
@ -1156,15 +1172,19 @@ int32_t tscBuildDropTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMDropTableMsg *pDropTableMsg = (SCMDropTableMsg*)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pDropTableMsg->tableFname, pTableMetaInfo->name);
pDropTableMsg->igNotExists = pInfo->pDCLInfo->existsCheck ? 1 : 0;
tNameExtractFullName(&pTableMetaInfo->name, pDropTableMsg->name);
pDropTableMsg->igNotExists = pInfo->pMiscInfo->existsCheck ? 1 : 0;
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_TABLE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
char dnodeEp[TSDB_EP_LEN] = {0};
tstrncpy(dnodeEp, pCmd->payload, TSDB_EP_LEN);
pCmd->payloadLen = sizeof(SDropDnodeMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
@ -1172,43 +1192,28 @@ int32_t tscBuildDropDnodeMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SDropDnodeMsg * pDrop = (SDropDnodeMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDrop->ep, pTableMetaInfo->name, sizeof(pDrop->ep));
tstrncpy(pDrop->ep, dnodeEp, tListLen(pDrop->ep));
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_DNODE;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropUserMsg(SSqlObj *pSql, SSqlInfo * UNUSED_PARAM(pInfo)) {
int32_t tscBuildDropUserAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
char user[TSDB_USER_LEN] = {0};
tstrncpy(user, pCmd->payload, TSDB_USER_LEN);
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_USER;
pCmd->msgType = (pInfo->type == TSDB_SQL_DROP_USER)? TSDB_MSG_TYPE_CM_DROP_USER:TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildDropAcctMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SDropUserMsg);
pCmd->msgType = TSDB_MSG_TYPE_CM_DROP_ACCT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SDropUserMsg * pDropMsg = (SDropUserMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pDropMsg->user, pTableMetaInfo->name, sizeof(pDropMsg->user));
SDropUserMsg *pDropMsg = (SDropUserMsg *)pCmd->payload;
tstrncpy(pDropMsg->user, user, tListLen(user));
return TSDB_CODE_SUCCESS;
}
@ -1224,7 +1229,7 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SUseDbMsg *pUseDbMsg = (SUseDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
strcpy(pUseDbMsg->db, pTableMetaInfo->name);
tNameExtractFullName(&pTableMetaInfo->name, pUseDbMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_USE_DB;
return TSDB_CODE_SUCCESS;
@ -1244,14 +1249,14 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SShowMsg *pShowMsg = (SShowMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
size_t nameLen = strlen(pTableMetaInfo->name);
if (nameLen > 0) {
tstrncpy(pShowMsg->db, pTableMetaInfo->name, sizeof(pShowMsg->db)); // prefix is set here
} else {
if (tNameIsEmpty(&pTableMetaInfo->name)) {
tstrncpy(pShowMsg->db, pObj->db, sizeof(pShowMsg->db));
} else {
tNameGetFullDbName(&pTableMetaInfo->name, pShowMsg->db);
}
SShowInfo *pShowInfo = &pInfo->pDCLInfo->showOpt;
SShowInfo *pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowMsg->type = pShowInfo->showType;
if (pShowInfo->showType != TSDB_MGMT_TABLE_VNODES) {
@ -1310,12 +1315,12 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int msgLen = 0;
SSchema * pSchema;
int size = 0;
int msgLen = 0;
int size = 0;
SSchema *pSchema;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
// Reallocate the payload size
@ -1346,11 +1351,10 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(SCreateTableMsg);
SCreatedTableInfo* p = taosArrayGet(list, i);
strcpy(pCreate->tableFname, p->fullname);
strcpy(pCreate->tableName, p->fullname);
pCreate->igExists = (p->igExist)? 1 : 0;
// use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(p->fullname, pCreate->db);
pMsg = serializeTagData(&p->tagdata, pMsg);
int32_t len = (int32_t)(pMsg - (char*) pCreate);
@ -1359,10 +1363,8 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
} else { // create (super) table
pCreateTableMsg->numOfTables = htonl(1); // only one table will be created
strcpy(pCreateMsg->tableFname, pTableMetaInfo->name);
// use dbinfo from table id without modifying current db info
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db);
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateMsg->tableName);
assert(code == 0);
SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo;
@ -1420,7 +1422,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;
SAlterTableInfo *pAlterInfo = pInfo->pAlterInfo;
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
@ -1428,9 +1430,8 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SAlterTableMsg *pAlterTableMsg = (SAlterTableMsg *)pCmd->payload;
tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pAlterTableMsg->db);
strcpy(pAlterTableMsg->tableFname, pTableMetaInfo->name);
tNameExtractFullName(&pTableMetaInfo->name, pAlterTableMsg->tableFname);
pAlterTableMsg->type = htons(pAlterInfo->type);
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
@ -1485,7 +1486,7 @@ int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SAlterDbMsg *pAlterDbMsg = (SAlterDbMsg* )pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tstrncpy(pAlterDbMsg->db, pTableMetaInfo->name, sizeof(pAlterDbMsg->db));
tNameExtractFullName(&pTableMetaInfo->name, pAlterDbMsg->db);
return TSDB_CODE_SUCCESS;
}
@ -1623,13 +1624,17 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pInfoMsg->tableFname);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
STableInfoMsg *pInfoMsg = (STableInfoMsg *)pCmd->payload;
strcpy(pInfoMsg->tableFname, pTableMetaInfo->name);
pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg);
@ -1686,33 +1691,6 @@ int tscBuildMultiMeterMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return 0;
}
//static UNUSED_FUNC int32_t tscEstimateMetricMetaMsgSize(SSqlCmd *pCmd) {
//// const int32_t defaultSize =
//// minMsgSize() + sizeof(SSuperTableMetaMsg) + sizeof(SMgmtHead) + sizeof(int16_t) * TSDB_MAX_TAGS;
//// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
////
//// int32_t n = 0;
//// size_t size = taosArrayGetSize(pQueryInfo->tagCond.pCond);
//// for (int32_t i = 0; i < size; ++i) {
//// assert(0);
////// n += strlen(pQueryInfo->tagCond.cond[i].cond);
//// }
////
//// int32_t tagLen = n * TSDB_NCHAR_SIZE;
//// if (pQueryInfo->tagCond.tbnameCond.cond != NULL) {
//// tagLen += strlen(pQueryInfo->tagCond.tbnameCond.cond) * TSDB_NCHAR_SIZE;
//// }
////
//// int32_t joinCondLen = (TSDB_TABLE_FNAME_LEN + sizeof(int16_t)) * 2;
//// int32_t elemSize = sizeof(SSuperTableMetaElemMsg) * pQueryInfo->numOfTables;
////
//// int32_t colSize = pQueryInfo->groupbyExpr.numOfGroupCols*sizeof(SColIndex);
////
//// int32_t len = tagLen + joinCondLen + elemSize + colSize + defaultSize;
////
//// return MAX(len, TSDB_DEFAULT_PAYLOAD_SIZE);
//}
int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
@ -1725,9 +1703,10 @@ int tscBuildSTableVgroupMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, i);
size_t size = sizeof(pTableMetaInfo->name);
tstrncpy(pMsg, pTableMetaInfo->name, size);
pMsg += size;
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pMsg);
assert(code == TSDB_CODE_SUCCESS);
pMsg += TSDB_TABLE_FNAME_LEN;
}
pCmd->msgType = TSDB_MSG_TYPE_CM_STABLE_VGROUP;
@ -1827,7 +1806,6 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(i == 0);
}
assert(isValidDataType(pSchema->type));
pSchema++;
}
@ -1835,8 +1813,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
assert(pTableMetaInfo->pTableMeta == NULL);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (!isValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("%p invalid table meta from mnode, name:%s", pSql, pTableMetaInfo->name);
if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) {
tscError("%p invalid table meta from mnode, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -1854,11 +1832,19 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
tfree(pSupTableMeta);
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), cMeta, sizeof(CChildTableMeta));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashPut(tscTableMetaInfo, name, strlen(name), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), pTableMeta, s);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashPut(tscTableMetaInfo, name, strlen(name), pTableMeta, s);
}
// update the vgroupInfo if needed
@ -1877,9 +1863,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
}
}
tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, pTableMetaInfo->name);
tscDebug("%p recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid,
tNameGetTableName(&pTableMetaInfo->name));
free(pTableMeta);
return TSDB_CODE_SUCCESS;
}
@ -2174,9 +2161,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
int tscProcessUseDbRsp(SSqlObj *pSql) {
STscObj * pObj = pSql->pTscObj;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
tstrncpy(pObj->db, pTableMetaInfo->name, sizeof(pObj->db));
return 0;
return tNameExtractFullName(&pTableMetaInfo->name, pObj->db);
}
int tscProcessDropDbRsp(SSqlObj *pSql) {
@ -2189,9 +2174,11 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
//The cached tableMeta is expired in this case, so clean it in hash table
taosHashRemove(tscTableMetaInfo, pTableMetaInfo->name, strnlen(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
(int32_t) taosHashGetSize(tscTableMetaInfo));
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, name, (int32_t) taosHashGetSize(tscTableMetaInfo));
pTableMetaInfo->pTableMeta = NULL;
return 0;
@ -2200,7 +2187,9 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
tscDebug("%p remove tableMeta in hashMap after alter-table: %s", pSql, name);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
@ -2239,6 +2228,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
assert(pRes->rspLen >= sizeof(SRetrieveTableRsp));
SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
if (pRetrieve == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -2319,7 +2310,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
STableMetaInfo *pNewMeterMetaInfo = tscAddEmptyMetaInfo(pNewQueryInfo);
assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name));
tNameAssign(&pNewMeterMetaInfo->name, &pTableMetaInfo->name);
if (pSql->cmd.autoCreated) {
int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData);
@ -2350,7 +2341,8 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
}
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(strlen(pTableMetaInfo->name) != 0);
assert(tIsValidName(&pTableMetaInfo->name));
tfree(pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
@ -2358,15 +2350,18 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
int32_t len = (int32_t) strlen(pTableMetaInfo->name);
taosHashGetClone(tscTableMetaInfo, pTableMetaInfo->name, len, NULL, pTableMetaInfo->pTableMeta, -1);
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, name);
size_t len = strlen(name);
taosHashGetClone(tscTableMetaInfo, name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, pTableMetaInfo->name);
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2394,16 +2389,24 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
const char* name = pTableMetaInfo->name;
char name[TSDB_TABLE_FNAME_LEN] = {0};
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, name);
if (code != TSDB_CODE_SUCCESS) {
tscError("%p failed to generate the table full name", pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMeta) {
tscDebug("%p update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql, name,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
// remove stored tableMeta info in hash table
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
size_t len = strlen(name);
taosHashRemove(tscTableMetaInfo, name, len);
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2444,8 +2447,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta* pTableMeta = tscTableMetaClone(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
STableMeta* pTableMeta = tscTableMetaDup(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, &pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
}
if ((code = tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) != TSDB_CODE_SUCCESS) {
@ -2485,8 +2488,8 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_ALTER_ACCT] = tscBuildAcctMsg;
tscBuildMsg[TSDB_SQL_CREATE_TABLE] = tscBuildCreateTableMsg;
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;


@ -110,6 +110,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
rpcClose(pDnodeConn);
free(pObj->tscCorMgmtEpSet);
free(pObj);
return NULL;
}
memcpy(pObj->tscCorMgmtEpSet, &corMgmtEpSet, sizeof(SRpcCorEpSet));
@ -995,7 +996,8 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
return code;
}
if (payloadLen + strlen(pTableMetaInfo->name) + 128 >= pCmd->allocSize) {
int32_t xlen = tNameLen(&pTableMetaInfo->name);
if (payloadLen + xlen + 128 >= pCmd->allocSize) {
char *pNewMem = realloc(pCmd->payload, pCmd->allocSize + tblListLen);
if (pNewMem == NULL) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -1008,7 +1010,9 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
pMsg = pCmd->payload;
}
payloadLen += sprintf(pMsg + payloadLen, "%s,", pTableMetaInfo->name);
char n[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(&pTableMetaInfo->name, n);
payloadLen += sprintf(pMsg + payloadLen, "%s,", n);
}
*(pMsg + payloadLen) = '\0';


@ -104,7 +104,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
// failed to get table Meta or vgroup list, retry in 10sec.
if (code == TSDB_CODE_SUCCESS) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, pTableMetaInfo->name);
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name));
pSql->fp = tscProcessStreamQueryCallback;
pSql->fetchFp = tscProcessStreamQueryCallback;
@ -191,8 +191,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
char* name = pTableMetaInfo->name;
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
assert(0);
// char* name = pTableMetaInfo->name;
// taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@ -291,8 +292,8 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
pStream->stime += 1;
}
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
pStream->numOfRes);
// tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
// pStream->numOfRes);
tfree(pTableMetaInfo->pTableMeta);
@ -555,8 +556,8 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
// tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
// pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) {


@ -534,7 +534,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
tscDebug("%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, 0, pTableMetaInfo->vgroupIndex, pQueryInfo->type, taosArrayGetSize(pQueryInfo->exprList),
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
}
//prepare the subqueries object failed, abort
@ -730,7 +730,7 @@ static void issueTSCompQuery(SSqlObj* pSql, SJoinSupporter* pSupporter, SSqlObj*
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, numOfVgroups:%d, type:%d, ts_comp query to retrieve timestamps, "
"numOfExpr:%" PRIzu ", colList:%" PRIzu ", numOfOutputFields:%d, name:%s",
pParent, pSql, 0, pTableMetaInfo->vgroupIndex, pTableMetaInfo->vgroupList->numOfVgroups, pQueryInfo->type,
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, pTableMetaInfo->name);
tscSqlExprNumOfExprs(pQueryInfo), numOfCols, pQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pTableMetaInfo->name));
tscProcessSql(pSql);
}
@ -951,10 +951,10 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2);
SSqlObj* psub1 = pParentSql->pSubs[0];
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo1->pVgroupTables);
((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables);
SSqlObj* psub2 = pParentSql->pSubs[1];
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoClone(pTableMetaInfo2->pVgroupTables);
((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables);
pParentSql->subState.numOfSub = 2;
@ -1636,7 +1636,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%d, transfer to tid_tag query to retrieve (tableId, tags), "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, tagIndex:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, pNewQueryInfo->pTableMetaInfo[0]->name);
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, colIndex.columnIndex, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
} else {
SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = 1};
SColumnIndex colIndex = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
@ -1671,7 +1671,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
"%p subquery:%p tableIndex:%d, vgroupIndex:%d, type:%u, transfer to ts_comp query to retrieve timestamps, "
"exprInfo:%" PRIzu ", colList:%" PRIzu ", fieldsInfo:%d, name:%s",
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, pNewQueryInfo->pTableMetaInfo[0]->name);
numOfCols, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pNewQueryInfo->pTableMetaInfo[0]->name));
}
} else {
assert(0);
@ -2133,7 +2133,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
tscClearInterpInfo(pPQueryInfo);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscCreateLocalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscDebug("%p build loser tree completed", pParentSql);
pParentSql->res.precision = pSql->res.precision;
@ -2421,7 +2421,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscFreeQueryInfo(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
subquerySetState(pSql, &pParentObj->subState, i, 0);
@ -2434,7 +2434,8 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscDebug("%p cleanup %d tableMeta in hashTable", pParentObj, pParentObj->cmd.numOfTables);
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
char* name = pParentObj->cmd.pTableNameList[i];
char name[TSDB_TABLE_FNAME_LEN] = {0};
tNameExtractFullName(pParentObj->cmd.pTableNameList[i], name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}


@ -89,21 +89,6 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
return true;
}
// todo refactor, extract methods and move the common module
void tscGetDBInfoFromTableFullName(char* tableId, char* db) {
char* st = strstr(tableId, TS_PATH_DELIMITER);
if (st != NULL) {
char* end = strstr(st + 1, TS_PATH_DELIMITER);
if (end != NULL) {
memcpy(db, tableId, (end - tableId));
db[end - tableId] = 0;
return;
}
}
db[0] = 0;
}
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
@ -420,7 +405,7 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd) {
}
void tscFreeSqlResult(SSqlObj* pSql) {
tscDestroyLocalReducer(pSql);
tscDestroyLocalMerger(pSql);
SSqlRes* pRes = &pSql->res;
tscDestroyResPointerInfo(pRes);
@ -612,15 +597,13 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// todo refactor
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
tNameAssign(&pTableMetaInfo->name, &pDataBlock->tableName);
if (pTableMetaInfo->pTableMeta != NULL) {
tfree(pTableMetaInfo->pTableMeta);
}
pTableMetaInfo->pTableMeta = tscTableMetaClone(pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableName, tListLen(pDataBlock->tableName)) == 0);
pTableMetaInfo->pTableMeta = tscTableMetaDup(pDataBlock->pTableMeta);
}
/*
@ -655,7 +638,7 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
* @param dataBlocks
* @return
*/
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name,
STableMeta* pTableMeta, STableDataBlocks** dataBlocks) {
STableDataBlocks* dataBuf = (STableDataBlocks*)calloc(1, sizeof(STableDataBlocks));
if (dataBuf == NULL) {
@ -683,18 +666,18 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
tstrncpy(dataBuf->tableName, name, sizeof(dataBuf->tableName));
tNameAssign(&dataBuf->tableName, name);
// Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaClone(pTableMeta);
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta,
STableDataBlocks** dataBlocks, SArray* pBlockList) {
int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize,
SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks, SArray* pBlockList) {
*dataBlocks = NULL;
STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id));
if (t1 != NULL) {
@ -702,7 +685,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
}
if (*dataBlocks == NULL) {
int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, tableId, pTableMeta, dataBlocks);
int32_t ret = tscCreateDataBlock((size_t)size, rowSize, startOffset, name, pTableMeta, dataBlocks);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@ -803,7 +786,7 @@ static void extractTableNameList(SSqlCmd* pCmd, bool freeBlockMap) {
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
pCmd->pTableNameList[i++] = strndup(pBlocks->tableName, TSDB_TABLE_FNAME_LEN);
pCmd->pTableNameList[i++] = tNameDup(&pBlocks->tableName);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
@ -828,7 +811,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
INSERT_HEAD_SIZE, 0, pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
taosHashCleanup(pVnodeDataBlockHashList);
@ -861,8 +844,8 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, bool freeBlockMap) {
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("%p name:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableName,
tscDebug("%p name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, tNameGetTableName(&pOneTableBlock->tableName),
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@ -1310,7 +1293,7 @@ SColumn* tscColumnClone(const SColumn* src) {
dst->colIndex = src->colIndex;
dst->numOfFilters = src->numOfFilters;
dst->filterInfo = tscFilterInfoClone(src->filterInfo, src->numOfFilters);
dst->filterInfo = tFilterInfoDup(src->filterInfo, src->numOfFilters);
return dst;
}
@ -1816,10 +1799,10 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) {
info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn);
}
info->itemList = taosArrayClone(pInfo->itemList);
info->itemList = taosArrayDup(pInfo->itemList);
}
SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) {
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) {
if (pVgroupTables == NULL) {
return NULL;
}
@ -1850,7 +1833,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) {
tfree(pQueryInfo->pTableMetaInfo);
}
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables) {
void* pAlloc = realloc(pQueryInfo->pTableMetaInfo, (pQueryInfo->numOfTables + 1) * POINTER_BYTES);
if (pAlloc == NULL) {
@ -1868,7 +1851,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
pQueryInfo->pTableMetaInfo[pQueryInfo->numOfTables] = pTableMetaInfo;
if (name != NULL) {
tstrncpy(pTableMetaInfo->name, name, sizeof(pTableMetaInfo->name));
tNameAssign(&pTableMetaInfo->name, name);
}
pTableMetaInfo->pTableMeta = pTableMeta;
@ -1887,7 +1870,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
tscColumnListCopy(pTableMetaInfo->tagColList, pTagCols, -1);
}
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoClone(pVgroupTables);
pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables);
pQueryInfo->numOfTables += 1;
return pTableMetaInfo;
@ -1965,7 +1948,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, __async_cb_func_t fp, void* param, in
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
tscAddTableMetaInfo(pQueryInfo, &pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
@ -2070,7 +2053,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->groupbyExpr = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.columnInfo != NULL) {
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayClone(pQueryInfo->groupbyExpr.columnInfo);
pNewQueryInfo->groupbyExpr.columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
if (pNewQueryInfo->groupbyExpr.columnInfo == NULL) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
@ -2121,27 +2104,26 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA;
char* name = pTableMetaInfo->name;
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = tscTableMetaClone(pTableMetaInfo->pTableMeta);
STableMeta* pTableMeta = tscTableMetaDup(pTableMetaInfo->pTableMeta);
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = tscTableMetaClone(pPrevInfo->pTableMeta);
STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pTableMetaInfo->pVgroupTables);
}
// this case cannot happen
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, name);
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, tNameGetTableName(&pTableMetaInfo->name));
if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
@ -2166,7 +2148,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
"%p new subquery:%p, tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu ","
"fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64,
pSql, pNew, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscSqlExprNumOfExprs(pNewQueryInfo),
size, pNewQueryInfo->fieldsInfo.numOfOutput, pFinalInfo->name, pNewQueryInfo->window.skey,
size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey,
pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit);
tscPrintSelectClause(pNew, 0);
@ -2203,7 +2185,7 @@ void tscDoQuery(SSqlObj* pSql) {
}
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
tscProcessMultiVnodesImportFromFile(pSql);
tscImportDataFromFile(pSql);
} else {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
uint16_t type = pQueryInfo->type;
@ -2303,7 +2285,6 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
}
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
@ -2705,7 +2686,7 @@ uint32_t tscGetTableMetaMaxSize() {
return sizeof(STableMeta) + TSDB_MAX_COLUMNS * sizeof(SSchema);
}
STableMeta* tscTableMetaClone(STableMeta* pTableMeta) {
STableMeta* tscTableMetaDup(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
uint32_t size = tscGetTableMetaSize(pTableMeta);
STableMeta* p = calloc(1, size);


@ -21,6 +21,20 @@ typedef struct SColumnInfoData {
void* pData; // the corresponding block data in memory
} SColumnInfoData;
#define TSDB_DB_NAME_T 1
#define TSDB_TABLE_NAME_T 2
#define T_NAME_ACCT 0x1u
#define T_NAME_DB 0x2u
#define T_NAME_TABLE 0x4u
typedef struct SName {
uint8_t type; //db_name_t, table_name_t
char acctId[TSDB_ACCT_ID_LEN];
char dbname[TSDB_DB_NAME_LEN];
char tname[TSDB_TABLE_NAME_LEN];
} SName;
void extractTableName(const char *tableId, char *name);
char* extractDBName(const char *tableId, char *name);
@ -35,9 +49,9 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters);
SSchema tscGetTbnameColumnSchema();
SSchema tGetTbnameColumnSchema();
/**
* check if the schema is valid or not, including following aspects:
@ -51,6 +65,28 @@ SSchema tscGetTbnameColumnSchema();
* @param numOfCols
* @return
*/
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
int32_t tNameExtractFullName(const SName* name, char* dst);
int32_t tNameLen(const SName* name);
SName* tNameDup(const SName* name);
bool tIsValidName(const SName* name);
const char* tNameGetTableName(const SName* name);
int32_t tNameGetDbName(const SName* name, char* dst);
int32_t tNameGetFullDbName(const SName* name, char* dst);
bool tNameIsEmpty(const SName* name);
void tNameAssign(SName* dst, const SName* src);
int32_t tNameFromString(SName* dst, const char* str, uint32_t type);
int32_t tNameSetAcctId(SName* dst, const char* acct);
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken);
#endif // TDENGINE_NAME_H
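
A minimal usage sketch of the SName API declared above (not part of this commit), assuming the TDengine client headers and the TSDB_* length constants are on the include path; the literal name "root.db0.tb1" and the helper sname_demo() are invented for illustration:

#include <stdio.h>
#include "tname.h"   // SName, tNameFromString, tNameExtractFullName, ...

static void sname_demo(void) {
  SName name = {0};
  char  full[TSDB_TABLE_FNAME_LEN] = {0};   // assumed large enough for "acct.db.table"

  // parse an account-qualified table id into acctId/dbname/tname
  if (tNameFromString(&name, "root.db0.tb1",
                      T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE) != 0) {
    return;  // malformed name
  }

  // rebuild the full "acct.db.table" string and read back the bare table name
  if (tNameExtractFullName(&name, full) == 0) {
    printf("full:%s table:%s len:%d\n",
           full, tNameGetTableName(&name), (int)tNameLen(&name));
  }
}

This is the round trip the client code in this commit performs wherever it previously copied pTableMetaInfo->name as a raw string.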


@ -3,31 +3,12 @@
#include "tname.h"
#include "tstoken.h"
#include "ttokendef.h"
#include "tvariant.h"
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFCOLS(x) ((x) >= TSDB_MIN_COLUMNS && (x) <= TSDB_MAX_COLUMNS)
#define VALIDNUMOFTAGS(x) ((x) >= 0 && (x) <= TSDB_MAX_TAGS)
// todo refactor
UNUSED_FUNC static FORCE_INLINE const char* skipSegments(const char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delim) {
};
}
return input;
}
UNUSED_FUNC static FORCE_INLINE size_t copy(char* dst, const char* src, char delimiter) {
size_t len = 0;
while (*src != delimiter && *src != 0) {
*dst++ = *src++;
len++;
}
return len;
}
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
void extractTableName(const char* tableId, char* name) {
size_t s1 = strcspn(tableId, &TS_PATH_DELIMITER[0]);
@ -85,7 +66,7 @@ bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
}
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
if (numOfFilters == 0) {
assert(src == NULL);
return NULL;
@ -200,7 +181,7 @@ void extractTableNameFromToken(SStrToken* pToken, SStrToken* pTable) {
}
}
SSchema tscGetTbnameColumnSchema() {
SSchema tGetTbnameColumnSchema() {
struct SSchema s = {
.colId = TSDB_TBNAME_COLUMN_INDEX,
.type = TSDB_DATA_TYPE_BINARY,
@ -248,7 +229,7 @@ static bool doValidateSchema(SSchema* pSchema, int32_t numOfCols, int32_t maxLen
return rowLen <= maxLen;
}
bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags) {
if (!VALIDNUMOFCOLS(numOfCols)) {
return false;
}
@ -272,3 +253,184 @@ bool isValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags
return true;
}
int32_t tNameExtractFullName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
// invalid full name format, abort
if (!tIsValidName(name)) {
return -1;
}
int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname);
size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
assert(name->type == TSDB_TABLE_NAME_T);
dst[len] = TS_PATH_DELIMITER[0];
memcpy(dst + len + 1, name->tname, tnameLen);
dst[len + tnameLen + 1] = 0;
}
return 0;
}
int32_t tNameLen(const SName* name) {
assert(name != NULL);
int32_t len = (int32_t) strlen(name->acctId);
int32_t len1 = (int32_t) strlen(name->dbname);
int32_t len2 = (int32_t) strlen(name->tname);
if (name->type == TSDB_DB_NAME_T) {
assert(len2 == 0);
return len + len1 + TS_PATH_DELIMITER_LEN;
} else {
assert(len2 > 0);
return len + len1 + len2 + TS_PATH_DELIMITER_LEN * 2;
}
}
bool tIsValidName(const SName* name) {
assert(name != NULL);
if (!VALID_NAME_TYPE(name->type)) {
return false;
}
if (strlen(name->acctId) <= 0) {
return false;
}
if (name->type == TSDB_DB_NAME_T) {
return strlen(name->dbname) > 0;
} else {
return strlen(name->dbname) > 0 && strlen(name->tname) > 0;
}
}
SName* tNameDup(const SName* name) {
assert(name != NULL);
SName* p = calloc(1, sizeof(SName));
memcpy(p, name, sizeof(SName));
return p;
}
int32_t tNameGetDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
strncpy(dst, name->dbname, tListLen(name->dbname));
return 0;
}
int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
snprintf(dst, TSDB_ACCT_ID_LEN + TS_PATH_DELIMITER_LEN + TSDB_DB_NAME_LEN,
"%s.%s", name->acctId, name->dbname);
return 0;
}
bool tNameIsEmpty(const SName* name) {
assert(name != NULL);
return name->type == 0 || strlen(name->acctId) <= 0;
}
const char* tNameGetTableName(const SName* name) {
assert(name != NULL && name->type == TSDB_TABLE_NAME_T);
return &name->tname[0];
}
void tNameAssign(SName* dst, const SName* src) {
memcpy(dst, src, sizeof(SName));
}
int32_t tNameSetDbName(SName* dst, const char* acct, SStrToken* dbToken) {
assert(dst != NULL && dbToken != NULL && acct != NULL);
// too long account id or too long db name
if (strlen(acct) >= tListLen(dst->acctId) || dbToken->n >= tListLen(dst->dbname)) {
return -1;
}
dst->type = TSDB_DB_NAME_T;
tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
tstrncpy(dst->dbname, dbToken->z, dbToken->n + 1);
return 0;
}
int32_t tNameSetAcctId(SName* dst, const char* acct) {
assert(dst != NULL && acct != NULL);
// too long account id
if (strlen(acct) >= tListLen(dst->acctId)) {
return -1;
}
tstrncpy(dst->acctId, acct, tListLen(dst->acctId));
assert(strlen(dst->acctId) > 0);
return 0;
}
int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
assert(dst != NULL && str != NULL && strlen(str) > 0);
char* p = NULL;
if ((type & T_NAME_ACCT) == T_NAME_ACCT) {
p = strstr(str, TS_PATH_DELIMITER);
if (p == NULL) {
return -1;
}
int32_t len = (int32_t)(p - str);
// too long or empty account id
if ((len >= tListLen(dst->acctId)) || (len <= 0)) {
return -1;
}
memcpy (dst->acctId, str, len);
dst->acctId[len] = 0;
assert(strlen(dst->acctId) > 0);
}
if ((type & T_NAME_DB) == T_NAME_DB) {
dst->type = TSDB_DB_NAME_T;
char* start = (char*)((p == NULL)? str:(p+1));
int32_t len = 0;
p = strstr(start, TS_PATH_DELIMITER);
if (p == NULL) {
len = (int32_t) strlen(start);
} else {
len = (int32_t) (p - start);
}
// too long or empty db name
if ((len >= tListLen(dst->dbname)) || (len <= 0)) {
return -1;
}
memcpy (dst->dbname, start, len);
dst->dbname[len] = 0;
}
if ((type & T_NAME_TABLE) == T_NAME_TABLE) {
dst->type = TSDB_TABLE_NAME_T;
char* start = (char*) ((p == NULL)? str: (p+1));
int32_t len = (int32_t) strlen(start);
// too long or empty table name
if ((len >= tListLen(dst->tname)) || (len <= 0)) {
return -1;
}
memcpy (dst->tname, start, len);
dst->tname[len] = 0;
}
return 0;
}
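
For reference, a short sketch (again, not part of the patch) of how tNameFromString above behaves for the type masks defined in tname.h, assuming the same headers; the literal strings "root.log" and "root.log.dn1" are made up:

#include <assert.h>
#include <string.h>
#include "tname.h"

static void sname_parse_demo(void) {
  SName n = {0};

  // db-level id: "root.log" with ACCT|DB -> acctId="root", dbname="log", type=TSDB_DB_NAME_T
  assert(tNameFromString(&n, "root.log", T_NAME_ACCT | T_NAME_DB) == 0);
  assert(n.type == TSDB_DB_NAME_T && strcmp(n.dbname, "log") == 0);

  // table-level id: "root.log.dn1" with ACCT|DB|TABLE -> tname="dn1", type=TSDB_TABLE_NAME_T
  memset(&n, 0, sizeof(n));
  assert(tNameFromString(&n, "root.log.dn1",
                         T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE) == 0);
  assert(n.type == TSDB_TABLE_NAME_T && strcmp(tNameGetTableName(&n), "dn1") == 0);
}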


@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.16-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.17-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})


@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.16</version>
<version>2.0.17</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>


@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.16</version>
<version>2.0.17</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -74,6 +74,14 @@
<version>1.2.58</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-dbcp2 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-dbcp2</artifactId>
<version>2.7.0</version>
</dependency>
</dependencies>
<build>


@ -1,28 +1,13 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
public abstract class AbstractDatabaseMetaData implements DatabaseMetaData, Wrapper {
private final static String PRODUCT_NAME = "TDengine";
private final static String PRODUCT_VESION = "2.0.x.x";
private final static String DRIVER_NAME = "taos-jdbcdriver";
private final static String DRIVER_VERSION = "2.0.x";
private final static int DRIVER_MAJAR_VERSION = 2;
private final static int DRIVER_MINOR_VERSION = 0;
@ -67,9 +52,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return PRODUCT_VESION;
}
public String getDriverName() throws SQLException {
return DRIVER_NAME;
}
public abstract String getDriverName() throws SQLException;
public String getDriverVersion() throws SQLException {
return DRIVER_VERSION;
@ -92,6 +75,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
// whether identifiers for objects such as databases and tables are stored in mixed case
return false;
}
@ -100,7 +84,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean storesLowerCaseIdentifiers() throws SQLException {
return false;
return true;
}
public boolean storesMixedCaseIdentifiers() throws SQLException {
@ -168,10 +152,12 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean nullPlusNonNullIsNull() throws SQLException {
// null + non-null != null
return false;
}
public boolean supportsConvert() throws SQLException {
// whether the CONVERT function is supported
return false;
}
@ -196,7 +182,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsGroupBy() throws SQLException {
return false;
return true;
}
public boolean supportsGroupByUnrelated() throws SQLException {
@ -468,7 +454,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public int getDefaultTransactionIsolation() throws SQLException {
return 0;
return Connection.TRANSACTION_NONE;
}
public boolean supportsTransactions() throws SQLException {
@ -476,6 +462,8 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
if (level == Connection.TRANSACTION_NONE)
return true;
return false;
}
@ -495,18 +483,113 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return false;
}
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException {
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException {
return null;
}
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException {
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException {
return null;
}
public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
throws SQLException;
public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException;
protected ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types, Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(1);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(2);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(3);
col3.setColName("TABLE_NAME");
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(4);
col4.setColName("TABLE_TYPE");
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(5);
col5.setColName("REMARKS");
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col5);
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(6);
col6.setColName("TYPE_CAT");
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col6);
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(7);
col7.setColName("TYPE_SCHEM");
col7.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col7);
ColumnMetaData col8 = new ColumnMetaData();
col8.setColIndex(8);
col8.setColName("TYPE_NAME");
col8.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col8);
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(9);
col9.setColName("SELF_REFERENCING_COL_NAME");
col9.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col9);
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(10);
col10.setColName("REF_GENERATION");
col10.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col10);
resultSet.setColumnMetaDataList(columnMetaDataList);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
ResultSet tables = stmt.executeQuery("show tables");
while (tables.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(10);
rowData.setString(0, dbname); //table_cat
rowData.setString(1, null); //TABLE_SCHEM
rowData.setString(2, tables.getString("table_name")); //TABLE_NAME
rowData.setString(3, "TABLE"); //TABLE_TYPE
rowData.setString(4, ""); //REMARKS
rowDataList.add(rowData);
}
ResultSet stables = stmt.executeQuery("show stables");
while (stables.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(10);
rowData.setString(0, dbname); //TABLE_CAT
rowData.setString(1, null); //TABLE_SCHEM
rowData.setString(2, stables.getString("name")); //TABLE_NAME
rowData.setString(3, "TABLE"); //TABLE_TYPE
rowData.setString(4, "STABLE"); //REMARKS
rowDataList.add(rowData);
}
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
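
To make the shape of the result concrete, here is a minimal caller-side sketch of this getTables() path through the standard java.sql.DatabaseMetaData interface; the database name "log" is a placeholder, and the sketch assumes the returned result set can be read by the column labels assembled above.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class GetTablesExample {
    // Lists the tables and super tables of one database ("log" is a placeholder name).
    static void printTables(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet rs = meta.getTables("log", null, null, null)) {
            while (rs.next()) {
                System.out.printf("%s.%s  type=%s  remarks=%s%n",
                        rs.getString("TABLE_CAT"),
                        rs.getString("TABLE_NAME"),
                        rs.getString("TABLE_TYPE"),
                        rs.getString("REMARKS"));
            }
        }
    }
}
```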
public ResultSet getSchemas() throws SQLException {
return getEmptyResultSet();
@ -516,32 +599,239 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
public ResultSet getTableTypes() throws SQLException {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>(1);
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE");
colMetaData.setColSize(10);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(colMetaData);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
List<TSDBResultSetRowData> rowDataList = new ArrayList<TSDBResultSetRowData>(2);
TSDBResultSetRowData rowData = new TSDBResultSetRowData();
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
TSDBResultSetRowData rowData = new TSDBResultSetRowData(1);
rowData.setString(0, "TABLE");
rowDataList.add(rowData);
rowData = new TSDBResultSetRowData();
rowData = new TSDBResultSetRowData(1);
rowData.setString(0, "STABLE");
rowDataList.add(rowData);
resultSet.setColumnMetaDataList(columnMetaDataList);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
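
A short sketch of how a client sees the two table types published here ("TABLE" and "STABLE"); it assumes an already open connection obtained from this driver.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class TableTypesExample {
    // Prints the table types exposed by the driver: "TABLE" and "STABLE".
    static void printTableTypes(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet rs = meta.getTableTypes()) {
            while (rs.next()) {
                System.out.println(rs.getString("TABLE_TYPE"));
            }
        }
    }
}
```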
public abstract ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException;
protected ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern, Connection conn) {
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(1);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
// TABLE_SCHEM
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(2);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
// TABLE_NAME
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(3);
col3.setColName("TABLE_NAME");
col3.setColSize(193);
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
// COLUMN_NAME
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(4);
col4.setColName("COLUMN_NAME");
col4.setColSize(65);
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
// DATA_TYPE
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(5);
col5.setColName("DATA_TYPE");
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col5);
// TYPE_NAME
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(6);
col6.setColName("TYPE_NAME");
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col6);
// COLUMN_SIZE
ColumnMetaData col7 = new ColumnMetaData();
col7.setColIndex(7);
col7.setColName("COLUMN_SIZE");
col7.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col7);
// BUFFER_LENGTH, not used
ColumnMetaData col8 = new ColumnMetaData();
col8.setColIndex(8);
col8.setColName("BUFFER_LENGTH");
columnMetaDataList.add(col8);
// DECIMAL_DIGITS
ColumnMetaData col9 = new ColumnMetaData();
col9.setColIndex(9);
col9.setColName("DECIMAL_DIGITS");
col9.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col9);
// add NUM_PREC_RADIX
ColumnMetaData col10 = new ColumnMetaData();
col10.setColIndex(10);
col10.setColName("NUM_PREC_RADIX");
col10.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col10);
// NULLABLE
ColumnMetaData col11 = new ColumnMetaData();
col11.setColIndex(11);
col11.setColName("NULLABLE");
col11.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col11);
// REMARKS
ColumnMetaData col12 = new ColumnMetaData();
col12.setColIndex(12);
col12.setColName("REMARKS");
col12.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col12);
// COLUMN_DEF
ColumnMetaData col13 = new ColumnMetaData();
col13.setColIndex(13);
col13.setColName("COLUMN_DEF");
col13.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col13);
//SQL_DATA_TYPE
ColumnMetaData col14 = new ColumnMetaData();
col14.setColIndex(14);
col14.setColName("SQL_DATA_TYPE");
col14.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col14);
//SQL_DATETIME_SUB
ColumnMetaData col15 = new ColumnMetaData();
col15.setColIndex(15);
col15.setColName("SQL_DATETIME_SUB");
col15.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col15);
//CHAR_OCTET_LENGTH
ColumnMetaData col16 = new ColumnMetaData();
col16.setColIndex(16);
col16.setColName("CHAR_OCTET_LENGTH");
col16.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col16);
//ORDINAL_POSITION
ColumnMetaData col17 = new ColumnMetaData();
col17.setColIndex(17);
col17.setColName("ORDINAL_POSITION");
col17.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col17);
// IS_NULLABLE
ColumnMetaData col18 = new ColumnMetaData();
col18.setColIndex(18);
col18.setColName("IS_NULLABLE");
col18.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col18);
//SCOPE_CATALOG
ColumnMetaData col19 = new ColumnMetaData();
col19.setColIndex(19);
col19.setColName("SCOPE_CATALOG");
col19.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col19);
//SCOPE_SCHEMA
ColumnMetaData col20 = new ColumnMetaData();
col20.setColIndex(20);
col20.setColName("SCOPE_SCHEMA");
col20.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col20);
//SCOPE_TABLE
ColumnMetaData col21 = new ColumnMetaData();
col21.setColIndex(21);
col21.setColName("SCOPE_TABLE");
col21.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col21);
//SOURCE_DATA_TYPE
ColumnMetaData col22 = new ColumnMetaData();
col22.setColIndex(22);
col22.setColName("SOURCE_DATA_TYPE");
col22.setColType(TSDBConstants.TSDB_DATA_TYPE_SMALLINT);
columnMetaDataList.add(col22);
//IS_AUTOINCREMENT
ColumnMetaData col23 = new ColumnMetaData();
col23.setColIndex(23);
col23.setColName("IS_AUTOINCREMENT");
col23.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col23);
//IS_GENERATEDCOLUMN
ColumnMetaData col24 = new ColumnMetaData();
col24.setColIndex(24);
col24.setColName("IS_GENERATEDCOLUMN");
col24.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col24);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
ResultSet rs = stmt.executeQuery("describe " + dbname + "." + tableNamePattern);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
int index = 0;
while (rs.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
// set TABLE_CAT
rowData.setString(0, dbname);
// set TABLE_NAME
rowData.setString(2, tableNamePattern);
// set COLUMN_NAME
rowData.setString(3, rs.getString("Field"));
// set DATA_TYPE
String typeName = rs.getString("Type");
rowData.setInt(4, getDataType(typeName));
// set TYPE_NAME
rowData.setString(5, typeName);
// set COLUMN_SIZE
int length = rs.getInt("Length");
rowData.setInt(6, getColumnSize(typeName, length));
// set DECIMAL_DIGITS
rowData.setInt(8, getDecimalDigits(typeName));
// set NUM_PREC_RADIX
rowData.setInt(9, 10);
// set NULLABLE
rowData.setInt(10, getNullable(index, typeName));
// set REMARKS
rowData.setString(11, rs.getString("Note"));
rowDataList.add(rowData);
index++;
}
resultSet.setRowDataList(rowDataList);
return resultSet;
} catch (SQLException e) {
e.printStackTrace();
}
return null;
}
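
The getColumns() result can be consumed the same way; a minimal sketch follows, with "log" and "dn" as placeholder database and table names and label-based access assumed.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ColumnsExample {
    // Lists column name, JDBC type code, type name and size for one table.
    static void printColumns(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet rs = meta.getColumns("log", null, "dn", null)) {
            while (rs.next()) {
                System.out.printf("%s  DATA_TYPE=%d  TYPE_NAME=%s  COLUMN_SIZE=%d%n",
                        rs.getString("COLUMN_NAME"),
                        rs.getInt("DATA_TYPE"),
                        rs.getString("TYPE_NAME"),
                        rs.getInt("COLUMN_SIZE"));
            }
        }
    }
}
```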
protected int getNullable(int index, String typeName) {
if (index == 0 && "TIMESTAMP".equals(typeName))
return DatabaseMetaData.columnNoNulls;
@ -552,7 +842,6 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
switch (typeName) {
case "TIMESTAMP":
return 23;
default:
return 0;
}
@ -615,9 +904,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return getEmptyResultSet();
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public abstract ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException;
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
@ -718,9 +1005,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
return getEmptyResultSet();
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return getEmptyResultSet();
}
public abstract ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException;
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
String attributeNamePattern) throws SQLException {
@ -728,15 +1013,17 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
return true;
return false;
}
public int getResultSetHoldability() throws SQLException {
return 0;
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
public int getDatabaseMajorVersion() throws SQLException {
return 0;
return 2;
}
public int getDatabaseMinorVersion() throws SQLException {
@ -744,7 +1031,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public int getJDBCMajorVersion() throws SQLException {
return 0;
return 2;
}
public int getJDBCMinorVersion() throws SQLException {
@ -768,7 +1055,7 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
}
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
return getEmptyResultSet();
}
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
@ -805,4 +1092,180 @@ public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
private ResultSet getEmptyResultSet() {
return new EmptyResultSet();
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface.isInstance(this);
}
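
The Wrapper methods let callers recover the driver-specific metadata object when they need more than the java.sql.DatabaseMetaData view; a small hedged sketch, assuming the metadata object returned by the connection extends AbstractDatabaseMetaData as declared above.

```java
import com.taosdata.jdbc.AbstractDatabaseMetaData;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;

public class UnwrapExample {
    // Recovers the driver-specific metadata implementation, if the caller needs it.
    static void inspect(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        if (meta.isWrapperFor(AbstractDatabaseMetaData.class)) {
            AbstractDatabaseMetaData impl = meta.unwrap(AbstractDatabaseMetaData.class);
            System.out.println("Unwrapped to " + impl.getClass().getName());
        }
    }
}
```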
protected ResultSet getCatalogs(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(1);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
resultSet.setColumnMetaDataList(columnMetaDataList);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
ResultSet rs = stmt.executeQuery("show databases");
while (rs.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(1);
rowData.setString(0, rs.getString("name"));
rowDataList.add(rowData);
}
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
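
Because each row of getCatalogs() carries one database name in TABLE_CAT, enumerating databases from client code reduces to a few lines; a sketch assuming an open connection:

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class CatalogsExample {
    // Prints every database name reported by "show databases".
    static void printDatabases(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet rs = meta.getCatalogs()) {
            while (rs.next()) {
                System.out.println(rs.getString("TABLE_CAT"));
            }
        }
    }
}
```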
protected ResultSet getPrimaryKeys(String catalog, String schema, String table, Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(0);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
// TABLE_SCHEM
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(1);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
// TABLE_NAME
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(2);
col3.setColName("TABLE_NAME");
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
// COLUMN_NAME
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(3);
col4.setColName("COLUMN_NAME");
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
// KEY_SEQ
ColumnMetaData col5 = new ColumnMetaData();
col5.setColIndex(4);
col5.setColName("KEY_SEQ");
col5.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(col5);
// PK_NAME
ColumnMetaData col6 = new ColumnMetaData();
col6.setColIndex(5);
col6.setColName("PK_NAME");
col6.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col6);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set rowData
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
ResultSet rs = stmt.executeQuery("describe " + dbname + "." + table);
rs.next();
TSDBResultSetRowData rowData = new TSDBResultSetRowData(6);
rowData.setString(0, null);
rowData.setString(1, null);
rowData.setString(2, table);
String pkName = rs.getString(1);
rowData.setString(3, pkName);
rowData.setInt(4, 1);
rowData.setString(5, pkName);
rowDataList.add(rowData);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
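
Since the first column of a TDengine table (the timestamp) is reported as its primary key, a caller-side check looks roughly like this; "log" and "dn" are placeholder database and table names.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class PrimaryKeyExample {
    // Expects a single row naming the first (timestamp) column as the primary key.
    static void printPrimaryKey(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet rs = meta.getPrimaryKeys("log", null, "dn")) {
            while (rs.next()) {
                System.out.println(rs.getString("COLUMN_NAME") + " (KEY_SEQ=" + rs.getInt("KEY_SEQ") + ")");
            }
        }
    }
}
```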
protected ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern, Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
ResultSet databases = stmt.executeQuery("show databases");
String dbname = null;
while (databases.next()) {
dbname = databases.getString("name");
if (dbname.equalsIgnoreCase(catalog))
break;
}
databases.close();
if (dbname == null)
return null;
stmt.execute("use " + dbname);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>();
// TABLE_CAT
ColumnMetaData col1 = new ColumnMetaData();
col1.setColIndex(0);
col1.setColName("TABLE_CAT");
col1.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col1);
// TABLE_SCHEM
ColumnMetaData col2 = new ColumnMetaData();
col2.setColIndex(1);
col2.setColName("TABLE_SCHEM");
col2.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col2);
// TABLE_NAME
ColumnMetaData col3 = new ColumnMetaData();
col3.setColIndex(2);
col3.setColName("TABLE_NAME");
col3.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col3);
// SUPERTABLE_NAME
ColumnMetaData col4 = new ColumnMetaData();
col4.setColIndex(3);
col4.setColName("SUPERTABLE_NAME");
col4.setColType(TSDBConstants.TSDB_DATA_TYPE_NCHAR);
columnMetaDataList.add(col4);
resultSet.setColumnMetaDataList(columnMetaDataList);
ResultSet rs = stmt.executeQuery("show tables like '" + tableNamePattern + "'");
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
while (rs.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(4);
rowData.setString(2, rs.getString(1));
rowData.setString(3, rs.getString(4));
rowDataList.add(rowData);
}
resultSet.setRowDataList(rowDataList);
return resultSet;
}
}
}
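
getSuperTables() pairs each matching table with the super table it belongs to; a minimal sketch, assuming '%' is accepted as the match-everything pattern by the underlying SHOW TABLES LIKE statement and that "log" stands in for a real database name.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class SuperTablesExample {
    // Lists sub-tables together with the super table each one belongs to.
    static void printSuperTables(Connection conn) throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        try (ResultSet rs = meta.getSuperTables("log", null, "%")) {
            while (rs.next()) {
                System.out.println(rs.getString("TABLE_NAME") + " -> " + rs.getString("SUPERTABLE_NAME"));
            }
        }
    }
}
```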

View File

@ -1,68 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
/*
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class CatalogResultSet extends TSDBResultSetWrapper {
public CatalogResultSet(ResultSet resultSet) {
super.setOriginalResultSet(resultSet);
}
@Override
public String getString(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getString(columnIndex);
} else {
return null;
}
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getBoolean(columnIndex);
} else {
return false;
}
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getBytes(columnIndex);
} else {
return null;
}
}
@Override
public Object getObject(int columnIndex) throws SQLException {
if (columnIndex <= 1) {
return super.getObject(columnIndex);
} else {
return null;
}
}
}

View File

@ -16,40 +16,40 @@ package com.taosdata.jdbc;
public class ColumnMetaData {
private int colType = 0;
private String colName = null;
private int colSize = -1;
private int colIndex = 0;
public int getColSize() {
return colSize;
}
public void setColSize(int colSize) {
this.colSize = colSize;
}
public int getColType() {
return colType;
}
public void setColType(int colType) {
this.colType = colType;
}
public String getColName() {
return colName;
}
public void setColName(String colName) {
this.colName = colName;
}
public int getColIndex() {
return colIndex;
}
public void setColIndex(int colIndex) {
this.colIndex = colIndex;
}
}

View File

@ -1,51 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
/*
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class GetColumnsResultSet extends TSDBResultSetWrapper {
private String catalog;
private String schemaPattern;
private String tableNamePattern;
private String columnNamePattern;
public GetColumnsResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) {
super.setOriginalResultSet(resultSet);
this.catalog = catalog;
this.schemaPattern = schemaPattern;
this.tableNamePattern = tableNamePattern;
this.columnNamePattern = columnNamePattern;
}
@Override
public String getString(int columnIndex) {
switch (columnIndex) {
case 1:
return catalog;
case 2:
return null;
case 3:
return tableNamePattern;
default:
return null;
}
}
}

View File

@ -1,53 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
/*
* TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
public class GetTablesResultSet extends TSDBResultSetWrapper {
private String catalog;
private String schemaPattern;
private String tableNamePattern;
private String[] types;
public GetTablesResultSet(ResultSet resultSet, String catalog, String schemaPattern, String tableNamePattern, String[] types) {
super.setOriginalResultSet(resultSet);
this.catalog = catalog;
this.schemaPattern = schemaPattern;
this.tableNamePattern = tableNamePattern;
this.types = types;
}
@Override
public String getString(int columnIndex) throws SQLException {
String ret = null;
switch (columnIndex) {
case 3:
return super.getString(1);
case 4:
return "table";
default:
return null;
}
}
}

View File

@ -14,11 +14,11 @@
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData {
private String url;
private String userName;
@ -29,31 +29,14 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
this.userName = userName;
}
public Connection getConnection() throws SQLException {
return this.conn;
}
public void setConnection(Connection conn) {
this.conn = conn;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
return iface.cast(this);
} catch (ClassCastException cce) {
throw new SQLException("Unable to unwrap to " + iface.toString());
}
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface.isInstance(this);
}
public boolean allProceduresAreCallable() throws SQLException {
return false;
}
public boolean allTablesAreSelectable() throws SQLException {
return false;
}
public String getURL() throws SQLException {
return this.url;
}
@ -62,911 +45,52 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
return this.userName;
}
public boolean isReadOnly() throws SQLException {
return false;
}
public boolean nullsAreSortedHigh() throws SQLException {
return false;
}
public boolean nullsAreSortedLow() throws SQLException {
return !nullsAreSortedHigh();
}
public boolean nullsAreSortedAtStart() throws SQLException {
return true;
}
public boolean nullsAreSortedAtEnd() throws SQLException {
return !nullsAreSortedAtStart();
}
public String getDatabaseProductName() throws SQLException {
return "TDengine";
}
public String getDatabaseProductVersion() throws SQLException {
return "2.0.x.x";
}
public String getDriverName() throws SQLException {
return TSDBDriver.class.getName();
}
public String getDriverVersion() throws SQLException {
return "2.0.x";
}
public int getDriverMajorVersion() {
return 2;
}
public int getDriverMinorVersion() {
return 0;
}
public boolean usesLocalFiles() throws SQLException {
return false;
}
public boolean usesLocalFilePerTable() throws SQLException {
return false;
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
// whether identifiers for objects such as databases and tables are stored in mixed case
return false;
}
public boolean storesUpperCaseIdentifiers() throws SQLException {
return false;
}
public boolean storesLowerCaseIdentifiers() throws SQLException {
return true;
}
public boolean storesMixedCaseIdentifiers() throws SQLException {
return false;
}
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
// whether quoted identifiers for objects such as databases and tables are stored in mixed case
return false;
}
public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
public String getIdentifierQuoteString() throws SQLException {
return " ";
}
public String getSQLKeywords() throws SQLException {
return null;
}
public String getNumericFunctions() throws SQLException {
return null;
}
public String getStringFunctions() throws SQLException {
return null;
}
public String getSystemFunctions() throws SQLException {
return null;
}
public String getTimeDateFunctions() throws SQLException {
return null;
}
public String getSearchStringEscape() throws SQLException {
return null;
}
public String getExtraNameCharacters() throws SQLException {
return null;
}
public boolean supportsAlterTableWithAddColumn() throws SQLException {
return true;
}
public boolean supportsAlterTableWithDropColumn() throws SQLException {
return true;
}
public boolean supportsColumnAliasing() throws SQLException {
return true;
}
public boolean nullPlusNonNullIsNull() throws SQLException {
// null + non-null != null
return false;
}
public boolean supportsConvert() throws SQLException {
// whether the CONVERT function is supported
return false;
}
public boolean supportsConvert(int fromType, int toType) throws SQLException {
return false;
}
public boolean supportsTableCorrelationNames() throws SQLException {
return false;
}
public boolean supportsDifferentTableCorrelationNames() throws SQLException {
return false;
}
public boolean supportsExpressionsInOrderBy() throws SQLException {
return false;
}
public boolean supportsOrderByUnrelated() throws SQLException {
return false;
}
public boolean supportsGroupBy() throws SQLException {
return true;
}
public boolean supportsGroupByUnrelated() throws SQLException {
return false;
}
public boolean supportsGroupByBeyondSelect() throws SQLException {
return false;
}
public boolean supportsLikeEscapeClause() throws SQLException {
return false;
}
public boolean supportsMultipleResultSets() throws SQLException {
return false;
}
public boolean supportsMultipleTransactions() throws SQLException {
return false;
}
public boolean supportsNonNullableColumns() throws SQLException {
return false;
}
public boolean supportsMinimumSQLGrammar() throws SQLException {
return false;
}
public boolean supportsCoreSQLGrammar() throws SQLException {
return false;
}
public boolean supportsExtendedSQLGrammar() throws SQLException {
return false;
}
public boolean supportsANSI92EntryLevelSQL() throws SQLException {
return false;
}
public boolean supportsANSI92IntermediateSQL() throws SQLException {
return false;
}
public boolean supportsANSI92FullSQL() throws SQLException {
return false;
}
public boolean supportsIntegrityEnhancementFacility() throws SQLException {
return false;
}
public boolean supportsOuterJoins() throws SQLException {
return false;
}
public boolean supportsFullOuterJoins() throws SQLException {
return false;
}
public boolean supportsLimitedOuterJoins() throws SQLException {
return false;
}
public String getSchemaTerm() throws SQLException {
return null;
}
public String getProcedureTerm() throws SQLException {
return null;
}
public String getCatalogTerm() throws SQLException {
return "database";
}
public boolean isCatalogAtStart() throws SQLException {
return true;
}
public String getCatalogSeparator() throws SQLException {
return ".";
}
public boolean supportsSchemasInDataManipulation() throws SQLException {
return false;
}
public boolean supportsSchemasInProcedureCalls() throws SQLException {
return false;
}
public boolean supportsSchemasInTableDefinitions() throws SQLException {
return false;
}
public boolean supportsSchemasInIndexDefinitions() throws SQLException {
return false;
}
public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInDataManipulation() throws SQLException {
return true;
}
public boolean supportsCatalogsInProcedureCalls() throws SQLException {
return false;
}
public boolean supportsCatalogsInTableDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
return false;
}
public boolean supportsPositionedDelete() throws SQLException {
return false;
}
public boolean supportsPositionedUpdate() throws SQLException {
return false;
}
public boolean supportsSelectForUpdate() throws SQLException {
return false;
}
public boolean supportsStoredProcedures() throws SQLException {
return false;
}
public boolean supportsSubqueriesInComparisons() throws SQLException {
return false;
}
public boolean supportsSubqueriesInExists() throws SQLException {
return false;
}
public boolean supportsSubqueriesInIns() throws SQLException {
return false;
}
public boolean supportsSubqueriesInQuantifieds() throws SQLException {
return false;
}
public boolean supportsCorrelatedSubqueries() throws SQLException {
return false;
}
public boolean supportsUnion() throws SQLException {
return false;
}
public boolean supportsUnionAll() throws SQLException {
return false;
}
public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
return false;
}
public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
return false;
}
public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
return false;
}
public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
return false;
}
public int getMaxBinaryLiteralLength() throws SQLException {
return 0;
}
public int getMaxCharLiteralLength() throws SQLException {
return 0;
}
public int getMaxColumnNameLength() throws SQLException {
return 0;
}
public int getMaxColumnsInGroupBy() throws SQLException {
return 0;
}
public int getMaxColumnsInIndex() throws SQLException {
return 0;
}
public int getMaxColumnsInOrderBy() throws SQLException {
return 0;
}
public int getMaxColumnsInSelect() throws SQLException {
return 0;
}
public int getMaxColumnsInTable() throws SQLException {
return 0;
}
public int getMaxConnections() throws SQLException {
return 0;
}
public int getMaxCursorNameLength() throws SQLException {
return 0;
}
public int getMaxIndexLength() throws SQLException {
return 0;
}
public int getMaxSchemaNameLength() throws SQLException {
return 0;
}
public int getMaxProcedureNameLength() throws SQLException {
return 0;
}
public int getMaxCatalogNameLength() throws SQLException {
return 0;
}
public int getMaxRowSize() throws SQLException {
return 0;
}
public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
return false;
}
public int getMaxStatementLength() throws SQLException {
return 0;
}
public int getMaxStatements() throws SQLException {
return 0;
}
public int getMaxTableNameLength() throws SQLException {
return 0;
}
public int getMaxTablesInSelect() throws SQLException {
return 0;
}
public int getMaxUserNameLength() throws SQLException {
return 0;
}
public int getDefaultTransactionIsolation() throws SQLException {
return Connection.TRANSACTION_NONE;
}
public boolean supportsTransactions() throws SQLException {
return false;
}
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
if (level == Connection.TRANSACTION_NONE)
return true;
return false;
}
public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
return false;
}
public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
return false;
}
public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
return false;
}
public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
return false;
}
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException {
return null;
}
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException {
return null;
}
/**
* @param catalog : database name; "" matches tables that do not belong to any database, null means the database is not used to narrow the search
* @param schemaPattern : schema name; "" matches tables that have no schema
* @param tableNamePattern : only tables whose names match tableNamePattern are returned; null returns all tables
* @param types : table types; null returns all types
*/
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
if (conn == null || conn.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
try (Statement stmt = conn.createStatement()) {
if (catalog == null || catalog.isEmpty())
return null;
stmt.executeUpdate("use " + catalog);
ResultSet resultSet0 = stmt.executeQuery("show tables");
GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
return getTablesResultSet;
}
return super.getTables(catalog, schemaPattern, tableNamePattern, types, conn);
}
public ResultSet getSchemas() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getCatalogs() throws SQLException {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("show databases");
return new CatalogResultSet(rs);
}
return super.getCatalogs(conn);
}
public ResultSet getTableTypes() throws SQLException {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(1);
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE");
colMetaData.setColSize(10);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// set up rowDataList
List<TSDBResultSetRowData> rowDataList = new ArrayList<>(2);
TSDBResultSetRowData rowData = new TSDBResultSetRowData();
rowData.setString(0, "TABLE");
rowDataList.add(rowData);
rowData = new TSDBResultSetRowData();
rowData.setString(0, "STABLE");
rowDataList.add(rowData);
resultSet.setColumnMetaDataList(columnMetaDataList);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException {
/** add by zyyang **********/
Statement stmt = null;
if (null != conn && !conn.isClosed()) {
stmt = conn.createStatement();
if (catalog == null || catalog.isEmpty())
return null;
stmt.executeUpdate("use " + catalog);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24);
columnMetaDataList.add(null);
columnMetaDataList.add(null);
// add TABLE_NAME
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(3);
colMetaData.setColName("TABLE_NAME");
colMetaData.setColSize(193);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(4);
colMetaData.setColName("COLUMN_NAME");
colMetaData.setColSize(65);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add DATA_TYPE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(5);
colMetaData.setColName("DATA_TYPE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add TYPE_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(6);
colMetaData.setColName("TYPE_NAME");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_SIZE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(7);
colMetaData.setColName("COLUMN_SIZE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add BUFFER_LENGTH ,not used
columnMetaDataList.add(null);
// add DECIMAL_DIGITS
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(9);
colMetaData.setColName("DECIMAL_DIGITS");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NUM_PREC_RADIX
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(10);
colMetaData.setColName("NUM_PREC_RADIX");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NULLABLE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(11);
colMetaData.setColName("NULLABLE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
int index = 0;
while (resultSet0.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
// set TABLE_NAME
rowData.setString(2, tableNamePattern);
// set COLUMN_NAME
rowData.setString(3, resultSet0.getString(1));
// set DATA_TYPE
String typeName = resultSet0.getString(2);
rowData.setInt(4, getDataType(typeName));
// set TYPE_NAME
rowData.setString(5, typeName);
// set COLUMN_SIZE
int length = resultSet0.getInt(3);
rowData.setInt(6, getColumnSize(typeName, length));
// set DECIMAL_DIGITS
rowData.setInt(8, getDecimalDigits(typeName));
// set NUM_PREC_RADIX
rowData.setInt(9, 10);
// set NULLABLE
rowData.setInt(10, getNullable(index, typeName));
rowDataList.add(rowData);
index++;
}
resultSet.setRowDataList(rowDataList);
// GetColumnsResultSet getColumnsResultSet = new GetColumnsResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, columnNamePattern);
// return getColumnsResultSet;
// DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
return resultSet;
} else {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
/*************************/
// return getEmptyResultSet();
return super.getTableTypes();
}
private int getNullable(int index, String typeName) {
if (index == 0 && "TIMESTAMP".equals(typeName))
return DatabaseMetaData.columnNoNulls;
return DatabaseMetaData.columnNullable;
}
private int getColumnSize(String typeName, int length) {
switch (typeName) {
case "TIMESTAMP":
return 23;
default:
return 0;
}
}
private int getDecimalDigits(String typeName) {
switch (typeName) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
default:
return 0;
}
}
private int getDataType(String typeName) {
switch (typeName) {
case "TIMESTAMP":
return Types.TIMESTAMP;
case "INT":
return Types.INTEGER;
case "BIGINT":
return Types.BIGINT;
case "FLOAT":
return Types.FLOAT;
case "DOUBLE":
return Types.DOUBLE;
case "BINARY":
return Types.BINARY;
case "SMALLINT":
return Types.SMALLINT;
case "TINYINT":
return Types.TINYINT;
case "BOOL":
return Types.BOOLEAN;
case "NCHAR":
return Types.NCHAR;
default:
return Types.NULL;
}
}
public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, conn);
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getTypeInfo() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate)
throws SQLException {
return getEmptyResultSet();
}
public boolean supportsResultSetType(int type) throws SQLException {
return false;
}
public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
return false;
}
public boolean ownUpdatesAreVisible(int type) throws SQLException {
return false;
}
public boolean ownDeletesAreVisible(int type) throws SQLException {
return false;
}
public boolean ownInsertsAreVisible(int type) throws SQLException {
return false;
}
public boolean othersUpdatesAreVisible(int type) throws SQLException {
return false;
}
public boolean othersDeletesAreVisible(int type) throws SQLException {
return false;
}
public boolean othersInsertsAreVisible(int type) throws SQLException {
return false;
}
public boolean updatesAreDetected(int type) throws SQLException {
return false;
}
public boolean deletesAreDetected(int type) throws SQLException {
return false;
}
public boolean insertsAreDetected(int type) throws SQLException {
return false;
}
public boolean supportsBatchUpdates() throws SQLException {
return false;
}
public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types)
throws SQLException {
return getEmptyResultSet();
}
public Connection getConnection() throws SQLException {
return this.conn;
}
public boolean supportsSavepoints() throws SQLException {
return false;
}
public boolean supportsNamedParameters() throws SQLException {
return false;
}
public boolean supportsMultipleOpenResults() throws SQLException {
return false;
}
public boolean supportsGetGeneratedKeys() throws SQLException {
return false;
}
public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
return getEmptyResultSet();
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getPrimaryKeys(catalog, schema, table, conn);
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return getEmptyResultSet();
if (conn == null || conn.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getSuperTables(catalog, schemaPattern, tableNamePattern, conn);
}
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
String attributeNamePattern) throws SQLException {
return getEmptyResultSet();
}
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
if (holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT)
return true;
return false;
}
public int getResultSetHoldability() throws SQLException {
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
public int getDatabaseMajorVersion() throws SQLException {
return 2;
}
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
public int getJDBCMajorVersion() throws SQLException {
return 2;
}
public int getJDBCMinorVersion() throws SQLException {
return 0;
}
public int getSQLStateType() throws SQLException {
return 0;
}
public boolean locatorsUpdateCopy() throws SQLException {
return false;
}
public boolean supportsStatementPooling() throws SQLException {
return false;
}
public RowIdLifetime getRowIdLifetime() throws SQLException {
return null;
}
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
}
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
return false;
}
public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
return false;
}
public ResultSet getClientInfoProperties() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
String columnNamePattern) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
String columnNamePattern) throws SQLException {
return getEmptyResultSet();
}
public boolean generatedKeyAlwaysReturned() throws SQLException {
return false;
}
private ResultSet getEmptyResultSet() {
return new EmptyResultSet();
}
}
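
Putting the pieces together, a client obtains this metadata object through a normal JDBC connection; the URL, port and credentials below are placeholders for a typical native-driver setup and should be adapted to the actual deployment.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;

public class MetaDataDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; host, port, database and credentials depend on your deployment.
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        String url = "jdbc:TAOS://localhost:6030/log?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            DatabaseMetaData meta = conn.getMetaData(); // expected to be a TSDBDatabaseMetaData
            System.out.println(meta.getDatabaseProductName() + " " + meta.getDatabaseProductVersion());
            System.out.println("driver: " + meta.getDriverName());
        }
    }
}
```

Under the hood these metadata calls translate into SHOW DATABASES, SHOW TABLES and DESCRIBE statements, as the implementations above show.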

View File

@ -1,75 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
public class TSDBParameterMetaData implements ParameterMetaData {
@Override
public int getParameterCount() throws SQLException {
return 0;
}
@Override
public int isNullable(int param) throws SQLException {
return 0;
}
@Override
public boolean isSigned(int param) throws SQLException {
return false;
}
@Override
public int getPrecision(int param) throws SQLException {
return 0;
}
@Override
public int getScale(int param) throws SQLException {
return 0;
}
@Override
public int getParameterType(int param) throws SQLException {
return 0;
}
@Override
public String getParameterTypeName(int param) throws SQLException {
return null;
}
@Override
public String getParameterClassName(int param) throws SQLException {
return null;
}
@Override
public int getParameterMode(int param) throws SQLException {
return 0;
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -26,9 +26,9 @@ public class TSDBResultSetRowData {
public TSDBResultSetRowData(int colSize) {
this.setColSize(colSize);
}
public TSDBResultSetRowData() {
this.data = new ArrayList<Object>();
this.data = new ArrayList<>();
this.setColSize(0);
}
@ -39,7 +39,7 @@ public class TSDBResultSetRowData {
if (this.colSize == 0) {
return;
}
this.data = new ArrayList<Object>(colSize);
this.data = new ArrayList<>(colSize);
this.data.addAll(Collections.nCopies(this.colSize, null));
}
@ -53,7 +53,7 @@ public class TSDBResultSetRowData {
public boolean getBoolean(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return (Boolean) obj;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj) == 1.0? Boolean.TRUE:Boolean.FALSE;
@ -64,10 +64,10 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj) == 1L? Boolean.TRUE:Boolean.FALSE;
}
return Boolean.TRUE;
}
public void setByte(int col, byte value) {
data.set(col, value);
}
@ -82,20 +82,20 @@ public class TSDBResultSetRowData {
public int getInt(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return ((Double)obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_TINYINT: return (Byte) obj;
case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:return (Short) obj;
case TSDBConstants.TSDB_DATA_TYPE_INT: return (Integer) obj;
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj).intValue();
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Integer.parseInt((String) obj);
}
return 0;
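
The switch above coerces whatever the source column held (bool, numeric, or string) into an int; a tiny sketch of that behaviour using the row-data class directly, assuming setInt and setString simply store the value at the given slot, as the setByte implementation shown in this class does.

```java
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.TSDBResultSetRowData;

public class RowDataConversionDemo {
    public static void main(String[] args) throws Exception {
        TSDBResultSetRowData row = new TSDBResultSetRowData(2);
        row.setInt(0, 42);        // an INT column
        row.setString(1, "123");  // a BINARY column holding a numeric string
        // getInt() dispatches on the source type: INT passes through, BINARY/NCHAR is parsed.
        System.out.println(row.getInt(0, TSDBConstants.TSDB_DATA_TYPE_INT));    // 42
        System.out.println(row.getInt(1, TSDBConstants.TSDB_DATA_TYPE_BINARY)); // 123
    }
}
```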
}
@ -105,7 +105,7 @@ public class TSDBResultSetRowData {
public long getLong(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).longValue();
@ -116,9 +116,9 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj;
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Long.parseLong((String) obj);
}
return 0;
}
@ -128,7 +128,7 @@ public class TSDBResultSetRowData {
public float getFloat(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return (Float) obj;
@ -139,7 +139,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj;
}
return 0;
}
@ -149,7 +149,7 @@ public class TSDBResultSetRowData {
public double getDouble(int col, int srcType) throws SQLException {
Object obj = data.get(col);
switch(srcType) {
case TSDBConstants.TSDB_DATA_TYPE_BOOL: return Boolean.TRUE.equals(obj)? 1:0;
case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return (Float) obj;
@ -160,7 +160,7 @@ public class TSDBResultSetRowData {
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return (Long) obj;
}
return 0;
}
@ -180,7 +180,7 @@ public class TSDBResultSetRowData {
* The original type may not be a string type, but will be converted to by calling this method
* @param col column index
* @return
* @throws SQLException
*/
public String getString(int col, int srcType) throws SQLException {
if (srcType == TSDBConstants.TSDB_DATA_TYPE_BINARY || srcType == TSDBConstants.TSDB_DATA_TYPE_NCHAR) {

View File

@ -1,19 +0,0 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
public interface TSDBSubscribeCallBack {
void invoke(TSDBResultSet resultSet);
}

View File

@ -29,177 +29,53 @@ public class RestfulDatabaseMetaData extends AbstractDatabaseMetaData {
}
@Override
public String getSchemaTerm() throws SQLException {
return null;
public String getDriverName() throws SQLException {
return RestfulDriver.class.getName();
}
@Override
public String getProcedureTerm() throws SQLException {
return null;
}
@Override
public String getCatalogTerm() throws SQLException {
return null;
}
@Override
public boolean isCatalogAtStart() throws SQLException {
return false;
}
@Override
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException {
Statement stmt = null;
if (null != connection && !connection.isClosed()) {
stmt = connection.createStatement();
if (catalog == null || catalog.length() < 1) {
catalog = connection.getCatalog();
}
stmt.executeUpdate("use " + catalog);
ResultSet resultSet0 = stmt.executeQuery("show tables");
GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types);
return getTablesResultSet;
} else {
if (connection == null || connection.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
return super.getTables(catalog, schemaPattern, tableNamePattern, types, connection);
}
@Override
public ResultSet getCatalogs() throws SQLException {
if (connection != null && !connection.isClosed()) {
Statement stmt = connection.createStatement();
ResultSet resultSet0 = stmt.executeQuery("show databases");
CatalogResultSet resultSet = new CatalogResultSet(resultSet0);
return resultSet;
} else {
return new EmptyResultSet();
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getCatalogs(connection);
}
@Override
public ResultSet getTableTypes() throws SQLException {
if (connection == null || connection.isClosed()) {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
return super.getTableTypes();
}
@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
Statement stmt = null;
if (null != connection && !connection.isClosed()) {
stmt = connection.createStatement();
if (catalog == null || catalog.length() < 1) {
catalog = connection.getCatalog();
}
stmt.execute("use " + catalog);
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<>(24);
columnMetaDataList.add(null);
columnMetaDataList.add(null);
// add TABLE_NAME
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(3);
colMetaData.setColName("TABLE_NAME");
colMetaData.setColSize(193);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(4);
colMetaData.setColName("COLUMN_NAME");
colMetaData.setColSize(65);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add DATA_TYPE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(5);
colMetaData.setColName("DATA_TYPE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add TYPE_NAME
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(6);
colMetaData.setColName("TYPE_NAME");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// add COLUMN_SIZE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(7);
colMetaData.setColName("COLUMN_SIZE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add BUFFER_LENGTH ,not used
columnMetaDataList.add(null);
// add DECIMAL_DIGITS
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(9);
colMetaData.setColName("DECIMAL_DIGITS");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NUM_PREC_RADIX
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(10);
colMetaData.setColName("NUM_PREC_RADIX");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
// add NULLABLE
colMetaData = new ColumnMetaData();
colMetaData.setColIndex(11);
colMetaData.setColName("NULLABLE");
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT);
columnMetaDataList.add(colMetaData);
resultSet.setColumnMetaDataList(columnMetaDataList);
// set up rowDataList
ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern);
List<TSDBResultSetRowData> rowDataList = new ArrayList<>();
int index = 0;
while (resultSet0.next()) {
TSDBResultSetRowData rowData = new TSDBResultSetRowData(24);
// set TABLE_NAME
rowData.setString(2, tableNamePattern);
// set COLUMN_NAME
rowData.setString(3, resultSet0.getString(1));
// set DATA_TYPE
String typeName = resultSet0.getString(2);
rowData.setInt(4, getDataType(typeName));
// set TYPE_NAME
rowData.setString(5, typeName);
// set COLUMN_SIZE
int length = resultSet0.getInt(3);
rowData.setInt(6, getColumnSize(typeName, length));
// set DECIMAL_DIGITS
rowData.setInt(8, getDecimalDigits(typeName));
// set NUM_PREC_RADIX
rowData.setInt(9, 10);
// set NULLABLE
rowData.setInt(10, getNullable(index, typeName));
rowDataList.add(rowData);
index++;
}
resultSet.setRowDataList(rowDataList);
return resultSet;
} else {
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
return super.getColumns(catalog, schemaPattern, tableNamePattern, columnNamePattern, connection);
}
@Override
public long getMaxLogicalLobSize() throws SQLException {
return 0;
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getPrimaryKeys(catalog, schema, table, connection);
}
@Override
public boolean supportsRefCursors() throws SQLException {
return false;
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
if (connection == null || connection.isClosed())
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
return super.getSuperTables(catalog, schemaPattern, tableNamePattern, connection);
}
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return false;
}
}

View File

@ -44,6 +44,8 @@ public class RestfulDriver extends AbstractTaosDriver {
String result = HttpClientPoolUtil.execute(loginUrl);
JSONObject jsonResult = JSON.parseObject(result);
String status = jsonResult.getString("status");
String token = jsonResult.getString("desc");
HttpClientPoolUtil.token = token;
if (!status.equals("succ")) {
throw new SQLException(jsonResult.getString("desc"));
}

View File

@ -23,6 +23,7 @@ import java.nio.charset.Charset;
public class HttpClientPoolUtil {
public static PoolingHttpClientConnectionManager cm = null;
public static CloseableHttpClient httpClient = null;
public static String token = "cm9vdDp0YW9zZGF0YQ==";
/**
* Default content type
*/
@ -61,9 +62,7 @@ public class HttpClientPoolUtil {
try {
return Long.parseLong(value) * 1000;
} catch (Exception e) {
new Exception(
"format KeepAlive timeout exception, exception:" + e.toString())
.printStackTrace();
new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace();
}
}
}
@ -96,7 +95,7 @@ public class HttpClientPoolUtil {
initPools();
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setHeader("Authorization", "Basic cm9vdDp0YW9zZGF0YQ==");
method.setHeader("Authorization", "Taosd " + token);
method.setHeader("Content-Type", "text/plain");
method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
HttpContext context = HttpClientContext.create();
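
Taken together, the RestfulDriver and HttpClientPoolUtil hunks above replace the hard-coded Basic credential with the token returned by the login call (the `desc` field of the JSON reply), which is then sent on every request as `Authorization: Taosd <token>`. A rough sketch of that handshake using only the JDK, assuming taosAdapter/httpd listens on port 6041 with default credentials; the `/rest/login/{user}/{password}` and `/rest/sql` paths are assumptions drawn from the 2.0 REST API, and the JSON handling is deliberately crude:

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RestTokenSketch {

    // GET when body == null, otherwise POST the body; optionally attach an Authorization header.
    static String call(String url, String body, String authHeader) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        if (authHeader != null) conn.setRequestProperty("Authorization", authHeader);
        if (body != null) {
            conn.setRequestMethod("POST");
            conn.setDoOutput(true);
            try (OutputStream out = conn.getOutputStream()) {
                out.write(body.getBytes(StandardCharsets.UTF_8));
            }
        }
        StringBuilder sb = new StringBuilder();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            for (String line; (line = in.readLine()) != null; ) sb.append(line);
        }
        return sb.toString();
    }

    public static void main(String[] args) throws Exception {
        // 1. log in; the token comes back in the "desc" field of the JSON reply
        String login = call("http://127.0.0.1:6041/rest/login/root/taosdata", null, null);
        String token = login.replaceAll(".*\"desc\"\\s*:\\s*\"([^\"]+)\".*", "$1"); // crude extraction, fine for a sketch

        // 2. send SQL with the token, mirroring the new "Taosd <token>" header in HttpClientPoolUtil
        System.out.println(call("http://127.0.0.1:6041/rest/sql", "show databases", "Taosd " + token));
    }
}
```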

View File

@ -1,36 +0,0 @@
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBPreparedStatement;
import java.sql.*;
import java.util.Properties;
public class TestPreparedStatement {
public static void main(String[] args) throws SQLException {
Connection connection = null;
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
String rawSql = "select * from test.log0601";
// String[] params = new String[]{"ts", "c1"};
PreparedStatement pstmt = (TSDBPreparedStatement) connection.prepareStatement(rawSql);
ResultSet resSet = pstmt.executeQuery();
while(resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s \n", i, resSet.getString(i));
}
}
resSet.close();
pstmt.close();
connection.close();
} catch (Exception e) {
e.printStackTrace();
if (null != connection) {
connection.close();
}
}
}
}

View File

@ -1,33 +0,0 @@
import com.taosdata.jdbc.TSDBDriver;
import java.sql.*;
import java.util.Properties;
public class TestTSDBDatabaseMetaData {
public static void main(String[] args) throws SQLException {
Connection connection = null;
DatabaseMetaData dbMetaData = null;
ResultSet resSet = null;
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/", properties);
dbMetaData = connection.getMetaData();
resSet = dbMetaData.getCatalogs();
while(resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf("dbMetaData.getCatalogs(%d) = %s\n", i, resSet.getString(i));
}
}
resSet.close();
} catch (Exception e) {
e.printStackTrace();
if (null != connection) {
connection.close();
}
}
}
}

View File

@ -13,17 +13,17 @@ import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class BatchInsertTest extends BaseTest {
public class BatchInsertTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000l;
final static String tablePrefix = "t";
private Connection connection;
private static String dbName = "test";
private static String stbName = "meters";
private static String host = "127.0.0.1";
private static int numOfTables = 30;
private static int numOfRecordsPerTable = 1000;
private static long ts = 1496732686000l;
private static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
@ -32,65 +32,58 @@ public class BatchInsertTest extends BaseTest {
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
statement.executeUpdate(createTableSql);
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName);
stmt.execute("use " + dbName);
for(int i = 0; i < numOfTables; i++) {
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
stmt.execute(createTableSql);
for (int i = 0; i < numOfTables; i++) {
String loc = i % 2 == 0 ? "beijing" : "shanghai";
String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
statement.executeUpdate(createSubTalbesSql);
String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
stmt.execute(createSubTalbesSql);
}
stmt.close();
}
@Test
public void testBatchInsert() throws SQLException{
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(new Runnable() {
@Override
public void run() {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " milliseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
public void testBatchInsert() throws SQLException {
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(() -> {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " milliseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
}
});
}
executorService.shutdown();
try {
@ -100,21 +93,23 @@ public class BatchInsertTest extends BaseTest {
}
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from meters");
ResultSet rs = statement.executeQuery("select * from meters");
int num = 0;
while (rs.next()) {
num++;
}
assertEquals(num, numOfTables * numOfRecordsPerTable);
}
assertEquals(num, numOfTables * numOfRecordsPerTable);
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
public void close() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
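
One note on the reworked test: the hunk stops right after `executorService.shutdown()`, so the wait for the pool to drain before the row count is not shown. A generic sketch of that wait (a hypothetical helper, not necessarily the lines the diff omits):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class PoolUtils {
    // Block until every submitted insert task has finished, or give up after the timeout.
    static void shutdownAndWait(ExecutorService pool, long minutes) {
        pool.shutdown();
        try {
            pool.awaitTermination(minutes, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
```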

View File

@ -1,5 +1,6 @@
package com.taosdata.jdbc;
import org.junit.Assert;
import org.junit.Test;
import java.sql.Connection;
@ -8,53 +9,31 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import static org.junit.Assert.assertTrue;
public class ConnectionTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "st";
static String host = "localhost";
public class ConnectionTest {
private Connection connection;
private Statement statement;
private static String host = "127.0.0.1";
@Test
public void testConnection() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
public void testConnection() {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
assertTrue(null != connection);
statement = connection.createStatement();
assertTrue(null != statement);
// try reconnect
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
try {
statement.execute("create database if not exists " + dbName);
Class.forName("com.taosdata.jdbc.TSDBDriver");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Assert.assertTrue(null != connection);
statement = connection.createStatement();
Assert.assertTrue(null != statement);
statement.close();
connection.close();
} catch (ClassNotFoundException e) {
return;
} catch (SQLException e) {
assert false : "create database error: " + e.getMessage();
}
try {
if (!connection.isClosed()) {
if (!statement.isClosed()) {
statement.executeUpdate("drop database " + dbName);
statement.close();
}
connection.close();
Thread.sleep(10);
}
} catch (Exception e) {
assert false : "close connection error: " + e.getMessage();
e.printStackTrace();
}
}
}

View File

@ -1,92 +1,112 @@
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.*;
import org.junit.runners.MethodSorters;
import java.sql.*;
import java.util.Properties;
import static org.junit.Assert.assertEquals;
public class ImportTest extends BaseTest {
Connection connection = null;
Statement statement = null;
String dbName = "test";
String tName = "t0";
String host = "localhost";
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ImportTest {
private static Connection connection;
static String dbName = "test";
static String tName = "t0";
static String host = "127.0.0.1";
private static long ts;
@Before
public void createDatabase() throws SQLException {
@BeforeClass
public static void before() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement stmt = connection.createStatement();
stmt.execute("create database if not exists " + dbName);
stmt.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
stmt.close();
ts = System.currentTimeMillis();
} catch (ClassNotFoundException e) {
return;
e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
public void insertData() throws Exception {
long ts = 1496732686000l;
for (int i = 0; i < 50; i++) {
ts++;
int row = statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
}
@Test
public void selectData() throws Exception {
insertData();
String sql = "select * from test.t0";
ResultSet resSet = statement.executeQuery(sql);
while (resSet.next()) {
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
System.out.printf(i + ": " + resSet.getString(i) + "\t");
public void case001_insertData() throws Exception {
try (Statement stmt = connection.createStatement()) {
for (int i = 0; i < 50; i++) {
ts++;
int row = stmt.executeUpdate("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
}
resSet.close();
}
@Test
public void importData() throws Exception {
// avoid duplicate timestamps
long ts = 1496732686000l;
StringBuilder sqlBuilder = new StringBuilder("insert into ").append(dbName).append(".").append(tName).append(" values ");
for (int i = 0; i < 50; i++) {
int a = i / 5;
long t = ts + a;
sqlBuilder.append("(").append(t).append(",").append((100 + i)).append(",").append(i).append(") ");
}
System.out.println(sqlBuilder.toString());
int rows = statement.executeUpdate(sqlBuilder.toString());
System.out.println(rows);
assertEquals(10, rows);
public void case002_checkSum() {
Assert.assertEquals(50, select());
}
@After
public void close() throws Exception {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
private int select() {
int count = 0;
try (Statement stmt = connection.createStatement()) {
String sql = "select * from " + dbName + "." + tName;
ResultSet rs = stmt.executeQuery(sql);
while (rs.next()) {
count++;
}
rs.close();
} catch (SQLException e) {
e.printStackTrace();
}
return count;
}
@Test
public void case003_importData() {
// avoid duplicate timestamps
try (Statement stmt = connection.createStatement()) {
StringBuilder sqlBuilder = new StringBuilder("import into ").append(dbName).append(".").append(tName).append(" values ");
for (int i = 0; i < 50; i++) {
int a = i / 5;
long t = (++ts) + a;
sqlBuilder.append("(").append(t).append(",").append((100 + i)).append(",").append(i).append(") ");
}
System.out.println(sqlBuilder.toString());
int rows = stmt.executeUpdate(sqlBuilder.toString());
assertEquals(50, rows);
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
public void case004_checkSum() {
Assert.assertEquals(100, select());
}
@AfterClass
public static void close() {
try {
if (connection != null) {
Statement statement = connection.createStatement();
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}
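
The rewritten cases write with `import into` instead of `insert into`; in the 2.x SQL dialect `import` is the form traditionally used for back-filling rows whose timestamps lie in the past. A hedged sketch of doing the same outside the test, reusing the test's `test.t0` schema — the three value rows are made up:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ImportSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://127.0.0.1:0/?user=root&password=taosdata");
             Statement stmt = conn.createStatement()) {
            stmt.execute("create database if not exists test");
            stmt.execute("create table if not exists test.t0 (ts timestamp, k int, v int)");
            long now = System.currentTimeMillis();
            // back-fill three rows whose timestamps are older than "now"
            int rows = stmt.executeUpdate("import into test.t0 values"
                    + " (" + (now - 3000) + ", 101, 1)"
                    + " (" + (now - 2000) + ", 102, 2)"
                    + " (" + (now - 1000) + ", 103, 3)");
            System.out.println("imported rows: " + rows);
        }
    }
}
```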

View File

@ -4,7 +4,6 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
import java.sql.*;
import java.util.Properties;
@ -12,7 +11,7 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@FixMethodOrder(MethodSorters.DEFAULT)
@FixMethodOrder()
public class PreparedStatementTest extends BaseTest {
static Connection connection = null;
static PreparedStatement statement = null;

View File

@ -9,61 +9,73 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
public class SelectTest extends BaseTest {
Connection connection = null;
Statement statement = null;
public class SelectTest {
Connection connection;
String dbName = "test";
String tName = "t0";
String host = "localhost";
String host = "127.0.0.1";
@Before
public void createDatabaseAndTable() throws SQLException {
public void createDatabaseAndTable() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement stmt = connection.createStatement();
stmt.execute("drop database if exists " + dbName);
stmt.execute("create database if not exists " + dbName);
stmt.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
stmt.close();
} catch (ClassNotFoundException e) {
return;
} catch (SQLException e) {
e.printStackTrace();
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
public void selectData() throws SQLException {
public void selectData() {
long ts = 1496732686000l;
for (int i = 0; i < 50; i++) {
ts++;
int row = statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
try (Statement stmt = connection.createStatement()) {
for (int i = 0; i < 50; i++) {
ts++;
int row = stmt.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")");
System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row);
assertEquals(1, row);
}
String sql = "select * from " + dbName + "." + tName;
ResultSet resSet = stmt.executeQuery(sql);
int num = 0;
while (resSet.next()) {
num++;
}
resSet.close();
assertEquals(num, 50);
} catch (SQLException e) {
e.printStackTrace();
}
String sql = "select * from " + dbName + "." + tName;
ResultSet resSet = statement.executeQuery(sql);
int num = 0;
while (resSet.next()) {
num++;
}
resSet.close();
assertEquals(num, 50);
}
@After
public void close() throws Exception {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
public void close() {
try {
if (connection != null) {
Statement stmt = connection.createStatement();
stmt.executeUpdate("drop database " + dbName);
stmt.close();
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -12,69 +12,66 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class StableTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "st";
static String host = "localhost";
public class StableTest {
private static Connection connection;
private static String dbName = "test";
private static String stbName = "st";
private static String host = "127.0.0.1";
@BeforeClass
public static void createDatabase() throws SQLException {
public static void createDatabase() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
Statement statement = connection.createStatement();
statement.execute("create database if not exists " + dbName);
statement.execute("use " + dbName);
statement.close();
} catch (ClassNotFoundException e) {
return;
} catch (SQLException e) {
e.printStackTrace();
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName);
statement.executeQuery("use " + dbName);
}
// @Test
public void createStable() {
String sql = "create table " + stbName + " (ts timestamp, v1 int, v2 int) tags (tg nchar(20)) ";
try {
statement.executeUpdate(sql);
@Test
public void case001_createSuperTable() {
try (Statement stmt = connection.createStatement()) {
final String sql = "create table " + stbName + " (ts timestamp, v1 int, v2 int) tags (tg nchar(20)) ";
stmt.execute(sql);
} catch (SQLException e) {
assert false : "error create stable" + e.getMessage();
}
}
// @Test
public void createTable() {
String sql = "create table t1 using " + stbName + " tags (\"beijing\")";
try {
statement.executeUpdate(sql);
@Test
public void case002_createTable() {
try (Statement stmt = connection.createStatement()) {
final String sql = "create table t1 using " + stbName + " tags (\"beijing\")";
stmt.execute(sql);
} catch (SQLException e) {
assert false : "error create table" + e.getMessage();
}
}
@Test
public void describeSTable() {
createStable();
String sql = "describe " + stbName;
public void case003_describeSTable() {
int num = 0;
System.out.println("describe stable");
try {
ResultSet res = statement.executeQuery(sql);
while (res.next()) {
for (int i = 1; i <= res.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, res.getString(i));
try (Statement stmt = connection.createStatement()) {
String sql = "describe " + stbName;
ResultSet rs = stmt.executeQuery(sql);
while (rs.next()) {
for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
System.out.println(i + ":" + rs.getString(i));
}
num++;
}
res.close();
rs.close();
assertEquals(4, num);
} catch (SQLException e) {
assert false : "error describe stable" + e.getMessage();
@ -82,41 +79,31 @@ public class StableTest extends BaseTest {
}
@Test
public void describeTable() {
createTable();
String sql = "describe t1";
public void case004_describeTable() {
int num = 0;
System.out.println("describe table");
try {
ResultSet res = statement.executeQuery(sql);
while (res.next()) {
for (int i = 1; i <= res.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, res.getString(i));
try (Statement stmt = connection.createStatement()) {
ResultSet rs = stmt.executeQuery("describe t1");
while (rs.next()) {
for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) {
System.out.printf("%d: %s\n", i, rs.getString(i));
}
num++;
}
res.close();
rs.close();
assertEquals(4, num);
} catch (SQLException e) {
assert false : "error describe stable" + e.getMessage();
}
}
// @Test
public void validCreateSql() {
String sql = "create table t2 using " + stbName + " tags (\"beijing\")";
boolean valid = ((TSDBConnection) connection).getConnection().validateCreateTableSql(sql);
assertEquals(true, valid);
}
@AfterClass
public static void close() throws Exception {
if (!statement.isClosed()) {
statement.executeUpdate("drop database " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
public static void close() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -10,7 +10,7 @@ import java.util.Properties;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class StatementTest extends BaseTest {
public class StatementTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
@ -26,14 +26,29 @@ public class StatementTest extends BaseTest {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
}
@Test
public void testCase() {
try {
ResultSet rs = statement.executeQuery("show databases");
ResultSetMetaData metaData = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= metaData.getColumnCount(); i++) {
System.out.print(metaData.getColumnLabel(i) + ":" + rs.getString(i) + "\t");
}
System.out.println();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
@ -54,9 +69,6 @@ public class StatementTest extends BaseTest {
@Test
public void testUnsupport() {
// if(null == resSet) {
// return;
// }
TSDBStatement tsdbStatement = (TSDBStatement) statement;
try {
tsdbStatement.unwrap(null);
@ -163,11 +175,10 @@ public class StatementTest extends BaseTest {
@AfterClass
public static void close() throws Exception {
if (!statement.isClosed()) {
statement.executeUpdate("drop database " + dbName);
statement.executeUpdate("drop database if exists " + dbName);
statement.close();
connection.close();
Thread.sleep(10);
}
}
}

View File

@ -1,28 +1,41 @@
package com.taosdata.jdbc;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.*;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.*;
import java.util.Properties;
public class TSDBDatabaseMetaDataTest {
private TSDBDatabaseMetaData metaData;
private static final String host = "localhost";
private static final String host = "127.0.0.1";
private Connection connection;
@Before
public void before() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
metaData = (TSDBDatabaseMetaData) DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties).getMetaData();
@BeforeClass
public void before() {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", properties);
metaData = connection.getMetaData().unwrap(TSDBDatabaseMetaData.class);
} catch (ClassNotFoundException e) {
e.printStackTrace();
} catch (SQLException e) {
e.printStackTrace();
}
}
@AfterClass
public void after() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@Test
@ -642,7 +655,17 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getTables() throws SQLException {
Assert.assertNull(metaData.getTables("", "", "*", null));
System.out.println("****************************************************");
ResultSet tables = metaData.getTables("log", "", null, null);
ResultSetMetaData metaData = tables.getMetaData();
while (tables.next()) {
System.out.print(metaData.getColumnLabel(1) + ":" + tables.getString(1) + "\t");
System.out.print(metaData.getColumnLabel(3) + ":" + tables.getString(3) + "\t");
System.out.print(metaData.getColumnLabel(4) + ":" + tables.getString(4) + "\t");
System.out.print(metaData.getColumnLabel(5) + ":" + tables.getString(5) + "\n");
}
System.out.println();
Assert.assertNotNull(tables);
}
@Test
@ -652,17 +675,47 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getCatalogs() throws SQLException {
Assert.assertNotNull(metaData.getCatalogs());
System.out.println("****************************************************");
ResultSet catalogs = metaData.getCatalogs();
ResultSetMetaData meta = catalogs.getMetaData();
while (catalogs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + catalogs.getString(i));
}
System.out.println();
}
}
@Test
public void getTableTypes() throws SQLException {
System.out.println("****************************************************");
ResultSet tableTypes = metaData.getTableTypes();
while (tableTypes.next()) {
System.out.println(tableTypes.getString("TABLE_TYPE"));
}
Assert.assertNotNull(metaData.getTableTypes());
}
@Test
public void getColumns() throws SQLException {
Assert.assertNotNull(metaData.getColumns("", "", "", ""));
System.out.println("****************************************************");
ResultSet columns = metaData.getColumns("log", "", "dn", "");
ResultSetMetaData meta = columns.getMetaData();
while (columns.next()) {
System.out.print(meta.getColumnLabel(1) + ": " + columns.getString(1) + "\t");
System.out.print(meta.getColumnLabel(3) + ": " + columns.getString(3) + "\t");
System.out.print(meta.getColumnLabel(4) + ": " + columns.getString(4) + "\t");
System.out.print(meta.getColumnLabel(5) + ": " + columns.getString(5) + "\t");
System.out.print(meta.getColumnLabel(6) + ": " + columns.getString(6) + "\t");
System.out.print(meta.getColumnLabel(7) + ": " + columns.getString(7) + "\t");
System.out.print(meta.getColumnLabel(9) + ": " + columns.getString(9) + "\t");
System.out.print(meta.getColumnLabel(10) + ": " + columns.getString(10) + "\t");
System.out.print(meta.getColumnLabel(11) + ": " + columns.getString(11) + "\n");
System.out.print(meta.getColumnLabel(12) + ": " + columns.getString(12) + "\n");
}
}
@Test
@ -687,7 +740,17 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getPrimaryKeys() throws SQLException {
Assert.assertNotNull(metaData.getPrimaryKeys("", "", ""));
System.out.println("****************************************************");
ResultSet rs = metaData.getPrimaryKeys("log", "", "dn1");
while (rs.next()) {
System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
System.out.println("COLUMN_NAME: " + rs.getString("COLUMN_NAME"));
System.out.println("KEY_SEQ: " + rs.getString("KEY_SEQ"));
System.out.println("PK_NAME: " + rs.getString("PK_NAME"));
}
Assert.assertNotNull(rs);
}
@Test
@ -812,7 +875,14 @@ public class TSDBDatabaseMetaDataTest {
@Test
public void getSuperTables() throws SQLException {
Assert.assertNotNull(metaData.getSuperTables("", "", ""));
System.out.println("****************************************************");
ResultSet rs = metaData.getSuperTables("log", "", "dn1");
while (rs.next()) {
System.out.println("TABLE_NAME: " + rs.getString("TABLE_NAME"));
System.out.println("SUPERTABLE_NAME: " + rs.getString("SUPERTABLE_NAME"));
}
Assert.assertNotNull(rs);
}
@Test
@ -871,11 +941,6 @@ public class TSDBDatabaseMetaDataTest {
Assert.assertNull(metaData.getRowIdLifetime());
}
@Test
public void testGetSchemas() throws SQLException {
Assert.assertNull(metaData.getSchemas());
}
@Test
public void supportsStoredFunctionsUsingCallSyntax() throws SQLException {
Assert.assertFalse(metaData.supportsStoredFunctionsUsingCallSyntax());

View File

@ -0,0 +1,65 @@
package com.taosdata.jdbc.rs;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
public class AuthenticationTest {
private static final String host = "127.0.0.1";
private static final String user = "root";
private static final String password = "123456";
private Connection conn;
@Test
public void test() {
// change password
try {
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=taosdata");
Statement stmt = conn.createStatement();
stmt.execute("alter user " + user + " pass '" + password + "'");
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
// use new to login and execute query
try {
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=" + password);
Statement stmt = conn.createStatement();
stmt.execute("show databases");
ResultSet rs = stmt.getResultSet();
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ":" + rs.getString(i) + "\t");
}
System.out.println();
}
} catch (SQLException e) {
e.printStackTrace();
}
// change password back
try {
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=" + user + "&password=" + password);
Statement stmt = conn.createStatement();
stmt.execute("alter user " + user + " pass 'taosdata'");
stmt.close();
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
@Before
public void before() {
try {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
}
}

View File

@ -6,6 +6,7 @@ import org.junit.Test;
import java.sql.*;
public class RestfulDriverTest {
private static final String host = "127.0.0.1";
@Test
public void connect() {
@ -15,9 +16,9 @@ public class RestfulDriverTest {
@Test
public void acceptsURL() throws SQLException {
Driver driver = new RestfulDriver();
boolean isAccept = driver.acceptsURL("jdbc:TAOS-RS://master:6041");
boolean isAccept = driver.acceptsURL("jdbc:TAOS-RS://" + host + ":6041");
Assert.assertTrue(isAccept);
isAccept = driver.acceptsURL("jdbc:TAOS://master:6041");
isAccept = driver.acceptsURL("jdbc:TAOS://" + host + ":6041");
Assert.assertFalse(isAccept);
}
@ -26,6 +27,9 @@ public class RestfulDriverTest {
Driver driver = new RestfulDriver();
final String url = "";
DriverPropertyInfo[] propertyInfo = driver.getPropertyInfo(url, null);
for (DriverPropertyInfo prop : propertyInfo) {
System.out.println(prop);
}
}
@Test

View File

@ -1,6 +1,5 @@
package com.taosdata.jdbc.rs;
import org.junit.*;
import org.junit.runners.MethodSorters;
@ -10,12 +9,13 @@ import java.util.Random;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class RestfulJDBCTest {
private static final String host = "127.0.0.1";
private Connection connection;
@Before
public void before() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
connection = DriverManager.getConnection("jdbc:TAOS-RS://master:6041/restful_test?user=root&password=taosdata");
connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
}
@After

View File

@ -10,7 +10,7 @@ import java.sql.*;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class SQLTest {
private static final String host = "master";
private static final String host = "127.0.0.1";
private static Connection connection;

View File

@ -21,4 +21,5 @@ public class SqlSyntaxValidatorTest {
Assert.assertTrue(SqlSyntaxValidator.isUseSql("drop database test"));
Assert.assertTrue(SqlSyntaxValidator.isUseSql("drop database if exist test"));
}
}

View File

@ -0,0 +1,67 @@
package com.taosdata.jdbc.utils;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class TimeStampUtil {
private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS";
public static long datetimeToLong(String dateTime) {
SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
try {
return sdf.parse(dateTime).getTime();
} catch (ParseException e) {
throw new IllegalArgumentException("invalid datetime string >>> " + dateTime);
}
}
public static String longToDatetime(long time) {
SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat);
return sdf.format(new Date(time));
}
public static class TimeTuple {
public Long start;
public Long end;
public Long timeGap;
TimeTuple(long start, long end, long timeGap) {
this.start = start;
this.end = end;
this.timeGap = timeGap;
}
}
public static TimeTuple range(long start, long timeGap, long size) {
long now = System.currentTimeMillis();
if (timeGap < 1)
timeGap = 1;
if (start == 0)
start = now - size * timeGap;
// throw if size is less than 1
if (size < 1)
throw new IllegalArgumentException("size less than 1.");
// if the span is too long even with timeGap == 1, move start forward
if (start + size > now) {
start = now - size;
return new TimeTuple(start, now, 1);
}
long end = start + (long) (timeGap * size);
if (end > now) {
// compress timeGap
end = now;
double gap = (end - start) / (size * 1.0f);
if (gap < 1.0f) {
timeGap = 1;
start = end - size;
} else {
timeGap = (long) gap;
end = start + (long) (timeGap * size);
}
}
return new TimeTuple(start, end, timeGap);
}
}
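
`range()` above clamps a generated time window so that it never runs past the current moment, shrinking the gap (down to 1 ms) when the requested span would overshoot. A small usage sketch — only the demo class name is invented:

```java
import com.taosdata.jdbc.utils.TimeStampUtil;

public class TimeStampUtilDemo {
    public static void main(String[] args) {
        // 1000 points, 100 ms apart, ending no later than "now";
        // start == 0 tells range() to derive the start from the current time
        TimeStampUtil.TimeTuple t = TimeStampUtil.range(0, 100, 1000);
        System.out.println("start   = " + TimeStampUtil.longToDatetime(t.start));
        System.out.println("end     = " + TimeStampUtil.longToDatetime(t.end));
        System.out.println("timeGap = " + t.timeGap + " ms");
    }
}
```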

View File

@ -3,15 +3,15 @@
* @module CTaosInterface
*/
const ref = require('ref');
const ref = require('ref-napi');
const os = require('os');
const ffi = require('ffi');
const ArrayType = require('ref-array');
const Struct = require('ref-struct');
const ffi = require('ffi-napi');
const ArrayType = require('ref-array-napi');
const Struct = require('ref-struct-napi');
const FieldTypes = require('./constants');
const errors = require ('./error');
const TaosObjects = require('./taosobjects');
const { NULL_POINTER } = require('ref');
const { NULL_POINTER } = require('ref-napi');
module.exports = CTaosInterface;

View File

@ -1,4 +1,4 @@
const ref = require('ref');
const ref = require('ref-napi');
require('./globalfunc.js')
const CTaosInterface = require('./cinterface')
const errors = require ('./error')

File diff suppressed because it is too large

View File

@ -1,31 +1,34 @@
{
"name": "td2.0-connector",
"version": "2.0.5",
"version": "2.0.6",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"directories": {
"test": "test"
},
"scripts": {
"test": "node test/test.js"
},
"repository": {
"type": "git",
"url": "git+https://github.com/taosdata/tdengine.git"
},
"keywords": [
"TDengine",
"TAOS Data",
"Time Series Database",
"Connector"
],
"author": "StoneT2000",
"license": "AGPL-3.0",
"dependencies": {
"ffi": "^2.3.0",
"node-gyp": "^5.0.2",
"ref": "^1.3.5",
"ref-array": "^1.2.0"
},
"homepage": "https://github.com/taosdata/tdengine",
"author": "TaosData Inc.",
"license": "AGPL-3.0-or-later",
"bugs": {
"url": "https://github.com/taosdata/tdengine/issues"
},
"repository": {
"type": "git",
"url": "https://github.com/taosdata/tdengine.git"
"homepage": "https://github.com/taosdata/tdengine#readme",
"dependencies": {
"ffi-napi": "^3.1.0",
"ref-array-napi": "^1.2.1",
"ref-napi": "^1.5.2",
"ref-struct-napi": "^1.1.1"
}
}

View File

@ -298,7 +298,7 @@ int32_t dnodeInitTelemetry() {
int32_t code = pthread_create(&tsTelemetryThread, &attr, telemetryThread, NULL);
pthread_attr_destroy(&attr);
if (code != 0) {
dTrace("failed to create telemetry thread, reason:%s", strerror(errno));
dTrace("failed to create telemetry thread, reason:%s", strerror(code));
}
dInfo("dnode telemetry is initialized");

View File

@ -268,8 +268,7 @@ typedef struct {
typedef struct {
int32_t len; // one create table message
char tableFname[TSDB_TABLE_FNAME_LEN];
char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN];
char tableName[TSDB_TABLE_FNAME_LEN];
int8_t igExists;
int8_t getMeta;
int16_t numOfTags;
@ -285,7 +284,7 @@ typedef struct {
} SCMCreateTableMsg;
typedef struct {
char tableFname[TSDB_TABLE_FNAME_LEN];
char name[TSDB_TABLE_FNAME_LEN];
int8_t igNotExists;
} SCMDropTableMsg;

View File

@ -227,9 +227,6 @@
#define TK_SPACE 300
#define TK_COMMENT 301
#define TK_ILLEGAL 302

View File

@ -4507,6 +4507,7 @@ void setParaFromArg(){
strncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
g_Dbs.threadCount = g_args.num_of_threads;
g_Dbs.threadCountByCreateTbl = 1;
g_Dbs.queryMode = g_args.mode;
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL;

View File

@ -32,7 +32,7 @@ int32_t mnodeInitDbs();
void mnodeCleanupDbs();
int64_t mnodeGetDbNum();
SDbObj *mnodeGetDb(char *db);
SDbObj *mnodeGetDbByTableId(char *db);
SDbObj *mnodeGetDbByTableName(char *db);
void * mnodeGetNextDb(void *pIter, SDbObj **pDb);
void mnodeCancelGetNextDb(void *pIter);
void mnodeIncDbRef(SDbObj *pDb);

View File

@ -199,18 +199,13 @@ void mnodeDecDbRef(SDbObj *pDb) {
return sdbDecRef(tsDbSdb, pDb);
}
SDbObj *mnodeGetDbByTableId(char *tableId) {
char db[TSDB_TABLE_FNAME_LEN], *pos;
// tableId format should be : acct.db.table
pos = strstr(tableId, TS_PATH_DELIMITER);
assert(NULL != pos);
SDbObj *mnodeGetDbByTableName(char *tableName) {
SName name = {0};
tNameFromString(&name, tableName, T_NAME_ACCT|T_NAME_DB|T_NAME_TABLE);
pos = strstr(pos + 1, TS_PATH_DELIMITER);
assert(NULL != pos);
memset(db, 0, sizeof(db));
strncpy(db, tableId, pos - tableId);
// validate the tableName?
char db[TSDB_TABLE_FNAME_LEN] = {0};
tNameGetFullDbName(&name, db);
return mnodeGetDb(db);
}

View File

@ -50,6 +50,9 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) {
if (!sdbIsMaster()) {
SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
if (!epSet) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
mnodeGetMnodeEpSetForShell(epSet, true);
rpcRsp->rsp = epSet;
rpcRsp->len = sizeof(SRpcEpSet);

View File

@ -297,7 +297,7 @@ static int32_t mnodeChildTableActionRestored() {
pIter = mnodeGetNextChildTable(pIter, &pTable);
if (pTable == NULL) break;
SDbObj *pDb = mnodeGetDbByTableId(pTable->info.tableId);
SDbObj *pDb = mnodeGetDbByTableName(pTable->info.tableId);
if (pDb == NULL || pDb->status != TSDB_DB_STATUS_READY) {
mError("ctable:%s, failed to get db or db in dropping, discard it", pTable->info.tableId);
SSdbRow desc = {.type = SDB_OPER_LOCAL, .pObj = pTable, .pTable = tsChildTableSdb};
@ -443,7 +443,7 @@ static int32_t mnodeSuperTableActionDestroy(SSdbRow *pRow) {
static int32_t mnodeSuperTableActionInsert(SSdbRow *pRow) {
SSTableObj *pStable = pRow->pObj;
SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId);
SDbObj *pDb = mnodeGetDbByTableName(pStable->info.tableId);
if (pDb != NULL && pDb->status == TSDB_DB_STATUS_READY) {
mnodeAddSuperTableIntoDb(pDb);
}
@ -455,7 +455,7 @@ static int32_t mnodeSuperTableActionInsert(SSdbRow *pRow) {
static int32_t mnodeSuperTableActionDelete(SSdbRow *pRow) {
SSTableObj *pStable = pRow->pObj;
SDbObj *pDb = mnodeGetDbByTableId(pStable->info.tableId);
SDbObj *pDb = mnodeGetDbByTableName(pStable->info.tableId);
if (pDb != NULL) {
mnodeRemoveSuperTableFromDb(pDb);
mnodeDropAllChildTablesInStable((SSTableObj *)pStable);
@ -748,9 +748,12 @@ void mnodeDestroySubMsg(SMnodeMsg *pSubMsg) {
}
static int32_t mnodeValidateCreateTableMsg(SCreateTableMsg *pCreateTable, SMnodeMsg *pMsg) {
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreateTable->db);
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableFname);
pMsg->pDb = mnodeGetDbByTableName(pCreateTable->tableName);
}
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableName);
return TSDB_CODE_MND_DB_NOT_SELECTED;
}
@ -759,28 +762,28 @@ static int32_t mnodeValidateCreateTableMsg(SCreateTableMsg *pCreateTable, SMnode
return TSDB_CODE_MND_DB_IN_DROPPING;
}
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreateTable->tableFname);
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreateTable->tableName);
if (pMsg->pTable != NULL && pMsg->retry == 0) {
if (pCreateTable->getMeta) {
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableFname);
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableName);
return mnodeGetChildTableMeta(pMsg);
} else if (pCreateTable->igExists) {
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableFname);
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreateTable->tableName);
return TSDB_CODE_SUCCESS;
} else {
mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle,
pCreateTable->tableFname);
pCreateTable->tableName);
return TSDB_CODE_MND_TABLE_ALREADY_EXIST;
}
}
if (pCreateTable->numOfTags != 0) {
mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
pCreateTable->tableFname, pMsg->rpcMsg.handle);
pCreateTable->tableName, pMsg->rpcMsg.handle);
return mnodeProcessCreateSuperTableMsg(pMsg);
} else {
mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
pCreateTable->tableFname, pMsg->rpcMsg.handle);
pCreateTable->tableName, pMsg->rpcMsg.handle);
return mnodeProcessCreateChildTableMsg(pMsg);
}
}
@ -860,9 +863,12 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
}
SCreateTableMsg *p = (SCreateTableMsg*)((char*) pCreate + sizeof(SCMCreateTableMsg));
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(p->db);
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
pMsg->pDb = mnodeGetDbByTableName(p->tableName);
}
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, p->tableName);
return TSDB_CODE_MND_DB_NOT_SELECTED;
}
@ -871,37 +877,37 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_DB_IN_DROPPING;
}
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(p->tableFname);
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(p->tableName);
if (pMsg->pTable != NULL && pMsg->retry == 0) {
if (p->getMeta) {
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, p->tableName);
return mnodeGetChildTableMeta(pMsg);
} else if (p->igExists) {
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableName);
return TSDB_CODE_SUCCESS;
} else {
mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableFname);
mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableName);
return TSDB_CODE_MND_TABLE_ALREADY_EXIST;
}
}
if (p->numOfTags != 0) {
mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
p->tableFname, pMsg->rpcMsg.handle);
p->tableName, pMsg->rpcMsg.handle);
return mnodeProcessCreateSuperTableMsg(pMsg);
} else {
mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
p->tableFname, pMsg->rpcMsg.handle);
p->tableName, pMsg->rpcMsg.handle);
return mnodeProcessCreateChildTableMsg(pMsg);
}
}
static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
SCMDropTableMsg *pDrop = pMsg->rpcMsg.pCont;
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pDrop->tableFname);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableName(pDrop->name);
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to drop table, db not selected or db in dropping", pMsg,
pMsg->rpcMsg.ahandle, pDrop->tableFname);
pMsg->rpcMsg.ahandle, pDrop->name);
return TSDB_CODE_MND_DB_NOT_SELECTED;
}
@ -912,17 +918,17 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
if (mnodeCheckIsMonitorDB(pMsg->pDb->name, tsMonitorDbName)) {
mError("msg:%p, app:%p table:%s, failed to drop table, in monitor database", pMsg, pMsg->rpcMsg.ahandle,
pDrop->tableFname);
pDrop->name);
return TSDB_CODE_MND_MONITOR_DB_FORBIDDEN;
}
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->tableFname);
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pDrop->name);
if (pMsg->pTable == NULL) {
if (pDrop->igNotExists) {
mDebug("msg:%p, app:%p table:%s is not exist, treat as success", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableFname);
mDebug("msg:%p, app:%p table:%s is not exist, treat as success", pMsg, pMsg->rpcMsg.ahandle, pDrop->name);
return TSDB_CODE_SUCCESS;
} else {
mError("msg:%p, app:%p table:%s, failed to drop, table not exist", pMsg, pMsg->rpcMsg.ahandle, pDrop->tableFname);
mError("msg:%p, app:%p table:%s, failed to drop, table not exist", pMsg, pMsg->rpcMsg.ahandle, pDrop->name);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
}
@ -930,12 +936,12 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
if (pMsg->pTable->type == TSDB_SUPER_TABLE) {
SSTableObj *pSTable = (SSTableObj *)pMsg->pTable;
mInfo("msg:%p, app:%p table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d", pMsg,
pMsg->rpcMsg.ahandle, pDrop->tableFname, pSTable->uid, pSTable->numOfTables, taosHashGetSize(pSTable->vgHash));
pMsg->rpcMsg.ahandle, pDrop->name, pSTable->uid, pSTable->numOfTables, taosHashGetSize(pSTable->vgHash));
return mnodeProcessDropSuperTableMsg(pMsg);
} else {
SCTableObj *pCTable = (SCTableObj *)pMsg->pTable;
mInfo("msg:%p, app:%p table:%s, start to drop ctable, vgId:%d tid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle,
pDrop->tableFname, pCTable->vgId, pCTable->tid, pCTable->uid);
pDrop->name, pCTable->vgId, pCTable->tid, pCTable->uid);
return mnodeProcessDropChildTableMsg(pMsg);
}
}
@ -946,7 +952,7 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
mDebug("msg:%p, app:%p table:%s, table meta msg is received from thandle:%p, createFlag:%d", pMsg, pMsg->rpcMsg.ahandle,
pInfo->tableFname, pMsg->rpcMsg.handle, pInfo->createFlag);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pInfo->tableFname);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableName(pInfo->tableFname);
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to get table meta, db not selected", pMsg, pMsg->rpcMsg.ahandle,
pInfo->tableFname);
@ -1006,12 +1012,12 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
SSTableObj * pStable = calloc(1, sizeof(SSTableObj));
if (pStable == NULL) {
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
int64_t us = taosGetTimestampUs();
pStable->info.tableId = strdup(pCreate->tableFname);
pStable->info.tableId = strdup(pCreate->tableName);
pStable->info.type = TSDB_SUPER_TABLE;
pStable->createdTime = taosGetTimestampMs();
pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul));
@ -1025,14 +1031,14 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
pStable->schema = (SSchema *)calloc(1, schemaSize);
if (pStable->schema == NULL) {
free(pStable);
mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) {
mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
@ -1044,8 +1050,8 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
tschema[col].bytes = htons(tschema[col].bytes);
}
if (!isValidSchema(pStable->schema, pStable->numOfColumns, pStable->numOfTags)) {
mError("msg:%p, app:%p table:%s, failed to create table, invalid schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
if (!tIsValidSchema(pStable->schema, pStable->numOfColumns, pStable->numOfTags)) {
mError("msg:%p, app:%p table:%s, failed to create table, invalid schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG;
}
@ -1065,7 +1071,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
mnodeDestroySuperTable(pStable);
pMsg->pTable = NULL;
mError("msg:%p, app:%p table:%s, failed to create, sdb error", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
mError("msg:%p, app:%p table:%s, failed to create, sdb error", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
}
return code;
@ -1907,12 +1913,12 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
SCTableObj *pTable = calloc(1, sizeof(SCTableObj));
if (pTable == NULL) {
mError("msg:%p, app:%p table:%s, failed to alloc memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
mError("msg:%p, app:%p table:%s, failed to alloc memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
pTable->info.type = (pCreate->numOfColumns == 0)? TSDB_CHILD_TABLE:TSDB_NORMAL_TABLE;
pTable->info.tableId = strdup(pCreate->tableFname);
pTable->info.tableId = strdup(pCreate->tableName);
pTable->createdTime = taosGetTimestampMs();
pTable->tid = tid;
pTable->vgId = pVgroup->vgId;
@ -1928,7 +1934,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
size_t prefixLen = tableIdPrefix(pMsg->pDb->name, prefix, 64);
if (0 != strncasecmp(prefix, stableName, prefixLen)) {
mError("msg:%p, app:%p table:%s, corresponding super table:%s not in this db", pMsg, pMsg->rpcMsg.ahandle,
pCreate->tableFname, stableName);
pCreate->tableName, stableName);
mnodeDestroyChildTable(pTable);
return TSDB_CODE_TDB_INVALID_CREATE_TB_MSG;
}
@ -1936,7 +1942,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
if (pMsg->pSTable == NULL) pMsg->pSTable = mnodeGetSuperTable(stableName);
if (pMsg->pSTable == NULL) {
mError("msg:%p, app:%p table:%s, corresponding super table:%s does not exist", pMsg, pMsg->rpcMsg.ahandle,
pCreate->tableFname, stableName);
pCreate->tableName, stableName);
mnodeDestroyChildTable(pTable);
return TSDB_CODE_MND_INVALID_TABLE_NAME;
}
@ -2003,7 +2009,7 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) {
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
mnodeDestroyChildTable(pTable);
pMsg->pTable = NULL;
mError("msg:%p, app:%p table:%s, failed to create, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname,
mError("msg:%p, app:%p table:%s, failed to create, reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName,
tstrerror(code));
} else {
mDebug("msg:%p, app:%p table:%s, allocated in vgroup, vgId:%d sid:%d uid:%" PRIu64, pMsg, pMsg->rpcMsg.ahandle,
@ -2020,7 +2026,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
int32_t code = grantCheck(TSDB_GRANT_TIMESERIES);
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p table:%s, failed to create, grant timeseries failed", pMsg, pMsg->rpcMsg.ahandle,
pCreate->tableFname);
pCreate->tableName);
return code;
}
@ -2031,7 +2037,7 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
code = mnodeGetAvailableVgroup(pMsg, &pVgroup, &tid);
if (code != TSDB_CODE_SUCCESS) {
mDebug("msg:%p, app:%p table:%s, failed to get available vgroup, reason:%s", pMsg, pMsg->rpcMsg.ahandle,
pCreate->tableFname, tstrerror(code));
pCreate->tableName, tstrerror(code));
return code;
}
@ -2045,15 +2051,15 @@ static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) {
return mnodeDoCreateChildTable(pMsg, tid);
}
} else {
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableFname);
if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableName);
}
if (pMsg->pTable == NULL) {
mError("msg:%p, app:%p table:%s, object not found, retry:%d reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname, pMsg->retry,
mError("msg:%p, app:%p table:%s, object not found, retry:%d reason:%s", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName, pMsg->retry,
tstrerror(terrno));
return terrno;
} else {
mDebug("msg:%p, app:%p table:%s, send create msg to vnode again", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableFname);
mDebug("msg:%p, app:%p table:%s, send create msg to vnode again", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName);
return mnodeDoCreateChildTableFp(pMsg);
}
}
@ -2398,8 +2404,7 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
SCreateTableMsg* pCreate = (SCreateTableMsg*) ((char*) pCreateMsg + sizeof(SCMCreateTableMsg));
size_t size = tListLen(pInfo->tableFname);
tstrncpy(pCreate->tableFname, pInfo->tableFname, size);
tstrncpy(pCreate->db, pMsg->pDb->name, sizeof(pCreate->db));
tstrncpy(pCreate->tableName, pInfo->tableFname, size);
pCreate->igExists = 1;
pCreate->getMeta = 1;
@ -2767,7 +2772,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) {
SCTableObj *pTable = mnodeGetChildTable(tableId);
if (pTable == NULL) continue;
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(tableId);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableName(tableId);
if (pMsg->pDb == NULL || pMsg->pDb->status != TSDB_DB_STATUS_READY) {
mnodeDecTableRef(pTable);
continue;
@ -2988,7 +2993,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
mDebug("msg:%p, app:%p table:%s, alter table msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle,
pAlter->tableFname, pMsg->rpcMsg.handle);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableId(pAlter->tableFname);
if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDbByTableName(pAlter->tableFname);
if (pMsg->pDb == NULL) {
mError("msg:%p, app:%p table:%s, failed to alter table, db not selected", pMsg, pMsg->rpcMsg.ahandle, pAlter->tableFname);
return TSDB_CODE_MND_DB_NOT_SELECTED;

View File

@ -504,13 +504,13 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
int64_t end = 0;
// not enough time range
if (INT64_MAX - start > pInterval->interval - 1) {
if (start < 0 || INT64_MAX - start > pInterval->interval - 1) {
end = start + pInterval->interval - 1;
while(end < t && ((start + pInterval->sliding) <= INT64_MAX)) { // move forward to the correct time window
start += pInterval->sliding;
if (INT64_MAX - start > pInterval->interval - 1) {
if (start < 0 || INT64_MAX - start > pInterval->interval - 1) {
end = start + pInterval->interval - 1;
} else {
end = INT64_MAX;
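
Note on the hunk above: the added `start < 0` short-circuit keeps `INT64_MAX - start` from being evaluated for a negative (pre-epoch) start, where the subtraction itself would overflow; a negative start can never push `start + interval - 1` past INT64_MAX, so the window end is computed directly and the INT64_MAX clamp is reserved for large positive starts. Below is a minimal, self-contained sketch of that guard; `interval_end` is a hypothetical helper name, not the actual taosTimeTruncate code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the overflow guard: close a time window [start, start+interval-1]
 * without signed overflow. A negative start cannot overflow the addition, so
 * the end is computed directly; otherwise the end is clamped to INT64_MAX
 * when the addition would wrap. */
static int64_t interval_end(int64_t start, int64_t interval) {
  if (start < 0 || INT64_MAX - start > interval - 1) {
    return start + interval - 1;   /* no overflow possible here */
  }
  return INT64_MAX;                /* clamp: window runs past the int64 range */
}

int main(void) {
  printf("%" PRId64 "\n", interval_end(-1000, 86400000));          /* pre-epoch start */
  printf("%" PRId64 "\n", interval_end(INT64_MAX - 10, 86400000)); /* clamped         */
  return 0;
}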

View File

@ -39,7 +39,8 @@
#define HTTP_GC_TARGET_SIZE 512
#define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5
#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + TSDB_KEY_LEN)
#define HTTP_PASSWORD_LEN TSDB_UNI_LEN
#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + HTTP_PASSWORD_LEN)
typedef enum HttpReqType {
HTTP_REQTYPE_OTHERS = 0,
@ -147,7 +148,7 @@ typedef struct HttpContext {
uint8_t parsed;
char ipstr[22];
char user[TSDB_USER_LEN]; // parsed from auth token or login message
char pass[TSDB_KEY_LEN];
char pass[HTTP_PASSWORD_LEN];
TAOS * taos;
void * ppContext;
HttpSession *session;

View File

@ -51,7 +51,7 @@ int32_t httpParseBasicAuthToken(HttpContext *pContext, char *token, int32_t len)
char *password = user + 1;
int32_t pass_len = (int32_t)((base64 + outlen) - password);
if (pass_len < 1 || pass_len >= TSDB_KEY_LEN) {
if (pass_len < 1 || pass_len >= HTTP_PASSWORD_LEN) {
httpError("context:%p, fd:%d, basic token:%s parse password error", pContext, pContext->fd, token);
free(base64);
return -1;
@ -73,7 +73,7 @@ int32_t httpParseTaosdAuthToken(HttpContext *pContext, char *token, int32_t len)
if (base64) free(base64);
return 01;
}
if (outlen != (TSDB_USER_LEN + TSDB_KEY_LEN)) {
if (outlen != (TSDB_USER_LEN + HTTP_PASSWORD_LEN)) {
httpError("context:%p, fd:%d, taosd token:%s length error", pContext, pContext->fd, token);
free(base64);
return -1;
@ -103,8 +103,8 @@ int32_t httpGenTaosdAuthToken(HttpContext *pContext, char *token, int32_t maxLen
size = sizeof(pContext->pass);
tstrncpy(buffer + sizeof(pContext->user), pContext->pass, size);
char *encrypt = taosDesEncode(KEY_DES_4, buffer, TSDB_USER_LEN + TSDB_KEY_LEN);
char *base64 = base64_encode((const unsigned char *)encrypt, TSDB_USER_LEN + TSDB_KEY_LEN);
char *encrypt = taosDesEncode(KEY_DES_4, buffer, TSDB_USER_LEN + HTTP_PASSWORD_LEN);
char *base64 = base64_encode((const unsigned char *)encrypt, TSDB_USER_LEN + HTTP_PASSWORD_LEN);
size_t len = strlen(base64);
tstrncpy(token, base64, len + 1);
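
Note on the hunks above: the password buffers are widened from TSDB_KEY_LEN to HTTP_PASSWORD_LEN (defined as TSDB_UNI_LEN), while the bound check stays the same when the decoded Basic-auth credential is split into user and password. The sketch below is a self-contained illustration of that split-and-bound-check pattern; the buffer sizes and the helper name `split_credential` are hypothetical stand-ins, not the HTTP module's own parser.

#include <stdio.h>
#include <string.h>

#define USER_LEN 24    /* stand-in for TSDB_USER_LEN        */
#define PASS_LEN 128   /* stand-in for HTTP_PASSWORD_LEN    */

/* Split a decoded "user:password" credential, rejecting either part if it
 * would overflow its fixed buffer (hypothetical helper). */
static int split_credential(const char *decoded, char *user, char *pass) {
  const char *colon = strchr(decoded, ':');
  if (colon == NULL) return -1;

  size_t user_len = (size_t)(colon - decoded);
  size_t pass_len = strlen(colon + 1);
  if (user_len < 1 || user_len >= USER_LEN) return -1;
  if (pass_len < 1 || pass_len >= PASS_LEN) return -1;

  memcpy(user, decoded, user_len); user[user_len] = '\0';
  memcpy(pass, colon + 1, pass_len); pass[pass_len] = '\0';
  return 0;
}

int main(void) {
  char user[USER_LEN], pass[PASS_LEN];
  if (split_credential("root:taosdata", user, pass) == 0) {
    printf("user=%s pass-len=%zu\n", user, strlen(pass));
  }
  return 0;
}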

View File

@ -59,11 +59,11 @@ bool gcGetUserFromUrl(HttpContext* pContext) {
bool gcGetPassFromUrl(HttpContext* pContext) {
HttpParser* pParser = pContext->parser;
if (pParser->path[GC_PASS_URL_POS].pos >= TSDB_KEY_LEN || pParser->path[GC_PASS_URL_POS].pos <= 0) {
if (pParser->path[GC_PASS_URL_POS].pos >= HTTP_PASSWORD_LEN || pParser->path[GC_PASS_URL_POS].pos <= 0) {
return false;
}
tstrncpy(pContext->pass, pParser->path[GC_PASS_URL_POS].str, TSDB_KEY_LEN);
tstrncpy(pContext->pass, pParser->path[GC_PASS_URL_POS].str, HTTP_PASSWORD_LEN);
return true;
}

View File

@ -72,11 +72,11 @@ bool restGetUserFromUrl(HttpContext* pContext) {
bool restGetPassFromUrl(HttpContext* pContext) {
HttpParser* pParser = pContext->parser;
if (pParser->path[REST_PASS_URL_POS].pos >= TSDB_KEY_LEN || pParser->path[REST_PASS_URL_POS].pos <= 0) {
if (pParser->path[REST_PASS_URL_POS].pos >= HTTP_PASSWORD_LEN || pParser->path[REST_PASS_URL_POS].pos <= 0) {
return false;
}
tstrncpy(pContext->pass, pParser->path[REST_PASS_URL_POS].str, TSDB_KEY_LEN);
tstrncpy(pContext->pass, pParser->path[REST_PASS_URL_POS].str, HTTP_PASSWORD_LEN);
return true;
}

View File

@ -101,7 +101,10 @@ void httpCleanUpConnect() {
HttpServer *pServer = &tsHttpServer;
if (pServer->pThreads == NULL) return;
pthread_join(pServer->thread, NULL);
if (taosCheckPthreadValid(pServer->thread)) {
pthread_join(pServer->thread, NULL);
}
for (int32_t i = 0; i < pServer->numOfThreads; ++i) {
HttpThread* pThread = pServer->pThreads + i;
if (pThread != NULL) {

View File

@ -324,7 +324,7 @@ bool tgGetUserFromUrl(HttpContext *pContext) {
bool tgGetPassFromUrl(HttpContext *pContext) {
HttpParser *pParser = pContext->parser;
if (pParser->path[TG_PASS_URL_POS].pos >= TSDB_KEY_LEN || pParser->path[TG_PASS_URL_POS].pos <= 0) {
if (pParser->path[TG_PASS_URL_POS].pos >= HTTP_PASSWORD_LEN || pParser->path[TG_PASS_URL_POS].pos <= 0) {
return false;
}

View File

@ -246,7 +246,10 @@ void monStopSystem() {
void monCleanupSystem() {
tsMonitor.quiting = 1;
monStopSystem();
pthread_join(tsMonitor.thread, NULL);
if (taosCheckPthreadValid(tsMonitor.thread)) {
pthread_join(tsMonitor.thread, NULL);
}
if (tsMonitor.conn != NULL) {
taos_close(tsMonitor.conn);
tsMonitor.conn = NULL;
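
Note on the two hunks above: both teardown paths (the HTTP server thread and the monitor thread) now call pthread_join only after taosCheckPthreadValid confirms the handle was actually created, since joining a pthread_t that was never successfully started is undefined behaviour. A minimal self-contained sketch of the same guard follows, using a plain started flag in place of TDengine's taosCheckPthreadValid; the names `worker` and `worker_started` are hypothetical.

#include <pthread.h>
#include <stdbool.h>

static pthread_t worker;          /* hypothetical worker thread handle     */
static bool      worker_started;  /* set only after pthread_create succeeds */

static void *run(void *arg) { (void)arg; return NULL; }

static void cleanup(void) {
  /* Join only when the thread was really created; joining an uninitialized
   * pthread_t is undefined behaviour. */
  if (worker_started) {
    pthread_join(worker, NULL);
    worker_started = false;
  }
}

int main(void) {
  if (pthread_create(&worker, NULL, run, NULL) == 0) worker_started = true;
  cleanup();   /* safe whether or not the thread was started */
  return 0;
}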

View File

@ -96,16 +96,16 @@ typedef struct SCreateTableSQL {
SQuerySQL *pSelect;
} SCreateTableSQL;
typedef struct SAlterTableSQL {
typedef struct SAlterTableInfo {
SStrToken name;
int16_t tableType;
int16_t type;
STagData tagData;
SArray *pAddColumns; // SArray<TAOS_FIELD>
SArray *varList; // set t=val or: change src dst, SArray<tVariantListItem>
} SAlterTableSQL;
} SAlterTableInfo;
typedef struct SCreateDBInfo {
typedef struct SCreateDbInfo {
SStrToken dbname;
int32_t replica;
int32_t cacheBlockSize;
@ -123,11 +123,10 @@ typedef struct SCreateDBInfo {
bool ignoreExists;
int8_t update;
int8_t cachelast;
SArray *keep;
} SCreateDBInfo;
SArray *keep;
} SCreateDbInfo;
typedef struct SCreateAcctSQL {
typedef struct SCreateAcctInfo {
int32_t maxUsers;
int32_t maxDbs;
int32_t maxTimeSeries;
@ -137,7 +136,7 @@ typedef struct SCreateAcctSQL {
int64_t maxQueryTime;
int32_t maxConnections;
SStrToken stat;
} SCreateAcctSQL;
} SCreateAcctInfo;
typedef struct SShowInfo {
uint8_t showType;
@ -152,23 +151,18 @@ typedef struct SUserInfo {
int16_t type;
} SUserInfo;
typedef struct tDCLSQL {
int32_t nTokens; /* Number of expressions on the list */
int32_t nAlloc; /* Number of entries allocated below */
SStrToken *a; /* one entry for element */
bool existsCheck;
typedef struct SMiscInfo {
SArray *a; // SArray<SStrToken>
bool existsCheck;
int16_t tableType;
SUserInfo user;
union {
SCreateDBInfo dbOpt;
SCreateAcctSQL acctOpt;
SShowInfo showOpt;
SStrToken ip;
SCreateDbInfo dbOpt;
SCreateAcctInfo acctOpt;
SShowInfo showOpt;
SStrToken id;
};
SUserInfo user;
} tDCLSQL;
} SMiscInfo;
typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause
SQuerySQL **pClause;
@ -178,15 +172,13 @@ typedef struct SSubclauseInfo { // "UNION" multiple select sub-clause
typedef struct SSqlInfo {
int32_t type;
bool valid;
union {
SCreateTableSQL *pCreateTableInfo;
SAlterTableSQL *pAlterInfo;
tDCLSQL *pDCLInfo;
};
SSubclauseInfo subclauseInfo;
char pzErrMsg[256];
char msg[256];
union {
SCreateTableSQL *pCreateTableInfo;
SAlterTableInfo *pAlterInfo;
SMiscInfo *pMiscInfo;
};
} SSqlInfo;
typedef struct tSQLExpr {
@ -252,7 +244,7 @@ SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSe
void tSqlExprNodeDestroy(tSQLExpr *pExpr);
SAlterTableSQL * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable);
SAlterTableInfo * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable);
SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists);
void destroyAllSelectClause(SSubclauseInfo *pSql);
@ -272,16 +264,14 @@ void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...);
void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck,int16_t tableType);
void setShowOptions(SSqlInfo *pInfo, int32_t type, SStrToken* prefix, SStrToken* pPatterns);
tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SStrToken *pToken);
void setCreateDbInfo(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDbInfo *pDB, SStrToken *pIgExists);
void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDBInfo *pDB, SStrToken *pIgExists);
void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctSQL *pAcctInfo);
void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctInfo *pAcctInfo);
void setCreateUserSql(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd);
void setKillSql(SSqlInfo *pInfo, int32_t type, SStrToken *ip);
void setAlterUserSql(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* pPwd, SStrToken *pPrivilege);
void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo);
void setDefaultCreateDbOption(SCreateDbInfo *pDBInfo);
// prefix show db.tables;
void setDbName(SStrToken *pCpxName, SStrToken *pDb);

View File

@ -36,7 +36,7 @@
%syntax_error {
pInfo->valid = false;
int32_t outputBufLen = tListLen(pInfo->pzErrMsg);
int32_t outputBufLen = tListLen(pInfo->msg);
int32_t len = 0;
if(TOKEN.z) {
@ -46,13 +46,13 @@
if (sqlLen + sizeof(msg)/sizeof(msg[0]) + 1 > outputBufLen) {
char tmpstr[128] = {0};
memcpy(tmpstr, &TOKEN.z[0], sizeof(tmpstr)/sizeof(tmpstr[0]) - 1);
len = sprintf(pInfo->pzErrMsg, msg, tmpstr);
len = sprintf(pInfo->msg, msg, tmpstr);
} else {
len = sprintf(pInfo->pzErrMsg, msg, &TOKEN.z[0]);
len = sprintf(pInfo->msg, msg, &TOKEN.z[0]);
}
} else {
len = sprintf(pInfo->pzErrMsg, "Incomplete SQL statement");
len = sprintf(pInfo->msg, "Incomplete SQL statement");
}
assert(len <= outputBufLen);
@ -216,7 +216,7 @@ conns(Y) ::= CONNS INTEGER(X). { Y = X; }
state(Y) ::= . { Y.n = 0; }
state(Y) ::= STATE ids(X). { Y = X; }
%type acct_optr {SCreateAcctSQL}
%type acct_optr {SCreateAcctInfo}
acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K) conns(L) state(M). {
Y.maxUsers = (K.n>0)?atoi(K.z):-1;
Y.maxDbs = (E.n>0)?atoi(E.z):-1;
@ -248,7 +248,7 @@ prec(Y) ::= PRECISION STRING(X). { Y = X; }
update(Y) ::= UPDATE INTEGER(X). { Y = X; }
cachelast(Y) ::= CACHELAST INTEGER(X). { Y = X; }
%type db_optr {SCreateDBInfo}
%type db_optr {SCreateDbInfo}
db_optr(Y) ::= . {setDefaultCreateDbOption(&Y);}
db_optr(Y) ::= db_optr(Z) cache(X). { Y = Z; Y.cacheBlockSize = strtol(X.z, NULL, 10); }
@ -267,7 +267,7 @@ db_optr(Y) ::= db_optr(Z) keep(X). { Y = Z; Y.keep = X; }
db_optr(Y) ::= db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); }
db_optr(Y) ::= db_optr(Z) cachelast(X). { Y = Z; Y.cachelast = strtol(X.z, NULL, 10); }
%type alter_db_optr {SCreateDBInfo}
%type alter_db_optr {SCreateDbInfo}
alter_db_optr(Y) ::= . { setDefaultCreateDbOption(&Y);}
alter_db_optr(Y) ::= alter_db_optr(Z) replica(X). { Y = Z; Y.replica = strtol(X.z, NULL, 10); }
@ -692,7 +692,7 @@ cmd ::= RESET QUERY CACHE. { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
///////////////////////////////////ALTER TABLE statement//////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
X.n += F.n;
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
@ -702,14 +702,14 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
toTSDBType(A.type);
SArray* K = tVariantListAppendToken(NULL, &A, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
X.n += Y.n;
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
@ -718,7 +718,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
toTSDBType(Y.type);
SArray* A = tVariantListAppendToken(NULL, &Y, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
@ -731,7 +731,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
toTSDBType(Z.type);
A = tVariantListAppendToken(A, &Z, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
@ -742,7 +742,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
SArray* A = tVariantListAppendToken(NULL, &Y, -1);
A = tVariantListAppend(A, &Z, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
@ -750,7 +750,7 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
///////////////////////////////////ALTER STABLE statement//////////////////////////////////
cmd ::= ALTER STABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
X.n += F.n;
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
@ -760,14 +760,14 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). {
toTSDBType(A.type);
SArray* K = tVariantListAppendToken(NULL, &A, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
//////////////////////////////////ALTER TAGS statement/////////////////////////////////////
cmd ::= ALTER STABLE ids(X) cpxName(Y) ADD TAG columnlist(A). {
X.n += Y.n;
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, A, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
cmd ::= ALTER STABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
@ -776,7 +776,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(Z) DROP TAG ids(Y). {
toTSDBType(Y.type);
SArray* A = tVariantListAppendToken(NULL, &Y, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
@ -789,7 +789,7 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). {
toTSDBType(Z.type);
A = tVariantListAppendToken(A, &Z, -1);
SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE);
SAlterTableInfo* pAlterTable = tAlterTableSqlElems(&X, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}

View File

@ -407,7 +407,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
SSchema* pSchema = exception_calloc(1, sizeof(SSchema));
left->pSchema = pSchema;
*pSchema = tscGetTbnameColumnSchema();
*pSchema = tGetTbnameColumnSchema();
tExprNode* right = exception_calloc(1, sizeof(tExprNode));
expr->_node.pRight = right;

View File

@ -1875,6 +1875,7 @@ static int32_t setCtxTagColumnInfo(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
return TSDB_CODE_SUCCESS;
}
// todo refactor
static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order) {
qDebug("QInfo:%p setup runtime env", GET_QINFO_ADDR(pRuntimeEnv));
SQuery *pQuery = pRuntimeEnv->pQuery;
@ -3849,11 +3850,6 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) {
// lastKey needs to be updated
pTableQueryInfo->lastKey = nextKey;
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) {
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) {
return;
}
@ -4779,19 +4775,17 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
}
}
// TODO refactor: setAdditionalInfo
static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTableQueryInfo, SDataBlockInfo* pBlockInfo) {
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
TSKEY nextKey = pBlockInfo->window.skey;
setIntervalQueryRange(pQInfo, nextKey);
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) {
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) {
setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo);
}
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
setIntervalQueryRange(pQInfo, pBlockInfo->window.skey);
} else { // non-interval query
setExecutionContext(pQInfo, pTableQueryInfo->groupIndex, pBlockInfo->window.ekey + step);
}
@ -4814,7 +4808,7 @@ static void doTableQueryInfoTimeWindowCheck(SQuery* pQuery, STableQueryInfo* pTa
static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery* pQuery = pRuntimeEnv->pQuery;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
SQueryCostInfo* summary = &pRuntimeEnv->summary;
int64_t st = taosGetTimestampMs();
@ -5047,7 +5041,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window);
SArray *g1 = taosArrayInit(1, POINTER_BYTES);
SArray *tx = taosArrayClone(group);
SArray *tx = taosArrayDup(group);
taosArrayPush(g1, &tx);
STableGroupInfo gp = {.numOfTables = taosArrayGetSize(tx), .pGroupList = g1};
@ -5107,7 +5101,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window);
SArray *g1 = taosArrayInit(1, POINTER_BYTES);
SArray *tx = taosArrayClone(group);
SArray *tx = taosArrayDup(group);
taosArrayPush(g1, &tx);
STableGroupInfo gp = {.numOfTables = taosArrayGetSize(tx), .pGroupList = g1};
@ -5471,7 +5465,7 @@ static void doRestoreContext(SQInfo *pQInfo) {
SET_MASTER_SCAN_FLAG(pRuntimeEnv);
}
static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
static void doCloseAllTimeWindow(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
@ -5523,7 +5517,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
}
// close all time window results
doCloseAllTimeWindowAfterScan(pQInfo);
doCloseAllTimeWindow(pQInfo);
if (needReverseScan(pQuery)) {
int32_t code = doSaveContext(pQInfo);
@ -5848,8 +5842,7 @@ static void stableQueryImpl(SQInfo *pQInfo) {
(isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && (!pRuntimeEnv->groupbyColumn))) {
multiTableQueryProcess(pQInfo);
} else {
assert((pQuery->checkResultBuf == 1 && pQuery->interval.interval == 0) || isPointInterpoQuery(pQuery) ||
pRuntimeEnv->groupbyColumn);
assert(pQuery->checkResultBuf == 1 || isPointInterpoQuery(pQuery) || pRuntimeEnv->groupbyColumn);
sequentialTableProcess(pQInfo);
}
@ -6377,7 +6370,7 @@ static int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t num
if (functId == TSDB_FUNC_TOP || functId == TSDB_FUNC_BOTTOM) {
int32_t j = getColumnIndexInSource(pQueryMsg, &pExprs[i].base, pTagCols);
if (j < 0 || j >= pQueryMsg->numOfCols) {
assert(0);
return TSDB_CODE_QRY_INVALID_MSG;
} else {
SColumnInfo *pCol = &pQueryMsg->colList[j];
int32_t ret =
@ -6576,7 +6569,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
int32_t srcSize = 0;
for (int16_t i = 0; i < numOfCols; ++i) {
pQuery->colList[i] = pQueryMsg->colList[i];
pQuery->colList[i].filters = tscFilterInfoClone(pQueryMsg->colList[i].filters, pQuery->colList[i].numOfFilters);
pQuery->colList[i].filters = tFilterInfoDup(pQueryMsg->colList[i].filters, pQuery->colList[i].numOfFilters);
srcSize += pQuery->colList[i].bytes;
}
@ -6642,7 +6635,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou
pQInfo->runtimeEnv.summary.tableInfoSize += (pTableGroupInfo->numOfTables * sizeof(STableQueryInfo));
pQInfo->runtimeEnv.pResultRowHashTable = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pQInfo->runtimeEnv.keyBuf = malloc(TSDB_MAX_BYTES_PER_ROW);
pQInfo->runtimeEnv.keyBuf = malloc(TSDB_MAX_BYTES_PER_ROW); // todo opt size
pQInfo->runtimeEnv.pool = initResultRowPool(getResultRowSize(&pQInfo->runtimeEnv));
pQInfo->runtimeEnv.prevRow = malloc(POINTER_BYTES * pQuery->numOfCols + srcSize);

View File

@ -54,7 +54,7 @@ SSqlInfo qSQLParse(const char *pStr) {
case TK_QUESTION:
case TK_ILLEGAL: {
snprintf(sqlInfo.pzErrMsg, tListLen(sqlInfo.pzErrMsg), "unrecognized token: \"%s\"", t0.z);
snprintf(sqlInfo.msg, tListLen(sqlInfo.msg), "unrecognized token: \"%s\"", t0.z);
sqlInfo.valid = false;
goto abort_parse;
}
@ -585,8 +585,8 @@ SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVal
return info;
}
SAlterTableSQL *tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableType) {
SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL));
SAlterTableInfo *tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableType) {
SAlterTableInfo *pAlterTable = calloc(1, sizeof(SAlterTableInfo));
pAlterTable->name = *pTableName;
pAlterTable->type = type;
@ -632,15 +632,15 @@ void SqlInfoDestroy(SSqlInfo *pInfo) {
tfree(pInfo->pAlterInfo->tagData.data);
tfree(pInfo->pAlterInfo);
} else {
if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) {
free(pInfo->pDCLInfo->a);
if (pInfo->pMiscInfo != NULL) {
taosArrayDestroy(pInfo->pMiscInfo->a);
}
if (pInfo->pDCLInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) {
taosArrayDestroyEx(pInfo->pDCLInfo->dbOpt.keep, freeVariant);
if (pInfo->pMiscInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) {
taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant);
}
tfree(pInfo->pDCLInfo);
tfree(pInfo->pMiscInfo);
}
}
@ -697,58 +697,49 @@ void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken
pInfo->pCreateTableInfo->existCheck = (pIfNotExists->n != 0);
}
void tTokenListBuyMoreSpace(tDCLSQL *pTokenList) {
if (pTokenList->nAlloc <= pTokenList->nTokens) { //
pTokenList->nAlloc = (pTokenList->nAlloc << 1u) + 4;
pTokenList->a = realloc(pTokenList->a, pTokenList->nAlloc * sizeof(pTokenList->a[0]));
if (pTokenList->a == 0) {
pTokenList->nTokens = pTokenList->nAlloc = 0;
}
}
}
tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SStrToken *pToken) {
if (pToken == NULL) return NULL;
if (pTokenList == NULL) pTokenList = calloc(1, sizeof(tDCLSQL));
tTokenListBuyMoreSpace(pTokenList);
pTokenList->a[pTokenList->nTokens++] = *pToken;
return pTokenList;
}
void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
pInfo->type = type;
if (nParam == 0) {
return;
}
if (nParam == 0) return;
if (pInfo->pDCLInfo == NULL) pInfo->pDCLInfo = (tDCLSQL *)calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = (SMiscInfo *)calloc(1, sizeof(SMiscInfo));
pInfo->pMiscInfo->a = taosArrayInit(4, sizeof(SStrToken));
}
va_list va;
va_start(va, nParam);
while (nParam-- > 0) {
while ((nParam--) > 0) {
SStrToken *pToken = va_arg(va, SStrToken *);
pInfo->pDCLInfo = tTokenListAppend(pInfo->pDCLInfo, pToken);
taosArrayPush(pInfo->pMiscInfo->a, pToken);
}
va_end(va);
}
void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck, int16_t tableType) {
pInfo->type = type;
pInfo->pDCLInfo = tTokenListAppend(pInfo->pDCLInfo, pToken);
pInfo->pDCLInfo->tableType = tableType;
pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1);
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = (SMiscInfo *)calloc(1, sizeof(SMiscInfo));
pInfo->pMiscInfo->a = taosArrayInit(4, sizeof(SStrToken));
}
taosArrayPush(pInfo->pMiscInfo->a, pToken);
pInfo->pMiscInfo->existsCheck = (existsCheck->n == 1);
pInfo->pMiscInfo->tableType = tableType;
}
void setShowOptions(SSqlInfo *pInfo, int32_t type, SStrToken* prefix, SStrToken* pPatterns) {
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo));
}
pInfo->type = TSDB_SQL_SHOW;
SShowInfo* pShowInfo = &pInfo->pDCLInfo->showOpt;
SShowInfo* pShowInfo = &pInfo->pMiscInfo->showOpt;
pShowInfo->showType = type;
if (prefix != NULL && prefix->type != 0) {
@ -764,54 +755,54 @@ void setShowOptions(SSqlInfo *pInfo, int32_t type, SStrToken* prefix, SStrToken*
}
}
void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDBInfo *pDB, SStrToken *pIgExists) {
void setCreateDbInfo(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDbInfo *pDB, SStrToken *pIgExists) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo));
}
pInfo->pDCLInfo->dbOpt = *pDB;
pInfo->pDCLInfo->dbOpt.dbname = *pToken;
pInfo->pDCLInfo->dbOpt.ignoreExists = pIgExists->n; // sql.y has: ifnotexists(X) ::= IF NOT EXISTS. {X.n = 1;}
pInfo->pMiscInfo->dbOpt = *pDB;
pInfo->pMiscInfo->dbOpt.dbname = *pToken;
pInfo->pMiscInfo->dbOpt.ignoreExists = pIgExists->n; // sql.y has: ifnotexists(X) ::= IF NOT EXISTS. {X.n = 1;}
}
void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctSQL *pAcctInfo) {
void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctInfo *pAcctInfo) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo));
}
pInfo->pDCLInfo->acctOpt = *pAcctInfo;
pInfo->pMiscInfo->acctOpt = *pAcctInfo;
assert(pName != NULL);
pInfo->pDCLInfo->user.user = *pName;
pInfo->pMiscInfo->user.user = *pName;
if (pPwd != NULL) {
pInfo->pDCLInfo->user.passwd = *pPwd;
pInfo->pMiscInfo->user.passwd = *pPwd;
}
}
void setCreateUserSql(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd) {
pInfo->type = TSDB_SQL_CREATE_USER;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo));
}
assert(pName != NULL && pPasswd != NULL);
pInfo->pDCLInfo->user.user = *pName;
pInfo->pDCLInfo->user.passwd = *pPasswd;
pInfo->pMiscInfo->user.user = *pName;
pInfo->pMiscInfo->user.passwd = *pPasswd;
}
void setAlterUserSql(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* pPwd, SStrToken *pPrivilege) {
pInfo->type = TSDB_SQL_ALTER_USER;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo));
}
assert(pName != NULL);
SUserInfo* pUser = &pInfo->pDCLInfo->user;
SUserInfo* pUser = &pInfo->pMiscInfo->user;
pUser->type = type;
pUser->user = *pName;
@ -828,18 +819,17 @@ void setAlterUserSql(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken*
}
}
void setKillSql(SSqlInfo *pInfo, int32_t type, SStrToken *ip) {
void setKillSql(SSqlInfo *pInfo, int32_t type, SStrToken *id) {
pInfo->type = type;
if (pInfo->pDCLInfo == NULL) {
pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL));
if (pInfo->pMiscInfo == NULL) {
pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo));
}
assert(ip != NULL);
pInfo->pDCLInfo->ip = *ip;
assert(id != NULL);
pInfo->pMiscInfo->id = *id;
}
void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo) {
void setDefaultCreateDbOption(SCreateDbInfo *pDBInfo) {
pDBInfo->compressionLevel = -1;
pDBInfo->walLevel = -1;
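
Note on the parser hunks above: the hand-rolled tDCLSQL list (tTokenListAppend / tTokenListBuyMoreSpace with manual realloc bookkeeping) is retired in favour of the generic SArray container; the list is created once with taosArrayInit(4, sizeof(SStrToken)), elements are appended with taosArrayPush, and SqlInfoDestroy releases it with taosArrayDestroy. The fragment below is a sketch of that append pattern using only the SArray calls that appear in this diff; the helper name `appendMiscToken` is hypothetical and the code compiles only alongside the TDengine parser sources.

/* Hypothetical helper mirroring the new setDCLSQLElems/setDropDbTableInfo code:
 * lazily create the SArray, then push a copy of the token into it. */
static void appendMiscToken(SMiscInfo *pMiscInfo, SStrToken *pToken) {
  if (pMiscInfo->a == NULL) {
    pMiscInfo->a = taosArrayInit(4, sizeof(SStrToken));  /* 4 = initial capacity */
  }
  taosArrayPush(pMiscInfo->a, pToken);                   /* element is copied in */
}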

File diff suppressed because it is too large

View File

@ -154,7 +154,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
if (code == 0) {
code = pthread_create(&pServerObj->thread, &thattr, taosAcceptTcpConnection, (void *)pServerObj);
if (code != 0) {
tError("%s failed to create TCP accept thread(%s)", label, strerror(errno));
tError("%s failed to create TCP accept thread(%s)", label, strerror(code));
}
}
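
Note on the one-line fix above: pthread_create reports failures through its return value rather than by setting errno, so the log message should format strerror(code), not strerror(errno). A minimal self-contained illustration of that convention (the thread function `run` is a hypothetical placeholder):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void *run(void *arg) { (void)arg; return NULL; }

int main(void) {
  pthread_t tid;
  /* pthread_create returns the error number directly instead of setting
   * errno, so the returned code is what strerror() should be given. */
  int code = pthread_create(&tid, NULL, run, NULL);
  if (code != 0) {
    fprintf(stderr, "failed to create thread (%s)\n", strerror(code));
    return 1;
  }
  pthread_join(tid, NULL);
  return 0;
}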

View File

@ -140,6 +140,7 @@ void taosStopUdpConnection(void *handle) {
pConn = pSet->udpConn + i;
if (pConn->fd >=0) shutdown(pConn->fd, SHUT_RDWR);
if (pConn->fd >=0) taosCloseSocket(pConn->fd);
pConn->fd = -1;
}
for (int i = 0; i < pSet->threads; ++i) {
@ -198,8 +199,13 @@ static void *taosRecvUdpData(void *param) {
while (1) {
dataLen = recvfrom(pConn->fd, pConn->buffer, RPC_MAX_UDP_SIZE, 0, (struct sockaddr *)&sourceAdd, &addLen);
if(dataLen <= 0) {
tDebug("%s UDP socket was closed, exiting(%s)", pConn->label, strerror(errno));
break;
tDebug("%s UDP socket(fd:%d) receive dataLen(%d) error(%s)", pConn->label, pConn->fd, (int32_t)dataLen, strerror(errno));
// for windows usage, remote shutdown also returns - 1 in windows client
if (-1 == pConn->fd) {
break;
} else {
continue;
}
}
port = ntohs(sourceAdd.sin_port);
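
Note on the hunk above: the receive loop no longer exits on the first failed recvfrom; it logs the error and retries, and it breaks out only once the stop path has closed the socket and set the descriptor to -1, because (per the added comment) a remote shutdown also makes recvfrom return -1 on the Windows client. Below is a rough, self-contained sketch of that loop policy; `g_fd` and `recv_loop` are hypothetical names and error handling is trimmed.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

static volatile int g_fd = -1;  /* owned elsewhere; the stop path closes the socket and sets this to -1 */

static void recv_loop(void) {
  char buf[2048];
  struct sockaddr_in src;
  socklen_t slen = sizeof(src);

  while (1) {
    ssize_t n = recvfrom(g_fd, buf, sizeof(buf), 0, (struct sockaddr *)&src, &slen);
    if (n <= 0) {
      if (g_fd == -1) break;  /* stop path has closed the socket: leave the loop   */
      continue;               /* transient error or remote shutdown: keep listening */
    }
    printf("received %zd bytes\n", n);  /* ... hand off the datagram ... */
  }
}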

View File

@ -1388,8 +1388,8 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
break;
}
if (((tsArray[pos] > pQueryHandle->window.ekey || pos > endPos) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((tsArray[pos] < pQueryHandle->window.ekey || pos < endPos) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (((pos > endPos || tsArray[pos] > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((pos < endPos || tsArray[pos] < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
}

View File

@ -110,7 +110,7 @@ void taosArrayCopy(SArray* pDst, const SArray* pSrc);
* clone a new array
* @param pSrc
*/
SArray* taosArrayClone(const SArray* pSrc);
SArray* taosArrayDup(const SArray* pSrc);
/**
* clear the array (remove all element)

View File

@ -165,7 +165,7 @@ void taosArrayCopy(SArray* pDst, const SArray* pSrc) {
pDst->size = pSrc->size;
}
SArray* taosArrayClone(const SArray* pSrc) {
SArray* taosArrayDup(const SArray* pSrc) {
assert(pSrc != NULL);
if (pSrc->size == 0) { // empty array list

Some files were not shown because too many files have changed in this diff