Merge branch 'develop' into feature/qrefactor

commit 9f70bace9b

@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
@@ -0,0 +1,211 @@

# Quickly Experience TDengine Through Docker

Although deploying the TDengine service with Docker is not recommended for production environments, Docker shields the environmental differences of the underlying operating system very well and is well suited for installing and running TDengine during development, testing, or a first try. In particular, with Docker it is fairly easy to try TDengine on Mac OSX and Windows without installing a virtual machine or renting an extra Linux server.

The following step-by-step walkthrough explains how to use Docker to quickly set up a single-node TDengine environment for development and testing.

## Download Docker

For downloading Docker itself, please refer to the [official Docker documentation](https://docs.docker.com/get-docker/).

After installation, check the Docker version in a command-line terminal. If the version number is printed correctly, the Docker environment has been installed successfully.

```bash
$ docker -v
Docker version 20.10.5, build 55c4c88
```

## Run TDengine in a Docker Container

1. Pull the TDengine image with the following command and run it in the background.

```bash
$ docker run -d tdengine/tdengine
cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316
```

- **docker run**: runs a container through Docker.
- **-d**: keeps the container running in the background.
- **tdengine/tdengine**: the official TDengine image that is pulled.
- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**: the long string returned is the container ID; we can use this ID to refer to the corresponding container.

2. Confirm that the container is running correctly.

```bash
$ docker ps
CONTAINER ID   IMAGE               COMMAND   CREATED          STATUS          ···
cdf548465318   tdengine/tdengine   "taosd"   14 minutes ago   Up 14 minutes   ···
```

- **docker ps**: lists information about all containers in the running state.
- **CONTAINER ID**: container ID.
- **IMAGE**: the image in use.
- **COMMAND**: the command run when the container was started.
- **CREATED**: when the container was created.
- **STATUS**: container status. "Up" means it is running.
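If the container shows up here but you also want to confirm that taosd itself started cleanly inside it, the container log can be inspected as well. This extra check is not part of the original walkthrough; the ID is the one returned by `docker run` above.

```bash
$ docker logs cdf548465318
```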
3. Enter the Docker container and use TDengine.

```bash
$ docker exec -it cdf548465318 /bin/bash
root@cdf548465318:~/TDengine-server-2.0.13.0#
```

- **docker exec**: enters the container via the docker exec command; the container does not stop when you exit.
- **-i**: interactive mode.
- **-t**: allocates a terminal.
- **cdf548465318**: the container ID; replace it with the value returned by the docker ps command.
- **/bin/bash**: runs bash inside the container for interaction.

4. After entering the container, run the taos shell client program.

```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos

Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>
```

The TDengine terminal has connected to the server and printed the welcome message and version information. If the connection fails, an error message is printed instead.

In the TDengine terminal you can create/drop databases, tables, and super tables with SQL commands, and run insert and query operations. For details see the [TAOS SQL documentation](https://www.taosdata.com/cn/documentation/taos-sql).
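As a minimal sketch of such SQL commands, the following session creates a database and a table, inserts one record, and queries it back. The names `demo` and `t` are made up for this illustration and are not part of the original guide.

```bash
$ taos> create database demo;
$ taos> use demo;
$ taos> create table t (ts timestamp, speed int);
$ taos> insert into t values (now, 10);
$ taos> select * from t;
```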
## Learn More About TDengine with taosdemo

1. Following on from the steps above, first exit the TDengine terminal program.

```bash
$ taos> q
root@cdf548465318:~/TDengine-server-2.0.13.0#
```

2. Run taosdemo from the command line.

```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo
###################################################################
# Server IP: localhost:0
# User: root
# Password: taosdata
# Use metric: true
# Datatype of Columns: int int int int int int int float
# Binary Length(If applicable): -1
# Number of Columns per record: 3
# Number of Threads: 10
# Number of Tables: 10000
# Number of Data per Table: 100000
# Records/Request: 1000
# Database name: test
# Table prefix: t
# Delete method: 0
# Test time: 2021-04-13 02:05:20
###################################################################
```

After pressing Enter, this command creates a new database named test and automatically creates a super table meters, and then, using meters as a template, creates 10,000 tables named "t0" through "t9999". Each table holds 100,000 records, and each record has three fields f1, f2, and f3, with the timestamp field ts ranging from "2017-07-14 10:40:00 000" to "2017-07-14 10:41:39 999". Each table carries two tags (TAG), areaid and loc: areaid is set to 1 through 10, and loc is set to either "beijing" or "shanghai".

3. Enter the TDengine terminal and look at the data generated by taosdemo.

- **Enter the command line.**

```bash
$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos

Welcome to the TDengine shell from Linux, Client Version:2.0.13.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos>
```

- **List the databases.**

```bash
$ taos> show databases;
  name  |      created_time       | ntables | vgroups | ···
  test  | 2021-04-13 02:14:15.950 |   10000 |       6 | ···
  log   | 2021-04-12 09:36:37.549 |       4 |       1 | ···

```

- **List the super tables.**

```bash
$ taos> use test;
Database changed.

$ taos> show stables;
  name   |      created_time       | columns | tags | tables |
=====================================================================================
  meters | 2021-04-13 02:14:15.955 |       4 |    2 |  10000 |
Query OK, 1 row(s) in set (0.001737s)

```

- **Query a table, limiting the output to ten rows.**

```bash
$ taos> select * from test.t0 limit 10;
           ts            | f1 | f2 | f3 |
====================================================================
 2017-07-14 02:40:01.000 |  3 |  9 |  0 |
 2017-07-14 02:40:02.000 |  0 |  1 |  2 |
 2017-07-14 02:40:03.000 |  7 |  2 |  3 |
 2017-07-14 02:40:04.000 |  9 |  4 |  5 |
 2017-07-14 02:40:05.000 |  1 |  2 |  5 |
 2017-07-14 02:40:06.000 |  6 |  3 |  2 |
 2017-07-14 02:40:07.000 |  4 |  7 |  8 |
 2017-07-14 02:40:08.000 |  4 |  6 |  6 |
 2017-07-14 02:40:09.000 |  5 |  7 |  7 |
 2017-07-14 02:40:10.000 |  1 |  5 |  0 |
Query OK, 10 row(s) in set (0.003638s)

```

- **Look at the tag values of table t0.**

```bash
$ taos> select areaid, loc from test.t0;
 areaid |   loc    |
===========================
     10 | shanghai |
Query OK, 1 row(s) in set (0.002904s)

```

## Stop the TDengine Service Running in Docker

```bash
$ docker stop cdf548465318
cdf548465318
```

- **docker stop**: stops the specified running Docker container.
- **cdf548465318**: the container ID; replace it with the result returned by the docker ps command.
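Stopping the container does not delete it. If it is no longer needed, the stopped container can also be removed; this optional cleanup step is not in the original text, and the ID is again the one reported by `docker ps -a`.

```bash
$ docker rm cdf548465318
```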
## Connecting to TDengine in Docker from a Program

There are two approaches to using a TDengine service that runs inside a Docker container from outside of Docker:

1. Use port mapping (-p) to map a network port opened inside the container to a specified port on the host. By mounting a local directory (-v), data can be kept in sync between the host and the container, preventing data loss after the container is deleted.

```bash
$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd

$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0}
```

- The first command starts a Docker container running TDengine and maps the container's port 6041 to port 6041 on the host.
- The second command accesses TDengine through the RESTful interface; it connects to port 6041 on the local machine, and the connection succeeds.

Note: in this example, for convenience only port 6041, which the RESTful interface needs, is mapped. If you want to connect to the TDengine service in a non-RESTful way, you need to map the 11 ports starting from 6030 (for the full port list see [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port)). Also, in this example only the /etc/taos directory containing the configuration file is mounted; the data storage directory is not.
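As a sketch of what a fuller invocation could look like when the non-RESTful ports and the data directory are also needed, the command below maps the default port range for both TCP and UDP and mounts the default data directory /var/lib/taos mentioned elsewhere in this commit. The exact range and host paths are assumptions based on a default configuration.

```bash
# Map the default TCP/UDP port range and persist both configuration and data
$ docker run -d \
    -v /etc/taos:/etc/taos \
    -v /var/lib/taos:/var/lib/taos \
    -p 6030-6041:6030-6041 \
    -p 6030-6041:6030-6041/udp \
    tdengine/tdengine
```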
2. Use the exec command to enter the Docker container directly and develop there. That is, put the program code in the same Docker container where the TDengine server runs and connect to the TDengine service local to the container.

```bash
$ docker exec -it 526aa188da /bin/bash
```
@@ -10,7 +10,9 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版

### Running via a Docker Container

- Please refer to [Release, download, and use of the official TDengine Docker image](https://www.taosdata.com/blog/2020/05/13/1509.html)
+ For the time being, deploying the TDengine client or server with Docker is not recommended for production environments, but in a development environment or for a first try, deploying with Docker is very convenient. In particular, with Docker it is easy to try TDengine on Mac OSX and Windows.
+
+ For detailed instructions please refer to [Quickly experience TDengine through Docker](https://www.taosdata.com/cn/documentation/getting-started/docker).

### <a class="anchor" id="package-install"></a>Installing from a Package
@@ -178,7 +178,7 @@ TDengine 分布式架构的逻辑结构图如下:

**FQDN configuration**: A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the system configuration file taos.cfg; if none is specified, the system automatically uses the computer's hostname as its FQDN. If a node has no FQDN configured, its fqdn parameter can be set directly to its IP address, but this is not recommended, because IP addresses can change, and once they do the cluster stops working properly. The EP (End Point) of a data node consists of FQDN plus Port. When FQDNs are used, DNS must work properly, or the hosts file must be configured on the node and on the nodes where applications run.
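For illustration, the following sketch shows how one might check the hostname that would be used by default and what an explicit fqdn entry in taos.cfg could look like. The FQDN value and the path /etc/taos/taos.cfg are assumptions for this example.

```bash
# Hostname the node falls back to when fqdn is not configured
$ hostname -f

# A hypothetical explicit setting in the configuration file
$ grep '^fqdn' /etc/taos/taos.cfg
fqdn h1.taosdata.com
```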
- **Port configuration:** The externally facing port of a data node is determined by the TDengine system configuration parameter serverPort; the port for communication inside the cluster is serverPort+5. Data replication between data nodes in the cluster also occupies one TCP port, serverPort+10. To handle UDP data efficiently with multiple threads, each internal and external UDP connection needs 5 consecutive ports. A data node's total port range is therefore serverPort to serverPort+10, i.e. 11 TCP/UDP ports in all. (There may additionally be ports used by RESTful and Arbitrator, in which case there are 13 in total.) Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.
+ **Port configuration:** The externally facing port of a data node is determined by the TDengine system configuration parameter serverPort; the port for communication inside the cluster is serverPort+5. Data replication between data nodes in the cluster also occupies one TCP port, serverPort+10. To handle UDP data efficiently with multiple threads, each internal and external UDP connection needs 5 consecutive ports. A data node's total port range is therefore serverPort to serverPort+10, i.e. 11 TCP/UDP ports in all. (There may additionally be ports used by RESTful and Arbitrator, in which case there are 13 in total.) Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort. (For the detailed port list see [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port).)

**Cluster external connection:** A TDengine cluster can accommodate a single data node, several, or even thousands. An application only needs to connect to any one data node in the cluster; the network parameter required for the connection is a data node's End Point (FQDN plus the configured port number). When starting the application taos from the command-line CLI, the data node's FQDN can be specified with the -h option and its configured port number with -P; if no port is configured, the TDengine system parameter serverPort is used.
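A small illustration of that CLI connection, where the FQDN is a placeholder for one of your own data nodes:

```bash
$ taos -h h1.taosdata.com -P 6030
```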
@@ -120,7 +120,7 @@ if (async) {
}
```

- Subscriptions in TDengine can be either synchronous or asynchronous; the code above decides which mode to use based on the value of the parameter `async` obtained from the command line. Here, synchronous means that the user program calls `taos_consume` directly to pull data, while in asynchronous mode the API calls `taos_consume` in another internal thread and then hands the pulled data to the callback function `subscribe_callback` for processing.
+ Subscriptions in TDengine can be either synchronous or asynchronous; the code above decides which mode to use based on the value of the parameter `async` obtained from the command line. Here, synchronous means that the user program calls `taos_consume` directly to pull data, while in asynchronous mode the API calls `taos_consume` in another internal thread and then hands the pulled data to the callback function `subscribe_callback` for processing. (Note that `subscribe_callback` should not perform time-consuming work, otherwise it may cause hard-to-control problems such as blocking the client.)

The parameter `taos` is an already established database connection. It has no special requirements in synchronous mode, but in asynchronous mode care must be taken that it is not used by other threads, otherwise unpredictable errors may occur, because the callback function is invoked in the API's internal thread while some TDengine APIs are not thread-safe.
@@ -377,6 +377,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* res: the query result set; note that it may contain no records
* param: the additional parameter supplied by the client program when calling `taos_subscribe`
* code: error code
+ **Note**: Do not carry out lengthy processing in this callback function, especially when the returned result set contains a lot of data, otherwise the client may end up in an abnormal state such as blocking. If complex computation is required, it is recommended to handle it in a separate thread.

* `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
@@ -743,7 +744,7 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间

Only some configuration parameters related to the RESTful interface are listed below; for other system parameters see the comments in the configuration file. Note: after changing the configuration, the taosd service must be restarted for the change to take effect.

- - httpPort: the port providing the RESTful service, bound to 6041 by default
+ - httpPort: the port providing the RESTful service, bound to 6041 by default (the actual value is serverPort + 11, so it can be changed by adjusting the serverPort setting)
- httpMaxThreads: the number of threads to start, 2 by default (since version 2.0.17 the default is half the number of CPU cores, rounded down)
- restfulRowLimit: the maximum number of rows in a returned result set (JSON format), 10240 by default
- httpEnableCompress: whether compression is supported; off by default; currently TDengine supports only the gzip compression format
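To verify that the RESTful port described by the httpPort parameter above is reachable, the same kind of request shown earlier in this commit's Docker guide can be reused; the user, password, and host below are the defaults and may differ in your deployment.

```bash
$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
```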
@@ -55,12 +55,11 @@ arbitrator ha.taosdata.com:6042
| 4 | statusInterval | interval at which a dnode reports its status to the mnode |
| 5 | arbitrator | end point of the arbitrator in the system |
| 6 | timezone | time zone |
- | 7 | locale | system locale information and encoding format |
- | 8 | charset | character set encoding |
- | 9 | balance | whether load balancing is enabled |
- | 10 | maxTablesPerVnode | maximum number of tables that can be created in each vnode |
- | 11 | maxVgroupsPerDb | maximum number of vgroups that can be used in each DB |
+ | 7 | balance | whether load balancing is enabled |
+ | 8 | maxTablesPerVnode | maximum number of tables that can be created in each vnode |
+ | 9 | maxVgroupsPerDb | maximum number of vgroups that can be used in each DB |

+ Note: in versions 2.0.19.0 and earlier, in addition to the 9 parameters above, a dnode joining the cluster was also required to have matching values for the locale and charset parameters.

## <a class="anchor" id="node-one"></a>Starting the First Data Node
@@ -100,8 +100,7 @@ taosd -C

- firstEp: the end point of the first dnode in the cluster that taosd actively connects to at startup; default value localhost:6030.
- fqdn: the FQDN of the data node, defaulting to the first hostname configured by the operating system. If you prefer IP-address access, it can be set to the node's IP address.
- - serverPort: the port taosd serves externally after startup; default value 6030.
- - httpPort: the port used by the RESTful service; all HTTP requests (TCP) for queries/writes are sent to this port; default value 6041.
+ - serverPort: the port taosd serves externally after startup; default value 6030. (The port used by the RESTful service is this value plus 11, i.e. 6041 by default.)
- dataDir: the data file directory; all data files are written here. Default: /var/lib/taos.
- logDir: the log file directory; client and server run-time log files are written here. Default: /var/log/taos.
- arbitrator: the end point of the arbitrator in the system; empty by default.

@@ -115,7 +114,7 @@ taosd -C
- queryBufferSize: the amount of memory reserved for all concurrent queries. It can be estimated as the expected maximum number of concurrent requests times the number of tables involved, times 170. The unit is MB (in versions before 2.0.15 the unit of this parameter is bytes).
- ratioOfQueryCores: sets the maximum number of query threads. The minimum value 0 means a single query thread; the maximum value 2 means at most twice as many query threads as CPU cores. The default is 1, meaning at most as many query threads as CPU cores. The value may be fractional; 0.5 means at most half as many query threads as CPU cores.

- **Note:** Regarding ports, TDengine uses 13 consecutive TCP and UDP port numbers starting from serverPort; be sure to open them in the firewall. With the default configuration this means opening the 13 ports from 6030 to 6042, for both TCP and UDP.
+ **Note:** Regarding ports, TDengine uses 13 consecutive TCP and UDP port numbers starting from serverPort; be sure to open them in the firewall. With the default configuration this means opening the 13 ports from 6030 to 6042, for both TCP and UDP. (For the detailed port list see [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port).)

Data in different application scenarios often has different characteristics, for example retention days, number of replicas, collection frequency, record size, number of collection points, and compression can all differ. To achieve the best storage efficiency, TDengine provides the following storage-related system configuration parameters:

@@ -145,12 +144,12 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
- numOfMnodes: the number of management nodes in the system. Default: 3.
- balance: whether load balancing is enabled. 0: no, 1: yes. Default: 1.
- mnodeEqualVnodeNum: the number of vnodes one mnode is considered equivalent to. Default: 4.
- - offlineThreshold: the dnode offline threshold; once exceeded, the dnode is removed from the cluster. Unit: seconds; default: 86400*10 (i.e. 10 days).
+ - offlineThreshold: the dnode offline threshold; once exceeded, the dnode is removed from the cluster. Unit: seconds; default: 86400*100 (i.e. 100 days).
- statusInterval: the interval at which a dnode reports its status to the mnode. Unit: seconds; default: 1.
- maxTablesPerVnode: the maximum number of tables that can be created in each vnode. Default: 1000000.
- maxVgroupsPerDb: the maximum number of vgroups that can be used in each database.
- arbitrator: the end point of the arbitrator in the system; empty by default.
- - For the configuration of timezone, locale, and charset, see the client configuration.
+ - For the configuration of timezone, locale, and charset, see the client configuration. (In versions 2.0.20.0 and above, adding a new node to the cluster no longer requires the locale and charset parameter values to match.)

For easier debugging, the log configuration of each dnode can be adjusted temporarily through SQL statements; the change is lost after a system restart:
@@ -463,41 +462,41 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下

| Keyword List | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
- | ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING |
- | ABORT | COPY | ID | MODULES | SLIMIT |
- | ACCOUNT | COUNT | IF | NCHAR | SMALLINT |
- | ACCOUNTS | CREATE | IGNORE | NE | SPREAD |
- | ADD | CTIME | IMMEDIATE | NONE | STABLE |
- | AFTER | DATABASE | IMPORT | NOT | STABLES |
- | ALL | DATABASES | IN | NOTNULL | STAR |
- | ALTER | DAYS | INITIALLY | NOW | STATEMENT |
- | AND | DEFERRED | INSERT | OF | STDDEV |
- | AS | DELIMITERS | INSTEAD | OFFSET | STREAM |
- | ASC | DESC | INTEGER | OR | STREAMS |
- | ATTACH | DESCRIBE | INTERVAL | ORDER | STRING |
- | AVG | DETACH | INTO | PASS | SUM |
- | BEFORE | DIFF | IP | PERCENTILE | TABLE |
- | BEGIN | DISTINCT | IS | PLUS | TABLES |
- | BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG |
- | BIGINT | DNODE | JOIN | PREV | TAGS |
- | BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS |
- | BITAND | DOT | KEY | QUERIES | TBNAME |
- | BITNOT | DOUBLE | KILL | QUERY | TIMES |
- | BITOR | DROP | LAST | RAISE | TIMESTAMP |
- | BOOL | EACH | LE | REM | TINYINT |
- | BOTTOM | END | LEASTSQUARES | REPLACE | TOP |
- | BY | EQ | LIKE | REPLICA | TRIGGER |
- | CACHE | EXISTS | LIMIT | RESET | UMINUS |
- | CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS |
- | CHANGE | FAIL | LOCAL | ROW | USE |
- | CLOG | FILL | LP | ROWS | USER |
- | CLUSTER | FIRST | LSHIFT | RP | USERS |
- | COLON | FLOAT | LT | RSHIFT | USING |
- | COLUMN | FOR | MATCH | SCORES | VALUES |
- | COMMA | FROM | MAX | SELECT | VARIABLE |
- | COMP | GE | METRIC | SEMI | VGROUPS |
- | CONCAT | GLOB | METRICS | SET | VIEW |
- | CONFIGS | GRANTS | MIN | SHOW | WAVG |
- | CONFLICT | GROUP | MINUS | SLASH | WHERE |
- | CONNECTION | | | | |
+ | ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
+ | ABORT | COPY | ID | NCHAR | SMALLINT |
+ | ACCOUNT | COUNT | IF | NE | SPREAD |
+ | ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
+ | ADD | CTIME | IMMEDIATE | NOT | STABLES |
+ | AFTER | DATABASE | IMPORT | NOTNULL | STAR |
+ | ALL | DATABASES | IN | NOW | STATEMENT |
+ | ALTER | DAYS | INITIALLY | OF | STDDEV |
+ | AND | DEFERRED | INSERT | OFFSET | STREAM |
+ | AS | DELIMITERS | INSTEAD | OR | STREAMS |
+ | ASC | DESC | INTEGER | ORDER | STRING |
+ | ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
+ | AVG | DETACH | INTO | PERCENTILE | TABLE |
+ | BEFORE | DIFF | IP | PLUS | TABLES |
+ | BEGIN | DISTINCT | IS | PRAGMA | TAG |
+ | BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
+ | BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
+ | BINARY | DNODES | KEEP | QUERIES | TBNAME |
+ | BITAND | DOT | KEY | QUERY | TIMES |
+ | BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
+ | BITOR | DROP | LAST | REM | TINYINT |
+ | BOOL | EACH | LE | REPLACE | TOP |
+ | BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
+ | BY | EQ | LIKE | RESET | TRIGGER |
+ | CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
+ | CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
+ | CHANGE | FAIL | LOCAL | ROWS | USE |
+ | CLOG | FILL | LP | RP | USER |
+ | CLUSTER | FIRST | LSHIFT | RSHIFT | USERS |
+ | COLON | FLOAT | LT | SCORES | USING |
+ | COLUMN | FOR | MATCH | SELECT | VALUES |
+ | COMMA | FROM | MAX | SEMI | VARIABLE |
+ | COMP | GE | METRIC | SET | VGROUPS |
+ | CONCAT | GLOB | METRICS | SHOW | VIEW |
+ | CONFIGS | GRANTS | MIN | SLASH | WAVG |
+ | CONFLICT | GROUP | MINUS | SLIDING | WHERE |
+ | CONNECTION | GT | MNODES | | |
@@ -48,15 +48,15 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
| 3 | BIGINT | 8 | long integer, range [-2^63+1, 2^63-1], -2^63 is used for NULL |
| 4 | FLOAT | 4 | floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | double-precision floating point, 15-16 significant digits, range [-1.7E308, 1.7E308] |
- | 6 | BINARY | user-defined | Used to record ASCII strings. In theory it can be up to 16374 bytes long, but since each row is limited to 16K bytes, the practical limit is usually lower. binary accepts only string input, and strings must be enclosed in single quotes, otherwise all English letters are automatically converted to lower case. A size must be specified, e.g. binary(20) defines a string of at most 20 characters, each occupying 1 byte of storage; an error is reported if the user's string exceeds 20 bytes. A single quote inside the string can be written with the escape character backslash plus single quote, i.e. `\'`. |
+ | 6 | BINARY | user-defined | Records binary byte strings; it is recommended to use it only for visible ASCII characters. In theory it can be up to 16374 bytes long, but since each row is limited to 16K bytes, the practical limit is usually lower. binary accepts only string input, and strings must be enclosed in single quotes, otherwise all English letters are automatically converted to lower case. A size must be specified, e.g. binary(20) defines a string of at most 20 characters, each occupying 1 byte of storage; an error is reported if the user's string exceeds 20 bytes. A single quote inside the string can be written with the escape character backslash plus single quote, i.e. `\'`. |
| 7 | SMALLINT | 2 | short integer, range [-32767, 32767], -32768 is used for NULL |
| 8 | TINYINT | 1 | single-byte integer, range [-127, 127], -128 is used for NULL |
| 9 | BOOL | 1 | boolean, {true, false} |
- | 10 | NCHAR | user-defined | Used to record non-ASCII strings, such as Chinese characters. Each nchar character occupies 4 bytes of storage. Strings are enclosed in single quotes, and single quotes inside the string are escaped as `\'`. A size must be specified for nchar; a column of type nchar(10) stores at most 10 nchar characters and always occupies 40 bytes. An error is reported if the user's string exceeds the declared length. |
+ | 10 | NCHAR | user-defined | Records strings that may contain multi-byte characters, such as Chinese characters. Each nchar character occupies 4 bytes of storage. Strings are enclosed in single quotes, and single quotes inside the string are escaped as `\'`. A size must be specified for nchar; a column of type nchar(10) stores at most 10 nchar characters and always occupies 40 bytes. An error is reported if the user's string exceeds the declared length. |

**Tips**:
1. TDengine is case-insensitive for English characters in SQL statements and automatically converts them to lower case for execution. Case-sensitive strings and passwords therefore need to be enclosed in single quotes.
- 2. Avoid using the BINARY type to store non-ASCII strings; this easily leads to garbled data and other errors. The correct approach is to use the NCHAR type to store Chinese characters.
+ 2. **Note** that although the BINARY type supports byte-level binary characters in the underlying storage, different programming languages do not guarantee consistent handling of binary data, so it is recommended to store only visible ASCII characters in BINARY columns and to avoid invisible characters. Multi-byte data such as Chinese characters should be stored with the nchar type. If Chinese characters are forced into a BINARY column, reads and writes may sometimes work, but the data carries no character-set information, and garbled or even corrupted data can easily result.
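A minimal shell session illustrating the recommended use of nchar for Chinese text; the table and column names are made up for this example.

```bash
$ taos> create table tb_nchar (ts timestamp, name nchar(10));
$ taos> insert into tb_nchar values (now, '涛思数据');
$ taos> select * from tb_nchar;
```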
## <a class="anchor" id="management"></a>Database Management
@@ -407,10 +407,10 @@ SELECT select_expr [, select_expr ...]
    [INTERVAL (interval_val [, interval_offset])]
    [SLIDING sliding_val]
    [FILL fill_val]
-   [GROUP BY col_list]
+   [GROUP BY col_list [HAVING having_condition]]
    [ORDER BY col_list { DESC | ASC }]
-   [SLIMIT limit_val [, SOFFSET offset_val]]
-   [LIMIT limit_val [, OFFSET offset_val]]
+   [SLIMIT limit_val [SOFFSET offset_val]]
+   [LIMIT limit_val [OFFSET offset_val]]
    [>> export_file];
```
@@ -626,7 +626,8 @@ Query OK, 1 row(s) in set (0.001091s)
- A WHERE clause can filter numeric values with various logical operators, and strings with wildcards.
- By default the output is sorted by the first column, the timestamp, in ascending order, but descending order can be specified (_c0 refers to the first, timestamp column). Using ORDER BY on any other field is illegal.
- The LIMIT parameter controls the number of output rows, and OFFSET specifies from which row output starts. LIMIT/OFFSET are applied to the result set after ORDER BY.
-   - The SLIMIT parameter controls the number of rows output within each group produced by GROUP BY.
+   * When a GROUP BY clause is present, the LIMIT parameter controls the maximum number of rows output in each group.
+ - The SLIMIT parameter controls, among the groups produced by GROUP BY, for at most how many groups data is output.
- Results can be exported to a specified file with ">>".
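A small sketch combining the SLIMIT parameter described above with file export, assuming the test.meters super table generated by taosdemo earlier in this commit; the output path is arbitrary.

```bash
$ taos> select avg(f1) from test.meters group by loc slimit 1 >> /tmp/avg_f1_by_loc.csv;
```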
### Supported Filtering Operations
@@ -647,6 +648,15 @@ Query OK, 1 row(s) in set (0.001091s)
2. For filtering on a single field, only one time-based filter condition may be set in a statement; for other (regular) columns or tag columns, however, the `OR` keyword can be used to combine filter conditions, for example ((value > 20 AND value < 30) OR (value < 12)).
3. Starting with version 2.0.17, filtering supports the BETWEEN AND syntax; for example `WHERE col2 BETWEEN 1.5 AND 3.25` expresses the condition "1.5 ≤ col2 ≤ 3.25".

### HAVING Filters After GROUP BY

Starting with version 2.0.20, GROUP BY may be followed by a HAVING clause that further filters the grouped data. The HAVING clause can use aggregate functions and selection functions as filter conditions (LEASTSQUARES, TOP, BOTTOM, and LAST_ROW are not supported yet).

For example, the following statement outputs only the groups with `AVG(f1) > 0`:
```mysql
SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0;
```

### SQL Examples

- For the examples below, the table tb1 is created with the following statement:
@@ -950,7 +960,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
- Function: returns the largest *k* non-NULL values of a column in a table/super table. If more than k values tie for the largest, the ones with smaller timestamps are returned.
+ Function: returns the largest *k* non-NULL values of a column in a table/super table. If several rows hold the same value and taking all of them would exceed the limit of k rows, the system randomly selects the required number from among the equal values.

Return data type: same as the field it is applied to.
@@ -984,7 +994,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
```mysql
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
- Function: returns the smallest *k* non-NULL values of a column in a table/super table. If more than k values tie for the smallest, the ones with smaller timestamps are returned.
+ Function: returns the smallest *k* non-NULL values of a column in a table/super table. If several rows hold the same value and taking all of them would exceed the limit of k rows, the system randomly selects the required number from among the equal values.

Return data type: same as the field it is applied to.
@@ -1216,6 +1226,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
- The maximum length of a column name is 64; at most 1024 columns are allowed and at least 2 are required, the first of which must be a timestamp.
- At most 128 tags are allowed (at least 1), and the total tag length must not exceed 16k characters.
- The maximum length of a SQL statement is 65480 characters, but it can be raised via the system configuration parameter maxSQLLength, up to 1M.
+ - A SELECT statement may return at most 1024 columns in its result (function calls in the statement may also take up some column slots); when the limit is exceeded, explicitly select fewer output columns to avoid an execution error.
- There is no limit on the number of databases, super tables, or tables; they are constrained only by system resources.

## Other Conventions of TAOS SQL
@@ -102,7 +102,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支

Batch insertion. Each write statement can insert multiple records into one table at once, or insert multiple records into several tables at once.

- ## 12. What is the most efficient way to write data? How do I fix Chinese characters in nchar data inserted on Windows being parsed as garbled text?
+ ## 12. How do I fix Chinese characters in nchar data inserted on Windows being parsed as garbled text?

If nchar data inserted on Windows contains Chinese, first make sure the system region is set to China (this can be set in the Control Panel); the `taos` client in cmd should then work normally. If you develop a Java application in an IDE such as Eclipse or IntelliJ, make sure the file encoding in the IDE is set to GBK (Java's default encoding type), and then initialize the client configuration when creating the Connection, as follows:
```JAVA
@@ -166,3 +166,18 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
2. If the timezone parameter is set in taos.cfg, the client uses the setting in that configuration file.
3. If a timezone is specified explicitly when establishing the database connection in the Connector Driver of C/C++/Java/Python or another language, that specified timezone prevails. For example, the JDBC URL of the Java Connector has a timezone parameter.
4. When writing SQL statements, timestamps can also be given directly as Unix timestamps (e.g. `1554984068000`) or as timestamp strings with a time zone, that is, in RFC 3339 format (e.g. `2013-04-12T15:52:01.123+08:00`) or ISO-8601 format (e.g. `2013-04-12T15:52:01.123+0800`); the values of such timestamps are then no longer affected by other timezone settings.
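For instance, both of the following inserts are accepted regardless of the client's timezone setting: the first uses a Unix timestamp in milliseconds, the second an RFC 3339 string, with both values taken from the examples above. The table t1 and its schema are placeholders for this illustration.

```bash
$ taos> insert into t1 values (1554984068000, 10);
$ taos> insert into t1 values ('2013-04-12T15:52:01.123+08:00', 20);
```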
## <a class="anchor" id="port"></a>19. Which network ports does TDengine use?

TDengine 2.0 uses the following network ports (described assuming the default port 6030; if the settings in the configuration file are changed, the ports listed here change accordingly). Administrators can refer to this information to adjust firewall settings:

| Protocol | Default Port | Purpose | How to Change |
| --- | --------- | ------------------------------- | ------------------------------ |
| TCP | 6030 | Communication between client and server. | Determined by the serverPort setting in the configuration file. |
| TCP | 6035 | Inter-node communication in a multi-node cluster. | Changes with the serverPort. |
| TCP | 6040 | Inter-node data synchronization in a multi-node cluster. | Changes with the serverPort. |
| TCP | 6041 | RESTful communication between client and server. | Changes with the serverPort. |
| TCP | 6042 | Service port of the Arbitrator. | Changes with the Arbitrator startup parameters. |
| TCP | 6060 | Port of the Monitor service in the enterprise edition. | |
| UDP | 6030-6034 | Communication between client and server. | Changes with the serverPort. |
| UDP | 6035-6039 | Inter-node communication in a multi-node cluster. | Changes with the serverPort. |
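As a sketch of the corresponding firewall adjustment on a Linux host, assuming firewalld or ufw is in use and the default serverPort; adjust the range if serverPort has been changed.

```bash
# firewalld
$ firewall-cmd --permanent --add-port=6030-6042/tcp
$ firewall-cmd --permanent --add-port=6030-6042/udp
$ firewall-cmd --reload

# or ufw
$ ufw allow 6030:6042/tcp
$ ufw allow 6030:6042/udp
```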
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})

@@ -5,7 +5,7 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
- <version>2.0.25</version>
+ <version>2.0.28</version>
<packaging>jar</packaging>

<name>JDBCDriver</name>

@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
- <version>2.0.25</version>
+ <version>2.0.28</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@ -4,13 +4,23 @@ import java.sql.*;
|
|||
import java.util.Enumeration;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
public abstract class AbstractConnection extends WrapperImpl implements Connection {
|
||||
|
||||
protected volatile boolean isClosed;
|
||||
protected volatile String catalog;
|
||||
protected volatile Properties clientInfoProps = new Properties();
|
||||
protected final Properties clientInfoProps = new Properties();
|
||||
|
||||
protected AbstractConnection(Properties properties) {
|
||||
Set<String> propNames = properties.stringPropertyNames();
|
||||
for (String propName : propNames) {
|
||||
clientInfoProps.setProperty(propName, properties.getProperty(propName));
|
||||
}
|
||||
String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING");
|
||||
clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat);
|
||||
}
|
||||
|
||||
@Override
|
||||
public abstract Statement createStatement() throws SQLException;
|
||||
|
@ -35,7 +45,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
|
|||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public void setAutoCommit(boolean autoCommit) throws SQLException {
|
||||
if (isClosed())
|
||||
|
@ -441,8 +450,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
|
|||
if (isClosed)
|
||||
throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
|
||||
|
||||
if (clientInfoProps == null)
|
||||
clientInfoProps = new Properties();
|
||||
if (clientInfoProps != null)
|
||||
clientInfoProps.setProperty(name, value);
|
||||
}
|
||||
|
||||
|
|
|
@ -10,6 +10,7 @@ import java.util.Map;
|
|||
|
||||
public abstract class AbstractResultSet extends WrapperImpl implements ResultSet {
|
||||
private int fetchSize;
|
||||
protected boolean wasNull;
|
||||
|
||||
protected void checkAvailability(int columnIndex, int bounds) throws SQLException {
|
||||
if (isClosed())
|
||||
|
@ -28,7 +29,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
|
|||
|
||||
@Override
|
||||
public boolean wasNull() throws SQLException {
|
||||
return false;
|
||||
return wasNull;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -28,6 +28,7 @@ public class TSDBConnection extends AbstractConnection {
|
|||
}
|
||||
|
||||
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
|
||||
super(info);
|
||||
this.databaseMetaData = meta;
|
||||
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
|
||||
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
|
||||
|
|
|
@ -95,6 +95,11 @@ public class TSDBDriver extends AbstractDriver {
|
|||
*/
|
||||
public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch";
|
||||
|
||||
/**
|
||||
* timestamp format for JDBC-RESTful,should one of the options: string or timestamp or utc
|
||||
*/
|
||||
public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat";
|
||||
|
||||
private TSDBDatabaseMetaData dbMetaData = null;
|
||||
|
||||
static {
|
||||
|
|
|
@ -33,17 +33,9 @@ import java.util.regex.Pattern;
|
|||
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
|
||||
|
||||
private String rawSql;
|
||||
private String sql;
|
||||
// private ArrayList<Object> parameters = new ArrayList<>();
|
||||
private Object[] parameters;
|
||||
private boolean isPrepared;
|
||||
|
||||
//start with insert or import and is case-insensitive
|
||||
private static Pattern savePattern = Pattern.compile("(?i)^\\s*(insert|import)");
|
||||
// is insert or import
|
||||
private boolean isSaved;
|
||||
|
||||
// private SavedPreparedStatement savedPreparedStatement;
|
||||
private volatile TSDBParameterMetaData parameterMetaData;
|
||||
|
||||
TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
|
||||
|
@ -65,35 +57,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
private void init(String sql) {
|
||||
this.rawSql = sql;
|
||||
preprocessSql();
|
||||
// this.isSaved = isSavedSql(this.rawSql);
|
||||
// if (this.isSaved) {
|
||||
// try {
|
||||
// this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this);
|
||||
// } catch (SQLException e) {
|
||||
// e.printStackTrace();
|
||||
// }
|
||||
// }
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* if the precompiled sql is insert or import
|
||||
*
|
||||
* @param sql
|
||||
* @return
|
||||
*/
|
||||
private boolean isSavedSql(String sql) {
|
||||
Matcher matcher = savePattern.matcher(sql);
|
||||
return matcher.find();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int[] executeBatch() throws SQLException {
|
||||
// if (isSaved) {
|
||||
// return this.savedPreparedStatement.executeBatch();
|
||||
// } else {
|
||||
return super.executeBatch();
|
||||
// }
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -157,23 +125,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
*
|
||||
* @return a string of the native sql statement for TSDB
|
||||
*/
|
||||
// private String getNativeSql(String rawSql) {
|
||||
// for (int i = 0; i < parameters.length; i++) {
|
||||
// Object para = parameters[i];
|
||||
// if (para != null) {
|
||||
// String paraStr = para.toString();
|
||||
// if (para instanceof Timestamp || para instanceof String) {
|
||||
// paraStr = "'" + paraStr + "'";
|
||||
// }
|
||||
// this.sql = this.sql.replaceFirst("[?]", paraStr);
|
||||
// } else {
|
||||
// this.sql = this.sql.replaceFirst("[?]", "NULL");
|
||||
// }
|
||||
// }
|
||||
// parameters = new Object[parameters.length];
|
||||
// return sql;
|
||||
// }
|
||||
|
||||
private String getNativeSql(String rawSql) throws SQLException {
|
||||
String sql = rawSql;
|
||||
for (int i = 0; i < parameters.length; ++i) {
|
||||
|
@ -201,108 +152,58 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
|
||||
@Override
|
||||
public ResultSet executeQuery() throws SQLException {
|
||||
// if (isSaved) {
|
||||
// this.savedPreparedStatement.executeBatchInternal();
|
||||
// return null;
|
||||
// } else {
|
||||
|
||||
if (!isPrepared)
|
||||
return executeQuery(this.rawSql);
|
||||
|
||||
final String sql = getNativeSql(this.rawSql);
|
||||
return executeQuery(sql);
|
||||
// }
|
||||
}
|
||||
|
||||
@Override
|
||||
public int executeUpdate() throws SQLException {
|
||||
// if (isSaved) {
|
||||
// return this.savedPreparedStatement.executeBatchInternal();
|
||||
// } else {
|
||||
if (!isPrepared)
|
||||
return executeUpdate(this.rawSql);
|
||||
String sql = getNativeSql(this.rawSql);
|
||||
return executeUpdate(sql);
|
||||
// }
|
||||
}
|
||||
|
||||
private boolean isSupportedSQLType(int sqlType) {
|
||||
switch (sqlType) {
|
||||
case Types.TIMESTAMP:
|
||||
case Types.INTEGER:
|
||||
case Types.BIGINT:
|
||||
case Types.FLOAT:
|
||||
case Types.DOUBLE:
|
||||
case Types.SMALLINT:
|
||||
case Types.TINYINT:
|
||||
case Types.BOOLEAN:
|
||||
case Types.BINARY:
|
||||
case Types.NCHAR:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNull(int parameterIndex, int sqlType) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
if (!isSupportedSQLType(sqlType) || parameterIndex < 0)
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
|
||||
// if (parameterIndex >= parameters.size())
|
||||
// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_BOUNDARY);
|
||||
|
||||
setObject(parameterIndex, "NULL");
|
||||
setObject(parameterIndex, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBoolean(int parameterIndex, boolean x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setByte(int parameterIndex, byte x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setShort(int parameterIndex, short x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setInt(int parameterIndex, int x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setLong(int parameterIndex, long x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFloat(int parameterIndex, float x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setDouble(int parameterIndex, double x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
|
@ -315,16 +216,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
|
||||
@Override
|
||||
public void setString(int parameterIndex, String x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBytes(int parameterIndex, byte[] x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
|
@ -344,8 +240,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
|
||||
@Override
|
||||
public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
setObject(parameterIndex, x);
|
||||
}
|
||||
|
||||
|
@ -360,7 +254,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
||||
}
|
||||
|
||||
|
@ -375,8 +268,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
public void clearParameters() throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
|
||||
// parameters.clear();
|
||||
parameters = new Object[parameters.length];
|
||||
}
|
||||
|
||||
|
@ -384,43 +275,29 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
||||
setObject(parameterIndex,x);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setObject(int parameterIndex, Object x) throws SQLException {
|
||||
// if (isSaved) {
|
||||
// this.savedPreparedStatement.setParam(parameterIndex, x);
|
||||
// } else {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
if (parameterIndex < 1 && parameterIndex >= parameters.length)
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
|
||||
|
||||
parameters[parameterIndex - 1] = x;
|
||||
// parameters.add(x);
|
||||
// }
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean execute() throws SQLException {
|
||||
// if (isSaved) {
|
||||
// int result = this.savedPreparedStatement.executeBatchInternal();
|
||||
// return result > 0;
|
||||
// } else {
|
||||
if (!isPrepared)
|
||||
return execute(this.rawSql);
|
||||
|
||||
final String sql = getNativeSql(this.rawSql);
|
||||
|
||||
return execute(sql);
|
||||
// }
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addBatch() throws SQLException {
|
||||
// if (isSaved) {
|
||||
// this.savedPreparedStatement.addBatch();
|
||||
// } else {
|
||||
if (this.batchedArgs == null) {
|
||||
batchedArgs = new ArrayList<>();
|
||||
}
|
||||
|
@ -431,7 +308,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
String sql = this.getConnection().nativeSQL(this.rawSql);
|
||||
addBatch(sql);
|
||||
}
|
||||
// }
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -475,7 +351,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
|
|||
public ResultSetMetaData getMetaData() throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
|
||||
// return this.getResultSet().getMetaData();
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
|
||||
}
|
||||
|
||||
|
|
|
@ -203,6 +203,10 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
|
|||
|
||||
this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
|
||||
if (!lastWasNull) {
|
||||
Object value = this.rowData.get(columnIndex - 1);
|
||||
if (value instanceof Timestamp)
|
||||
res = ((Timestamp) value).getTime();
|
||||
else
|
||||
res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
|
||||
}
|
||||
return res;
|
||||
|
@ -273,7 +277,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, this.columnMetaDataList.size());
|
||||
|
||||
Timestamp res = null;
|
||||
|
||||
if (this.getBatchFetch())
|
||||
return this.blockData.getTimestamp(columnIndex - 1);
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ package com.taosdata.jdbc;
|
|||
import java.math.BigDecimal;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Timestamp;
|
||||
import java.time.Instant;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
|
||||
|
@ -299,7 +300,19 @@ public class TSDBResultSetRowData {
|
|||
}
|
||||
|
||||
public void setTimestamp(int col, long ts) {
|
||||
//TODO: this implementation contains logical error
|
||||
// when precision is us the (long ts) is 16 digital number
|
||||
// when precision is ms, the (long ts) is 13 digital number
|
||||
// we need a JNI function like this:
|
||||
// public void setTimestamp(int col, long epochSecond, long nanoAdjustment)
|
||||
if (ts < 1_0000_0000_0000_0L) {
|
||||
data.set(col, new Timestamp(ts));
|
||||
} else {
|
||||
long epochSec = ts / 1000_000l;
|
||||
long nanoAdjustment = ts % 1000_000l * 1000l;
|
||||
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
data.set(col, timestamp);
|
||||
}
|
||||
}
|
||||
|
||||
public Timestamp getTimestamp(int col) {
|
||||
|
|
|
@ -22,6 +22,7 @@ public class RestfulConnection extends AbstractConnection {
|
|||
private final DatabaseMetaData metadata;
|
||||
|
||||
public RestfulConnection(String host, String port, Properties props, String database, String url) {
|
||||
super(props);
|
||||
this.host = host;
|
||||
this.port = Integer.parseInt(port);
|
||||
this.database = database;
|
||||
|
|
|
@ -5,13 +5,12 @@ import com.alibaba.fastjson.JSONObject;
|
|||
import com.google.common.primitives.Ints;
|
||||
import com.google.common.primitives.Longs;
|
||||
import com.google.common.primitives.Shorts;
|
||||
import com.taosdata.jdbc.AbstractResultSet;
|
||||
import com.taosdata.jdbc.TSDBConstants;
|
||||
import com.taosdata.jdbc.TSDBError;
|
||||
import com.taosdata.jdbc.TSDBErrorNumbers;
|
||||
import com.taosdata.jdbc.*;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.sql.*;
|
||||
import java.time.Instant;
|
||||
import java.time.ZoneOffset;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
|
||||
|
@ -19,6 +18,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
private volatile boolean isClosed;
|
||||
private int pos = -1;
|
||||
|
||||
|
||||
private final String database;
|
||||
private final Statement statement;
|
||||
// data
|
||||
|
@ -65,7 +65,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
}
|
||||
}
|
||||
|
||||
private Object parseColumnData(JSONArray row, int colIndex, int taosType) {
|
||||
private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException {
|
||||
switch (taosType) {
|
||||
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
|
||||
return row.getBoolean(colIndex);
|
||||
|
@ -81,8 +81,44 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
return row.getFloat(colIndex);
|
||||
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
|
||||
return row.getDouble(colIndex);
|
||||
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return new Timestamp(row.getDate(colIndex).getTime());
|
||||
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
|
||||
if (row.get(colIndex) == null)
|
||||
return null;
|
||||
String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
|
||||
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) {
|
||||
Long value = row.getLong(colIndex);
|
||||
//TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9
|
||||
if (value < 1_0000_0000_0000_0L)
|
||||
return new Timestamp(value);
|
||||
long epochSec = value / 1000_000l;
|
||||
long nanoAdjustment = value % 1000_000l * 1000l;
|
||||
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
}
|
||||
if ("UTC".equalsIgnoreCase(timestampFormat)) {
|
||||
String value = row.getString(colIndex);
|
||||
long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000;
|
||||
int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5));
|
||||
long nanoAdjustment = 0;
|
||||
if (value.length() > 28) {
|
||||
// ms timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00
|
||||
nanoAdjustment = fractionalSec * 1000l;
|
||||
} else {
|
||||
// ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00
|
||||
nanoAdjustment = fractionalSec * 1000_000l;
|
||||
}
|
||||
ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5));
|
||||
Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant();
|
||||
return Timestamp.from(instant);
|
||||
}
|
||||
String value = row.getString(colIndex);
|
||||
if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS
|
||||
return row.getTimestamp(colIndex);
|
||||
// us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS
|
||||
long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
|
||||
long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l;
|
||||
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
return timestamp;
|
||||
}
|
||||
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
|
||||
return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
|
||||
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
|
||||
|
@ -126,12 +162,12 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean wasNull() throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
|
||||
return resultSet.isEmpty();
|
||||
}
|
||||
// @Override
|
||||
// public boolean wasNull() throws SQLException {
|
||||
// if (isClosed())
|
||||
// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
|
||||
// return resultSet.isEmpty();
|
||||
// }
|
||||
|
||||
@Override
|
||||
public String getString(int columnIndex) throws SQLException {
|
||||
|
@ -150,8 +186,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return false;
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Boolean)
|
||||
return (boolean) value;
|
||||
return Boolean.valueOf(value.toString());
|
||||
|
@ -162,8 +201,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
long valueAsLong = Long.parseLong(value.toString());
|
||||
if (valueAsLong == Byte.MIN_VALUE)
|
||||
return 0;
|
||||
|
@ -183,8 +225,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
long valueAsLong = Long.parseLong(value.toString());
|
||||
if (valueAsLong == Short.MIN_VALUE)
|
||||
return 0;
|
||||
|
@ -198,8 +243,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
long valueAsLong = Long.parseLong(value.toString());
|
||||
if (valueAsLong == Integer.MIN_VALUE)
|
||||
return 0;
|
||||
|
@ -213,9 +261,14 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Timestamp) {
|
||||
return ((Timestamp) value).getTime();
|
||||
}
|
||||
long valueAsLong = 0;
|
||||
try {
|
||||
valueAsLong = Long.parseLong(value.toString());
|
||||
|
@ -232,8 +285,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Float || value instanceof Double)
|
||||
return (float) value;
|
||||
return Float.parseFloat(value.toString());
|
||||
|
@ -244,8 +300,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Double || value instanceof Float)
|
||||
return (double) value;
|
||||
return Double.parseDouble(value.toString());
|
||||
|
@ -307,6 +366,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
return null;
|
||||
if (value instanceof Timestamp)
|
||||
return (Timestamp) value;
|
||||
// if (value instanceof Long) {
|
||||
// if (1_0000_0000_0000_0L > (long) value)
|
||||
// return Timestamp.from(Instant.ofEpochMilli((long) value));
|
||||
// long epochSec = (long) value / 1000_000L;
|
||||
// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000);
|
||||
// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
// }
|
||||
return Timestamp.valueOf(value.toString());
|
||||
}
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON;
|
|||
import com.alibaba.fastjson.JSONArray;
|
||||
import com.alibaba.fastjson.JSONObject;
|
||||
import com.taosdata.jdbc.AbstractStatement;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.TSDBError;
|
||||
import com.taosdata.jdbc.TSDBErrorNumbers;
|
||||
import com.taosdata.jdbc.utils.HttpClientPoolUtil;
|
||||
|
@ -34,14 +35,11 @@ public class RestfulStatement extends AbstractStatement {
|
|||
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
|
||||
|
||||
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
|
||||
return executeOneQuery(url, sql);
|
||||
return executeOneQuery(sql);
|
||||
}
|
||||
|
||||
// if (this.database != null && !this.database.trim().replaceAll("\\s","").isEmpty())
|
||||
// HttpClientPoolUtil.execute(url, "use " + this.database);
|
||||
return executeOneQuery(url, sql);
|
||||
return executeOneQuery(sql);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -56,8 +54,6 @@ public class RestfulStatement extends AbstractStatement {
|
|||
return executeOneUpdate(url, sql);
|
||||
}
|
||||
|
||||
// if (this.database != null && !this.database.trim().replaceAll("\\s", "").isEmpty())
|
||||
// HttpClientPoolUtil.execute(url, "use " + this.database);
|
||||
return executeOneUpdate(url, sql);
|
||||
}
|
||||
|
||||
|
@ -78,14 +74,21 @@ public class RestfulStatement extends AbstractStatement {
|
|||
|
||||
//如果执行了use操作应该将当前Statement的catalog设置为新的database
|
||||
boolean result = true;
|
||||
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) {
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
|
||||
}
|
||||
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) {
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
|
||||
}
|
||||
|
||||
if (SqlSyntaxValidator.isUseSql(sql)) {
|
||||
HttpClientPoolUtil.execute(url, sql);
|
||||
this.database = sql.trim().replace("use", "").trim();
|
||||
this.conn.setCatalog(this.database);
|
||||
result = false;
|
||||
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
|
||||
executeOneQuery(url, sql);
|
||||
executeOneQuery(sql);
|
||||
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
|
||||
executeOneUpdate(url, sql);
|
||||
result = false;
|
||||
|
@ -101,11 +104,18 @@ public class RestfulStatement extends AbstractStatement {
|
|||
return result;
|
||||
}
|
||||
|
||||
private ResultSet executeOneQuery(String url, String sql) throws SQLException {
|
||||
private ResultSet executeOneQuery(String sql) throws SQLException {
|
||||
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
|
||||
|
||||
// row data
|
||||
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
|
||||
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat))
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
|
||||
if ("UTC".equalsIgnoreCase(timestampFormat))
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
|
||||
|
||||
String result = HttpClientPoolUtil.execute(url, sql);
|
||||
JSONObject resultJson = JSON.parseObject(result);
|
||||
if (resultJson.getString("status").equals("error")) {
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
package com.taosdata.jdbc.utils;
|
||||
|
||||
import java.time.format.DateTimeFormatter;
|
||||
import java.time.format.DateTimeFormatterBuilder;
|
||||
|
||||
public class UtcTimestampUtil {
|
||||
public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
|
||||
.appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+")
|
||||
// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
|
||||
.toFormatter();
|
||||
|
||||
}
|
|
@@ -50,6 +50,51 @@ public class TSDBPreparedStatementTest {
pstmt_insert.setNull(2, Types.INTEGER);
int result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(3, Types.BIGINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(4, Types.FLOAT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(5, Types.DOUBLE);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(6, Types.SMALLINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(7, Types.TINYINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(8, Types.BOOLEAN);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(9, Types.BINARY);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.NCHAR);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.OTHER);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
}

@Test

@@ -129,7 +174,7 @@ public class TSDBPreparedStatementTest {
Assert.assertFalse(pstmt_insert.execute());
}

class Person implements Serializable {
class Person {
String name;
int age;
boolean sex;
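Outside the test harness, the `setNull` pattern exercised above boils down to binding `java.sql.Types` constants on a prepared statement. A minimal sketch, assuming a locally running server and a `weather(ts timestamp, f1 int)` table in a `test_null` database (both assumptions, not part of the patch):

```java
import java.sql.*;

public class SetNullSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/test_null?user=root&password=taosdata"; // assumed
        try (Connection conn = DriverManager.getConnection(url);
             PreparedStatement ps = conn.prepareStatement("insert into weather(ts, f1) values(?, ?)")) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setNull(2, Types.INTEGER); // writes a SQL NULL into the int column
            System.out.println("rows affected: " + ps.executeUpdate());
        }
    }
}
```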
@@ -160,6 +160,7 @@ public class TSDBResultSetTest {
@Test
public void getTime() throws SQLException {
Time f1 = rs.getTime("f1");
Assert.assertNotNull(f1);
Assert.assertEquals("00:00:00", f1.toString());
}
@@ -20,6 +20,7 @@ public class DriverAutoloadTest {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
Assert.assertNotNull(conn);
conn.close();
}

@Test

@@ -27,6 +28,7 @@ public class DriverAutoloadTest {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(url, properties);
Assert.assertNotNull(conn);
conn.close();
}
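The hunks above only add explicit `conn.close()` calls. In application code the same cleanup is usually expressed with try-with-resources; a small sketch (the URL is an assumption for a local server):

```java
import java.sql.*;

public class AutoCloseSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata"; // assumed local server
        try (Connection conn = DriverManager.getConnection(url)) { // closed even if the body throws
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```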
@@ -0,0 +1,64 @@
package com.taosdata.jdbc.cases;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.sql.*;

public class NullValueInResultSetForJdbcJniTest {

private static final String host = "127.0.0.1";
Connection conn;

@Test
public void test() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select * from weather");
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
Object value = rs.getObject(i);
System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
}
System.out.println();
}

} catch (SQLException e) {
e.printStackTrace();
}
}

@Before
public void before() throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)");
stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)");
stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)");
stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)");
stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)");
stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)");
stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)");
stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')");
stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')");
} catch (SQLException e) {
e.printStackTrace();
}
}

@After
public void after() {
try {
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
@@ -0,0 +1,91 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.utils.TimestampUtil;
import org.junit.*;

import java.sql.*;
import java.util.Properties;

public class TD3841Test {
private static final String host = "127.0.0.1";
private static Properties properties;
private static Connection conn_restful;
private static Connection conn_jni;

@Test
public void testRestful() throws SQLException {
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn_restful = DriverManager.getConnection(url, properties);

try (Statement stmt = conn_restful.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")");
ResultSet rs = stmt.executeQuery("select * from weather");
rs.next();

Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime()));
Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull());
Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull());
Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull());
Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull());
Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull());
Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull());
Assert.assertEquals(null, rs.getBytes(9));
Assert.assertEquals(null, rs.getString(10));

rs.close();
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testJNI() throws SQLException {
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn_jni = DriverManager.getConnection(url, properties);

try (Statement stmt = conn_jni.createStatement()) {
stmt.execute("drop database if exists test_null");
stmt.execute("create database if not exists test_null");
stmt.execute("use test_null");
stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))");
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")");
ResultSet rs = stmt.executeQuery("select * from weather");
rs.next();

Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime()));
Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull());
Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull());
Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull());
Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull());
Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull());
Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull());
Assert.assertEquals(null, rs.getBytes(9));
Assert.assertEquals(null, rs.getString(10));

rs.close();
} catch (SQLException e) {
e.printStackTrace();
}
}

@BeforeClass
public static void beforeClass() {
properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
}

@AfterClass
public static void afterClass() throws SQLException {
if (conn_restful != null)
conn_restful.close();
if (conn_jni != null)
conn_jni.close();
}
}
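The assertions above rely on a JDBC convention worth spelling out: primitive getters return 0 (or false) for SQL NULL, and only `ResultSet.wasNull()` tells the two cases apart. A hedged sketch of that read pattern, independent of the test class above (URL and column are assumptions):

```java
import java.sql.*;

public class WasNullSketch {
    // Reads an INT column that may be NULL and returns either the value or "NULL".
    static String readNullableInt(ResultSet rs, int column) throws SQLException {
        int v = rs.getInt(column);          // 0 when the column is NULL
        return rs.wasNull() ? "NULL" : Integer.toString(v);
    }

    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/test_null?user=root&password=taosdata"; // assumed
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select f2 from weather")) {
            while (rs.next()) {
                System.out.println(readNullableInt(rs, 1));
            }
        }
    }
}
```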
@@ -0,0 +1,89 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;

public class TwoTypeTimestampPercisionInJniTest {

private static final String host = "127.0.0.1";
private static final String ms_timestamp_db = "ms_precision_test";
private static final String us_timestamp_db = "us_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
private static final long timestamp2 = timestamp1 * 1000 + 123;

private static Connection conn;

@Test
public void testCase1() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testCase2() {
try (Statement stmt = conn.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();

Timestamp timestamp = rs.getTimestamp(1);
System.out.println(timestamp);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@BeforeClass
public static void beforeClass() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
conn = DriverManager.getConnection(url, properties);

Statement stmt = conn.createStatement();
stmt.execute("drop database if exists " + ms_timestamp_db);
stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'");
stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)");

stmt.execute("drop database if exists " + us_timestamp_db);
stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'");
stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)");
stmt.close();
}

@AfterClass
public static void afterClass() {
try {
if (conn != null)
conn.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
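`testCase2` above checks that a microsecond-precision value survives the round trip: `getTime()` still returns whole milliseconds while `getNanos()` carries the full fractional second. The arithmetic it exercises can be sketched on its own (an illustration only, not the driver's implementation):

```java
import java.sql.Timestamp;

public class MicrosecondTimestampSketch {
    // Builds a java.sql.Timestamp from an epoch value expressed in microseconds.
    static Timestamp fromMicros(long epochMicros) {
        Timestamp ts = new Timestamp(epochMicros / 1000);        // whole milliseconds
        ts.setNanos((int) ((epochMicros % 1_000_000L) * 1000));  // full fractional second in nanos
        return ts;
    }

    public static void main(String[] args) {
        long micros = System.currentTimeMillis() * 1000 + 123;
        Timestamp ts = fromMicros(micros);
        // getTime() is millisecond precision; getNanos() keeps the extra microseconds.
        System.out.println(ts.getTime() + " ms, " + ts.getNanos() + " ns");
    }
}
```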
@@ -0,0 +1,168 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;

public class TwoTypeTimestampPercisionInRestfulTest {

private static final String host = "127.0.0.1";
private static final String ms_timestamp_db = "ms_precision_test";
private static final String us_timestamp_db = "us_precision_test";
private static final long timestamp1 = System.currentTimeMillis();
private static final long timestamp2 = timestamp1 * 1000 + 123;

private static Connection conn1;
private static Connection conn2;
private static Connection conn3;

@Test
public void testCase1() {
try (Statement stmt = conn1.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testCase2() {
try (Statement stmt = conn1.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();

Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testCase3() {
try (Statement stmt = conn2.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
Timestamp rsTimestamp = rs.getTimestamp(1);
long ts = rsTimestamp.getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testCase4() {
try (Statement stmt = conn2.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();

Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testCase5() {
try (Statement stmt = conn3.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
rs.next();
long ts = rs.getTimestamp(1).getTime();
Assert.assertEquals(timestamp1, ts);
ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@Test
public void testCase6() {
try (Statement stmt = conn3.createStatement()) {
ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
rs.next();

Timestamp timestamp = rs.getTimestamp(1);
long ts = timestamp.getTime();
Assert.assertEquals(timestamp1, ts);
int nanos = timestamp.getNanos();
Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

ts = rs.getLong(1);
Assert.assertEquals(timestamp1, ts);
} catch (SQLException e) {
e.printStackTrace();
}
}

@BeforeClass
public static void beforeClass() throws SQLException {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
// properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP");

String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
conn1 = DriverManager.getConnection(url, properties);

url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&timestampFormat=timestamp";
conn2 = DriverManager.getConnection(url, properties);

url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&timestampFormat=utc";
conn3 = DriverManager.getConnection(url, properties);

Statement stmt = conn1.createStatement();
stmt.execute("drop database if exists " + ms_timestamp_db);
stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'");
stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)");

stmt.execute("drop database if exists " + us_timestamp_db);
stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'");
stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)");
stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)");
stmt.close();
}

@AfterClass
public static void afterClass() {
try {
if (conn1 != null)
conn1.close();
if (conn2 != null)
conn2.close();
if (conn3 != null)
conn3.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}
@@ -50,6 +50,51 @@ public class RestfulPreparedStatementTest {
pstmt_insert.setNull(2, Types.INTEGER);
int result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(3, Types.BIGINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(4, Types.FLOAT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(5, Types.DOUBLE);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(6, Types.SMALLINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(7, Types.TINYINT);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(8, Types.BOOLEAN);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(9, Types.BINARY);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.NCHAR);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);

pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
pstmt_insert.setNull(10, Types.OTHER);
result = pstmt_insert.executeUpdate();
Assert.assertEquals(1, result);
}

@Test

@@ -129,7 +174,7 @@ public class RestfulPreparedStatementTest {
Assert.assertFalse(pstmt_insert.execute());
}

class Person implements Serializable {
private class Person {
String name;
int age;
boolean sex;
@@ -160,6 +160,7 @@ public class RestfulResultSetTest {
@Test
public void getTime() throws SQLException {
Time f1 = rs.getTime("f1");
Assert.assertNotNull(f1);
Assert.assertEquals("00:00:00", f1.toString());
}
@@ -37,7 +37,7 @@ static struct argp_option options[] = {
{"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."},
{"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."},
{"user", 'u', "USER", 0, "The user name to use when connecting to the server."},
{"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."},
{"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."},
{"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."},
{"dump-config", 'C', 0, 0, "Dump configuration."},
{"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."},
@@ -723,7 +723,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
if (strcmp(argv[i], "-f") == 0) {
arguments->metaFile = argv[++i];
} else if (strcmp(argv[i], "-c") == 0) {
tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);

} else if (strcmp(argv[i], "-h") == 0) {
arguments->host = argv[++i];
@@ -3610,7 +3610,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}

/*
cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;

@@ -3620,7 +3619,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER;
}
*/

cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
if (childTblExists
@@ -5934,6 +5932,7 @@ static void *specifiedTableQuery(void *sarg) {
totalQueried ++;
g_queryInfo.specifiedQueryInfo.totalQueried ++;

et = taosGetTimestampMs();

int64_t currentPrintTime = taosGetTimestampMs();
@@ -214,8 +214,8 @@ static struct argp_option options[] = {
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 3},
{"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3},
{"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
@@ -482,7 +482,9 @@ static int queryDbImpl(TAOS *taos, char *command) {

static void parse_args(int argc, char *argv[], SArguments *arguments) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-E") == 0) {
if ((strcmp(argv[i], "-S") == 0)
|| (strcmp(argv[i], "-E") == 0)) {
if (argv[i+1]) {
char *tmp = strdup(argv[++i]);

if (tmp) {

@@ -507,6 +509,10 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__);
exit(-1);
}
} else {
errorPrint("%s need a valid value following!\n", argv[i]);
exit(-1);
}
} else if (strcmp(argv[i], "-g") == 0) {
arguments->debug_print = true;
}
@@ -1879,6 +1879,7 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) {
}

static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQuery *pQuery) {

if (isTsCompQuery(pQuery) && pRuntimeEnv->outputBuf && pRuntimeEnv->outputBuf->pDataBlock && taosArrayGetSize(pRuntimeEnv->outputBuf->pDataBlock) > 0) {
SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0);
if (pColInfoData) {
@@ -232,9 +232,28 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) {
return code;
}

static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) {
char vnodeDir[TSDB_FILENAME_LEN] = "\0";
snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId);

TDIR *tdir = tfsOpendir(vnodeDir);
if (!tdir) return;

const TFILE *tfile = tfsReaddir(tdir);
if (!tfile) {
tfsClosedir(tdir);
return;
}

sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId);

tfsClosedir(tdir);
}

int32_t vnodeOpen(int32_t vgId) {
char temp[TSDB_FILENAME_LEN * 3];
char rootDir[TSDB_FILENAME_LEN * 2];
char walRootDir[TSDB_FILENAME_LEN * 2] = {0};
snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId);

SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);

@@ -321,7 +340,21 @@ int32_t vnodeOpen(int32_t vgId) {
}
}

sprintf(temp, "%s/wal", rootDir);
// walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal)
vnodeFindWalRootDir(pVnode->vgId, walRootDir);
if (walRootDir[0] == 0) {
int level = -1, id = -1;

tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id);
if (level < 0 || id < 0) {
vnodeCleanUp(pVnode);
return terrno;
}

sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId);
}

sprintf(temp, "%s/wal", walRootDir);
pVnode->walCfg.vgId = pVnode->vgId;
pVnode->wal = walOpen(temp, &pVnode->walCfg);
if (pVnode->wal == NULL) {

@@ -353,7 +386,7 @@ int32_t vnodeOpen(int32_t vgId) {

pVnode->events = NULL;

vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode);

vnodeAddIntoHash(pVnode);

@@ -361,7 +394,7 @@ int32_t vnodeOpen(int32_t vgId) {
syncInfo.vgId = pVnode->vgId;
syncInfo.version = pVnode->version;
syncInfo.syncCfg = pVnode->syncCfg;
tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN);
tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN);
syncInfo.getWalInfoFp = vnodeGetWalInfo;
syncInfo.writeToCacheFp = vnodeWriteToCache;
syncInfo.confirmForward = vnodeConfirmForard;
@@ -230,8 +230,8 @@ pipeline {
post {
success {
emailext (
subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">

@@ -247,29 +247,29 @@ pipeline {
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>触发用户:${env.CHANGE_AUTHOR}</li>
<li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>

</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
</html>""",
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
}
failure {
emailext (
subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
body: '''<!DOCTYPE html>
subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL",
body: """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">

@@ -285,21 +285,21 @@ pipeline {
<td>
<ul>
<div style="font-size:18px">
<li>构建名称>>分支:${PROJECT_NAME}</li>
<li>构建结果:<span style="color:green"> Successful </span></li>
<li>构建名称>>分支:${env.BRANCH_NAME}</li>
<li>构建结果:<span style="color:red"> Failure </span></li>
<li>构建编号:${BUILD_NUMBER}</li>
<li>触发用户:${CAUSE}</li>
<li>变更概要:${CHANGES}</li>
<li>触发用户:${env.CHANGE_AUTHOR}</li>
<li>提交信息:${env.CHANGE_TITLE}</li>
<li>构建地址:<a href=${BUILD_URL}>${BUILD_URL}</a></li>
<li>构建日志:<a href=${BUILD_URL}console>${BUILD_URL}console</a></li>
<li>变更集:${JELLY_SCRIPT}</li>

</div>
</ul>
</td>
</tr>
</table></font>
</body>
</html>''',
</html>""",
to: "yqliu@taosdata.com,pxiao@taosdata.com",
from: "support@taosdata.com"
)
@@ -31,7 +31,11 @@ namespace TDengineDriver
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
TSDB_DATA_TYPE_BINARY = 8, // string
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
TSDB_DATA_TYPE_NCHAR = 10, // unicode string
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
TSDB_DATA_TYPE_UINT = 13, // 4 bytes
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
}

enum TDengineInitOption

@@ -53,15 +57,23 @@ namespace TDengineDriver
switch ((TDengineDataType)type)
{
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
return "BOOLEAN";
return "BOOL";
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
return "BYTE";
return "TINYINT";
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
return "SHORT";
return "SMALLINT";
case TDengineDataType.TSDB_DATA_TYPE_INT:
return "INT";
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
return "LONG";
return "BIGINT";
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
return "TINYINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
return "SMALLINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_UINT:
return "INT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
return "BIGINT UNSIGNED";
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
return "FLOAT";
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:

@@ -388,7 +388,7 @@ namespace TDengineDriver
public void CreateDb()
{
StringBuilder sql = new StringBuilder();
sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica);
sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica).Append(" keep 36500");
IntPtr res = TDengine.Query(this.conn, sql.ToString());
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{

@@ -413,7 +413,7 @@ namespace TDengineDriver
sql.Clear();
sql.Append("CREATE TABLE IF NOT EXISTS ").
Append(this.dbName).Append(".").Append(this.stablePrefix).
Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)");
Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned) tags(t1 int)");
IntPtr res = TDengine.Query(this.conn, sql.ToString());
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{

@@ -538,7 +538,7 @@ namespace TDengineDriver
int offset = IntPtr.Size * fields;
IntPtr data = Marshal.ReadIntPtr(rowdata, offset);

builder.Append("---");
builder.Append(" | ");

if (data == IntPtr.Zero)
{

@@ -553,7 +553,7 @@ namespace TDengineDriver
builder.Append(v1);
break;
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
byte v2 = Marshal.ReadByte(data);
sbyte v2 = (sbyte)Marshal.ReadByte(data);
builder.Append(v2);
break;
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:

@@ -588,9 +588,25 @@ namespace TDengineDriver
string v10 = Marshal.PtrToStringAnsi(data);
builder.Append(v10);
break;
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
byte v11 = Marshal.ReadByte(data);
builder.Append(v11);
break;
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
ushort v12 = (ushort)Marshal.ReadInt16(data);
builder.Append(v12);
break;
case TDengineDataType.TSDB_DATA_TYPE_UINT:
uint v13 = (uint)Marshal.ReadInt32(data);
builder.Append(v13);
break;
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
ulong v14 = (ulong)Marshal.ReadInt64(data);
builder.Append(v14);
break;
}
}
builder.Append("---");
builder.Append(" | ");

VerbosePrint(builder.ToString() + "\n");
builder.Clear();

@@ -727,7 +743,7 @@ namespace TDengineDriver
int m = now.Minute;
int s = now.Second;

long baseTimestamp = 1609430400000; // 2021/01/01 0:0:0
long baseTimestamp = -16094340000; // 1969-06-29 01:21:00
VerbosePrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s);
long beginTimestamp = baseTimestamp + ((h * 60 + m) * 60 + s) * 1000;
Random random = new Random();

@@ -770,11 +786,16 @@ namespace TDengineDriver

sql.Append("(")
.Append(writeTimeStamp)
.Append(", 1, 2, 3,")
.Append(i + batch)
.Append(", 5, 6, 7, 'abc', 'def')");
.Append(", 1, -2, -3,")
.Append(i + batch - 127)
.Append(", -5, -6, -7, 'abc', 'def', 254, 65534,")
.Append(4294967294 - (uint)i - (uint)batch)
.Append(",")
.Append(18446744073709551614 - (ulong)i - (ulong)batch)
.Append(")");

}
VerbosePrint(sql.ToString() + "\n");
IntPtr res = TDengine.Query(this.conn, sql.ToString());
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
{

@@ -856,7 +877,7 @@ namespace TDengineDriver
}
else
{
sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10))");
sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned)");
}
IntPtr res = TDengine.Query(this.conn, sql.ToString());
if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0))
@@ -63,9 +63,9 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.20</version>
<version>2.0.28</version>
<!-- <scope>system</scope>-->
<!-- <systemPath>${project.basedir}/src/main/resources/taos-jdbcdriver-2.0.20-dist.jar</systemPath>-->
<!-- <systemPath>${project.basedir}/src/main/resources/taos-jdbcdriver-2.0.28-dist.jar</systemPath>-->
</dependency>

<dependency>
@@ -7,6 +7,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication;
@MapperScan(basePackages = {"com.taosdata.example.springbootdemo.dao"})
@SpringBootApplication
public class SpringbootdemoApplication {

public static void main(String[] args) {
SpringApplication.run(SpringbootdemoApplication.class, args);
}
@@ -45,7 +45,7 @@ public class WeatherController {
* @return
*/
@PostMapping("/{temperature}/{humidity}")
public int saveWeather(@PathVariable float temperature, @PathVariable int humidity) {
public int saveWeather(@PathVariable float temperature, @PathVariable float humidity) {
return weatherService.save(temperature, humidity);
}
@@ -8,8 +8,8 @@ public class Weather {

@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8")
private Timestamp ts;
private float temperature;
private float humidity;
private Float temperature;
private Float humidity;
private String location;
private int groupId;

@@ -30,19 +30,19 @@ public class Weather {
this.ts = ts;
}

public float getTemperature() {
public Float getTemperature() {
return temperature;
}

public void setTemperature(float temperature) {
public void setTemperature(Float temperature) {
this.temperature = temperature;
}

public float getHumidity() {
public Float getHumidity() {
return humidity;
}

public void setHumidity(float humidity) {
public void setHumidity(Float humidity) {
this.humidity = humidity;
}
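The switch from `float` to `Float` above matters for NULL handling: a primitive field cannot represent a missing column value, a boxed wrapper can. A tiny sketch of the difference (illustrative only, not part of the demo project):

```java
public class NullableFieldSketch {
    private Float humidity; // stays null when the mapped column is SQL NULL

    public static void main(String[] args) {
        NullableFieldSketch w = new NullableFieldSketch();
        System.out.println(w.humidity == null ? "humidity is NULL" : w.humidity.toString());
    }
}
```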
@@ -39,7 +39,7 @@ public class WeatherService {
return weatherMapper.select(limit, offset);
}

public int save(float temperature, int humidity) {
public int save(float temperature, float humidity) {
Weather weather = new Weather();
weather.setTemperature(temperature);
weather.setHumidity(humidity);
@@ -54,8 +54,8 @@ class TDTestCase:
tdSql.execute(" ".join(sqlcmd))

tdLog.info("================= step3")
tdSql.query('select * from tb1')
tdSql.checkRows(205)
tdSql.query('select count(*) from tb1')
tdSql.checkData(0, 0, 205)

tdLog.info("================= step4")
tdDnodes.stop(1)

@@ -71,8 +71,8 @@ class TDTestCase:
tdSql.execute(" ".join(sqlcmd))

tdLog.info("================= step6")
tdSql.query('select * from tb1')
tdSql.checkRows(250)
tdSql.query('select count(*) from tb1')
tdSql.checkData(0, 0, 250)

def stop(self):
tdSql.close()
@@ -50,14 +50,16 @@ class TDTestCase:
sql += "'%s')" % self.get_random_string(22)
tdSql.execute(sql % (self.ts + i))

tdSql.query("select * from stb")
tdSql.checkRows(4096)
time.sleep(10)
tdSql.query("select count(*) from stb")
tdSql.checkData(0, 0, 4096)

tdDnodes.stop(1)
tdDnodes.start(1)

tdSql.query("select * from stb")
tdSql.checkRows(4096)
time.sleep(1)
tdSql.query("select count(*) from stb")
tdSql.checkData(0, 0, 4096)

endTime = time.time()