diff --git a/Jenkinsfile b/Jenkinsfile index bac0cce33b..dfe9ed4389 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -42,17 +42,49 @@ def pre_test(){ killall -9 taosd ||echo "no taosd running" killall -9 gdb || echo "no gdb running" cd ${WKC} - git reset --hard HEAD~10 >/dev/null - git checkout develop + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WKC} + git checkout master + ''' + } + else { + sh ''' + cd ${WKC} + git checkout develop + ''' + } + } + sh''' + cd ${WKC} git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD git clean -dfx cd ${WK} git reset --hard HEAD~10 - git checkout develop - git pull >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WK} + git checkout master + ''' + } + else { + sh ''' + cd ${WK} + git checkout develop + ''' + } + } + sh ''' cd ${WK} + git pull >/dev/null + export TZ=Asia/Harbin date git clean -dfx @@ -92,7 +124,8 @@ pipeline { git pull git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - ''' + ''' + script{ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) } @@ -185,14 +218,12 @@ pipeline { rm -rf /var/log/taos/* ./handle_crash_gen_val_log.sh ''' - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/tests/pytest - rm -rf /var/lib/taos/* - rm -rf /var/log/taos/* - ./handle_taosd_val_log.sh - ''' - } + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_taosd_val_log.sh + ''' timeout(time: 45, unit: 'MINUTES'){ sh ''' date diff --git a/cmake/install.inc b/cmake/install.inc index 5823ef743e..50dc4162f9 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
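Review note on the Jenkinsfile hunk above: both `pre_test` checkout sequences now branch on `env.CHANGE_TARGET`, so PRs targeting `master` are built against `master` instead of unconditionally against `develop`. A minimal shell sketch of the selection logic (`CHANGE_TARGET` and `WKC` are supplied by the Jenkins environment; illustrative only, not the Jenkinsfile itself):

```bash
#!/bin/sh
# Mirror of the branch selection added in pre_test().
cd "${WKC}"
if [ "${CHANGE_TARGET}" = "master" ]; then
  git checkout master    # PR targets master: validate against master
else
  git checkout develop   # any other target falls back to develop
fi
git pull >/dev/null
```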
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.27-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index b1e09c9532..8035b31cc7 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.18.0") + SET(TD_VER_NUMBER "2.0.20.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md new file mode 100644 index 0000000000..30803d9777 --- /dev/null +++ b/documentation20/cn/02.getting-started/01.docker/docs.md @@ -0,0 +1,211 @@ +# 通过 Docker 快速体验 TDengine + +虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。 + +下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。 + +## 下载 Docker + +Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.com/get-docker/)。 + +安装完毕后可以在命令行终端查看 Docker 版本。如果版本号正常输出,则说明 Docker 环境已经安装成功。 + +```bash +$ docker -v +Docker version 20.10.5, build 55c4c88 +``` + +## 在 Docker 容器中运行 TDengine + +1,使用命令拉取 TDengine 镜像,并使它在后台运行。 + +```bash +$ docker run -d tdengine/tdengine +cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316 +``` + +- **docker run**:通过 Docker 运行一个容器。 +- **-d**:让容器在后台运行。 +- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。 +- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。 + +2,确认容器是否已经正确运行。 + +```bash +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS ··· +cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· +``` + +- **docker ps**:列出所有正在运行状态的容器信息。 +- **CONTAINER ID**:容器 ID。 +- **IMAGE**:使用的镜像。 +- **COMMAND**:启动容器时运行的命令。 +- **CREATED**:容器创建时间。 +- **STATUS**:容器状态。UP 表示运行中。 + +3,进入 Docker 容器内,使用 TDengine。 + +```bash +$ docker exec -it cdf548465318 /bin/bash +root@cdf548465318:~/TDengine-server-2.0.13.0# +``` + +- **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 +- **-i**:进入交互模式。 +- **-t**:指定一个终端。 +- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。 +- **/bin/bash**:载入容器后运行 bash 来进行交互。 + +4,进入容器后,执行 taos shell 客户端程序。 + +```bash +$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos + +Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. 
+ +taos> +``` + +TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 + +在 TDengine 终端中,可以通过 SQL 命令来创建/删除数据库、表、超级表等,并可以进行插入和查询操作。具体可以参考 [TAOS SQL 说明文档](https://www.taosdata.com/cn/documentation/taos-sql)。 + +## 通过 taosdemo 进一步了解 TDengine + +1,接上面的步骤,先退出 TDengine 终端程序。 + +```bash +$ taos> q +root@cdf548465318:~/TDengine-server-2.0.13.0# +``` + +2,在命令行界面执行 taosdemo。 + +```bash +$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo +################################################################### +# Server IP: localhost:0 +# User: root +# Password: taosdata +# Use metric: true +# Datatype of Columns: int int int int int int int float +# Binary Length(If applicable): -1 +# Number of Columns per record: 3 +# Number of Threads: 10 +# Number of Tables: 10000 +# Number of Data per Table: 100000 +# Records/Request: 1000 +# Database name: test +# Table prefix: t +# Delete method: 0 +# Test time: 2021-04-13 02:05:20 +################################################################### +``` + +回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。 + +3,进入 TDengine 终端,查看 taosdemo 生成的数据。 + +- **进入命令行。** + +```bash +$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos + +Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 +Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. + +taos> +``` + +- **查看数据库。** + +```bash +$ taos> show databases; + name | created_time | ntables | vgroups | ··· + test | 2021-04-13 02:14:15.950 | 10000 | 6 | ··· + log | 2021-04-12 09:36:37.549 | 4 | 1 | ··· + +``` + +- **查看超级表。** + +```bash +$ taos> use test; +Database changed. 
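+```
+
+(示意)上面 taosdemo 的自动建库建表过程,大致相当于手工执行如下 SQL 语句。其中各数据列的类型取自 taosdemo 打印的配置信息(int),而 loc 标签的类型长度为本文推测值,仅用于理解表结构:
+
+```bash
+$ taos> create database if not exists test;
+$ taos> use test;
+$ taos> create table meters (ts timestamp, f1 int, f2 int, f3 int) tags (areaid int, loc binary(16));
+$ taos> create table t0 using meters tags (1, 'beijing');
+```
+
+```bash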
+ +$ taos> show stables; + name | created_time | columns | tags | tables | +===================================================================================== + meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 | +Query OK, 1 row(s) in set (0.001737s) + +``` + +- **查看表,限制输出十条。** + +```bash +$ taos> select * from test.t0 limit 10; + ts | f1 | f2 | f3 | +==================================================================== + 2017-07-14 02:40:01.000 | 3 | 9 | 0 | + 2017-07-14 02:40:02.000 | 0 | 1 | 2 | + 2017-07-14 02:40:03.000 | 7 | 2 | 3 | + 2017-07-14 02:40:04.000 | 9 | 4 | 5 | + 2017-07-14 02:40:05.000 | 1 | 2 | 5 | + 2017-07-14 02:40:06.000 | 6 | 3 | 2 | + 2017-07-14 02:40:07.000 | 4 | 7 | 8 | + 2017-07-14 02:40:08.000 | 4 | 6 | 6 | + 2017-07-14 02:40:09.000 | 5 | 7 | 7 | + 2017-07-14 02:40:10.000 | 1 | 5 | 0 | +Query OK, 10 row(s) in set (0.003638s) + +``` + +- **查看 t0 表的标签值。** + +```bash +$ taos> select areaid, loc from test.t0; + areaid | loc | +=========================== + 10 | shanghai | +Query OK, 1 row(s) in set (0.002904s) + +``` + +## 停止正在 Docker 中运行的 TDengine 服务 + +```bash +$ docker stop cdf548465318 +cdf548465318 +``` + +- **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。 +- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。 + +## 编程开发时连接在 Docker 中的 TDengine + +从 Docker 之外连接使用在 Docker 容器内运行的 TDengine 服务,有以下两个思路: + +1,通过端口映射(-p),将容器内部开放的网络端口映射到宿主机的指定端口上。通过挂载本地目录(-v),可以实现宿主机与容器内部的数据同步,防止容器删除后,数据丢失。 + +```bash +$ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine +526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd + +$ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql +{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0} +``` + +- 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。 +- 第二条命令,通过 RESTful 接口访问 TDengine,这时连接的是本机的 6041 端口,可见连接成功。 + +注意:在这个示例中,出于方便性考虑,只映射了 RESTful 需要的 6041 端口。如果希望以非 RESTful 方式连接 TDengine 服务,则需要映射从 6030 开始的共 11 个端口(完整的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port))。在例子中,挂载本地目录也只是处理了配置文件所在的 /etc/taos 目录,而没有挂载数据存储目录。 + +2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。 + +```bash +$ docker exec -it 526aa188da /bin/bash +``` + diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index db2885d302..a98159d8c4 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -10,7 +10,9 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版 ### 通过Docker容器运行 -请参考[TDengine官方Docker镜像的发布、下载和使用](https://www.taosdata.com/blog/2020/05/13/1509.html) +暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OSX 和 Windows 环境下尝试 TDengine。 + +详细操作方法请参照 [通过Docker快速体验TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。 ### 通过安装包安装 @@ -212,7 +214,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); | **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● | | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | | **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | -| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● | 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 diff --git a/documentation20/cn/03.architecture/docs.md 
b/documentation20/cn/03.architecture/docs.md index 87553fa8ad..3d6e5e4f21 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -178,7 +178,7 @@ TDengine 分布式架构的逻辑结构图如下: **FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取计算机的hostname作为其FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。 -**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。 +**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。(另外还可能有 RESTful、Arbitrator 所使用的端口,那样的话就一共是 13 个。)使用时,需要确保防火墙将这些端口打开,以备使用。每个数据节点可以配置不同的serverPort。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)) **集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。 diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index fb50e20e51..3dbf72a23e 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -14,7 +14,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 | **Python** | ● | ● | ● | ○ | ● | ● | ○ | -- | ○ | | **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | | **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | -| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **RESTful** | ● | ● | ● | ● | ● | ● | ○ | ○ | ○ | 其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。 @@ -23,7 +23,7 @@ TDengine提供了丰富的应用程序开发接口,其中包括C/C++、Java、 * 在没有安装TDengine服务端软件的系统中使用连接器(除RESTful外)访问 TDengine 数据库,需要安装相应版本的客户端安装包来使应用驱动(Linux系统中文件名为libtaos.so,Windows系统中为taos.dll)被安装在系统中,否则会产生无法找到相应库文件的错误。 * 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 -* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。 +* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。详细的版本依赖关系请参见 [taos-jdbcdriver 文档](https://www.taosdata.com/cn/documentation/connector/java#version)。 * 无论选用何种编程语言的连接器,2.0 及以上版本的 TDengine 推荐数据库应用的每个线程都建立一个独立的连接,或基于线程建立连接池,以避免连接内的“USE statement”状态量在线程之间相互干扰(但连接的查询和写入操作都是线程安全的)。 ## 安装连接器驱动步骤 @@ -743,7 +743,7 @@ HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间 下面仅列出一些与RESTful接口有关的配置参数,其他系统参数请看配置文件里的说明。注意:配置修改后,需要重启taosd服务才能生效 -- httpPort: 对外提供RESTful服务的端口号,默认绑定到6041 +- 对外提供RESTful服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改) - httpMaxThreads: 启动的线程数量,默认为2(2.0.17版本开始,默认值改为CPU核数的一半向下取整) - restfulRowLimit: 返回结果集(JSON格式)的最大条数,默认值为10240 - httpEnableCompress: 是否支持压缩,默认不支持,目前TDengine仅支持gzip压缩格式 diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index a430ce8277..62d709c279 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -55,12 +55,11 @@ arbitrator ha.taosdata.com:6042 | 4 | statusInterval | dnode向mnode报告状态时长 | | 5 | arbitrator | 系统中裁决器的end point | | 6 | timezone | 时区 | -| 7 | locale | 
系统区位信息及编码格式 | -| 8 | charset | 字符集编码 | -| 9 | balance | 是否启动负载均衡 | -| 10 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 | -| 11 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 | +| 7 | balance | 是否启动负载均衡 | +| 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 | +| 9 | maxVgroupsPerDb | 每个DB中能够使用的最大vgroup个数 | +备注:在 2.0.19.0 及更早的版本中,除以上 9 项参数外,dnode 加入集群时,还会要求 locale 和 charset 参数的取值也一致。 ## 启动第一个数据节点 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index cc8689786d..f8dc794d7d 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -100,8 +100,7 @@ taosd -C - firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。 - fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。 -- serverPort:taosd启动后,对外服务的端口号,默认值为6030。 -- httpPort: RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求, 默认值为6041。 +- serverPort:taosd启动后,对外服务的端口号,默认值为6030。(RESTful服务使用的端口号是在此基础上+11,即默认值为6041。) - dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。 - logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。 - arbitrator:系统中裁决器的end point, 缺省值为空。 @@ -115,7 +114,7 @@ taosd -C - queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。 - ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 -**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。 +**注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)) 不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数: @@ -150,7 +149,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 - maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。 - arbitrator: 系统中裁决器的end point,缺省为空。 -- timezone、locale、charset 的配置见客户端配置。 +- timezone、locale、charset 的配置见客户端配置。(2.0.20.0 及以上的版本里,集群中加入新节点已不要求 locale 和 charset 参数取值一致) 为方便调试,可通过SQL语句临时调整每个dnode的日志配置,系统重启后会失效: @@ -463,41 +462,41 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 | 关键字列表 | | | | | | ---------- | ----------- | ------------ | ---------- | --------- | -| ABLOCKS | CONNECTIONS | GT | MNODES | SLIDING | -| ABORT | COPY | ID | MODULES | SLIMIT | -| ACCOUNT | COUNT | IF | NCHAR | SMALLINT | -| ACCOUNTS | CREATE | IGNORE | NE | SPREAD | -| ADD | CTIME | IMMEDIATE | NONE | STABLE | -| AFTER | DATABASE | IMPORT | NOT | STABLES | -| ALL | DATABASES | IN | NOTNULL | STAR | -| ALTER | DAYS | INITIALLY | NOW | STATEMENT | -| AND | DEFERRED | INSERT | OF | STDDEV | -| AS | DELIMITERS | INSTEAD | OFFSET | STREAM | -| ASC | DESC | INTEGER | OR | STREAMS | -| ATTACH | DESCRIBE | INTERVAL | ORDER | STRING | -| AVG | DETACH | INTO | PASS | SUM | -| BEFORE | DIFF | IP | PERCENTILE | TABLE | -| BEGIN | DISTINCT | IS | PLUS | TABLES | -| BETWEEN | DIVIDE | ISNULL | PRAGMA | TAG | -| BIGINT | DNODE | JOIN | PREV | TAGS | -| BINARY | DNODES | KEEP | PRIVILEGE | TBLOCKS | -| BITAND | DOT | KEY | QUERIES | TBNAME | -| BITNOT | DOUBLE | KILL | QUERY | TIMES | -| BITOR | DROP | LAST | RAISE | TIMESTAMP | -| BOOL | EACH | LE | REM | TINYINT | -| BOTTOM | END | LEASTSQUARES | REPLACE | TOP | -| BY | EQ | LIKE | REPLICA | TRIGGER | -| CACHE | EXISTS | LIMIT | RESET | UMINUS | -| CASCADE | EXPLAIN | LINEAR | RESTRICT | UPLUS | -| CHANGE | FAIL | LOCAL | ROW | USE | -| CLOG | FILL | LP | ROWS | USER | -| CLUSTER | 
FIRST | LSHIFT | RP | USERS | -| COLON | FLOAT | LT | RSHIFT | USING | -| COLUMN | FOR | MATCH | SCORES | VALUES | -| COMMA | FROM | MAX | SELECT | VARIABLE | -| COMP | GE | METRIC | SEMI | VGROUPS | -| CONCAT | GLOB | METRICS | SET | VIEW | -| CONFIGS | GRANTS | MIN | SHOW | WAVG | -| CONFLICT | GROUP | MINUS | SLASH | WHERE | -| CONNECTION | | | | | +| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT | +| ABORT | COPY | ID | NCHAR | SMALLINT | +| ACCOUNT | COUNT | IF | NE | SPREAD | +| ACCOUNTS | CREATE | IGNORE | NONE | STABLE | +| ADD | CTIME | IMMEDIATE | NOT | STABLES | +| AFTER | DATABASE | IMPORT | NOTNULL | STAR | +| ALL | DATABASES | IN | NOW | STATEMENT | +| ALTER | DAYS | INITIALLY | OF | STDDEV | +| AND | DEFERRED | INSERT | OFFSET | STREAM | +| AS | DELIMITERS | INSTEAD | OR | STREAMS | +| ASC | DESC | INTEGER | ORDER | STRING | +| ATTACH | DESCRIBE | INTERVAL | PASS | SUM | +| AVG | DETACH | INTO | PERCENTILE | TABLE | +| BEFORE | DIFF | IP | PLUS | TABLES | +| BEGIN | DISTINCT | IS | PRAGMA | TAG | +| BETWEEN | DIVIDE | ISNULL | PREV | TAGS | +| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS | +| BINARY | DNODES | KEEP | QUERIES | TBNAME | +| BITAND | DOT | KEY | QUERY | TIMES | +| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP | +| BITOR | DROP | LAST | REM | TINYINT | +| BOOL | EACH | LE | REPLACE | TOP | +| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC | +| BY | EQ | LIKE | RESET | TRIGGER | +| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS | +| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS | +| CHANGE | FAIL | LOCAL | ROWS | USE | +| CLOG | FILL | LP | RP | USER | +| CLUSTER | FIRST | LSHIFT | RSHIFT | USERS | +| COLON | FLOAT | LT | SCORES | USING | +| COLUMN | FOR | MATCH | SELECT | VALUES | +| COMMA | FROM | MAX | SEMI | VARIABLE | +| COMP | GE | METRIC | SET | VGROUPS | +| CONCAT | GLOB | METRICS | SHOW | VIEW | +| CONFIGS | GRANTS | MIN | SLASH | WAVG | +| CONFLICT | GROUP | MINUS | SLIDING | WHERE | +| CONNECTION | GT | MNODES | | | diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 58191e0bd8..2415b7cd01 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -407,10 +407,10 @@ SELECT select_expr [, select_expr ...] [INTERVAL (interval_val [, interval_offset])] [SLIDING sliding_val] [FILL fill_val] - [GROUP BY col_list] + [GROUP BY col_list [HAVING having_condition]] [ORDER BY col_list { DESC | ASC }] - [SLIMIT limit_val [, SOFFSET offset_val]] - [LIMIT limit_val [, OFFSET offset_val]] + [SLIMIT limit_val [SOFFSET offset_val]] + [LIMIT limit_val [OFFSET offset_val]] [>> export_file]; ``` @@ -626,7 +626,8 @@ Query OK, 1 row(s) in set (0.001091s) - WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串 - 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。 - 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。 -- 参数 SLIMIT 控制由 GROUP BY 指令划分的每个分组中的输出条数。 + * 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。 +- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。 - 通过”>>"输出结果可以导出到指定文件 ### 支持的条件过滤操作 @@ -647,6 +648,15 @@ Query OK, 1 row(s) in set (0.001091s) 2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。 3. 
从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +### GROUP BY 之后的 HAVING 过滤 + +从 2.0.20 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。 + +例如,如下语句只会输出 `AVG(f1) > 0` 的分组: +```mysql +SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING AVG(f1) > 0; +``` + ### SQL 示例 - 对于下面的例子,表tb1用以下语句创建 @@ -950,7 +960,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ```mysql SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` - 功能说明: 统计表/超级表中某列的值最大*k*个非NULL值。若多于k个列值并列最大,则返回时间戳小的。 + 功能说明: 统计表/超级表中某列的值最大 *k* 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 返回结果数据类型:同应用的字段。 @@ -984,7 +994,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ```mysql SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; ``` - 功能说明:统计表/超级表中某列的值最小*k*个非NULL值。若多于k个列值并列最小,则返回时间戳小的。 + 功能说明:统计表/超级表中某列的值最小 *k* 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。 返回结果数据类型:同应用的字段。 @@ -1216,6 +1226,7 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P - 列名最大长度为 64,最多允许 1024 列,最少需要 2 列,第一列必须是时间戳 - 标签最多允许 128 个,可以 1 个,标签总长度不超过 16k 个字符 - SQL 语句最大长度 65480 个字符,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1M +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 ## TAOS SQL其他约定 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index c01247d345..5a0f890cae 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -166,3 +166,18 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端 2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 + +## 19. TDengine 都会用到哪些网络端口? + +在 TDengine 2.0 版本中,会用到以下这些网络端口(以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么这里列举的端口都会出现变化),管理员可以参考这里的信息调整防火墙设置: + +| 协议 | 默认端口 | 用途说明 | 修改方法 | +| --- | --------- | ------------------------------- | ------------------------------ | +| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 | +| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 | +| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 | +| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 | +| TCP | 6042 | Arbitrator 的服务端口。 | 因 Arbitrator 启动参数设置变化。 | +| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | | +| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 | +| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 | diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 88628b4db6..31343ed293 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.18.0' +version: '2.0.20.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
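Review note on the new FAQ section above ("TDengine 都会用到哪些网络端口"): with the default serverPort of 6030, a firewall configuration matching that table could look like the following sketch (ufw is just one example of a firewall frontend; adjust the ranges if serverPort is changed):

```bash
# Open the default TDengine 2.0 port range described in the FAQ table.
sudo ufw allow 6030:6042/tcp   # client/server, cluster, sync, RESTful, arbitrator
sudo ufw allow 6030:6039/udp   # client/server and inter-node UDP ranges
# sudo ufw allow 6060/tcp      # enterprise Monitor service, if used
sudo ufw reload
```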
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.18.0 + - usr/lib/libtaos.so.2.0.20.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index efbef284ae..5b2d598222 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -96,6 +96,25 @@ typedef struct STableMetaInfo { SArray *tagColList; // SArray, involved tag columns } STableMetaInfo; + +typedef struct SColumnIndex { + int16_t tableIndex; + int16_t columnIndex; +} SColumnIndex; + + +typedef struct SFieldInfo { + int16_t numOfOutput; // number of column in result + TAOS_FIELD* final; + SArray *internalField; // SArray +} SFieldInfo; + +typedef struct SColumn { + SColumnIndex colIndex; + int32_t numOfFilters; + SColumnFilterInfo *filterInfo; +} SColumn; + /* the structure for sql function in select clause */ typedef struct SSqlExpr { char aliasName[TSDB_COL_NAME_LEN]; // as aliasName @@ -109,32 +128,24 @@ typedef struct SSqlExpr { tVariant param[3]; // parameters are not more than 3 int32_t offset; // sub result column value of arithmetic expression. int16_t resColId; // result column id + SColumn *pFilter; // expr filter } SSqlExpr; -typedef struct SColumnIndex { - int16_t tableIndex; - int16_t columnIndex; -} SColumnIndex; +typedef struct SExprFilter { + tSqlExpr *pExpr; //used for having parse + SSqlExpr *pSqlExpr; + SArray *fp; + SColumn *pFilters; //having filter info +}SExprFilter; typedef struct SInternalField { TAOS_FIELD field; bool visible; SExprInfo *pArithExprInfo; SSqlExpr *pSqlExpr; + SExprFilter *pFieldFilters; } SInternalField; -typedef struct SFieldInfo { - int16_t numOfOutput; // number of column in result - TAOS_FIELD* final; - SArray *internalField; // SArray -} SFieldInfo; - -typedef struct SColumn { - SColumnIndex colIndex; - int32_t numOfFilters; - SColumnFilterInfo *filterInfo; -} SColumn; - typedef struct SCond { uint64_t uid; int32_t len; // length of tag query condition data @@ -243,6 +254,7 @@ typedef struct SQueryInfo { int32_t round; // 0/1/.... 
int32_t bufLen; char* buf; + int32_t havingFieldNum; } SQueryInfo; typedef struct { @@ -261,10 +273,10 @@ typedef struct { char * curSql; // current sql, resume position of sql after parsing paused int8_t parseFinished; - char reserve2[3]; // fix bus error on arm32 + char reserve2[3]; // fix bus error on arm32 int16_t numOfCols; - char reserve3[2]; // fix bus error on arm32 + char reserve3[2]; // fix bus error on arm32 uint32_t allocSize; char * payload; int32_t payloadLen; @@ -317,6 +329,7 @@ typedef struct { char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex* pColumnIndex; + TAOS_FIELD* final; SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions struct SLocalMerger *pLocalMerger; } SSqlRes; diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 1971cdb363..1e1c28056f 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -22,6 +22,7 @@ #include "tscUtil.h" #include "tschemautil.h" #include "tsclient.h" +#include "qUtil.h" typedef struct SCompareParam { SLocalDataSource **pLocalData; @@ -1287,6 +1288,76 @@ static bool saveGroupResultInfo(SSqlObj *pSql) { return false; } + +bool doFilterFieldData(char *input, SExprFilter* pFieldFilters, int16_t type, bool* notSkipped) { + bool qualified = false; + + for(int32_t k = 0; k < pFieldFilters->pFilters->numOfFilters; ++k) { + __filter_func_t fp = taosArrayGetP(pFieldFilters->fp, k); + SColumnFilterElem filterElem = {.filterInfo = pFieldFilters->pFilters->filterInfo[k]}; + + bool isnull = isNull(input, type); + if (isnull) { + if (fp == isNullOperator) { + qualified = true; + break; + } else { + continue; + } + } else { + if (fp == notNullOperator) { + qualified = true; + break; + } else if (fp == isNullOperator) { + continue; + } + } + + if (fp(&filterElem, input, input, type)) { + qualified = true; + break; + } + } + + *notSkipped = qualified; + + return TSDB_CODE_SUCCESS; +} + + +int32_t doHavingFilter(SQueryInfo* pQueryInfo, tFilePage* pOutput, bool* notSkipped) { + *notSkipped = true; + + if (pQueryInfo->havingFieldNum <= 0) { + return TSDB_CODE_SUCCESS; + } + + //int32_t exprNum = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + + size_t numOfOutput = tscNumOfFields(pQueryInfo); + for(int32_t i = 0; i < numOfOutput; ++i) { + SInternalField* pInterField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i); + SExprFilter* pFieldFilters = pInterField->pFieldFilters; + + if (pFieldFilters == NULL) { + continue; + } + + int32_t type = pInterField->field.type; + + char* pInput = pOutput->data + pOutput->num* pFieldFilters->pSqlExpr->offset; + + doFilterFieldData(pInput, pFieldFilters, type, notSkipped); + if (*notSkipped == false) { + return TSDB_CODE_SUCCESS; + } + } + + return TSDB_CODE_SUCCESS; +} + + + /** * * @param pSql @@ -1327,6 +1398,22 @@ bool genFinalResults(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool noMoreCurren doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalMerge->finalModel->rowSize); } + bool notSkipped = true; + + doHavingFilter(pQueryInfo, pResBuf, ¬Skipped); + + if (!notSkipped) { + pRes->numOfRows = 0; + pLocalMerge->discard = !noMoreCurrentGroupRes; + + if (pLocalMerge->discard) { + SColumnModel *pInternModel = pLocalMerge->pDesc->pColumnModel; + tColModelAppend(pInternModel, pLocalMerge->discardData, pLocalMerge->pTempBuffer->data, 0, 1, 1); + } + + return notSkipped; + } + // no interval query, no fill operation if (pQueryInfo->interval.interval == 0 || 
pQueryInfo->fillType == TSDB_FILL_NONE) { genFinalResWithoutFill(pRes, pLocalMerge, pQueryInfo); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 926ee44b70..920937928f 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -937,6 +937,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC return ret; } + if (sql == NULL) { + return TSDB_CODE_TSC_INVALID_SQL; + } + code = tscGetTableMetaEx(pSql, pTableMetaInfo, true); if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) { return code; @@ -945,6 +949,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC } else { sql = sToken.z; + if (sql == NULL) { + return TSDB_CODE_TSC_INVALID_SQL; + } + code = tscGetTableMetaEx(pSql, pTableMetaInfo, false); if (pCmd->curSql == NULL) { assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS); @@ -952,10 +960,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC } *sqlstr = sql; - - if (*sqlstr == NULL) { - code = TSDB_CODE_TSC_INVALID_SQL; - } return code; } diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index ee37975208..50bc9783fc 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -35,6 +35,7 @@ #include "tstrbuild.h" #include "ttokendef.h" #include "qScript.h" +#include "qUtil.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -1237,6 +1238,7 @@ static bool validateTableColumnInfo(SArray* pFieldList, SSqlCmd* pCmd) { return true; } + static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pCmd) { assert(pTagsList != NULL); @@ -1883,18 +1885,6 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, SArray* pSelectLis return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - /* - * transfer sql functions that need secondary merge into another format - * in dealing with super table queries such as: count/first/last - */ - if (isSTable) { - tscTansformFuncForSTableQuery(pQueryInfo); - - if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { - return TSDB_CODE_TSC_INVALID_SQL; - } - } - return TSDB_CODE_SUCCESS; } @@ -3343,6 +3333,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd) return TSDB_CODE_SUCCESS; } + static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) { if (pColumn == NULL) { return NULL; @@ -3366,15 +3357,11 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) { } static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter, - SColumnIndex* columnIndex, tSqlExpr* pExpr) { + int16_t colType, tSqlExpr* pExpr) { const char* msg = "not supported filter condition"; tSqlExpr* pRight = pExpr->pRight; - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, columnIndex->tableIndex); - SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex->columnIndex); - - int16_t colType = pSchema->type; if (colType >= TSDB_DATA_TYPE_TINYINT && colType <= TSDB_DATA_TYPE_BIGINT) { colType = TSDB_DATA_TYPE_BIGINT; } else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) { @@ -3579,7 +3566,10 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } pColumn->colIndex = *pIndex; - return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr); + + int16_t colType = pSchema->type; + + return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, colType, pExpr); } 
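Review note: doExtractColumnFilterInfo() now takes the column type directly instead of resolving it through a SColumnIndex, so the same routine can build filter info both for ordinary WHERE columns and for HAVING targets, whose aggregate outputs have no backing schema column. The new path can be exercised from the shell with the HAVING example introduced in the SQL docs above (st2 and f1 as defined there):

```bash
# HAVING filter on an aggregate output; the filter is parsed through the
# refactored doExtractColumnFilterInfo() using the aggregate's result type.
taos -s "select avg(f1) from st2 where f1 > 0 group by f1 having avg(f1) > 0;"
```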
static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableCond, SStringBuilder* sb) { @@ -6326,7 +6316,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo if (TSDB_COL_IS_TAG(pColIndex->flag)) { SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex}; - SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); + SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true); memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName)); tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName)); @@ -6335,7 +6325,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo // NOTE: tag column does not add to source column list SColumnList ids = getColumnList(1, 0, pColIndex->colIndex); - insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr); + insertResultField(pQueryInfo, (int32_t)size - pQueryInfo->havingFieldNum, &ids, bytes, (int8_t)type, name, pExpr); } else { // if this query is "group by" normal column, time window query is not allowed if (isTimeWindowQuery(pQueryInfo)) { @@ -7083,6 +7073,313 @@ static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) { return TSDB_CODE_SUCCESS; } + + int32_t tscInsertExprFields(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** interField) { + tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL, .distinct = false}; + + int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); + + // ADD TRUE FOR TEST + if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, true) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + ++pQueryInfo->havingFieldNum; + + size_t n = tscSqlExprNumOfExprs(pQueryInfo); + SSqlExpr* pSqlExpr = tscSqlExprGet(pQueryInfo, (int32_t)n - 1); + + int32_t slot = tscNumOfFields(pQueryInfo) - 1; + SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, slot); + pInfo->visible = false; + + if (pInfo->pFieldFilters == NULL) { + SExprFilter* pFieldFilters = calloc(1, sizeof(SExprFilter)); + if (pFieldFilters == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + SColumn* pFilters = calloc(1, sizeof(SColumn)); + if (pFilters == NULL) { + tfree(pFieldFilters); + + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pFieldFilters->pFilters = pFilters; + pFieldFilters->pSqlExpr = pSqlExpr; + pSqlExpr->pFilter = pFilters; + pInfo->pFieldFilters = pFieldFilters; + } + + pInfo->pFieldFilters->pExpr = pExpr; + + *interField = pInfo; + + return TSDB_CODE_SUCCESS; +} + +int32_t tscGetExprFilters(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SInternalField** pField) { + SInternalField* pInfo = NULL; + + for (int32_t i = pQueryInfo->havingFieldNum - 1; i >= 0; --i) { + pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, pQueryInfo->fieldsInfo.numOfOutput - 1 - i); + + if (pInfo->pFieldFilters && 0 == tSqlExprCompare(pInfo->pFieldFilters->pExpr, pExpr)) { + *pField = pInfo; + return TSDB_CODE_SUCCESS; + } + } + + int32_t ret = tscInsertExprFields(pCmd, pQueryInfo, pExpr, &pInfo); + if (ret) { + return ret; + } + + *pField = pInfo; + + return TSDB_CODE_SUCCESS; +} + +static int32_t genExprFilter(SExprFilter * exprFilter) { + exprFilter->fp = taosArrayInit(4, sizeof(__filter_func_t)); + if (exprFilter->fp == NULL) { + return 
TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < exprFilter->pFilters->numOfFilters; ++i) { + SColumnFilterInfo *filterInfo = &exprFilter->pFilters->filterInfo[i]; + + int32_t lower = filterInfo->lowerRelOptr; + int32_t upper = filterInfo->upperRelOptr; + if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { + tscError("invalid rel optr"); + return TSDB_CODE_TSC_APP_ERROR; + } + + __filter_func_t ffp = getFilterOperator(lower, upper); + if (ffp == NULL) { + tscError("invalid filter info"); + return TSDB_CODE_TSC_APP_ERROR; + } + + taosArrayPush(exprFilter->fp, &ffp); + } + + return TSDB_CODE_SUCCESS; +} + +static int32_t handleExprInHavingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t sqlOptr) { + const char* msg1 = "non binary column not support like operator"; + const char* msg2 = "invalid operator for binary column in having clause"; + const char* msg3 = "invalid operator for bool column in having clause"; + + SColumn* pColumn = NULL; + SColumnFilterInfo* pColFilter = NULL; + SInternalField* pInfo = NULL; + + /* + * in case of TK_AND filter condition, we first find the corresponding column and build the query condition together + * the already existed condition. + */ + if (sqlOptr == TK_AND) { + int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + if (ret) { + return ret; + } + + pColumn = pInfo->pFieldFilters->pFilters; + + // this is a new filter condition on this column + if (pColumn->numOfFilters == 0) { + pColFilter = addColumnFilterInfo(pColumn); + } else { // update the existed column filter information, find the filter info here + pColFilter = &pColumn->filterInfo[0]; + } + + if (pColFilter == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } else if (sqlOptr == TK_OR) { + int32_t ret = tscGetExprFilters(pCmd, pQueryInfo, pExpr->pLeft, &pInfo); + if (ret) { + return ret; + } + + pColumn = pInfo->pFieldFilters->pFilters; + + // TODO fixme: failed to invalid the filter expression: "col1 = 1 OR col2 = 2" + pColFilter = addColumnFilterInfo(pColumn); + if (pColFilter == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } else { // error; + return TSDB_CODE_TSC_INVALID_SQL; + } + + pColFilter->filterstr = + ((pInfo->field.type == TSDB_DATA_TYPE_BINARY || pInfo->field.type == TSDB_DATA_TYPE_NCHAR) ? 
1 : 0); + + if (pColFilter->filterstr) { + if (pExpr->tokenId != TK_EQ + && pExpr->tokenId != TK_NE + && pExpr->tokenId != TK_ISNULL + && pExpr->tokenId != TK_NOTNULL + && pExpr->tokenId != TK_LIKE + ) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + } else { + if (pExpr->tokenId == TK_LIKE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pInfo->field.type == TSDB_DATA_TYPE_BOOL) { + if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + } + + int32_t ret = doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pInfo->field.type, pExpr); + if (ret) { + return ret; + } + + return genExprFilter(pInfo->pFieldFilters); +} + +int32_t getHavingExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t parentOptr) { + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + const char* msg1 = "invalid having clause"; + + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; + + if (pExpr->tokenId == TK_AND || pExpr->tokenId == TK_OR) { + int32_t ret = getHavingExpr(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + return getHavingExpr(pCmd, pQueryInfo, pExpr->pRight, pExpr->tokenId); + } + + if (pLeft == NULL || pRight == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pLeft->type == pRight->type) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + exchangeExpr(pExpr); + + pLeft = pExpr->pLeft; + pRight = pExpr->pRight; + + + if (pLeft->type != SQL_NODE_SQLFUNCTION) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pRight->type != SQL_NODE_VALUE) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pExpr->tokenId >= TK_BITAND) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + //if (pLeft->pParam == NULL || pLeft->pParam->nExpr < 1) { + // return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + //} + + if (pLeft->pParam) { + size_t size = taosArrayGetSize(pLeft->pParam); + for (int32_t i = 0; i < size; i++) { + tSqlExprItem* pParamElem = taosArrayGet(pLeft->pParam, i); + if (pParamElem->pNode->tokenId != TK_ALL && + pParamElem->pNode->tokenId != TK_ID && + pParamElem->pNode->tokenId != TK_STRING && + pParamElem->pNode->tokenId != TK_INTEGER && + pParamElem->pNode->tokenId != TK_FLOAT) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pParamElem->pNode->tokenId == TK_ID && (pParamElem->pNode->colInfo.z == NULL && pParamElem->pNode->colInfo.n == 0)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pParamElem->pNode->tokenId == TK_ID) { + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + if (index.columnIndex <= 0 || + index.columnIndex >= tscGetNumOfColumns(pTableMeta)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + } + } + } + + pLeft->functionId = isValidFunction(pLeft->operand.z, pLeft->operand.n); + if (pLeft->functionId < 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + return handleExprInHavingClause(pCmd, pQueryInfo, pExpr, parentOptr); +} + + + +int32_t 
parseHavingClause(SQueryInfo* pQueryInfo, tSqlExpr* pExpr, SSqlCmd* pCmd, bool isSTable, int32_t joinQuery, int32_t timeWindowQuery) { + const char* msg1 = "having only works with group by"; + const char* msg2 = "functions or others can not be mixed up"; + const char* msg3 = "invalid expression in having clause"; + + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + if (pQueryInfo->groupbyExpr.numOfGroupCols <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + if (pExpr->pLeft == NULL || pExpr->pRight == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + if (pQueryInfo->colList == NULL) { + pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); + } + + int32_t ret = 0; + + if ((ret = getHavingExpr(pCmd, pQueryInfo, pExpr, TK_AND)) != TSDB_CODE_SUCCESS) { + return ret; + } + + //REDO function check + if (!functionCompatibleCheck(pQueryInfo, joinQuery, timeWindowQuery)) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + return TSDB_CODE_SUCCESS; +} + + + + + int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t index) { assert(pQuerySqlNode != NULL && (pQuerySqlNode->from == NULL || taosArrayGetSize(pQuerySqlNode->from->tableList) > 0)); @@ -7255,6 +7552,23 @@ int32_t doValidateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t i } } + // parse the having clause in the first place + if (parseHavingClause(pQueryInfo, pQuerySqlNode->pHaving, pCmd, isSTable, joinQuery, timeWindowQuery) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_SQL; + } + + /* + * transfer sql functions that need secondary merge into another format + * in dealing with super table queries such as: count/first/last + */ + if (isSTable) { + tscTansformFuncForSTableQuery(pQueryInfo); + + if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) { + return TSDB_CODE_TSC_INVALID_SQL; + } + } + if (parseSessionClause(pCmd, pQueryInfo, pQuerySqlNode) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; } @@ -7446,3 +7760,10 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) { return false; } + + + + + + + diff --git a/src/client/src/tscSchemaUtil.c b/src/client/src/tscSchemaUtil.c index 67352ca71c..2ea382132b 100644 --- a/src/client/src/tscSchemaUtil.c +++ b/src/client/src/tscSchemaUtil.c @@ -144,8 +144,9 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) { SNewVgroupInfo info = {0}; info.numOfEps = pVgroupMsg->numOfEps; info.vgId = pVgroupMsg->vgId; - info.inUse = 0; + info.inUse = 0; // 0 is the default value of inUse in case of multiple replica + assert(info.numOfEps >= 1 && info.vgId >= 1); for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) { tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN); info.ep[i].port = pVgroupMsg->epAddr[i].port; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index ef2fdb9940..57931ab9d7 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -34,6 +34,7 @@ int tscKeepConn[TSDB_SQL_MAX] = {0}; TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt); void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts); void tscSaveSubscriptionProgress(void* sub); +static int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo); static int32_t minMsgSize() { return tsRpcHeadSize + 100; } static int32_t getWaitingTimeInterval(int32_t count) { @@ -78,7 +79,8 @@ static void tscEpSetHtons(SRpcEpSet *s) { for (int32_t i = 0; i < s->numOfEps; i++) { s->port[i] = htons(s->port[i]); } -} +} 
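Review note on parseHavingClause() above: it now runs inside doValidateSqlNode() before the super-table function transform, and it rejects HAVING without GROUP BY as well as predicates that are not of the form function-versus-value. Illustrative acceptance/rejection cases (same st2 schema as in the SQL docs; error strings taken from the msg1/msg3 literals in the code):

```bash
# accepted: aggregate compared against a constant, GROUP BY present
taos -s "select avg(f1) from st2 group by f1 having avg(f1) > 0;"

# rejected with "having only works with group by"
taos -s "select avg(f1) from st2 having avg(f1) > 0;"

# rejected with "invalid expression in having clause"
taos -s "select avg(f1) from st2 group by f1 having f1;"
```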
+ bool tscEpSetIsEqual(SRpcEpSet *s1, SRpcEpSet *s2) { if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) { return false; @@ -111,19 +113,22 @@ static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SNewVgroupInfo *pVgrou } } -static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { - SSqlCmd *pCmd = &pObj->cmd; +static void tscUpdateVgroupInfo(SSqlObj *pSql, SRpcEpSet *pEpSet) { + SSqlCmd *pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) { return; } - int32_t vgId = pTableMetaInfo->pTableMeta->vgId; + int32_t vgId = -1; if (pTableMetaInfo->pTableMeta->tableType == TSDB_SUPER_TABLE) { - assert(vgId == 0); - return; + vgId = extractSTableQueryVgroupId(pTableMetaInfo); + } else { + vgId = pTableMetaInfo->pTableMeta->vgId; } + assert(vgId > 0); + SNewVgroupInfo vgroupInfo = {.vgId = -1}; taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo)); assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0); @@ -138,6 +143,33 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) { tscDebug("after: EndPoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps); taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo)); + + // Update the local cached epSet info cached by SqlObj + int32_t inUse = pSql->epSet.inUse; + tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo); + tscDebug("%p update the epSet in SqlObj, in use before:%d, after:%d", pSql, inUse, pSql->epSet.inUse); + +} + +int32_t extractSTableQueryVgroupId(STableMetaInfo* pTableMetaInfo) { + assert(pTableMetaInfo != NULL); + + int32_t vgIndex = pTableMetaInfo->vgroupIndex; + int32_t vgId = -1; + + if (pTableMetaInfo->pVgroupTables == NULL) { + SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; + assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); + vgId = pVgroupInfo->vgroups[vgIndex].vgId; + } else { + int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); + assert(vgIndex >= 0 && vgIndex < numOfVgroups); + + SVgroupTableInfo *pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); + vgId = pTableIdList->vgInfo.vgId; + } + + return vgId; } void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { @@ -516,21 +548,22 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { int32_t vgIndex = pTableMetaInfo->vgroupIndex; + int32_t vgId = -1; + if (pTableMetaInfo->pVgroupTables == NULL) { SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); - - pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qhandle:%" PRIX64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId); + vgId = pVgroupInfo->vgroups[vgIndex].vgId; } else { int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); assert(vgIndex >= 0 && vgIndex < numOfVgroups); SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); - - pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qId); + vgId = pTableIdList->vgInfo.vgId; } + + 
pRetrieveMsg->header.vgId = htonl(vgId); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, vgId, vgIndex, pSql->res.qId); } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId); @@ -869,8 +902,44 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pSqlFuncExpr->functionId = htons(pExpr->functionId); pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams); pSqlFuncExpr->resColId = htons(pExpr->resColId); + if (pTableMeta->tableType != TSDB_SUPER_TABLE && pExpr->pFilter && pExpr->pFilter->numOfFilters > 0) { + pSqlFuncExpr->filterNum = htonl(pExpr->pFilter->numOfFilters); + } else { + pSqlFuncExpr->filterNum = 0; + } + pMsg += sizeof(SSqlFuncMsg); + if (pSqlFuncExpr->filterNum) { + pMsg += sizeof(SColumnFilterInfo) * pExpr->pFilter->numOfFilters; + + // append the filter information after the basic column information + for (int32_t f = 0; f < pExpr->pFilter->numOfFilters; ++f) { + SColumnFilterInfo *pColFilter = &pExpr->pFilter->filterInfo[f]; + + SColumnFilterInfo *pFilterMsg = &pSqlFuncExpr->filterInfo[f]; + pFilterMsg->filterstr = htons(pColFilter->filterstr); + + if (pColFilter->filterstr) { + pFilterMsg->len = htobe64(pColFilter->len); + memcpy(pMsg, (void *)pColFilter->pz, (size_t)(pColFilter->len + 1)); + pMsg += (pColFilter->len + 1); // append the additional filter binary info + } else { + pFilterMsg->lowerBndi = htobe64(pColFilter->lowerBndi); + pFilterMsg->upperBndi = htobe64(pColFilter->upperBndi); + } + + pFilterMsg->lowerRelOptr = htons(pColFilter->lowerRelOptr); + pFilterMsg->upperRelOptr = htons(pColFilter->upperRelOptr); + + if (pColFilter->lowerRelOptr == TSDB_RELATION_INVALID && pColFilter->upperRelOptr == TSDB_RELATION_INVALID) { + tscError("invalid filter info"); + return TSDB_CODE_TSC_INVALID_SQL; + } + } + } + + for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType); pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen); @@ -2029,7 +2098,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) { (vgroupInfo.inUse < 0)) { // vgroup info exists, compare with it vgroupInfo = createNewVgroupInfo(&pMetaMsg->vgroup); taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo)); - tscDebug("add new VgroupInfo, vgId:%d, total:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); + tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap)); } } @@ -2227,18 +2296,33 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { tscError("%p empty vgroup info", pSql); } else { for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { - //just init, no need to lock - SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; + // just init, no need to lock + SVgroupInfo *pVgroup = &pInfo->vgroupList->vgroups[j]; SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; - pVgroups->vgId = htonl(vmsg->vgId); - pVgroups->numOfEps = vmsg->numOfEps; + vmsg->vgId = htonl(vmsg->vgId); + vmsg->numOfEps = vmsg->numOfEps; + for (int32_t k = 0; k < vmsg->numOfEps; ++k) { + vmsg->epAddr[k].port = htons(vmsg->epAddr[k].port); + } - assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1); + SNewVgroupInfo newVi = createNewVgroupInfo(vmsg); + pVgroup->numOfEps = newVi.numOfEps; + pVgroup->vgId = newVi.vgId; + for (int32_t k = 0; k < vmsg->numOfEps; ++k) { + pVgroup->epAddr[k].port = newVi.ep[k].port; + pVgroup->epAddr[k].fqdn = strndup(newVi.ep[k].fqdn, TSDB_FQDN_LEN); + } - for (int32_t k = 
0; k < pVgroups->numOfEps; ++k) { - pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port); - pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn)); + // check if current buffer contains the vgroup info. + // If not, add it + SNewVgroupInfo existVgroupInfo = {.inUse = -1}; + taosHashGetClone(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), NULL, &existVgroupInfo, sizeof(SNewVgroupInfo)); + + if (((existVgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&existVgroupInfo, vmsg)) || + (existVgroupInfo.inUse < 0)) { // vgroup info exists, compare with it + taosHashPut(tscVgroupMap, &newVi.vgId, sizeof(newVi.vgId), &newVi, sizeof(newVi)); + tscDebug("add new VgroupInfo, vgId:%d, total cached:%d", newVi.vgId, (int32_t) taosHashGetSize(tscVgroupMap)); } } } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 22ae192db1..05d9a284ad 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -405,6 +405,7 @@ int taos_affected_rows(TAOS_RES *tres) { TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SSqlObj *pSql = (SSqlObj *)res; + SSqlRes *pRes = &pSql->res; if (pSql == NULL || pSql->signature != pSql) return 0; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); @@ -419,7 +420,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { SFieldInfo *pFieldInfo = &pQueryInfo->fieldsInfo; - if (pFieldInfo->final == NULL) { + if (pRes->final == NULL) { TAOS_FIELD* f = calloc(pFieldInfo->numOfOutput, sizeof(TAOS_FIELD)); int32_t j = 0; @@ -439,10 +440,10 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) { } } - pFieldInfo->final = f; + pRes->final = f; } - return pFieldInfo->final; + return pRes->final; } static bool needToFetchNewBlock(SSqlObj* pSql) { diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index abce8ac8ae..5c9eea026a 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3004,7 +3004,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { } tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql, - pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex); + pVgroup->epAddr[pSql->epSet.inUse].fqdn, pVgroup->vgId, trsupport->subqueryIndex); if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode tscRetrieveFromDnodeCallBack(param, pSql, 0); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index b4b4d130f2..27279aaaee 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -447,6 +447,8 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { tfree(pRes->pArithSup->data); tfree(pRes->pArithSup); } + + tfree(pRes->final); pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } @@ -1105,6 +1107,7 @@ SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { .pSqlExpr = NULL, .pArithExprInfo = NULL, .visible = true, + .pFieldFilters = NULL, }; info.field = *pField; @@ -1117,6 +1120,7 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F .pSqlExpr = NULL, .pArithExprInfo = NULL, .visible = true, + .pFieldFilters = NULL, }; info.field = *field; @@ -1190,6 +1194,22 @@ int32_t tscGetResRowLength(SArray* pExprList) { return size; } +static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) { + for(int32_t i = 0; i < numOfFilters; ++i) { + if (pFilterInfo[i].filterstr) { + tfree(pFilterInfo[i].pz); + } + } + + tfree(pFilterInfo); +} 
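Review note spanning the tscServer.c and tscLocalMerge.c changes above: for an ordinary table, a function's filters are serialized into the query message (SSqlFuncMsg.filterNum > 0) and shipped to the server, while super-table queries keep filterNum at 0 and apply the HAVING conditions during the client-side merge in doHavingFilter(). Both paths can be poked from the shell, assuming the test database from the taosdemo walkthrough (illustrative only; group-by on a normal column has its own restrictions):

```bash
# ordinary table: the HAVING filter travels inside the query message
taos -s "select avg(f1) from test.t0 group by f3 having avg(f1) > 1;"
# super table: the HAVING filter is applied in the client-side local merge
taos -s "select avg(f1) from test.meters group by areaid having avg(f1) > 1;"
```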
+ +static void tscColumnDestroy(SColumn* pCol) { + destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters); + free(pCol); +} + + void tscFieldInfoClear(SFieldInfo* pFieldInfo) { if (pFieldInfo == NULL) { return; @@ -1210,10 +1230,14 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) { tfree(pInfo->pArithExprInfo); } + + if (pInfo->pFieldFilters != NULL) { + tscColumnDestroy(pInfo->pFieldFilters->pFilters); + tfree(pInfo->pFieldFilters); + } } taosArrayDestroy(pFieldInfo->internalField); - tfree(pFieldInfo->final); memset(pFieldInfo, 0, sizeof(SFieldInfo)); } @@ -1471,15 +1495,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) { return taosArrayGetP(pColumnList, i); } -static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) { - for(int32_t i = 0; i < numOfFilters; ++i) { - if (pFilterInfo[i].filterstr) { - tfree(pFilterInfo[i].pz); - } - } - - tfree(pFilterInfo); -} + SColumn* tscColumnClone(const SColumn* src) { assert(src != NULL); @@ -1496,10 +1512,6 @@ SColumn* tscColumnClone(const SColumn* src) { return dst; } -static void tscColumnDestroy(SColumn* pCol) { - destroyFilterInfo(pCol->filterInfo, pCol->numOfFilters); - free(pCol); -} void tscColumnListCopy(SArray* dst, const SArray* src, int16_t tableIndex) { assert(src != NULL && dst != NULL); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 3f96466cc0..26475834d5 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting; extern char tsEmail[]; extern char tsArbitrator[]; extern int8_t tsArbOnline; +extern int32_t tsDnodeId; // common extern int tsRpcTimer; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 5f4ce046ed..69b01e6c08 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -43,6 +43,7 @@ int8_t tsEnableVnodeBak = 1; int8_t tsEnableTelemetryReporting = 1; int8_t tsArbOnline = 0; char tsEmail[TSDB_FQDN_LEN] = {0}; +int32_t tsDnodeId = 0; // common int32_t tsRpcTimer = 1000; @@ -212,7 +213,7 @@ float tsAvailTmpDirectorySpace = 0; float tsAvailDataDirGB = 0; float tsUsedDataDirGB = 0; float tsReservedTmpDirectorySpace = 1.0f; -float tsMinimalDataDirGB = 1.0f; +float tsMinimalDataDirGB = 2.0f; int32_t tsTotalMemoryMB = 0; uint32_t tsVersion = 0; diff --git a/src/connector/go b/src/connector/go index 050667e5b4..d99751356e 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f +Subproject commit d99751356e285696f57bc604304ffafd10287439 diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index eb158b1f76..5d5a6f5f12 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.27-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index eb8c92575c..d4febe70d6 100755 
--- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.25 + 2.0.27 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 1f75754b0c..b8a88562ab 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.25 + 2.0.27 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index 976078da95..2970f6c2d3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -4,13 +4,23 @@ import java.sql.*; import java.util.Enumeration; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.concurrent.*; public abstract class AbstractConnection extends WrapperImpl implements Connection { protected volatile boolean isClosed; protected volatile String catalog; - protected volatile Properties clientInfoProps = new Properties(); + protected final Properties clientInfoProps = new Properties(); + + protected AbstractConnection(Properties properties) { + Set<String> propNames = properties.stringPropertyNames(); + for (String propName : propNames) { + clientInfoProps.setProperty(propName, properties.getProperty(propName)); + } + String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING"); + clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat); + } @Override public abstract Statement createStatement() throws SQLException; @@ -35,7 +45,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti } - @Override public void setAutoCommit(boolean autoCommit) throws SQLException { if (isClosed()) @@ -441,9 +450,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed) throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); - if (clientInfoProps == null) - clientInfoProps = new Properties(); - clientInfoProps.setProperty(name, value); + if (clientInfoProps != null) + clientInfoProps.setProperty(name, value); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 5b4615485d..c8ab9fb15a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -28,6 +28,7 @@ public class TSDBConnection extends AbstractConnection { } public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { + super(info); this.databaseMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
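Review note: the new protected AbstractConnection(Properties) constructor copies every incoming property into clientInfoProps and seeds PROPERTY_KEY_TIMESTAMP_FORMAT with a default of STRING; TSDBConnection here and RestfulConnection later in this patch both call super(...), so the value is retrievable through getClientInfo() wherever a statement needs it. A usage sketch, assuming a reachable TDengine RESTful endpoint on port 6041 (host and credentials are placeholders):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;

public class TimestampFormatExample {
    public static void main(String[] args) throws Exception {
        // Ask the RESTful driver to return epoch values instead of formatted
        // strings. Equivalent to appending "&timestampFormat=timestamp" to the
        // JDBC URL, as the new test cases below do.
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata", props)) {
            System.out.println(conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT));
        }
    }
}
```

Per the RestfulStatement change further down, the STRING, TIMESTAMP and UTC settings route queries to /rest/sql, /rest/sqlt and /rest/sqlutc respectively.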
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 5f599df130..bbd8519a03 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -95,6 +95,11 @@ public class TSDBDriver extends AbstractDriver { */ public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch"; + /** + * timestamp format for JDBC-RESTful, should be one of the options: string, timestamp or utc + */ + public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat"; + private TSDBDatabaseMetaData dbMetaData = null; static { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 728b537f71..e545bbc8f2 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -33,17 +33,9 @@ import java.util.regex.Pattern; public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { private String rawSql; - private String sql; - // private ArrayList parameters = new ArrayList<>(); private Object[] parameters; private boolean isPrepared; - //start with insert or import and is case-insensitive - private static Pattern savePattern = Pattern.compile("(?i)^\\s*(insert|import)"); - // is insert or import - private boolean isSaved; - - // private SavedPreparedStatement savedPreparedStatement; private volatile TSDBParameterMetaData parameterMetaData; TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) { @@ -65,35 +57,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private void init(String sql) { this.rawSql = sql; preprocessSql(); -// this.isSaved = isSavedSql(this.rawSql); -// if (this.isSaved) { -// try { -// this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this); -// } catch (SQLException e) { -// e.printStackTrace(); -// } -// } - - } - - /** - * if the precompiled sql is insert or import - * - * @param sql - * @return - */ - private boolean isSavedSql(String sql) { - Matcher matcher = savePattern.matcher(sql); - return matcher.find(); } @Override public int[] executeBatch() throws SQLException { -// if (isSaved) { -// return this.savedPreparedStatement.executeBatch(); -// } else { return super.executeBatch(); -// } } /* @@ -157,23 +125,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat * * @return a string of the native sql statement for TSDB */ -// private String getNativeSql(String rawSql) { -// for (int i = 0; i < parameters.length; i++) { -// Object para = parameters[i]; -// if (para != null) { -// String paraStr = para.toString(); -// if (para instanceof Timestamp || para instanceof String) { -// paraStr = "'" + paraStr + "'"; -// } -// this.sql = this.sql.replaceFirst("[?]", paraStr); -// } else { -// this.sql = this.sql.replaceFirst("[?]", "NULL"); -// } -// } -// parameters = new Object[parameters.length]; -// return sql; -// } - private String getNativeSql(String rawSql) throws SQLException { String sql = rawSql; for (int i = 0; i < parameters.length; ++i) { @@ -201,108 +152,58 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public ResultSet executeQuery() throws SQLException { -// if (isSaved) { -// this.savedPreparedStatement.executeBatchInternal(); -// return null; -// } else { - if (!isPrepared) return executeQuery(this.rawSql); final String sql = getNativeSql(this.rawSql); return executeQuery(sql); -// } } @Override public int executeUpdate() throws SQLException { -// if (isSaved) { -// return this.savedPreparedStatement.executeBatchInternal(); -// } else { if (!isPrepared) return
executeUpdate(this.rawSql); String sql = getNativeSql(this.rawSql); return executeUpdate(sql); -// } - } - - private boolean isSupportedSQLType(int sqlType) { - switch (sqlType) { - case Types.TIMESTAMP: - case Types.INTEGER: - case Types.BIGINT: - case Types.FLOAT: - case Types.DOUBLE: - case Types.SMALLINT: - case Types.TINYINT: - case Types.BOOLEAN: - case Types.BINARY: - case Types.NCHAR: - return true; - default: - return false; - } } @Override public void setNull(int parameterIndex, int sqlType) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (!isSupportedSQLType(sqlType) || parameterIndex < 0) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); -// if (parameterIndex >= parameters.size()) -// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_BOUNDARY); - - setObject(parameterIndex, "NULL"); + setObject(parameterIndex, null); } @Override public void setBoolean(int parameterIndex, boolean x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - setObject(parameterIndex, x); } @Override public void setByte(int parameterIndex, byte x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - setObject(parameterIndex,x); + setObject(parameterIndex, x); } @Override public void setShort(int parameterIndex, short x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setInt(int parameterIndex, int x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setLong(int parameterIndex, long x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setFloat(int parameterIndex, float x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setDouble(int parameterIndex, double x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @@ -315,17 +216,12 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setString(int parameterIndex, String x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @Override public void setBytes(int parameterIndex, byte[] x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - - setObject(parameterIndex,x); + setObject(parameterIndex, x); } @Override @@ -344,8 +240,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); setObject(parameterIndex, x); } @@ -360,7 +254,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setUnicodeStream(int parameterIndex, InputStream x, int 
length) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -375,8 +268,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void clearParameters() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - -// parameters.clear(); parameters = new Object[parameters.length]; } @@ -384,43 +275,29 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + setObject(parameterIndex,x); } @Override public void setObject(int parameterIndex, Object x) throws SQLException { -// if (isSaved) { -// this.savedPreparedStatement.setParam(parameterIndex, x); -// } else { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); if (parameterIndex < 1 && parameterIndex >= parameters.length) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - parameters[parameterIndex - 1] = x; -// parameters.add(x); -// } } @Override public boolean execute() throws SQLException { -// if (isSaved) { -// int result = this.savedPreparedStatement.executeBatchInternal(); -// return result > 0; -// } else { if (!isPrepared) return execute(this.rawSql); final String sql = getNativeSql(this.rawSql); - return execute(sql); -// } } @Override public void addBatch() throws SQLException { -// if (isSaved) { -// this.savedPreparedStatement.addBatch(); -// } else { if (this.batchedArgs == null) { batchedArgs = new ArrayList<>(); } @@ -431,7 +308,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat String sql = this.getConnection().nativeSQL(this.rawSql); addBatch(sql); } -// } } @Override @@ -475,7 +351,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public ResultSetMetaData getMetaData() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); -// return this.getResultSet().getMetaData(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); }
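Review note: the guard boilerplate is now concentrated in setObject(), which performs the closed-statement and index checks once for all typed setters, and setNull() stores a real Java null that getNativeSql() later renders as a SQL NULL literal (the old code injected the string "NULL"). One observation: the bounds check `parameterIndex < 1 && parameterIndex >= parameters.length` looks like it was meant to be `parameterIndex < 1 || parameterIndex > parameters.length`; as written the two conditions can only hold together when the parameter array is empty. A usage sketch against the reworked class (database and table names are placeholders, and a running server is assumed):

```java
import java.sql.*;

public class PreparedInsertExample {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             PreparedStatement ps = conn.prepareStatement(
                     "insert into test.weather (ts, f1) values (?, ?)")) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            // setNull() now delegates to setObject(2, null), which getNativeSql()
            // substitutes into the statement as a SQL NULL literal
            ps.setNull(2, Types.INTEGER);
            int affected = ps.executeUpdate();
            System.out.println("affected rows: " + affected);
        }
    }
}
```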
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index a20ddaa836..2576a25f0d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -203,7 +203,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); + Object value = this.rowData.get(columnIndex - 1); + if (value instanceof Timestamp) + res = ((Timestamp) value).getTime(); + else + res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } @@ -273,7 +277,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { checkAvailability(columnIndex, this.columnMetaDataList.size()); Timestamp res = null; - if (this.getBatchFetch()) return this.blockData.getTimestamp(columnIndex - 1); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index 7cf5f0d79a..34470fbc4e 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -17,6 +17,7 @@ package com.taosdata.jdbc; import java.math.BigDecimal; import java.sql.SQLException; import java.sql.Timestamp; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; @@ -299,7 +300,19 @@ public class TSDBResultSetRowData { } public void setTimestamp(int col, long ts) { - data.set(col, new Timestamp(ts)); + //TODO: this implementation contains logical error + // when precision is us the (long ts) is a 16 digit number + // when precision is ms, the (long ts) is a 13 digit number + // we need a JNI function like this: + // public void setTimestamp(int col, long epochSecond, long nanoAdjustment) + if (ts < 1_0000_0000_0000_0L) { + data.set(col, new Timestamp(ts)); + } else { + long epochSec = ts / 1000_000l; + long nanoAdjustment = ts % 1000_000l * 1000l; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + data.set(col, timestamp); + } } public Timestamp getTimestamp(int col) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index 1f3ed2d144..b810f9aeb5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -22,6 +22,7 @@ public class RestfulConnection extends AbstractConnection { private final DatabaseMetaData metadata; public RestfulConnection(String host, String port, Properties props, String database, String url) { + super(props); this.host = host; this.port = Integer.parseInt(port); this.database = database;
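Review note: the setTimestamp() change above guesses the precision of the raw epoch value by its magnitude, as the in-code TODO admits. A self-contained sketch of the same heuristic (method name is illustrative):

```java
import java.sql.Timestamp;
import java.time.Instant;

public class EpochPrecisionSketch {
    // The threshold 1_0000_0000_0000_0L is 10^13: current millisecond epochs
    // have 13 digits and microsecond epochs 16, so values below the threshold
    // are taken as ms, and the rest are split into seconds plus nanoseconds.
    static Timestamp fromEpoch(long ts) {
        if (ts < 1_0000_0000_0000_0L) {
            return new Timestamp(ts);                  // millisecond epoch
        }
        long epochSec = ts / 1_000_000L;               // microsecond epoch
        long nanoAdjustment = ts % 1_000_000L * 1_000L;
        return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
    }

    public static void main(String[] args) {
        System.out.println(fromEpoch(System.currentTimeMillis()));          // ms
        System.out.println(fromEpoch(System.currentTimeMillis() * 1000L));  // us
    }
}
```

Since 10^13 milliseconds lands around the year 2286, the guess is safe for realistic data but is still a guess, which is why the TODO asks for a JNI call that passes epochSecond and nanoAdjustment separately.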
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 5c2d4c45b0..32e517f564 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -5,13 +5,14 @@ import com.alibaba.fastjson.JSONObject; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import com.google.common.primitives.Shorts; -import com.taosdata.jdbc.AbstractResultSet; -import com.taosdata.jdbc.TSDBConstants; -import com.taosdata.jdbc.TSDBError; -import com.taosdata.jdbc.TSDBErrorNumbers; +import com.taosdata.jdbc.*; +import com.taosdata.jdbc.utils.UtcTimestampUtil; import java.math.BigDecimal; import java.sql.*; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Calendar; @@ -19,6 +20,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; private int pos = -1; + private final String database; private final Statement statement; // data @@ -65,7 +67,7 @@ } } - private Object parseColumnData(JSONArray row, int colIndex, int taosType) { + private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException { switch (taosType) { case TSDBConstants.TSDB_DATA_TYPE_BOOL: return row.getBoolean(colIndex); @@ -81,8 +83,44 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return row.getFloat(colIndex); case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return row.getDouble(colIndex); - case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: - return new Timestamp(row.getDate(colIndex).getTime()); + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { + if (row.get(colIndex) == null) + return null; + String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); + if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) { + Long value = row.getLong(colIndex); + //TODO: + if (value < 1_0000_0000_0000_0L) + return new Timestamp(value); + long epochSec = value / 1000_000l; + long nanoAdjustment = value % 1000_000l * 1000l; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if ("UTC".equalsIgnoreCase(timestampFormat)) { + String value = row.getString(colIndex); + long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000; + int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5)); + long nanoAdjustment = 0; + if (value.length() > 28) { + // us timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00 + nanoAdjustment = fractionalSec * 1000l; + } else { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00 + nanoAdjustment = fractionalSec * 1000_000l; + } + ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5)); + Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant(); + return Timestamp.from(instant); + } + String value = row.getString(colIndex); + if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS + return row.getTimestamp(colIndex); + // us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS + long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000; + long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + return timestamp; + } case TSDBConstants.TSDB_DATA_TYPE_BINARY: return row.getString(colIndex) == null ?
null : row.getString(colIndex).getBytes(); case TSDBConstants.TSDB_DATA_TYPE_NCHAR: @@ -215,6 +253,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { Object value = resultSet.get(pos).get(columnIndex - 1); if (value == null) return 0; + if (value instanceof Timestamp) { + return ((Timestamp) value).getTime(); + } long valueAsLong = 0; try { @@ -307,6 +348,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return null; if (value instanceof Timestamp) return (Timestamp) value; +// if (value instanceof Long) { +// if (1_0000_0000_0000_0L > (long) value) +// return Timestamp.from(Instant.ofEpochMilli((long) value)); +// long epochSec = (long) value / 1000_000L; +// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000); +// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); +// } return Timestamp.valueOf(value.toString()); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index 9071c04672..e9cc3a009f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.AbstractStatement; +import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; import com.taosdata.jdbc.utils.HttpClientPoolUtil; @@ -34,14 +35,11 @@ public class RestfulStatement extends AbstractStatement { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - return executeOneQuery(url, sql); + return executeOneQuery(sql); } -// if (this.database != null && !this.database.trim().replaceAll("\\s","").isEmpty()) -// HttpClientPoolUtil.execute(url, "use " + this.database); - return executeOneQuery(url, sql); + return executeOneQuery(sql); } @Override @@ -56,8 +54,6 @@ public class RestfulStatement extends AbstractStatement { return executeOneUpdate(url, sql); } -// if (this.database != null && !this.database.trim().replaceAll("\\s", "").isEmpty()) -// HttpClientPoolUtil.execute(url, "use " + this.database); return executeOneUpdate(url, sql); } @@ -78,14 +74,21 @@ public class RestfulStatement extends AbstractStatement { //如果执行了use操作应该将当前Statement的catalog设置为新的database boolean result = true; - final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) { + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + } + if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) { + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + } + if (SqlSyntaxValidator.isUseSql(sql)) { HttpClientPoolUtil.execute(url, sql); this.database = sql.trim().replace("use", "").trim(); this.conn.setCatalog(this.database); result = false; } else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - 
executeOneQuery(url, sql); + executeOneQuery(sql); } else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { executeOneUpdate(url, sql); result = false; @@ -101,11 +104,18 @@ public class RestfulStatement extends AbstractStatement { return result; } - private ResultSet executeOneQuery(String url, String sql) throws SQLException { + private ResultSet executeOneQuery(String sql) throws SQLException { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); // row data + String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; + String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); + if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt"; + if ("UTC".equalsIgnoreCase(timestampFormat)) + url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc"; + String result = HttpClientPoolUtil.execute(url, sql); JSONObject resultJson = JSON.parseObject(result); if (resultJson.getString("status").equals("error")) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java new file mode 100644 index 0000000000..04a11a2beb --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/UtcTimestampUtil.java @@ -0,0 +1,12 @@ +package com.taosdata.jdbc.utils; + +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; + +public class UtcTimestampUtil { + public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder() + .appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+") +// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true) + .toFormatter(); + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 434095efa2..dc6fd4c501 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -50,6 +50,51 @@ public class TSDBPreparedStatementTest { pstmt_insert.setNull(2, Types.INTEGER); int result = pstmt_insert.executeUpdate(); Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(3, Types.BIGINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(4, Types.FLOAT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(5, Types.DOUBLE); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(6, Types.SMALLINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(7, Types.TINYINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(8, Types.BOOLEAN); + 
result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(9, Types.BINARY); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.NCHAR); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.OTHER); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); } @Test @@ -129,7 +174,7 @@ public class TSDBPreparedStatementTest { Assert.assertFalse(pstmt_insert.execute()); } - class Person implements Serializable { + class Person { String name; int age; boolean sex; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java index 374b1335fc..c5c6f7bca5 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java @@ -160,6 +160,7 @@ public class TSDBResultSetTest { @Test public void getTime() throws SQLException { Time f1 = rs.getTime("f1"); + Assert.assertNotNull(f1); Assert.assertEquals("00:00:00", f1.toString()); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java index 9826e6ed76..6c8aed1b06 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java @@ -20,6 +20,7 @@ public class DriverAutoloadTest { final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); Assert.assertNotNull(conn); + conn.close(); } @Test @@ -27,6 +28,7 @@ public class DriverAutoloadTest { final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(url, properties); Assert.assertNotNull(conn); + conn.close(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java new file mode 100644 index 0000000000..782125144c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; + +public class NullValueInResultSetForJdbcJniTest { + + private static final String host = "127.0.0.1"; + Connection conn; + + @Test + public void test() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + Object value = rs.getObject(i); + System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); + } + System.out.println(); + } + + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() throws SQLException { + final String url = "jdbc:TAOS://" + host + 
":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)"); + stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)"); + stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)"); + stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)"); + stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)"); + stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)"); + stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)"); + stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')"); + stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java new file mode 100644 index 0000000000..f9b111bb12 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java @@ -0,0 +1,89 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class TwoTypeTimestampPercisionInJniTest { + + private static final String host = "127.0.0.1"; + private static final String ms_timestamp_db = "ms_precision_test"; + private static final String us_timestamp_db = "us_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000 + 123; + + private static Connection conn; + + @Test + public void testCase1() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase2() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + System.out.println(timestamp); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, 
"en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ms_timestamp_db); + stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'"); + stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)"); + + stmt.execute("drop database if exists " + us_timestamp_db); + stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'"); + stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java new file mode 100644 index 0000000000..ed4f979ef3 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java @@ -0,0 +1,168 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +public class TwoTypeTimestampPercisionInRestfulTest { + + private static final String host = "127.0.0.1"; + private static final String ms_timestamp_db = "ms_precision_test"; + private static final String us_timestamp_db = "us_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000 + 123; + + private static Connection conn1; + private static Connection conn2; + private static Connection conn3; + + @Test + public void testCase1() { + try (Statement stmt = conn1.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase2() { + try (Statement stmt = conn1.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase3() { + try (Statement stmt = conn2.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + Timestamp rsTimestamp = rs.getTimestamp(1); + long ts = rsTimestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + 
Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase4() { + try (Statement stmt = conn2.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase5() { + try (Statement stmt = conn3.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather"); + rs.next(); + long ts = rs.getTimestamp(1).getTime(); + Assert.assertEquals(timestamp1, ts); + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testCase6() { + try (Statement stmt = conn3.createStatement()) { + ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather"); + rs.next(); + + Timestamp timestamp = rs.getTimestamp(1); + long ts = timestamp.getTime(); + Assert.assertEquals(timestamp1, ts); + int nanos = timestamp.getNanos(); + Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos); + + ts = rs.getLong(1); + Assert.assertEquals(timestamp1, ts); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); +// properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP"); + + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn1 = DriverManager.getConnection(url, properties); + + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata×tampFormat=timestamp"; + conn2 = DriverManager.getConnection(url, properties); + + url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata×tampFormat=utc"; + conn3 = DriverManager.getConnection(url, properties); + + Statement stmt = conn1.createStatement(); + stmt.execute("drop database if exists " + ms_timestamp_db); + stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'"); + stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)"); + + stmt.execute("drop database if exists " + us_timestamp_db); + stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'"); + stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)"); + stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn1 != null) + conn1.close(); + if (conn2 != null) + conn2.close(); + if (conn3 != null) + conn3.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java index 094dff8c8d..40956a601f 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java @@ -50,6 +50,51 @@ public class RestfulPreparedStatementTest { pstmt_insert.setNull(2, Types.INTEGER); int result = pstmt_insert.executeUpdate(); Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(3, Types.BIGINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(4, Types.FLOAT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(5, Types.DOUBLE); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(6, Types.SMALLINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(7, Types.TINYINT); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(8, Types.BOOLEAN); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(9, Types.BINARY); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.NCHAR); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(10, Types.OTHER); + result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); } @Test @@ -129,7 +174,7 @@ public class RestfulPreparedStatementTest { Assert.assertFalse(pstmt_insert.execute()); } - class Person implements Serializable { + private class Person { String name; int age; boolean sex; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index d6b2a58127..9bfe9a04ff 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -160,6 +160,7 @@ public class RestfulResultSetTest { @Test public void getTime() throws SQLException { Time f1 = rs.getTime("f1"); + Assert.assertNotNull(f1); Assert.assertEquals("00:00:00", f1.toString()); } diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index f620eb4dd5..d4d202267c 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -73,6 +73,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row); static void cqCreateStream(SCqContext *pContext, SCqObj *pObj); int32_t cqObjRef = -1; +int32_t cqVnodeNum = 0; void cqRmFromList(SCqObj *pObj) { //LOCK in caller @@ -166,6 +167,8 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) { return NULL; } + 
atomic_add_fetch_32(&cqVnodeNum, 1); + cqCreateRef(); pContext->tmrCtrl = taosTmrInit(0, 0, 0, "CQ"); @@ -240,6 +243,13 @@ void cqClose(void *handle) { if (hasCq == 0) { freeSCqContext(pContext); } + + int32_t remainn = atomic_sub_fetch_32(&cqVnodeNum, 1); + if (remainn <= 0) { + int32_t ref = cqObjRef; + cqObjRef = -1; + taosCloseRef(ref); + } } void cqStart(void *handle) { diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c index fd5956b37f..c573d709f5 100644 --- a/src/dnode/src/dnodeCfg.c +++ b/src/dnode/src/dnodeCfg.c @@ -17,6 +17,7 @@ #include "os.h" #include "cJSON.h" #include "dnodeCfg.h" +#include "tglobal.h" static SDnodeCfg tsCfg = {0}; static pthread_mutex_t tsCfgMutex; @@ -70,6 +71,7 @@ static void dnodeResetCfg(SDnodeCfg *cfg) { pthread_mutex_lock(&tsCfgMutex); tsCfg.dnodeId = cfg->dnodeId; + tsDnodeId = cfg->dnodeId; tstrncpy(tsCfg.clusterId, cfg->clusterId, TSDB_CLUSTER_ID_LEN); dnodePrintCfg(cfg); dnodeWriteCfg(); diff --git a/src/inc/query.h b/src/inc/query.h index c9dabcef54..38078cf21f 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -88,10 +88,11 @@ void* qOpenQueryMgmt(int32_t vgId); void qQueryMgmtNotifyClosed(void* pExecutor); void qQueryMgmtReOpen(void *pExecutor); void qCleanupQueryMgmt(void* pExecutor); -void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo); +void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo); void** qAcquireQInfo(void* pMgmt, uint64_t key); void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle); bool checkQIdEqual(void *qHandle, uint64_t qId); +int64_t genQueryId(void); #ifdef __cplusplus } diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 3be97b10f4..2c7d7e7b86 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -164,6 +164,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0362) //"Table does not exist") #define TSDB_CODE_MND_INVALID_TABLE_TYPE TAOS_DEF_ERROR_CODE(0, 0x0363) //"Invalid table type in tsdb") #define TSDB_CODE_MND_TOO_MANY_TAGS TAOS_DEF_ERROR_CODE(0, 0x0364) //"Too many tags") +#define TSDB_CODE_MND_TOO_MANY_COLUMNS TAOS_DEF_ERROR_CODE(0, 0x0365) //"Too many columns") #define TSDB_CODE_MND_TOO_MANY_TIMESERIES TAOS_DEF_ERROR_CODE(0, 0x0366) //"Too many time series") #define TSDB_CODE_MND_NOT_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x0367) //"Not super table") // operation only available for super table #define TSDB_CODE_MND_COL_NAME_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x0368) //"Tag name too long") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 7dca0f2dc1..886de097ce 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -402,36 +402,6 @@ typedef struct SColIndex { char name[TSDB_COL_NAME_LEN]; // TODO remove it } SColIndex; -/* sql function msg, to describe the message to vnode about sql function - * operations in select clause */ -typedef struct SSqlFuncMsg { - int16_t functionId; - int16_t numOfParams; - - int16_t resColId; // result column id, id of the current output column - int16_t colType; - int16_t colBytes; - - SColIndex colInfo; - struct ArgElem { - int16_t argType; - int16_t argBytes; - union { - double d; - int64_t i64; - char * pz; - } argValue; - } arg[3]; -} SSqlFuncMsg; - -typedef struct SExprInfo { - SSqlFuncMsg base; - struct tExprNode* pExpr; - int16_t bytes; - int16_t type; - int32_t interBytes; - int64_t uid; -} SExprInfo; typedef struct SColumnFilterInfo { int16_t lowerRelOptr; @@ -454,6 +424,42 @@ typedef struct SColumnFilterInfo { }; } SColumnFilterInfo; +/* sql function msg, to describe 
the message to vnode about sql function + * operations in select clause */ +typedef struct SSqlFuncMsg { + int16_t functionId; + int16_t numOfParams; + + int16_t resColId; // result column id, id of the current output column + int16_t colType; + int16_t colBytes; + + SColIndex colInfo; + struct ArgElem { + int16_t argType; + int16_t argBytes; + union { + double d; + int64_t i64; + char * pz; + } argValue; + } arg[3]; + + int32_t filterNum; + SColumnFilterInfo filterInfo[]; +} SSqlFuncMsg; + + +typedef struct SExprInfo { + SColumnFilterInfo * pFilter; + struct tExprNode* pExpr; + int16_t bytes; + int16_t type; + int32_t interBytes; + int64_t uid; + SSqlFuncMsg base; +} SExprInfo; + /* * for client side struct, we only need the column id, type, bytes are not necessary * But for data in vnode side, we need all the following information. diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 20f81acc31..2228ff2ed2 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -221,6 +221,12 @@ + + + + + + #define TK_SPACE 300 #define TK_COMMENT 301 diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index d0b7149541..2374150c52 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -27,7 +27,7 @@ #define MAX_IP_SIZE 20 #define MAX_PASSWORD_SIZE 20 #define MAX_HISTORY_SIZE 1000 -#define MAX_COMMAND_SIZE 65536 +#define MAX_COMMAND_SIZE 1048586 #define HISTORY_FILE ".taos_history" #define DEFAULT_RES_SHOW_NUM 100 diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index 16545a5fe8..9173ab0efd 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -238,7 +238,7 @@ void resetCommand(Command *cmd, const char s[]) { clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); memset(cmd->buffer, 0, MAX_COMMAND_SIZE); memset(cmd->command, 0, MAX_COMMAND_SIZE); - strcpy(cmd->command, s); + strncpy(cmd->command, s, MAX_COMMAND_SIZE); int size = 0; int width = 0; getMbSizeInfo(s, &size, &width); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 3f6b3da9bf..37050c416c 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -37,7 +37,7 @@ static struct argp_option options[] = { {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, - {"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."}, + {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."}, {"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."}, {"dump-config", 'C', 0, 0, "Dump configuration."}, {"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."}, diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index b894fc762e..d176c5f4f9 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -9,19 +9,18 @@ IF (GIT_FOUND) EXECUTE_PROCESS( COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT - OUTPUT_VARIABLE TAOSDEMO_COMMIT) - EXECUTE_PROCESS( - COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9" - RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) + STRING(SUBSTRING "${TAOSDEMO_COMMIT_SHA1}" 0 7 TAOSDEMO_COMMIT_SHA1) EXECUTE_PROCESS( COMMAND 
${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_STATUS) - EXECUTE_PROCESS( + IF (TD_LINUX) + EXECUTE_PROCESS( COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'" RESULT_VARIABLE RESULT OUTPUT_VARIABLE TAOSDEMO_STATUS) + ENDIF (TD_LINUX) MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS}) ELSE() MESSAGE("Git not found") @@ -29,9 +28,9 @@ ELSE() SET(TAOSDEMO_STATUS "unknown") ENDIF (GIT_FOUND) -STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1) +STRING(STRIP "${TAOSDEMO_COMMIT_SHA1}" TAOSDEMO_COMMIT_SHA1) MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) -STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS) +STRING(STRIP "${TAOSDEMO_STATUS}" TAOSDEMO_STATUS) IF (TAOSDEMO_STATUS MATCHES "M") SET(TAOSDEMO_STATUS "modified") diff --git a/src/kit/taosdemo/insert-interlace.json b/src/kit/taosdemo/insert-interlace.json index 344db4fd00..cf3e1de2f4 100644 --- a/src/kit/taosdemo/insert-interlace.json +++ b/src/kit/taosdemo/insert-interlace.json @@ -40,7 +40,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 1000, - "multi_thread_write_one_tbl": "no", "interlace_rows": 20, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/src/kit/taosdemo/insert.json b/src/kit/taosdemo/insert.json index f0e3ab1d50..5f152a2d0c 100644 --- a/src/kit/taosdemo/insert.json +++ b/src/kit/taosdemo/insert.json @@ -40,7 +40,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 100000, - "multi_thread_write_one_tbl": "no", "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/src/kit/taosdemo/query.json b/src/kit/taosdemo/query.json index 33ac120bda..d84f997c32 100644 --- a/src/kit/taosdemo/query.json +++ b/src/kit/taosdemo/query.json @@ -1,5 +1,5 @@ { - "filetype":"query", + "filetype": "query", "cfgdir": "/etc/taos", "host": "127.0.0.1", "port": 6030, @@ -7,13 +7,30 @@ "password": "taosdata", "confirm_parameter_prompt": "yes", "databases": "dbx", - "specified_table_query": - {"query_interval":1, "concurrent":4, - "sqls": [{"sql": "select last_row(*) from stb where color='red'", "result": "./query_res0.txt"}, - {"sql": "select count(*) from stb_01", "result": "./query_res1.txt"}] - }, - "super_table_query": - {"stblname": "stb", "query_interval":1, "threads":4, - "sqls": [{"sql": "select last_row(*) from xxxx", "result": "./query_res2.txt"}] - } + "query_times": 1, + "specified_table_query": { + "query_interval": 1, + "concurrent": 4, + "sqls": [ + { + "sql": "select last_row(*) from stb where color='red'", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb_01", + "result": "./query_res1.txt" + } + ] + }, + "super_table_query": { + "stblname": "stb", + "query_interval": 1, + "threads": 4, + "sqls": [ + { + "sql": "select last_row(*) from xxxx", + "result": "./query_res2.txt" + } + ] + } } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 87a08dee49..01a7e31940 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -18,6 +18,7 @@ when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. 
*/ +#include #define _GNU_SOURCE #define CURL_STATICLIB @@ -67,6 +68,12 @@ enum TEST_MODE { INVAID_TEST }; +enum QUERY_MODE { + SYNC_QUERY_MODE, // 0 + ASYNC_QUERY_MODE, // 1 + INVALID_MODE +}; + #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define MAX_USERNAME_SIZE 64 @@ -198,7 +205,7 @@ typedef struct SArguments_S { bool verbose_print; bool performance_print; char * output_file; - int mode; + int query_mode; char * datatype[MAX_NUM_DATATYPE + 1]; int len_of_binary; int num_of_CPR; @@ -351,7 +358,7 @@ typedef struct SpecifiedQueryInfo_S { int rate; // 0: unlimit > 0 loop/s int concurrent; int sqlCount; - int subscribeMode; // 0: sync, 1: async + int mode; // 0: sync, 1: async int subscribeInterval; // ms int queryTimes; int subscribeRestart; @@ -359,13 +366,14 @@ typedef struct SpecifiedQueryInfo_S { char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + int totalQueried; } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; int rate; // 0: unlimit > 0 loop/s int threadCnt; - int subscribeMode; // 0: sync, 1: async + int mode; // 0: sync, 1: async int subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; @@ -378,6 +386,7 @@ typedef struct SuperQueryInfo_S { TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; + int totalQueried; } SuperQueryInfo; typedef struct SQueryMetaInfo_S { @@ -391,6 +400,7 @@ typedef struct SQueryMetaInfo_S { SpecifiedQueryInfo specifiedQueryInfo; SuperQueryInfo superQueryInfo; + int totalQueried; } SQueryMetaInfo; typedef struct SThreadInfo_S { @@ -429,6 +439,8 @@ typedef struct SThreadInfo_S { int64_t maxDelay; int64_t minDelay; + // query + int querySeq; // sequence number of sql command } threadInfo; #ifdef WINDOWS @@ -499,11 +511,6 @@ static void resetAfterAnsiEscape(void) { static int taosRandom() { - struct timeval tv; - - gettimeofday(&tv, NULL); - srand(tv.tv_usec); - return rand(); } @@ -699,7 +706,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (strcmp(argv[i], "-f") == 0) { arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { - tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); + tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); } else if (strcmp(argv[i], "-h") == 0) { arguments->host = argv[++i]; @@ -714,7 +721,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-s") == 0) { arguments->sqlFile = argv[++i]; } else if (strcmp(argv[i], "-q") == 0) { - arguments->mode = atoi(argv[++i]); + arguments->query_mode = atoi(argv[++i]); } else if (strcmp(argv[i], "-T") == 0) { arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { @@ -758,7 +765,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { char *dupstr = strdup(argv[i]); char *running = dupstr; char *token = strsep(&running, ","); - while (token != NULL) { + while(token != NULL) { if (strcasecmp(token, "INT") && strcasecmp(token, "FLOAT") && strcasecmp(token, "TINYINT") @@ -769,6 +776,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { printHelp(); + free(dupstr); ERROR_EXIT("Invalid data_type!\n"); exit(EXIT_FAILURE); } @@ -776,6 +784,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { token = strsep(&running, ","); if (index >= MAX_NUM_DATATYPE) break; } + free(dupstr); 
sptr[index] = NULL; } } else if (strcmp(argv[i], "-w") == 0) { @@ -962,7 +971,7 @@ static void getResult(TAOS_RES *res, char* resultFileName) { char temp[16000]; // fetch the records row by row - while ((row = taos_fetch_row(res))) { + while((row = taos_fetch_row(res))) { if (totalLen >= 100*1024*1024 - 32000) { if (fp) fprintf(fp, "%s", databuf); totalLen = 0; @@ -984,7 +993,8 @@ static void getResult(TAOS_RES *res, char* resultFileName) { static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) { TAOS_RES *res = taos_query(taos, command); if (res == NULL || taos_errno(res) != 0) { - printf("failed to sql:%s, reason:%s\n", command, taos_errstr(res)); + errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); taos_free_result(res); return; } @@ -1053,7 +1063,7 @@ static void rand_string(char *str, int size) { //--size; int n; for (n = 0; n < size - 1; n++) { - int key = rand_tinyint() % (int)(sizeof(charset) - 1); + int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1); str[n] = charset[key]; } str[n] = 0; @@ -1161,7 +1171,8 @@ static int printfInsertMeta() { if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - printf(" precision: \033[33m%s\033[0m\n", g_Dbs.db[i].dbCfg.precision); + printf(" precision: \033[33m%s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); } else { printf("\033[1m\033[40;31m precision error: %s\033[0m\n", g_Dbs.db[i].dbCfg.precision); @@ -1169,11 +1180,13 @@ static int printfInsertMeta() { } } - printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); + printf(" super table count: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { printf(" super table[\033[33m%d\033[0m]:\n", j); - printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); + printf(" stbName: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -1239,7 +1252,7 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].sampleFile); printf(" tagsFile: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n ", + printf(" columnCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].columnCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); @@ -1457,41 +1470,61 @@ static void printfQueryMeta() { printf("\n"); printf("specified table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.rate); + printf("query interval: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.rate); printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); + printf("concurrent: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.concurrent); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); - printf(" \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); + printf(" \033[33m%d\033[0m\n", + 
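One of the quieter fixes earlier in this hunk is in `rand_string()`: C's `%` operator keeps the sign of the dividend, so a negative value from `rand_tinyint()` could index `charset[]` out of bounds, and wrapping it in `abs()` confines the index to the table. A compilable sketch, with `rand_tinyint_like()` standing in for taosdemo's helper:

```c
#include <stdlib.h>

static const char charset[] = "abcdefghijklmnopqrstuvwxyz1234567890";

static int rand_tinyint_like(void) {
    return (rand() % 256) - 128;   /* signed stand-in: may be negative */
}

static void rand_string_sketch(char *str, int size) {
    int n;
    for (n = 0; n < size - 1; n++) {
        /* abs() keeps the index inside [0, sizeof(charset) - 2] even for negative input */
        int key = abs(rand_tinyint_like()) % (int)(sizeof(charset) - 1);
        str[n] = charset[key];
    }
    str[n] = 0;                    /* NUL-terminate the generated string */
}
```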
g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + printf("mod: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.mode); + printf("interval: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); - printf("super table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); - printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); + printf("super table query info:\n"); + printf("query interval: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.rate); + printf("threadCnt: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", + g_queryInfo.superQueryInfo.sTblName); + printf("stb query times:\033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); + printf("mod: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.mode); + printf("interval: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress); } - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.superQueryInfo.sql[i]); } printf("\n"); @@ -1635,7 +1668,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { // sys database name : 'log' if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { @@ -1668,7 +1701,8 @@ 
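`getDbFromServer()`, continued below, uses the client library's standard cursor loop, which this patch normalizes to `while((row = taos_fetch_row(res)) != NULL)` throughout the file. A self-contained sketch of that iteration pattern against the TDengine client API (`taos.h`); the query string and the column handling are illustrative:

```c
#include <stdio.h>
#include <taos.h>

static int list_databases(TAOS *taos) {
    TAOS_RES *res = taos_query(taos, "show databases;");
    if (res == NULL || taos_errno(res) != 0) {
        fprintf(stderr, "show databases failed: %s\n", taos_errstr(res));
        taos_free_result(res);
        return -1;
    }
    TAOS_FIELD *fields = taos_fetch_fields(res);
    TAOS_ROW row;
    int count = 0;
    while ((row = taos_fetch_row(res)) != NULL) {   /* same loop shape as the patch */
        /* column 0 is the database name; fields[0].bytes bounds its width */
        printf("db[%d]: %.*s\n", count++, (int)fields[0].bytes, (char *)row[0]);
    }
    taos_free_result(res);
    return count;
}
```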
static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); tstrncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], @@ -1679,7 +1713,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { count++; if (count > MAX_DATABASE_COUNT) { - errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT); + errorPrint("%s() LN%d, The database count overflow than %d\n", + __func__, __LINE__, MAX_DATABASE_COUNT); break; } } @@ -1689,6 +1724,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { static void printfDbInfoForQueryToFile( char* filename, SDbInfo* dbInfos, int index) { + if (filename[0] == 0) return; @@ -1907,7 +1943,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) if (bytes == 0) break; sent+=bytes; - } while (sent < req_str_len); + } while(sent < req_str_len); memset(response_buf, 0, RESP_BUF_LEN); resp_len = sizeof(response_buf) - 1; @@ -1925,7 +1961,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) if (bytes == 0) break; received += bytes; - } while (received < resp_len); + } while(received < resp_len); if (received == resp_len) { free(request_buf); @@ -1949,7 +1985,8 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { - errorPrint("%s() LN%d, calloc failed! size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1); + errorPrint("%s() LN%d, calloc failed! 
size:%d\n", + __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } @@ -2153,7 +2190,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } char* pTblName = childTblName; - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { int32_t* len = taos_fetch_lengths(res); tstrncpy(pTblName, (char *)row[0], len[0]+1); //printf("==== sub table name: %s\n", pTblName); @@ -2216,7 +2253,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int tagIndex = 0; int columnIndex = 0; TAOS_FIELD *fields = taos_fetch_fields(res); - while ((row = taos_fetch_row(res)) != NULL) { + while((row = taos_fetch_row(res)) != NULL) { if (0 == count) { count++; continue; @@ -2331,7 +2368,8 @@ static int createSuperTable(TAOS * taos, char* dbName, lenOfOneRow += 21; } else { taos_close(taos); - printf("config error data type : %s\n", dataType); + errorPrint("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, dataType); exit(-1); } } @@ -2349,7 +2387,8 @@ static int createSuperTable(TAOS * taos, char* dbName, } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, superTbl->colsOfCreateChildTable); + verbosePrint("%s() LN%d: %s\n", + __func__, __LINE__, superTbl->colsOfCreateChildTable); if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", @@ -2404,7 +2443,8 @@ static int createSuperTable(TAOS * taos, char* dbName, lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; } else { taos_close(taos); - printf("config error tag type : %s\n", dataType); + errorPrint("%s() LN%d, config error tag type : %s\n", + __func__, __LINE__, dataType); exit(-1); } } @@ -2565,16 +2605,13 @@ static int createDatabasesAndStables() { static void* createTable(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int64_t lastPrintTime = taosGetTimestampMs(); int buff_len; - if (superTblInfo) - buff_len = superTblInfo->maxSqlLen; - else - buff_len = BUFFER_SIZE; + buff_len = BUFFER_SIZE / 8; char *buffer = calloc(buff_len, 1); if (buffer == NULL) { @@ -2587,15 +2624,15 @@ static void* createTable(void *sarg) verbosePrint("%s() LN%d: Creating table from %d to %d\n", __func__, __LINE__, - winfo->start_table_from, winfo->end_table_to); + pThreadInfo->start_table_from, pThreadInfo->end_table_to); - for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) { + for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(buffer, buff_len, "create table if not exists %s.%s%d %s;", - winfo->db_name, + pThreadInfo->db_name, g_args.tb_prefix, i, - winfo->cols); + pThreadInfo->cols); } else { if (superTblInfo == NULL) { errorPrint("%s() LN%d, use metric, but super table info is NULL\n", @@ -2622,15 +2659,15 @@ static void* createTable(void *sarg) return NULL; } len += snprintf(buffer + len, - superTblInfo->maxSqlLen - len, + buff_len - len, "if not exists %s.%s%d using %s.%s tags %s ", - winfo->db_name, superTblInfo->childTblPrefix, - i, winfo->db_name, + pThreadInfo->db_name, superTblInfo->childTblPrefix, + i, pThreadInfo->db_name, superTblInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; if ((batchNum < superTblInfo->batchCreateTableNum) - && ((superTblInfo->maxSqlLen - len) + && ((buff_len - len) >= 
(superTblInfo->lenOfTagOfOneRow + 256))) { continue; } @@ -2639,7 +2676,7 @@ static void* createTable(void *sarg) len = 0; verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)){ + if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){ errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); free(buffer); return NULL; @@ -2648,14 +2685,14 @@ static void* createTable(void *sarg) int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] already create %d - %d tables\n", - winfo->threadID, winfo->start_table_from, i); + pThreadInfo->threadID, pThreadInfo->start_table_from, i); lastPrintTime = currentPrintTime; } } if (0 != len) { verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) { + if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) { errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); } } @@ -2702,7 +2739,8 @@ static int startMultiThreadCreateChildTable( db_name, g_Dbs.port); if (t_info->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); free(pids); free(infos); return -1; @@ -2763,35 +2801,35 @@ static void createChildTables() { } } } else { - // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); - int j = 0; - while (g_args.datatype[j]) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], - "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(60)", j, g_args.datatype[j]); - } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); - } - len = strlen(tblColsBuf); - j++; - } + // normal table + len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + int j = 0; + while(g_args.datatype[j]) { + if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.datatype[j], + "NCHAR", strlen("NCHAR")) == 0)) { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s(60)", j, g_args.datatype[j]); + } else { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s", j, g_args.datatype[j]); + } + len = strlen(tblColsBuf); + j++; + } - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", - __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); - startMultiThreadCreateChildTable( - tblColsBuf, - g_Dbs.threadCountByCreateTbl, - 0, - g_args.num_of_tables, - g_Dbs.db[i].dbName, - NULL); + verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", + __func__, __LINE__, + g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + startMultiThreadCreateChildTable( + tblColsBuf, + g_Dbs.threadCountByCreateTbl, + 0, + g_args.num_of_tables, + g_Dbs.db[i].dbName, + NULL); } } } @@ -2825,7 +2863,7 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { return -1; } - while ((readLen = tgetline(&line, &n, fp)) != -1) { + while((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; } @@ -2889,7 +2927,7 @@ static int readSampleFromCsvFileToMem( 
assert(superTblInfo->sampleDataBuf); memset(superTblInfo->sampleDataBuf, 0, MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); - while (1) { + while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { @@ -2915,9 +2953,6 @@ static int readSampleFromCsvFileToMem( continue; } - verbosePrint("readLen=%ld stb->lenOfOneRow=%d getRows=%d\n", (long)readLen, - superTblInfo->lenOfOneRow, getRows); - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, line, readLen); getRows++; @@ -2948,7 +2983,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( } int columnSize = cJSON_GetArraySize(columns); - if (columnSize > MAX_COLUMN_COUNT) { + if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) { errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", __func__, __LINE__, MAX_COLUMN_COUNT); goto PARSE_OVER; @@ -2968,7 +3003,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column count not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, column count not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { count = 1; @@ -2977,8 +3013,10 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d: failed to read json, column type not found\n", __func__, __LINE__); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d: failed to read json, column type not found\n", + __func__, __LINE__); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -2988,7 +3026,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to read json, column len not found\n", __func__, __LINE__); + debugPrint("%s() LN%d: failed to read json, column len not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 8; @@ -3001,6 +3040,13 @@ static bool getColumnAndTagTypeFromInsertJsonFile( index++; } } + + if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) { + errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n", + __func__, __LINE__, MAX_COLUMN_COUNT); + goto PARSE_OVER; + } + superTbls->columnCount = index; count = 1; @@ -3008,13 +3054,15 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // tags cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { - debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, tags not found\n", + __func__, __LINE__); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > MAX_TAG_COUNT) { - debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT); + errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", + __func__, __LINE__, MAX_TAG_COUNT); goto PARSE_OVER; } @@ -3037,8 +3085,10 @@ static bool 
getColumnAndTagTypeFromInsertJsonFile( // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - printf("ERROR: failed to read json, tag type not found\n"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, tag type not found\n", + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -3047,26 +3097,37 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - printf("ERROR: failed to read json, column len not found\n"); + errorPrint("%s() LN%d, failed to read json, column len not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 0; } for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE); + tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, + MAX_TB_NAME_SIZE); superTbls->tags[index].dataLen = columnCase.dataLen; index++; } } + + if (index > MAX_TAG_COUNT) { + errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n", + __func__, __LINE__, MAX_TAG_COUNT); + goto PARSE_OVER; + } + superTbls->tagCount = index; + if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) { + errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n", + __func__, __LINE__, MAX_COLUMN_COUNT); + goto PARSE_OVER; + } ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -3143,7 +3204,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!gInsertInterval) { g_args.insert_interval = 0; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3153,9 +3215,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %d > num_of_records_per_request %d\n\n", + printf("NOTICE: interlace rows value %d > num_of_records_per_req %d\n\n", g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_request %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3164,7 +3226,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!interlaceRows) { g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3174,7 +3237,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxSqlLen) { g_args.max_sql_len = TSDB_PAYLOAD_SIZE; } else { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3182,9 +3246,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = 0xffff; + g_args.num_of_RPR = INT32_MAX; } else { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3477,9 +3542,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (childTblExists && childTblExists->type == cJSON_String && childTblExists->valuestring != NULL) { - if (0 == strncasecmp(childTblExists->valuestring, "yes", 3)) { + if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) + && (g_Dbs.db[i].drop == false)) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; - } else if (0 == strncasecmp(childTblExists->valuestring, "no", 2)) { + } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) + || (g_Dbs.db[i].drop == true))) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; @@ -3508,7 +3575,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!dataSource) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); } else { - errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, data_source not found\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3525,18 +3593,20 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if (childTbl_limit) { + if ((childTbl_limit) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_limit->type != cJSON_Number) { printf("ERROR: failed to read json, childtable_limit\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; } else { - g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result + g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. 
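The `child_table_exists` rework above is worth spelling out: once `drop` is `yes` the database is recreated, so a "yes" in the config cannot be trusted, and `childtable_limit`/`childtable_offset` (which slice the set of existing child tables) are only honored when those tables genuinely survive. A sketch of the decision logic; the enum values mirror the patch, the helper functions are illustrative:

```c
#include <stdbool.h>
#include <strings.h>

enum { TBL_NO_EXISTS = 0, TBL_ALREADY_EXISTS = 1 };

static int resolve_child_tbl_exists(const char *jsonValue, bool dbDrop) {
    if (jsonValue && (0 == strncasecmp(jsonValue, "yes", 3)) && !dbDrop)
        return TBL_ALREADY_EXISTS;  /* only trust "yes" if the database is kept */
    return TBL_NO_EXISTS;           /* "no", missing, or drop=true all mean recreate */
}

static bool limit_offset_apply(bool dbDrop, int childTblExists) {
    /* limit/offset select a slice of existing tables; meaningless after a drop */
    return (!dbDrop) && (childTblExists == TBL_ALREADY_EXISTS);
}
```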
} cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if (childTbl_offset) { + if ((childTbl_offset) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { printf("ERROR: failed to read json, childtable_offset\n"); goto PARSE_OVER; @@ -3581,7 +3651,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { + if (sampleFile && sampleFile->type == cJSON_String + && sampleFile->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, sampleFile->valuestring, MAX_FILE_NAME_LEN); } else if (!sampleFile) { @@ -3613,17 +3684,18 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { int32_t len = maxSqlLen->valueint; if (len > TSDB_MAX_ALLOWED_SQL_LEN) { len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < TSDB_MAX_SQL_LEN) { - len = TSDB_MAX_SQL_LEN; + } else if (len < 5) { + len = 5; } g_Dbs.db[i].superTbls[j].maxSqlLen = len; } else if (!maxSqlLen) { g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { - printf("ERROR: failed to read json, maxSqlLen not found\n"); + errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } - +/* cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes if (multiThreadWriteOneTbl @@ -3640,15 +3712,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n"); goto PARSE_OVER; } - +*/ cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_request %d\n\n", + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n", i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_request %d\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); @@ -3724,9 +3796,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -3792,7 +3861,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!gQueryTimes) { g_args.query_times = 1; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3830,35 +3900,45 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.rate = 0; } - cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, + "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; } else if 
(!specifiedQueryTimes) { g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); if (concurrent && concurrent->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { + errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); + goto PARSE_OVER; + } } else if (!concurrent) { g_queryInfo.specifiedQueryInfo.concurrent = 1; } - cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { - if (0 == strcmp("sync", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", mode->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeMode = 1; + cJSON* queryMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (queryMode && queryMode->type == cJSON_String + && queryMode->valuestring != NULL) { + if (0 == strcmp("sync", queryMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; + } else if (0 == strcmp("async", queryMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE; } else { - printf("ERROR: failed to read json, subscribe mod error\n"); + errorPrint("%s() LN%d, failed to read json, query mode input error\n", + __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.specifiedQueryInfo.subscribeMode = 0; + g_queryInfo.specifiedQueryInfo.mode = SYNC_QUERY_MODE; } cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); @@ -3905,12 +3985,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!superSqls) { g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (superSqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); + errorPrint("%s() LN%d, failed to read json, super sqls not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(superSqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -3962,7 +4044,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!superQueryTimes) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3981,25 +4064,30 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { //} cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); + if (stblname && stblname->type == cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + MAX_TB_NAME_SIZE); } else { - 
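The same "mode" string handling appears twice (for the specified query above and the super-table query below), now mapped onto the `QUERY_MODE` enum introduced at the top of the file instead of raw 0/1 constants. A sketch of the mapping, defaulting to sync when the field is absent; the function name is illustrative:

```c
#include <string.h>

enum QUERY_MODE { SYNC_QUERY_MODE, ASYNC_QUERY_MODE, INVALID_MODE };

static enum QUERY_MODE parse_query_mode(const char *s) {
    if (s == NULL) return SYNC_QUERY_MODE;          /* field missing: default to sync */
    if (0 == strcmp("sync", s))  return SYNC_QUERY_MODE;
    if (0 == strcmp("async", s)) return ASYNC_QUERY_MODE;
    return INVALID_MODE;                            /* caller treats this as a parse error */
}
```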
printf("ERROR: failed to read json, super table name not found\n"); + errorPrint("%s() LN%d, failed to read json, super table name input error\n", + __func__, __LINE__); goto PARSE_OVER; } cJSON* submode = cJSON_GetObjectItem(superQuery, "mode"); - if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { + if (submode && submode->type == cJSON_String + && submode->valuestring != NULL) { if (0 == strcmp("sync", submode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 0; + g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } else if (0 == strcmp("async", submode->valuestring)) { - g_queryInfo.superQueryInfo.subscribeMode = 1; + g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE; } else { - printf("ERROR: failed to read json, subscribe mod error\n"); + errorPrint("%s() LN%d, failed to read json, query mode input error\n", + __func__, __LINE__); goto PARSE_OVER; } } else { - g_queryInfo.superQueryInfo.subscribeMode = 0; + g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE; } cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); @@ -4012,7 +4100,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { + if (subrestart && subrestart->type == cJSON_String + && subrestart->valuestring != NULL) { if (0 == strcmp("yes", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = 1; } else if (0 == strcmp("no", subrestart->valuestring)) { @@ -4046,12 +4135,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!subsqls) { g_queryInfo.superQueryInfo.sqlCount = 0; } else if (subsqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); + errorPrint("%s() LN%d: failed to read json, super sqls not found\n", + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(subsqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4061,19 +4152,25 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (sql == NULL) continue; cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); + if (!sqlStr || sqlStr->type != cJSON_String + || sqlStr->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, sql not found\n", + __func__, __LINE__); goto PARSE_OVER; } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, + MAX_QUERY_SQL_LENGTH); cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ - tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + if (result != NULL && result->type == cJSON_String + && result->valuestring != NULL){ + tstrncpy(g_queryInfo.superQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, sub query result file not found\n"); + errorPrint("%s() LN%d, failed to read 
json, sub query result file not found\n", + __func__, __LINE__); goto PARSE_OVER; } } @@ -4083,9 +4180,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ret = true; PARSE_OVER: - //free(content); - //cJSON_Delete(root); - //fclose(fp); return ret; } @@ -4255,13 +4349,20 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d, ", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "tinyint", strlen("tinyint"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d, ", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "bool", strlen("bool"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d, ", rand_bool()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "timestamp", strlen("timestamp"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%"PRId64", ", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -4353,7 +4454,8 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); + errorPrint("%s() LN%d, read sample from csv file failed.\n", + __func__, __LINE__); tmfree(sampleDataBuf); superTblInfo->sampleDataBuf = NULL; return -1; @@ -4375,7 +4477,8 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) } else { if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); + printf("========restful return fail, threadID[%d]\n", + pThreadInfo->threadID); } else { affectedRows = k; } @@ -4411,13 +4514,15 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) } } -static int generateDataTail(char *tableName, int32_t tableSeq, - threadInfo* pThreadInfo, SSuperTable* superTblInfo, +static int generateDataTail( + SSuperTable* superTblInfo, int batch, char* buffer, int remainderBufLen, int64_t insertRows, int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int len = 0; int ncols_per_record = 1; // count first col ts + char *pstr = buffer; + if (superTblInfo == NULL) { int datatypeSeq = 0; while(g_args.datatype[datatypeSeq]) { @@ -4446,29 +4551,29 @@ static int generateDataTail(char *tableName, int32_t tableSeq, pSamplePos); } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + + int randTail = superTblInfo->timeStampStep * k; + if (superTblInfo->disorderRatio > 0) { int rand_num = taosRandom() % 100; - if (0 != superTblInfo->disorderRatio - && rand_num < superTblInfo->disorderRatio) { - int64_t d = startTime - + superTblInfo->timeStampStep * k - - 
taosRandom() % superTblInfo->disorderRange; - retLen = generateRowData( + if(rand_num < superTblInfo->disorderRatio) { + randTail = (randTail + (taosRandom() % superTblInfo->disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %d\n", randTail); + } + } + + uint64_t d = startTime + + randTail; + retLen = generateRowData( data, d, superTblInfo); - } else { - retLen = generateRowData( - data, - startTime + superTblInfo->timeStampStep * k, - superTblInfo); - } } if (retLen > remainderBufLen) { break; } - buffer += snprintf(buffer, retLen + 1, "%s", data); + pstr += snprintf(pstr , retLen + 1, "%s", data); k++; len += retLen; remainderBufLen -= retLen; @@ -4477,25 +4582,26 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int lenOfBinary = g_args.len_of_binary; int rand_num = taosRandom() % 100; + int randTail; + if ((g_args.disorderRatio != 0) && (rand_num < g_args.disorderRatio)) { - - int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k - - taosRandom() % g_args.disorderRange; - - retLen = generateData(data, data_type, - ncols_per_record, d, lenOfBinary); + randTail = (DEFAULT_TIMESTAMP_STEP * k + + (taosRandom() % g_args.disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %d\n", randTail); } else { - retLen = generateData(data, data_type, - ncols_per_record, - startTime + DEFAULT_TIMESTAMP_STEP * k, - lenOfBinary); + randTail = DEFAULT_TIMESTAMP_STEP * k; } + retLen = generateData(data, data_type, + ncols_per_record, + startTime + randTail, + lenOfBinary); + if (len > remainderBufLen) break; - buffer += sprintf(buffer, " %s", data); + pstr += sprintf(pstr, " %s", data); k++; len += retLen; remainderBufLen -= retLen; @@ -4516,9 +4622,14 @@ static int generateDataTail(char *tableName, int32_t tableSeq, } static int generateSQLHead(char *tableName, int32_t tableSeq, - threadInfo* pThreadInfo, SSuperTable* superTblInfo, char *buffer) + threadInfo* pThreadInfo, SSuperTable* superTblInfo, + char *buffer, int remainderBufLen) { int len; + +#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into .. 
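The rewritten disorder logic above reads more clearly as a standalone function: the per-row offset (`randTail`) is computed first and, for roughly `disorderRatio` percent of rows, negated with an extra `rand() % disorderRange + 1` so the row lands strictly before `startTime` rather than after it. A sketch with `rand()` standing in for `taosRandom()`, assuming `disorderRange > 0` whenever `disorderRatio > 0` as the patch does:

```c
#include <stdint.h>
#include <stdlib.h>

static int64_t row_timestamp(int64_t startTime, int timeStampStep, int k,
                             int disorderRatio, int disorderRange) {
    int randTail = timeStampStep * k;               /* normal, monotonically increasing tail */
    if (disorderRatio > 0 && (rand() % 100) < disorderRatio) {
        /* push this row before the sequence start; the +1 keeps it strictly in the past */
        randTail = (randTail + (rand() % disorderRange + 1)) * (-1);
    }
    return startTime + randTail;
}
```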
+ char headBuf[HEAD_BUFF_LEN]; + if (superTblInfo) { if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { char* tagsValBuf = NULL; @@ -4530,13 +4641,15 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, tableSeq % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); return -1; } - len = snprintf(buffer, - superTblInfo->maxSqlLen, - "insert into %s.%s using %s.%s tags %s values", + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s using %s.%s tags %s values", pThreadInfo->db_name, tableName, pThreadInfo->db_name, @@ -4544,34 +4657,101 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, tagsValBuf); tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - len = snprintf(buffer, - superTblInfo->maxSqlLen, - "insert into %s.%s values", + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", pThreadInfo->db_name, tableName); } else { - len = snprintf(buffer, - superTblInfo->maxSqlLen, - "insert into %s.%s values", + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", pThreadInfo->db_name, tableName); } } else { - len = snprintf(buffer, - g_args.max_sql_len, - "insert into %s.%s values", + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", pThreadInfo->db_name, tableName); } + if (len > remainderBufLen) + return -1; + + tstrncpy(buffer, headBuf, len + 1); + return len; } -static int generateProgressiveDataBuffer(char *pTblName, +static int generateInterlaceDataBuffer( + char *tableName, int batchPerTbl, int i, int batchPerTblTimes, int32_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, - int64_t startFrom, int64_t startTime, int *pSamplePos) + int64_t startTime, + int *pRemainderBufLen) +{ + assert(buffer); + char *pstr = buffer; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, + superTblInfo, pstr, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + // generate data buffer + verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, i, buffer); + + pstr += headLen; + *pRemainderBufLen -= headLen; + + int dataLen = 0; + + verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", + pThreadInfo->threadID, __func__, __LINE__, + i, batchPerTblTimes, batchPerTbl); + + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(pThreadInfo->time_precision); + } + } else { + startTime = 1500000000000; + } + + int k = generateDataTail( + superTblInfo, + batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &(pThreadInfo->samplePos), &dataLen); + + if (k == batchPerTbl) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } + + return k; +} + +static int generateProgressiveDataBuffer( + char *tableName, + int32_t tableSeq, + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + int64_t startFrom, int64_t startTime, int *pSamplePos, + int *pRemainderBufLen) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4586,23 +4766,24 @@ static int generateProgressiveDataBuffer(char *pTblName, } assert(buffer != NULL); - - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - int remainderBufLen = maxSqlLen; - - 
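`generateSQLHead()` above now renders the statement head into a fixed scratch buffer (`HEAD_BUFF_LEN`) and copies it out only when it fits within the caller's remaining space, so an oversized head is reported instead of silently truncating the insert buffer. A minimal sketch of that fit-check, using `memcpy` in place of TDengine's `tstrncpy()` and adding a guard on the scratch buffer itself:

```c
#include <stdio.h>
#include <string.h>

#define HEAD_BUFF_LEN (1024 * 24)

/* Returns the head length written into 'out', or -1 if it does not fit. */
static int generate_head(char *out, int remainderBufLen,
                         const char *db, const char *tbl) {
    char headBuf[HEAD_BUFF_LEN];
    int len = snprintf(headBuf, HEAD_BUFF_LEN, "%s.%s values", db, tbl);
    if (len >= HEAD_BUFF_LEN || len > remainderBufLen)
        return -1;                  /* caller skips this table; 'out' stays untouched */
    memcpy(out, headBuf, len + 1);  /* copy including the NUL terminator */
    return len;
}
```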
memset(buffer, 0, maxSqlLen); - char *pstr = buffer; - int headLen = generateSQLHead(pTblName, tableSeq, pThreadInfo, superTblInfo, - buffer); - pstr += headLen; - remainderBufLen -= headLen; + int k = 0; + + memset(buffer, 0, *pRemainderBufLen); + + int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; - int k; int dataLen; - k = generateDataTail(pTblName, tableSeq, pThreadInfo, superTblInfo, - g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom, + k = generateDataTail(superTblInfo, + g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, startTime, pSamplePos, &dataLen); @@ -4614,8 +4795,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + if (interlaceRows > insertRows) + interlaceRows = insertRows; + + if (interlaceRows > g_args.num_of_RPR) + interlaceRows = g_args.num_of_RPR; + int insertMode; if (interlaceRows > 0) { @@ -4627,11 +4816,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { // TODO: prompt tbl count multple interlace rows and batch // - char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { - errorPrint( "Failed to alloc %d Bytes, reason:%s\n", - superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, - strerror(errno)); + errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4642,7 +4831,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; uint64_t st = 0; @@ -4660,18 +4848,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t startTime = pThreadInfo->start_time; - int batchPerTblTimes; - int batchPerTbl; - assert(pThreadInfo->ntables > 0); - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; + int batchPerTbl = interlaceRows; - batchPerTbl = interlaceRows; + int batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = - (g_args.num_of_RPR / (interlaceRows * pThreadInfo->ntables)) + 1; + g_args.num_of_RPR / interlaceRows; } else { batchPerTblTimes = 1; } @@ -4680,8 +4864,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; int sleepTimeTotal = 0; - int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - int remainderBufLen; + char *strInsertInto = "insert into "; + int nInsertBufLen = strlen(strInsertInto); while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { @@ -4690,9 +4874,14 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } // generate data memset(buffer, 0, maxSqlLen); - remainderBufLen = maxSqlLen; + int remainderBufLen = maxSqlLen; char *pstr = buffer; + + int len = snprintf(pstr, nInsertBufLen + 1, 
"%s", strInsertInto); + pstr += len; + remainderBufLen -= len; + int recOfBatch = 0; for (int i = 0; i < batchPerTblTimes; i ++) { @@ -4704,58 +4893,32 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { return NULL; } - int headLen; - if (i == 0) { - headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, - superTblInfo, pstr); - } else { - headLen = snprintf(pstr, TSDB_TABLE_NAME_LEN, "%s.%s values", - pThreadInfo->db_name, - tableName); - } - - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, i, buffer); - - pstr += headLen; - remainderBufLen -= headLen; - - int dataLen = 0; - - verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", - pThreadInfo->threadID, __func__, __LINE__, - i, batchPerTblTimes, batchPerTbl); - - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } - } else { - startTime = 1500000000000; - } - int generated = generateDataTail( - tableName, tableSeq, pThreadInfo, superTblInfo, - batchPerTbl, pstr, remainderBufLen, insertRows, 0, + int oldRemainderLen = remainderBufLen; + int generated = generateInterlaceDataBuffer( + tableName, batchPerTbl, i, batchPerTblTimes, + tableSeq, + pThreadInfo, pstr, + insertRows, startTime, - &(pThreadInfo->samplePos), &dataLen); + &remainderBufLen); if (generated < 0) { debugPrint("[%d] %s() LN%d, generated data is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_and_statistics_interlace; + } else if (generated == 0) { + break; } - pstr += dataLen; - remainderBufLen -= dataLen; + tableSeq ++; recOfBatch += batchPerTbl; + pstr += (oldRemainderLen - remainderBufLen); // startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, recOfBatch); - tableSeq ++; if (insertMode == INTERLACE_INSERT_MODE) { if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { // turn to first table @@ -4861,11 +5024,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); + char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { errorPrint( "Failed to alloc %d Bytes, reason:%s\n", - superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, + maxSqlLen, strerror(errno)); return NULL; } @@ -4908,10 +5072,20 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); + int remainderBufLen = maxSqlLen; + char *pstr = buffer; + int nInsertBufLen = strlen("insert into "); + + int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into "); + + pstr += len; + remainderBufLen -= len; + int generated = generateProgressiveDataBuffer( - tableName, tableSeq, pThreadInfo, buffer, insertRows, + tableName, tableSeq, pThreadInfo, pstr, insertRows, i, start_time, - &(pThreadInfo->samplePos)); + &(pThreadInfo->samplePos), + &remainderBufLen); if (generated > 0) i += generated; else @@ -4964,11 +5138,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { */ } // num_of_DPT - if ((tableSeq == pThreadInfo->ntables - 
1) && superTblInfo && + if (g_args.verbose_print) { + if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && (0 == strncasecmp( superTblInfo->dataSource, "sample", strlen("sample")))) { - printf("%s() LN%d samplePos=%d\n", + verbosePrint("%s() LN%d samplePos=%d\n", __func__, __LINE__, pThreadInfo->samplePos); + } } } // tableSeq @@ -4984,45 +5160,45 @@ free_and_statistics_2: static void* syncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; if (interlaceRows > 0) { // interlace mode - return syncWriteInterlace(winfo); + return syncWriteInterlace(pThreadInfo); } else { // progressive mode - return syncWriteProgressive(winfo); + return syncWriteProgressive(pThreadInfo); } } static void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* winfo = (threadInfo*)param; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo* pThreadInfo = (threadInfo*)param; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { - winfo->et = taosGetTimestampUs(); - if (((winfo->et - winfo->st)/1000) < insert_interval) { - taosMsleep(insert_interval - (winfo->et - winfo->st)/1000); // ms + pThreadInfo->et = taosGetTimestampUs(); + if (((pThreadInfo->et - pThreadInfo->st)/1000) < insert_interval) { + taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)/1000); // ms } } - char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); + char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, - winfo->start_table_from); -// if (winfo->counter >= winfo->superTblInfo->insertRows) { - if (winfo->counter >= g_args.num_of_RPR) { - winfo->start_table_from++; - winfo->counter = 0; + pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix, + pThreadInfo->start_table_from); +// if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= g_args.num_of_RPR) { + pThreadInfo->start_table_from++; + pThreadInfo->counter = 0; } - if (winfo->start_table_from > winfo->end_table_to) { - tsem_post(&winfo->lock_sem); + if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { + tsem_post(&pThreadInfo->lock_sem); free(buffer); taos_free_result(res); return; @@ -5030,46 +5206,46 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != winfo->superTblInfo->disorderRatio - && rand_num < winfo->superTblInfo->disorderRatio) { - int64_t d = winfo->lastTs - taosRandom() % winfo->superTblInfo->disorderRange; - generateRowData(data, d, winfo->superTblInfo); + if (0 != pThreadInfo->superTblInfo->disorderRatio + && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); + generateRowData(data, d, pThreadInfo->superTblInfo); } else { - generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); + generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo); } pstr += sprintf(pstr, "%s", data); - winfo->counter++; + 
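The `callBack()`/`asyncWrite()` pair being renamed here implements a self-rescheduling chain: each completion handler builds the next INSERT and re-arms `taos_query_a()` with itself, releasing a semaphore once the table range is exhausted. A trimmed sketch of that control flow, using POSIX `sem_t` in place of TDengine's `tsem_t` and a fixed statement instead of the generated batch:

```c
#include <taos.h>
#include <semaphore.h>

typedef struct {
    TAOS  *taos;
    sem_t  done;
    int    remainingBatches;
} async_ctx;

static void on_insert_done(void *param, TAOS_RES *res, int code) {
    async_ctx *ctx = (async_ctx *)param;
    taos_free_result(res);                       /* always release the result */
    if (code != 0 || --ctx->remainingBatches <= 0) {
        sem_post(&ctx->done);                    /* unblock the submitting thread */
        return;
    }
    /* build the next INSERT statement here, then re-arm the chain */
    taos_query_a(ctx->taos, "insert into db.tb values (now, 1)",
                 on_insert_done, ctx);
}
```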
pThreadInfo->counter++; - if (winfo->counter >= winfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { break; } } if (insert_interval) { - winfo->st = taosGetTimestampUs(); + pThreadInfo->st = taosGetTimestampUs(); } - taos_query_a(winfo->taos, buffer, callBack, winfo); + taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); free(buffer); taos_free_result(res); } static void *asyncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = winfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - winfo->st = 0; - winfo->et = 0; - winfo->lastTs = winfo->start_time; + pThreadInfo->st = 0; + pThreadInfo->et = 0; + pThreadInfo->lastTs = pThreadInfo->start_time; int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { - winfo->st = taosGetTimestampUs(); + pThreadInfo->st = taosGetTimestampUs(); } - taos_query_a(winfo->taos, "show databases", callBack, winfo); + taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); - tsem_wait(&(winfo->lock_sem)); + tsem_wait(&(pThreadInfo->lock_sem)); return NULL; } @@ -5162,13 +5338,15 @@ static void startMultiThreadInsertData(int threads, char* db_name, int limit, offset; if ((superTblInfo->childTblExists == TBL_NO_EXISTS) && - ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit != 0))) { + ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables are not exists!\n"); } if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) && (superTblInfo->childTblOffset >= 0)) { - if (superTblInfo->childTblLimit < 0) { + if ((superTblInfo->childTblLimit < 0) + || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) + > (superTblInfo->childTblCount))) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; } @@ -5410,7 +5588,7 @@ static void *readTable(void *sarg) { return NULL; } - while (taos_fetch_row(pSql) != NULL) { + while(taos_fetch_row(pSql) != NULL) { count++; } @@ -5486,7 +5664,7 @@ static void *readMetric(void *sarg) { return NULL; } int count = 0; - while (taos_fetch_row(pSql) != NULL) { + while(taos_fetch_row(pSql) != NULL) { count++; } t = getCurrentTimeUs() - t; @@ -5597,10 +5775,10 @@ static int insertTestProcess() { return 0; } -static void *superQueryProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; +static void *specifiedTableQuery(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -5609,17 +5787,17 @@ static void *superQueryProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } char sqlStr[MAX_DB_NAME_SIZE + 5]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(winfo->taos); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return NULL; @@ -5630,48 +5808,67 @@ static void 
*superQueryProcess(void *sarg) { int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; + int totalQueried = 0; + int64_t lastPrintTime = taosGetTimestampMs(); + int64_t startTs = taosGetTimestampMs(); + while(queryTimes --) { if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.specifiedQueryInfo.rate*1000) { taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); } st = taosGetTimestampUs(); - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - int64_t t1 = taosGetTimestampUs(); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); - } - selectAndGetResult(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - } else { - int64_t t1 = taosGetTimestampUs(); - int retCode = postProceSql(g_queryInfo.host, - g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[i]); - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", winfo->threadID); - return NULL; - } + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + int64_t t1 = taosGetTimestampUs(); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); } + selectAndGetResult(pThreadInfo->taos, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); + int64_t t2 = taosGetTimestampUs(); + printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + } else { + int64_t t1 = taosGetTimestampUs(); + int retCode = postProceSql(g_queryInfo.host, + g_queryInfo.port, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); + return NULL; + } + int64_t t2 = taosGetTimestampUs(); + printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", + taosGetSelfPthreadId(), (t2 - t1)/1000000.0); + } + totalQueried ++; + g_queryInfo.specifiedQueryInfo.totalQueried ++; + et = taosGetTimestampUs(); printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + + int64_t currentPrintTime = taosGetTimestampMs(); + int64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n", + pThreadInfo->threadID, + totalQueried, + totalQueried/((endTs-startTs)/1000.0)); + } + lastPrintTime = currentPrintTime; } return NULL; } -static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { +static 
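specifiedTableQuery() above now keeps per-thread and global counters and prints a QPS line at most every 30 seconds. A sketch of that reporting, assuming a millisecond clock like taosGetTimestampMs(); this formulation updates lastPrintTime only when a line is actually printed, which is one reasonable way to realize the 30-second throttle:

```c
#include <stdint.h>
#include <stdio.h>

static void report_qps(int threadID, int totalQueried,
                       int64_t startTs, int64_t *lastPrintTime, int64_t nowMs) {
    if (nowMs - *lastPrintTime > 30 * 1000) {
        printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n",
               threadID, totalQueried,
               totalQueried / ((nowMs - startTs) / 1000.0));
        *lastPrintTime = nowMs;  // arm the next 30-second window
    }
}
```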
void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { char sourceString[32] = "xxxx"; char subTblName[MAX_TB_NAME_SIZE*3]; sprintf(subTblName, "%s.%s", @@ -5693,11 +5890,11 @@ static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { //printf("3: %s\n", outSql); } -static void *subQueryProcess(void *sarg) { +static void *superTableQuery(void *sarg) { char sqlstr[1024]; - threadInfo *winfo = (threadInfo *)sarg; + threadInfo *pThreadInfo = (threadInfo *)sarg; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -5706,10 +5903,10 @@ static void *subQueryProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } @@ -5717,33 +5914,49 @@ static void *subQueryProcess(void *sarg) { int64_t et = (int64_t)g_queryInfo.superQueryInfo.rate*1000; int queryTimes = g_queryInfo.superQueryInfo.queryTimes; + int totalQueried = 0; + int64_t startTs = taosGetTimestampMs(); + int64_t lastPrintTime = taosGetTimestampMs(); while(queryTimes --) { if (g_queryInfo.superQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) { taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); } st = taosGetTimestampUs(); - for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) { + for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { memset(sqlstr,0,sizeof(sqlstr)); - replaceSubTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[j][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[j], - winfo->threadID); + pThreadInfo->threadID); } - selectAndGetResult(winfo->taos, sqlstr, tmpFile); + selectAndGetResult(pThreadInfo->taos, sqlstr, tmpFile); + + totalQueried++; + g_queryInfo.superQueryInfo.totalQueried ++; + + int64_t currentPrintTime = taosGetTimestampMs(); + int64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently completed queries: %d, QPS: %10.2f\n", + pThreadInfo->threadID, + totalQueried, + totalQueried/((endTs-startTs)/1000.0)); + } + lastPrintTime = currentPrintTime; } } et = taosGetTimestampUs(); printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", taosGetSelfPthreadId(), - winfo->start_table_from, - winfo->end_table_to, + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, (double)(et - st)/1000000.0); } @@ -5786,43 +5999,47 @@ static int queryTestProcess() { pthread_t *pids = NULL; threadInfo *infos = NULL; //==== create sub threads for query from specify table - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0 - && g_queryInfo.specifiedQueryInfo.concurrent > 0) { + int nConcurrent = 
g_queryInfo.specifiedQueryInfo.concurrent; + int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; - pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); - if (NULL == pids) { - taos_close(taos); - ERROR_EXIT("memory allocation failed\n"); - } - infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); - if (NULL == infos) { + int64_t startTs = taosGetTimestampMs(); + + if ((nSqlCount > 0) && (nConcurrent > 0)) { + + pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t)); + infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo)); + + if ((NULL == pids) || (NULL == infos)) { taos_close(taos); - free(pids); ERROR_EXIT("memory allocation failed for create threads\n"); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - threadInfo *t_info = infos + i; - t_info->threadID = i; + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + threadInfo *t_info = infos + i * nSqlCount + j; + t_info->threadID = i * nSqlCount + j; + t_info->querySeq = j; - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(taos); free(infos); free(pids); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return -1; + } } + + t_info->taos = NULL;// TODO: workaround to use separate taos connection; + + pthread_create(pids + i * nSqlCount + j, NULL, specifiedTableQuery, + t_info); } - - t_info->taos = NULL;// TODO: workaround to use separate taos connection; - - pthread_create(pids + i, NULL, superQueryProcess, t_info); } } else { g_queryInfo.specifiedQueryInfo.concurrent = 0; @@ -5836,18 +6053,12 @@ static int queryTestProcess() { if ((g_queryInfo.superQueryInfo.sqlCount > 0) && (g_queryInfo.superQueryInfo.threadCnt > 0)) { pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - if (NULL == pidsOfSub) { - free(infos); - free(pids); - - ERROR_EXIT("memory allocation failed for create threads\n"); - } - infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); - if (NULL == infosOfSub) { - free(pidsOfSub); + + if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { free(infos); free(pids); + ERROR_EXIT("memory allocation failed for create threads\n"); } @@ -5875,7 +6086,7 @@ static int queryTestProcess() { t_info->end_table_to = i < b ? 
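The rewritten queryTestProcess() above fans out one thread per (connection, SQL) pair instead of one per connection, flattening the matrix into pids/infos with index i * nSqlCount + j. A self-contained sketch of that layout, with illustrative types:

```c
#include <pthread.h>
#include <stdlib.h>

typedef struct { int threadID; int querySeq; } worker_t;

static void *run_query(void *arg) { (void)arg; return NULL; }  // stand-in worker

static int spawn_query_matrix(int nConcurrent, int nSqlCount) {
    pthread_t *pids  = malloc(nConcurrent * nSqlCount * sizeof(pthread_t));
    worker_t  *infos = malloc(nConcurrent * nSqlCount * sizeof(worker_t));
    if (pids == NULL || infos == NULL) { free(pids); free(infos); return -1; }

    for (int i = 0; i < nConcurrent; i++) {
        for (int j = 0; j < nSqlCount; j++) {
            worker_t *w = infos + i * nSqlCount + j;
            w->threadID = i * nSqlCount + j;  // unique across the whole matrix
            w->querySeq = j;                  // which SQL statement this worker replays
            pthread_create(pids + i * nSqlCount + j, NULL, run_query, w);
        }
    }
    for (int k = 0; k < nConcurrent * nSqlCount; k++)
        pthread_join(pids[k], NULL);          // same double loop, flattened

    free(pids); free(infos);
    return 0;
}
```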
startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); + pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info); } g_queryInfo.superQueryInfo.threadCnt = threads; @@ -5883,8 +6094,12 @@ static int queryTestProcess() { g_queryInfo.superQueryInfo.threadCnt = 0; } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { - pthread_join(pids[i], NULL); + if ((nSqlCount > 0) && (nConcurrent > 0)) { + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + pthread_join(pids[i * nSqlCount + j], NULL); + } + } } tmfree((char*)pids); @@ -5898,6 +6113,14 @@ static int queryTestProcess() { tmfree((char*)infosOfSub); // taos_close(taos);// TODO: workaround to use separate taos connection; + int64_t endTs = taosGetTimestampMs(); + + int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + + g_queryInfo.superQueryInfo.totalQueried; + + printf("==== completed total queries: %d, the QPS of all threads: %10.2f====\n", + totalQueried, + totalQueried/((endTs-startTs)/1000.0)); return 0; } @@ -5915,7 +6138,7 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { TAOS_SUB* tsub = NULL; - if (g_queryInfo.specifiedQueryInfo.subscribeMode) { + if (g_queryInfo.specifiedQueryInfo.mode) { tsub = taos_subscribe(taos, g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, @@ -5934,12 +6157,12 @@ static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF return tsub; } -static void *subSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; +static void *superSubscribe(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; char subSqlstr[1024]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -5948,17 +6171,17 @@ static void *subSubscribeProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } char sqlStr[MAX_TB_NAME_SIZE*2]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(winfo->taos); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return NULL; @@ -5969,7 +6192,7 @@ static void *subSubscribeProcess(void *sarg) { do { //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} //st = taosGetTimestampMs(); @@ -5977,27 +6200,27 @@ static void *subSubscribeProcess(void *sarg) { for (int 
i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { sprintf(topic, "taosdemo-subscribe-%d", i); memset(subSqlstr,0,sizeof(subSqlstr)); - replaceSubTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[i], winfo->threadID); + g_queryInfo.superQueryInfo.result[i], pThreadInfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); + tsub[i] = subscribeImpl(pThreadInfo->taos, subSqlstr, topic, tmpFile); if (NULL == tsub[i]) { - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } } //et = taosGetTimestampMs(); //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); + } while(0); // start loop to consume result TAOS_RES* res = NULL; - while (1) { + while(1) { for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.superQueryInfo.subscribeMode) { + if (1 == g_queryInfo.superQueryInfo.mode) { continue; } @@ -6007,7 +6230,7 @@ static void *subSubscribeProcess(void *sarg) { if (g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], - winfo->threadID); + pThreadInfo->threadID); } getResult(res, tmpFile); } @@ -6019,15 +6242,15 @@ static void *subSubscribeProcess(void *sarg) { taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress); } - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } -static void *superSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; +static void *specifiedSubscribe(void *sarg) { + threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - if (winfo->taos == NULL) { + if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, g_queryInfo.user, @@ -6036,18 +6259,18 @@ static void *superSubscribeProcess(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - winfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { - winfo->taos = taos; + pThreadInfo->taos = taos; } } char sqlStr[MAX_TB_NAME_SIZE*2]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(winfo->taos); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); return NULL; } @@ -6056,7 +6279,7 @@ static void *superSubscribeProcess(void *sarg) { do { //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); //} //st = taosGetTimestampMs(); @@ -6066,23 +6289,24 @@ static void *superSubscribeProcess(void *sarg) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if 
(g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); + g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); } - tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); + tsub[i] = subscribeImpl(pThreadInfo->taos, + g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } } //et = taosGetTimestampMs(); //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); + } while(0); // start loop to consume result TAOS_RES* res = NULL; - while (1) { + while(1) { for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.specifiedQueryInfo.subscribeMode) { + if (SYNC_QUERY_MODE == g_queryInfo.specifiedQueryInfo.mode) { continue; } @@ -6091,7 +6315,7 @@ static void *superSubscribeProcess(void *sarg) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); + g_queryInfo.specifiedQueryInfo.result[i], pThreadInfo->threadID); } getResult(res, tmpFile); } @@ -6100,10 +6324,11 @@ static void *superSubscribeProcess(void *sarg) { taos_free_result(res); for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - taos_unsubscribe(tsub[i], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + taos_unsubscribe(tsub[i], + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } - taos_close(winfo->taos); + taos_close(pThreadInfo->taos); return NULL; } @@ -6161,7 +6386,7 @@ static int subscribeTestProcess() { threadInfo *t_info = infos + i; t_info->threadID = i; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pids + i, NULL, superSubscribeProcess, t_info); + pthread_create(pids + i, NULL, specifiedSubscribe, t_info); } //==== create sub threads for query from sub table @@ -6204,7 +6429,7 @@ static int subscribeTestProcess() { t_info->end_table_to = i < b ? 
startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; t_info->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); + pthread_create(pidsOfSub + i, NULL, superSubscribe, t_info); } g_queryInfo.superQueryInfo.threadCnt = threads; @@ -6303,7 +6528,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - g_Dbs.queryMode = g_args.mode; + g_Dbs.queryMode = g_args.query_mode; g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; @@ -6405,7 +6630,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) double t = getCurrentTimeUs(); - while ((read_len = tgetline(&line, &line_len, fp)) != -1) { + while((read_len = tgetline(&line, &line_len, fp)) != -1) { if (read_len >= MAX_SQL_SIZE) continue; line[--read_len] = '\0'; @@ -6468,52 +6693,50 @@ static void testMetaFile() { } static void queryResult() { - // select - if (false == g_Dbs.insert_only) { - // query data + // query data - pthread_t read_id; - threadInfo *rInfo = malloc(sizeof(threadInfo)); - rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 - rInfo->start_table_from = 0; + pthread_t read_id; + threadInfo *rInfo = malloc(sizeof(threadInfo)); + assert(rInfo); + rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + rInfo->start_table_from = 0; - //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; - if (g_args.use_metric) { - rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(rInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); - } else { - rInfo->ntables = g_args.num_of_tables; - rInfo->end_table_to = g_args.num_of_tables -1; - tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); - } + //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + if (g_args.use_metric) { + rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; + rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; + rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + tstrncpy(rInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); + } else { + rInfo->ntables = g_args.num_of_tables; + rInfo->end_table_to = g_args.num_of_tables -1; + tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); + } - rInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - if (rInfo->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - free(rInfo); - exit(-1); - } + rInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + if (rInfo->taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + free(rInfo); + exit(-1); + } - tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); + tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, readTable, rInfo); - } else { - pthread_create(&read_id, NULL, readMetric, rInfo); - } - pthread_join(read_id, NULL); - taos_close(rInfo->taos); - free(rInfo); - } + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, rInfo); + } else { + pthread_create(&read_id, NULL, readMetric, 
rInfo); + } + pthread_join(read_id, NULL); + taos_close(rInfo->taos); + free(rInfo); } static void testCmdLine() { @@ -6531,9 +6754,7 @@ static void testCmdLine() { g_args.test_mode = INSERT_TEST; insertTestProcess(); - if (g_Dbs.insert_only) - return; - else + if (false == g_Dbs.insert_only) queryResult(); } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 9f176904fe..092374cef1 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -484,24 +484,33 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-E") == 0) { if (argv[i+1]) { - char *tmp = argv[++i]; - int64_t tmpEpoch; - if (strchr(tmp, ':') && strchr(tmp, '-')) { - if (TSDB_CODE_SUCCESS != taosParseTime( - tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { - fprintf(stderr, "Input end time error!\n"); - return; - } - } else { - tmpEpoch = atoll(tmp); - } + char *tmp = strdup(argv[++i]); - sprintf(argv[i], "%"PRId64"", tmpEpoch); - debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", - __func__, __LINE__, tmp, i, argv[i]); + if (tmp) { + int64_t tmpEpoch; + if (strchr(tmp, ':') && strchr(tmp, '-')) { + if (TSDB_CODE_SUCCESS != taosParseTime( + tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { + fprintf(stderr, "Input end time error!\n"); + free(tmp); + return; + } + } else { + tmpEpoch = atoll(tmp); + } + + sprintf(argv[i], "%"PRId64"", tmpEpoch); + debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", + __func__, __LINE__, tmp, i, argv[i]); + + free(tmp); + } else { + errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__); + exit(-1); + } } else { - fprintf(stderr, "Input end time error!\n"); - return; + errorPrint("%s() LN%d, -E need a valid value following!\n", __func__, __LINE__); + exit(-1); } } else if (strcmp(argv[i], "-g") == 0) { arguments->debug_print = true; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 80473ba5ae..85d9f94b88 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -437,14 +437,14 @@ static int32_t mnodeCheckClusterCfgPara(const SClusterCfg *clusterCfg) { return TAOS_DN_OFF_TIME_ZONE_NOT_MATCH; } - if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) { - mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale); - return TAOS_DN_OFF_LOCALE_NOT_MATCH; - } - if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) { - mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset); - return TAOS_DN_OFF_CHARSET_NOT_MATCH; - } + // if (0 != strncasecmp(clusterCfg->locale, tsLocale, strlen(tsLocale))) { + // mError("\"locale\"[%s - %s] cfg parameters inconsistent", clusterCfg->locale, tsLocale); + // return TAOS_DN_OFF_LOCALE_NOT_MATCH; + // } + // if (0 != strncasecmp(clusterCfg->charset, tsCharset, strlen(tsCharset))) { + // mError("\"charset\"[%s - %s] cfg parameters inconsistent.", clusterCfg->charset, tsCharset); + // return TAOS_DN_OFF_CHARSET_NOT_MATCH; + // } if (clusterCfg->enableBalance != tsEnableBalance) { mError("\"balance\"[%d - %d] cfg parameters inconsistent", clusterCfg->enableBalance, tsEnableBalance); diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 381cb11952..505d3c519c 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -315,6 +315,10 @@ void sdbUpdateAsync() { taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr); } +static int 
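In the taosdump hunk above, -E parsing now works on a strdup() of argv[i]: sprintf() rewrites argv[i] with the parsed epoch, and the copy keeps the original text alive for the debug print (and keeps the parser off memory that is about to be overwritten). A reduced sketch of the pattern; the datetime branch, which the real code routes through taosParseTime(), is elided here:

```c
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_end_time(char *arg /* argv[i], rewritten in place */) {
    char *tmp = strdup(arg);             // copy first: arg is overwritten below
    if (tmp == NULL) return -1;          // mirror the patch: treat OOM as fatal

    int64_t epoch = atoll(tmp);          // raw-epoch branch only in this sketch
    sprintf(arg, "%" PRId64 "", epoch);

    // tmp still holds the original text even after argv[i] was rewritten
    printf("parsed '%s' -> argv rewritten to '%s'\n", tmp, arg);
    free(tmp);
    return 0;
}
```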
node_cmp(const void *l, const void *r) { + return ((SNodeInfo *)l)->nodeId - ((SNodeInfo *)r)->nodeId; +} + int32_t sdbUpdateSync(void *pMnodes) { SMInfos *pMinfos = pMnodes; if (!mnodeIsRunning()) { @@ -382,6 +386,8 @@ int32_t sdbUpdateSync(void *pMnodes) { return TSDB_CODE_SUCCESS; } + qsort(syncCfg.nodeInfo, syncCfg.replica, sizeof(syncCfg.nodeInfo[0]), node_cmp); + sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica); for (int32_t i = 0; i < syncCfg.replica; ++i) { sdbInfo("vgId:1, mnode:%d, %s:%d", syncCfg.nodeInfo[i].nodeId, syncCfg.nodeInfo[i].nodeFqdn, @@ -1019,7 +1025,7 @@ static int32_t sdbWriteToQueue(SSdbRow *pRow, int32_t qtype) { int32_t queued = atomic_add_fetch_32(&tsSdbMgmt.queuedMsg, 1); if (queued > MAX_QUEUED_MSG_NUM) { - sdbDebug("vgId:1, too many msg:%d in sdb queue, flow control", queued); + sdbInfo("vgId:1, too many msg:%d in sdb queue, flow control", queued); taosMsleep(1); } @@ -1131,4 +1137,4 @@ static void *sdbWorkerFp(void *pWorker) { int32_t sdbGetReplicaNum() { return tsSdbMgmt.cfg.replica; -} \ No newline at end of file +} diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 39eca8819d..2a8e941fcb 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1037,6 +1037,19 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg)); + int16_t numOfTags = htons(pCreate->numOfTags); + if (numOfTags > TSDB_MAX_TAGS) { + mError("msg:%p, app:%p table:%s, failed to create, too many tags", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); + return TSDB_CODE_MND_TOO_MANY_TAGS; + } + + int16_t numOfColumns = htons(pCreate->numOfColumns); + int32_t numOfCols = numOfColumns + numOfTags; + if (numOfCols > TSDB_MAX_COLUMNS) { + mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); + return TSDB_CODE_MND_TOO_MANY_COLUMNS; + } + SSTableObj * pStable = calloc(1, sizeof(SSTableObj)); if (pStable == NULL) { mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); @@ -1050,10 +1063,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); pStable->sversion = 0; pStable->tversion = 0; - pStable->numOfColumns = htons(pCreate->numOfColumns); - pStable->numOfTags = htons(pCreate->numOfTags); + pStable->numOfColumns = numOfColumns; + pStable->numOfTags = numOfTags; - int32_t numOfCols = pStable->numOfColumns + pStable->numOfTags; int32_t schemaSize = numOfCols * sizeof(SSchema); pStable->schema = (SSchema *)calloc(1, schemaSize); if (pStable->schema == NULL) { @@ -1064,11 +1076,6 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema)); - if (pStable->numOfColumns > TSDB_MAX_COLUMNS || pStable->numOfTags > TSDB_MAX_TAGS) { - mError("msg:%p, app:%p table:%s, failed to create, too many columns", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); - return TSDB_CODE_MND_INVALID_TABLE_NAME; - } - pStable->nextColId = 0; for (int32_t col = 0; col < numOfCols; col++) { @@ -1340,6 +1347,11 @@ static int32_t mnodeAddSuperTableColumn(SMnodeMsg *pMsg, SSchema schema[], int32 return TSDB_CODE_MND_APP_ERROR; } + if (pStable->numOfColumns + ncols + pStable->numOfTags > TSDB_MAX_COLUMNS) { + mError("msg:%p, app:%p stable:%s, 
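mnodeProcessCreateSuperTableMsg() above now byte-swaps numOfTags/numOfColumns once, rejects oversized requests before the SSTableObj and schema allocations, and returns the dedicated TSDB_CODE_MND_TOO_MANY_TAGS/COLUMNS codes instead of the old misleading invalid-table-name error. The shape of that check, with illustrative limit values (the real ones come from TDengine's headers):

```c
#include <arpa/inet.h>
#include <stdint.h>

#define MAX_TAGS    128   // illustrative; TDengine uses TSDB_MAX_TAGS
#define MAX_COLUMNS 1024  // illustrative; TDengine uses TSDB_MAX_COLUMNS

// Validate before any allocation; return 0 when the schema sizes are sane.
static int validate_create_stable(uint16_t numOfTagsNet, uint16_t numOfColumnsNet) {
    int16_t numOfTags    = (int16_t)ntohs(numOfTagsNet);
    int16_t numOfColumns = (int16_t)ntohs(numOfColumnsNet);

    if (numOfTags > MAX_TAGS) return -1;                    // too many tags
    if (numOfColumns + numOfTags > MAX_COLUMNS) return -2;  // too many columns
    return 0;  // safe to calloc (numOfColumns + numOfTags) schema entries
}
```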
add column, too many columns", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId); + return TSDB_CODE_MND_TOO_MANY_COLUMNS; + } + for (int32_t i = 0; i < ncols; i++) { if (mnodeFindSuperTableColumnIndex(pStable, schema[i].name) > 0) { mError("msg:%p, app:%p stable:%s, add column, column:%s already exist", pMsg, pMsg->rpcMsg.ahandle, diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index 7eb3122d83..7222c8d1a0 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -994,6 +994,7 @@ void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) { mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes, pVgroup->dbName); for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + if (pVgroup->vnodeGid[i].role != TAOS_SYNC_ROLE_SLAVE) continue; SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp); mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i, pVgroup->vnodeGid[i].pDnode->dnodeEp); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index a02df1a10e..ef1b7a7602 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -191,6 +191,8 @@ typedef struct SQuery { bool stabledev; // super table stddev query int32_t interBufSize; // intermediate buffer sizse + int32_t havingNum; // having expr number + SOrderVal order; int16_t numOfCols; int16_t numOfTags; @@ -287,6 +289,7 @@ enum OPERATOR_TYPE_E { OP_Fill = 13, OP_MultiTableAggregate = 14, OP_MultiTableTimeInterval = 15, + OP_Having = 16, }; typedef struct SOperatorInfo { @@ -405,6 +408,11 @@ typedef struct SOffsetOperatorInfo { int64_t offset; } SOffsetOperatorInfo; +typedef struct SHavingOperatorInfo { + SArray* fp; +} SHavingOperatorInfo; + + typedef struct SFillOperatorInfo { SFillInfo *pFillInfo; SSDataBlock *pRes; diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 313e656cbd..a360b0218a 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -98,6 +98,7 @@ typedef struct SQuerySqlNode { SLimitVal limit; // limit offset [optional] SLimitVal slimit; // group limit offset [optional] SStrToken sqlstr; // sql string in select clause + struct tSqlExpr *pHaving; // having clause [optional] } SQuerySqlNode; typedef struct STableNamePair { @@ -262,6 +263,11 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); +tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType); + +int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right); + +tSqlExpr *tSqlExprClone(tSqlExpr *pSrc); SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias); SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode); void *destroyFromInfo(SFromInfo* pFromInfo); @@ -281,7 +287,7 @@ void tSqlExprListDestroy(SArray *pList); SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, - SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit); + SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving); SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type); diff --git 
a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 87f2458658..b9d52da39b 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -52,11 +52,20 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int return pResultRowInfo->pResult[slot]; } -static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) { - assert(rowOffset >= 0 && pQuery != NULL); +static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset, + int16_t offset, int32_t size) { + assert(rowOffset >= 0 && pRuntimeEnv != NULL); + + SQuery* pQuery = pRuntimeEnv->pQuery; + int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize; int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery); - return ((char *)page->data) + rowOffset + offset * numOfRows; + + // buffer overflow check + int64_t bufEnd = (rowOffset + offset * numOfRows + size); + assert(page->num <= pageSize && bufEnd <= page->num); + + return ((char*)page->data) + rowOffset + offset * numOfRows; } bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type); diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index d39c938854..b31597bdda 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -457,7 +457,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { %type select {SQuerySqlNode*} %destructor select {destroyQuerySqlNode($$);} select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { - A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G); + A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N); } select(A) ::= LP select(B) RP. {A = B;} @@ -475,7 +475,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); } // select client_version() // select server_state() select(A) ::= SELECT(T) selcollist(W). 
{ - A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } // selcollist is a list of expressions that are to become the return diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index e32ef2479f..29f15f7acc 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -2773,14 +2773,16 @@ static void percentile_function(SQLFunctionCtx *pCtx) { SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { + pInfo->stage += 1; + // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + + return; } else { pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); } - - pInfo->stage += 1; } // the first stage, only acquire the min/max value @@ -2859,14 +2861,16 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { + pInfo->stage += 1; + // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + + return; } else { pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); } - - pInfo->stage += 1; } if (pInfo->stage == 0) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e11459dede..65b19be0cd 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -98,6 +98,30 @@ int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } + +int64_t genQueryId(void) { + int64_t uid = 0; + int64_t did = tsDnodeId; + + uid = did << 54; + + int64_t pid = ((int64_t)taosGetPId()) & 0x3FF; + + uid |= pid << 44; + + int64_t ts = taosGetTimestampMs() & 0x1FFFFFFFF; + + uid |= ts << 11; + + int64_t sid = atomic_add_fetch_64(&queryHandleId, 1) & 0x7FF; + + uid |= sid; + + return uid; +} + + + static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); if (pQuery->interval.intervalUnit != 'n' && pQuery->interval.intervalUnit != 'y') { @@ -175,6 +199,7 @@ static SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntime static SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); static SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput); static SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); +static SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); @@ -1863,6 +1888,10 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } + if (pQuery->havingNum > 0) { + pRuntimeEnv->proot = createHavingOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQuery->pExpr1, pQuery->numOfOutput); + } + if (pQuery->limit.offset > 0) { pRuntimeEnv->proot = createOffsetOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot); } @@ -1893,6 +1922,17 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) { 
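genQueryId() above packs four fields into one 64-bit id: 10 bits of dnode id, 10 bits of pid, 33 bits of the millisecond clock, and an 11-bit per-process serial, so ids stay unique across dnodes, restarts, and concurrent queries. A sketch of the same packing with the sources of the four fields made explicit:

```c
#include <stdint.h>

// Layout: [63..54] dnode id | [53..44] pid | [43..11] ms clock | [10..0] serial
static int64_t pack_query_id(int64_t dnodeId, int64_t pid, int64_t ms, int64_t serial) {
    int64_t uid = dnodeId << 54;        // 10 bits: which dnode issued the query
    uid |= (pid & 0x3FF) << 44;         // 10 bits: process id, survives restarts
    uid |= (ms & 0x1FFFFFFFF) << 11;    // 33 bits of wall clock (~272-year wrap)
    uid |= serial & 0x7FF;              // 11 bits: atomic per-process counter
    return uid;
}
```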
assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL); } +static void destroyTsComp(SQueryRuntimeEnv *pRuntimeEnv, SQuery *pQuery) { + if (isTsCompQuery(pQuery)) { + SColumnInfoData* pColInfoData = taosArrayGet(pRuntimeEnv->outputBuf->pDataBlock, 0); + FILE *f = *(FILE **)pColInfoData->pData; // TODO refactor + if (f) { + fclose(f); + *(FILE **)pColInfoData->pData = NULL; + } + } +} + static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo; @@ -1913,6 +1953,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { destroyResultBuf(pRuntimeEnv->pResultBuf); doFreeQueryHandle(pRuntimeEnv); + destroyTsComp(pRuntimeEnv, pQuery); + pRuntimeEnv->pTsBuf = tsBufDestroy(pRuntimeEnv->pTsBuf); tfree(pRuntimeEnv->keyBuf); @@ -2160,6 +2202,40 @@ static bool onlyFirstQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSD static bool onlyLastQuery(SQuery *pQuery) { return onlyOneQueryType(pQuery, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } +static int32_t updateBlockLoadStatus(SQuery *pQuery, int32_t status) { + bool hasFirstLastFunc = false; + bool hasOtherFunc = false; + + if (status == BLK_DATA_ALL_NEEDED || status == BLK_DATA_DISCARD) { + return status; + } + + for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { + int32_t functionId = pQuery->pExpr1[i].base.functionId; + + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG || + functionId == TSDB_FUNC_TAG_DUMMY) { + continue; + } + + if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_LAST_DST) { + hasFirstLastFunc = true; + } else { + hasOtherFunc = true; + } + } + + if (hasFirstLastFunc && status == BLK_DATA_NO_NEEDED) { + if(!hasOtherFunc) { + return BLK_DATA_DISCARD; + } else{ + return BLK_DATA_ALL_NEEDED; + } + } + + return status; +} + static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { SQuery* pQuery = &pQInfo->query; size_t t = taosArrayGetSize(pQuery->tableGroupInfo.pGroupList); @@ -2606,11 +2682,12 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa pBlock->pDataBlock = NULL; pBlock->pBlockStatis = NULL; + SQInfo* pQInfo = pRuntimeEnv->qinfo; SQuery* pQuery = pRuntimeEnv->pQuery; + int64_t groupId = pQuery->current->groupIndex; bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); - SQInfo* pQInfo = pRuntimeEnv->qinfo; SQueryCostInfo* pCost = &pQInfo->summary; if (pRuntimeEnv->pTsBuf != NULL) { @@ -2667,7 +2744,9 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa } SDataBlockInfo* pBlockInfo = &pBlock->info; - if ((*status) == BLK_DATA_NO_NEEDED) { + *status = updateBlockLoadStatus(pRuntimeEnv->pQuery, *status); + + if ((*status) == BLK_DATA_NO_NEEDED || (*status) == BLK_DATA_DISCARD) { qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pCost->discardBlocks += 1; @@ -3334,7 +3413,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe continue; } - pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, bufPage, pResult->offset, offset); + pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, bufPage, pResult->offset, offset, pCtx[i].outputBytes); offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; @@ -3396,7 +3475,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF 
int16_t offset = 0; for (int32_t i = 0; i < numOfCols; ++i) { - pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv->pQuery, page, pResult->offset, offset); + pCtx[i].pOutput = getPosInResultPage(pRuntimeEnv, page, pResult->offset, offset, pCtx[i].outputBytes); offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; @@ -3604,8 +3683,6 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { */ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* pGroupResInfo, int32_t orderType, SSDataBlock* pBlock) { - SQuery *pQuery = pRuntimeEnv->pQuery; - int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); int32_t numOfResult = pBlock->info.rows; // there are already exists result rows @@ -3640,7 +3717,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* int32_t bytes = pColInfoData->info.bytes; char *out = pColInfoData->pData + numOfResult * bytes; - char *in = getPosInResultPage(pQuery, page, pRow->offset, offset); + char *in = getPosInResultPage(pRuntimeEnv, page, pRow->offset, offset, bytes); memcpy(out, in, bytes * numOfRowsToCopy); offset += bytes; @@ -4110,7 +4187,7 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in return pFillCol; } -int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *tsdb, int32_t vgId, bool isSTableQuery) { +int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery *pQuery = pQInfo->runtimeEnv.pQuery; @@ -4121,8 +4198,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pQuery->timeWindowInterpo = timeWindowInterpoRequired(pQuery); pQuery->stabledev = isStabledev(pQuery); - pRuntimeEnv->prevResult = prevResult; - setScanLimitationByResultBuffer(pQuery); int32_t code = setupQueryHandle(tsdb, pQInfo, isSTableQuery); @@ -4764,6 +4839,111 @@ static SSDataBlock* doOffset(void* param) { } } + +bool doFilterData(SColumnInfoData* p, int32_t rid, SColumnFilterElem *filterElem, __filter_func_t fp) { + char* input = p->pData + p->info.bytes * rid; + bool isnull = isNull(input, p->info.type); + if (isnull) { + return (fp == isNullOperator) ? 
true : false; + } else { + if (fp == notNullOperator) { + return true; + } else if (fp == isNullOperator) { + return false; + } + } + + if (fp(filterElem, input, input, p->info.type)) { + return true; + } + + return false; +} + + +void doHavingImpl(SOperatorInfo *pOperator, SSDataBlock *pBlock) { + SHavingOperatorInfo* pInfo = pOperator->info; + int32_t f = 0; + int32_t allQualified = 1; + int32_t exprQualified = 0; + + for (int32_t r = 0; r < pBlock->info.rows; ++r) { + allQualified = 1; + + for (int32_t i = 0; i < pOperator->numOfOutput; ++i) { + SExprInfo* pExprInfo = &(pOperator->pExpr[i]); + if (pExprInfo->pFilter == NULL) { + continue; + } + + SArray* es = taosArrayGetP(pInfo->fp, i); + assert(es); + + size_t fpNum = taosArrayGetSize(es); + + exprQualified = 0; + for (int32_t m = 0; m < fpNum; ++m) { + __filter_func_t fp = taosArrayGetP(es, m); + + assert(fp); + + //SColIndex* colIdx = &pExprInfo->base.colInfo; + SColumnInfoData* p = taosArrayGet(pBlock->pDataBlock, i); + + SColumnFilterElem filterElem = {.filterInfo = pExprInfo->pFilter[m]}; + + if (doFilterData(p, r, &filterElem, fp)) { + exprQualified = 1; + break; + } + } + + if (exprQualified == 0) { + allQualified = 0; + break; + } + } + + if (allQualified == 0) { + continue; + } + + for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) { + SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + + int16_t bytes = pColInfoData->info.bytes; + memmove(pColInfoData->pData + f * bytes, pColInfoData->pData + bytes * r, bytes); + } + + ++f; + } + + pBlock->info.rows = f; +} + +static SSDataBlock* doHaving(void* param) { + SOperatorInfo *pOperator = (SOperatorInfo *)param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; + + while (1) { + SSDataBlock *pBlock = pOperator->upstream->exec(pOperator->upstream); + if (pBlock == NULL) { + setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + return NULL; + } + + doHavingImpl(pOperator, pBlock); + + return pBlock; + } +} + + static SSDataBlock* doIntervalAgg(void* param) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { @@ -5114,6 +5294,13 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } +static void destroyHavingOperatorInfo(void* param, int32_t numOfOutput) { + SHavingOperatorInfo* pInfo = (SHavingOperatorInfo*) param; + if (pInfo->fp) { + taosArrayDestroy(pInfo->fp); + } +} + SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo)); @@ -5170,6 +5357,83 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI return pOperator; } + +int32_t initFilterFp(SExprInfo* pExpr, int32_t numOfOutput, SArray** fps) { + __filter_func_t fp = NULL; + + *fps = taosArrayInit(numOfOutput, sizeof(SArray*)); + if (*fps == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExprInfo = &(pExpr[i]); + SColIndex* colIdx = &pExprInfo->base.colInfo; + + if (pExprInfo->pFilter == NULL || !TSDB_COL_IS_NORMAL_COL(colIdx->flag)) { + taosArrayPush(*fps, &fp); + + continue; + } + + int32_t filterNum = pExprInfo->base.filterNum; + SColumnFilterInfo *filterInfo = pExprInfo->pFilter; + + SArray* es = taosArrayInit(filterNum, sizeof(__filter_func_t)); + + for 
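doHavingImpl() above filters a result block in place: rows that satisfy every HAVING predicate are memmove'd to the front of each column and the block's row count is shrunk, so downstream operators see a dense block with no extra allocation. The compaction pattern in isolation, with a stand-in predicate and column layout:

```c
#include <stdbool.h>
#include <string.h>

typedef struct { char *data; int bytes; } col_t;

// Returns the new row count after keeping only rows accepted by predicate().
static int compact_rows(col_t *cols, int numOfCols, int rows,
                        bool (*predicate)(col_t *cols, int row)) {
    int kept = 0;
    for (int r = 0; r < rows; ++r) {
        if (!predicate(cols, r)) continue;       // row rejected by HAVING
        for (int c = 0; c < numOfCols; ++c) {    // move survivor into slot `kept`
            memmove(cols[c].data + (size_t)kept * cols[c].bytes,
                    cols[c].data + (size_t)r    * cols[c].bytes,
                    cols[c].bytes);
        }
        ++kept;
    }
    return kept;
}
```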
(int32_t j = 0; j < filterNum; ++j) { + int32_t lower = filterInfo->lowerRelOptr; + int32_t upper = filterInfo->upperRelOptr; + if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { + qError("invalid rel optr"); + taosArrayDestroy(es); + return TSDB_CODE_QRY_APP_ERROR; + } + + __filter_func_t ffp = getFilterOperator(lower, upper); + if (ffp == NULL) { + qError("invalid filter info"); + taosArrayDestroy(es); + return TSDB_CODE_QRY_APP_ERROR; + } + + taosArrayPush(es, &ffp); + + filterInfo += 1; + } + + taosArrayPush(*fps, &es); + } + + return TSDB_CODE_SUCCESS; +} + +SOperatorInfo* createHavingOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SHavingOperatorInfo* pInfo = calloc(1, sizeof(SHavingOperatorInfo)); + + initFilterFp(pExpr, numOfOutput, &pInfo->fp); + + assert(pInfo->fp); + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + + pOperator->name = "HavingOperator"; + pOperator->operatorType = OP_Having; + pOperator->blockingOptr = false; + pOperator->status = OP_IN_EXECUTING; + pOperator->numOfOutput = numOfOutput; + pOperator->pExpr = pExpr; + pOperator->upstream = upstream; + pOperator->exec = doHaving; + pOperator->info = pInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + pOperator->cleanup = destroyHavingOperatorInfo; + + return pOperator; +} + + + SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream) { SLimitOperatorInfo* pInfo = calloc(1, sizeof(SLimitOperatorInfo)); pInfo->limit = pRuntimeEnv->pQuery->limit.limit; @@ -5744,9 +6008,35 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pExprMsg->functionId = htons(pExprMsg->functionId); pExprMsg->numOfParams = htons(pExprMsg->numOfParams); pExprMsg->resColId = htons(pExprMsg->resColId); + pExprMsg->filterNum = htonl(pExprMsg->filterNum); pMsg += sizeof(SSqlFuncMsg); + SColumnFilterInfo* pExprFilterInfo = pExprMsg->filterInfo; + + pMsg += sizeof(SColumnFilterInfo) * pExprMsg->filterNum; + + for (int32_t f = 0; f < pExprMsg->filterNum; ++f) { + SColumnFilterInfo *pFilterMsg = (SColumnFilterInfo *)pExprFilterInfo; + + pFilterMsg->filterstr = htons(pFilterMsg->filterstr); + + if (pFilterMsg->filterstr) { + pFilterMsg->len = htobe64(pFilterMsg->len); + + pFilterMsg->pz = (int64_t)pMsg; + pMsg += (pFilterMsg->len + 1); + } else { + pFilterMsg->lowerBndi = htobe64(pFilterMsg->lowerBndi); + pFilterMsg->upperBndi = htobe64(pFilterMsg->upperBndi); + } + + pFilterMsg->lowerRelOptr = htons(pFilterMsg->lowerRelOptr); + pFilterMsg->upperRelOptr = htons(pFilterMsg->upperRelOptr); + + pExprFilterInfo++; + } + for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { pExprMsg->arg[j].argType = htons(pExprMsg->arg[j].argType); pExprMsg->arg[j].argBytes = htons(pExprMsg->arg[j].argBytes); @@ -5955,6 +6245,42 @@ _cleanup: return code; } +int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { + if (filterNum <= 0) { + return TSDB_CODE_SUCCESS; + } + + *dst = calloc(filterNum, sizeof(*src)); + if (*dst == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy(*dst, src, sizeof(*src) * filterNum); + + for (int32_t i = 0; i < filterNum; i++) { + if ((*dst)[i].filterstr && dst[i]->len > 0) { + void *pz = calloc(1, (size_t)(*dst)[i].len + 1); + + if (pz == NULL) { + if (i == 0) { + free(*dst); + } else { + freeColumnFilterInfo(*dst, i); + } + + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + memcpy(pz, (void *)src->pz, (size_t)src->len + 1); + + (*dst)[i].pz = 
(int64_t)pz; + } + } + + return TSDB_CODE_SUCCESS; +} + + static int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) { qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); @@ -6187,6 +6513,13 @@ int32_t createQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutpu type = s->type; bytes = s->bytes; } + + if (pExprs[i].base.filterNum > 0) { + int32_t ret = cloneExprFilterInfo(&pExprs[i].pFilter, pExprMsg[i]->filterInfo, pExprMsg[i]->filterNum); + if (ret) { + return ret; + } + } } int32_t param = (int32_t)pExprs[i].base.arg[0].argValue.i64; @@ -6432,6 +6765,8 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr goto _cleanup_qinfo; } + pQInfo->qId = *qId; + // to make sure third party won't overwrite this structure pQInfo->signature = pQInfo; SQuery* pQuery = &pQInfo->query; @@ -6482,6 +6817,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr if (TSDB_COL_IS_TAG(pExprs[col].base.colInfo.flag)) { pQuery->tagLen += pExprs[col].bytes; } + + if (pExprs[col].pFilter) { + ++pQuery->havingNum; + } } if (pSecExprs != NULL) { @@ -6579,8 +6918,6 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr // todo refactor pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX); - pQInfo->qId = atomic_add_fetch_64(&queryHandleId, 1); - *qId = pQInfo->qId; qDebug("qmsg:%p QInfo:%" PRIu64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo); return pQInfo; @@ -6599,6 +6936,10 @@ _cleanup_qinfo: tExprTreeDestroy(pExprInfo->pExpr, NULL); pExprInfo->pExpr = NULL; } + + if (pExprInfo->pFilter) { + freeColumnFilterInfo(pExprInfo->pFilter, pExprInfo->base.filterNum); + } } tfree(pExprs); @@ -6644,6 +6985,8 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p SArray* prevResult = NULL; if (pQueryMsg->prevResultLen > 0) { prevResult = interResFromBinary(param->prevResult, pQueryMsg->prevResultLen); + + pRuntimeEnv->prevResult = prevResult; } pQuery->precision = tsdbGetCfg(tsdb)->precision; @@ -6665,7 +7008,7 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p } // filter the qualified - if ((code = doInitQInfo(pQInfo, pTsBuf, prevResult, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { + if ((code = doInitQInfo(pQInfo, pTsBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { goto _error; } @@ -6683,7 +7026,7 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters) { } for (int32_t i = 0; i < numOfFilters; i++) { - if (pFilter[i].filterstr) { + if (pFilter[i].filterstr && pFilter[i].pz) { free((void*)(pFilter[i].pz)); } } @@ -6725,6 +7068,10 @@ static void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr) { if (pExprInfo[i].pExpr != NULL) { tExprTreeDestroy(pExprInfo[i].pExpr, NULL); } + + if (pExprInfo[i].pFilter) { + freeColumnFilterInfo(pExprInfo[i].pFilter, pExprInfo[i].base.filterNum); + } } tfree(pExprInfo); @@ -6827,6 +7174,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { } fclose(f); + *(FILE **)pColInfoData->pData = NULL; } // all data returned, set query over diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index a73f385282..73b5b81e52 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -266,6 +266,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) { size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file); if (retVal 
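cloneExprFilterInfo() above deep-copies the filter array: a flat memcpy first, then a fresh heap copy of pz for every string filter, with rollback when an allocation fails so the clone never shares (or double-frees) the source's buffers. The same pattern on a reduced struct:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int16_t filterstr; int64_t len; int64_t pz; } filter_t;

static filter_t *clone_filters(const filter_t *src, int n) {
    filter_t *dst = calloc(n, sizeof(*src));
    if (dst == NULL) return NULL;
    memcpy(dst, src, sizeof(*src) * n);            // shallow copy of all fields

    for (int i = 0; i < n; i++) {
        if (dst[i].filterstr && dst[i].len > 0) {
            void *pz = calloc(1, (size_t)dst[i].len + 1);
            if (pz == NULL) {                      // roll back earlier string copies
                for (int j = 0; j < i; j++)
                    if (dst[j].filterstr && dst[j].len > 0)
                        free((void *)(uintptr_t)dst[j].pz);
                free(dst);
                return NULL;
            }
            memcpy(pz, (void *)(uintptr_t)src[i].pz, (size_t)src[i].len + 1);
            dst[i].pz = (int64_t)(uintptr_t)pz;    // int64_t-as-pointer idiom, as in the source
        }
    }
    return dst;
}
```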
<= 0) { // failed to write to buffer, may be not enough space ret = TAOS_SYSTEM_ERROR(errno); + pMemBuffer->pHead = first; return ret; } diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c index 884f7e653f..dabce88423 100644 --- a/src/query/src/qFilterfunc.c +++ b/src/query/src/qFilterfunc.c @@ -124,7 +124,7 @@ bool greaterEqualOperator(SColumnFilterElem *pFilter, const char *minval, const bool equalOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) { SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo; - if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t minv = -1, maxv = -1; GET_TYPED_DATA(minv, int64_t, type, minval); GET_TYPED_DATA(maxv, int64_t, type, maxval); @@ -202,7 +202,7 @@ bool likeOperator(SColumnFilterElem *pFilter, const char *minval, const char *ma bool notEqualOperator(SColumnFilterElem *pFilter, const char *minval, const char *maxval, int16_t type) { SColumnFilterInfo *pFilterInfo = &pFilter->filterInfo; - if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t minv = -1, maxv = -1; GET_TYPED_DATA(minv, int64_t, type, minval); GET_TYPED_DATA(maxv, int64_t, type, maxval); diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c index c5dd6b3cac..f83caf2d8f 100644 --- a/src/query/src/qResultbuf.c +++ b/src/query/src/qResultbuf.c @@ -287,6 +287,10 @@ static void lruListMoveToFront(SList *pList, SPageInfo* pi) { tdListPrependNode(pList, pi->pn); } +static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { + return pageSize + POINTER_BYTES + 2 + sizeof(tFilePage); +} + tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) { pResultBuf->statis.getPages += 1; @@ -311,7 +315,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32 // allocate buf if (availablePage == NULL) { - pi->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES + 2); // add extract bytes in case of zipped buffer increased. + pi->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize)); // add extract bytes in case of zipped buffer increased. } else { pi->pData = availablePage; } @@ -355,7 +359,7 @@ tFilePage* getResBufPage(SDiskbasedResultBuf* pResultBuf, int32_t id) { } if (availablePage == NULL) { - (*pi)->pData = calloc(1, pResultBuf->pageSize + POINTER_BYTES); + (*pi)->pData = calloc(1, getAllocPageSize(pResultBuf->pageSize)); } else { (*pi)->pData = availablePage; } diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 438aa89c46..e8bea98eb7 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -310,6 +310,77 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { return pExpr; } +static FORCE_INLINE int32_t tStrTokenCompare(SStrToken* left, SStrToken* right) { + return (left->type == right->type && left->n == right->n && strncasecmp(left->z, right->z, left->n) == 0) ? 
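getAllocPageSize() above centralizes how much memory one buffered result page needs: the page itself plus the compression slack and the page header, which the two call sites previously hand-computed (one of them dropping the "+ 2"). A reduced sketch of the accounting, with a stand-in for tFilePage:

```c
#include <stddef.h>
#include <stdint.h>

#define POINTER_BYTES sizeof(void *)                      // platform pointer width

typedef struct { int64_t num; char data[]; } page_hdr_t;  // stand-in for tFilePage

static inline size_t get_alloc_page_size(int32_t pageSize) {
    // payload + slack for zipped-buffer growth + the page header itself
    return pageSize + POINTER_BYTES + 2 + sizeof(page_hdr_t);
}
```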
0 : 1; +} + + +int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { + if ((left == NULL && right) || (left && right == NULL)) { + return 1; + } + + if (left == NULL && right == NULL) { + return 0; + } + + if (left->type != right->type) { + return 1; + } + + if (left->tokenId != right->tokenId) { + return 1; + } + + if (left->functionId != right->functionId) { + return 1; + } + + if ((left->pLeft && right->pLeft == NULL) + || (left->pLeft == NULL && right->pLeft) + || (left->pRight && right->pRight == NULL) + || (left->pRight == NULL && right->pRight) + || (left->pParam && right->pParam == NULL) + || (left->pParam == NULL && right->pParam)) { + return 1; + } + + if (tVariantCompare(&left->value, &right->value)) { + return 1; + } + + if (tStrTokenCompare(&left->colInfo, &right->colInfo)) { + return 1; + } + + + if (right->pParam && left->pParam) { + size_t size = taosArrayGetSize(right->pParam); + if (taosArrayGetSize(left->pParam) != size) { + return 1; + } + + for (int32_t i = 0; i < size; i++) { + tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i); + tSqlExpr* pSubLeft = pLeftElem->pNode; + tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i); + tSqlExpr* pSubRight = pRightElem->pNode; + + if (tSqlExprCompare(pSubLeft, pSubRight)) { + return 1; + } + } + } + + if (left->pLeft && tSqlExprCompare(left->pLeft, right->pLeft)) { + return 1; + } + + if (left->pRight && tSqlExprCompare(left->pRight, right->pRight)) { + return 1; + } + + return 0; +} + + + tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr)); @@ -640,7 +711,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) { SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, - SLimitVal *psLimit) { + SLimitVal *psLimit, tSqlExpr *pHaving) { assert(pSelectList != NULL); SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode)); @@ -655,6 +726,7 @@ SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SF pSqlNode->pSortOrder = pSortOrder; pSqlNode->pWhere = pWhere; pSqlNode->fillType = pFill; + pSqlNode->pHaving = pHaving; if (pLimit != NULL) { pSqlNode->limit = *pLimit; @@ -717,6 +789,9 @@ void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) { tSqlExprDestroy(pQuerySql->pWhere); pQuerySql->pWhere = NULL; + + tSqlExprDestroy(pQuerySql->pHaving); + pQuerySql->pHaving = NULL; taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant); pQuerySql->pSortOrder = NULL; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 9b0046fda0..aa793add84 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -140,7 +140,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16 SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i]; int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes; - char * s = getPosInResultPage(pRuntimeEnv->pQuery, page, pResultRow->offset, offset); + char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size); memset(s, 0, size); offset += size; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 7eb80e200d..153c7cb0cb 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -200,6 +200,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } + bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo
&& pQInfo->signature == pQInfo); @@ -478,7 +479,7 @@ void qCleanupQueryMgmt(void* pQMgmt) { qDebug("vgId:%d, queryMgmt cleanup completed", vgId); } -void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) { +void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) { if (pMgmt == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; @@ -519,8 +520,7 @@ void** qAcquireQInfo(void* pMgmt, uint64_t _key) { return NULL; } - TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; - void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &_key, sizeof(_key)); if (handle == NULL || *handle == NULL) { terrno = TSDB_CODE_QRY_INVALID_QHANDLE; return NULL; diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 7dca72b937..f9f9d624d3 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -2925,7 +2925,7 @@ static YYACTIONTYPE yy_reduce( break; case 160: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy193, yymsp[-10].minor.yy370, yymsp[-9].minor.yy454, yymsp[-4].minor.yy193, yymsp[-3].minor.yy193, &yymsp[-8].minor.yy392, &yymsp[-7].minor.yy447, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy193, &yymsp[0].minor.yy482, &yymsp[-1].minor.yy482); + yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy193, yymsp[-10].minor.yy370, yymsp[-9].minor.yy454, yymsp[-4].minor.yy193, yymsp[-3].minor.yy193, &yymsp[-8].minor.yy392, &yymsp[-7].minor.yy447, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy193, &yymsp[0].minor.yy482, &yymsp[-1].minor.yy482, yymsp[-2].minor.yy454); } yymsp[-12].minor.yy286 = yylhsminor.yy286; break; @@ -2945,7 +2945,7 @@ static YYACTIONTYPE yy_reduce( break; case 165: /* select ::= SELECT selcollist */ { - yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy193, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy286 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy193, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } yymsp[-1].minor.yy286 = yylhsminor.yy286; break; diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 133ae6d0ab..db3c72c2fc 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } } else { - pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime); + pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30); if ( pRpc->pCache == NULL ) { tError("%s failed to init connection cache", pRpc->label); rpcClose(pRpc); @@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) { taosTmrStopA(&pConn->pTimer); // set the idle timer to monitor the activity - taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); + taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer); rpcSendMsgToPeer(pConn, msg, msgLen); // if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured @@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont } if ( rpcIsReq(pHead->msgType) ) { - terrno = rpcProcessReqHead(pConn, pHead); pConn->connType = pRecv->connType; + terrno = 
rpcProcessReqHead(pConn, pHead); // stop idle timer taosTmrStopA(&pConn->pIdleTimer); @@ -1367,7 +1367,7 @@ static void rpcProcessConnError(void *param, void *id) { tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle); - if (pContext->numOfTry >= pContext->epSet.numOfEps) { + if (pContext->numOfTry >= pContext->epSet.numOfEps || pContext->msgType == TSDB_MSG_TYPE_FETCH) { rpcMsg.msgType = pContext->msgType+1; rpcMsg.ahandle = pContext->ahandle; rpcMsg.code = pContext->code; diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index b4d9315a8e..ec6dfcbc82 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -35,7 +35,7 @@ extern "C" { #define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16) #define SYNC_RECV_BUFFER_SIZE (5*1024*1024) -#define SYNC_MAX_FWDS 1024 +#define SYNC_MAX_FWDS 4096 #define SYNC_FWD_TIMER 300 #define SYNC_ROLE_TIMER 15000 // ms #define SYNC_CHECK_INTERVAL 1000 // ms diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 72442eee6c..e5f2d94c4a 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -409,23 +409,22 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force) syncReleaseNode(pNode); } -#if 1 void syncRecover(int64_t rid) { SSyncPeer *pPeer; SSyncNode *pNode = syncAcquireNode(rid); if (pNode == NULL) return; - // to do: add a few lines to check if recover is OK - // if take this node to unsync state, the whole system may not work - nodeRole = TAOS_SYNC_ROLE_UNSYNCED; (*pNode->notifyRoleFp)(pNode->vgId, nodeRole); - nodeVersion = 0; pthread_mutex_lock(&pNode->mutex); + nodeVersion = 0; + for (int32_t i = 0; i < pNode->replica; ++i) { + if (i == pNode->selfIndex) continue; + pPeer = pNode->peerInfo[i]; if (pPeer->peerFd >= 0) { syncRestartConnection(pPeer); @@ -436,7 +435,6 @@ void syncRecover(int64_t rid) { syncReleaseNode(pNode); } -#endif int32_t syncGetNodesRole(int64_t rid, SNodesRole *pNodesRole) { SSyncNode *pNode = syncAcquireNode(rid); @@ -551,7 +549,10 @@ static void syncClosePeerConn(SSyncPeer *pPeer) { if (pPeer->peerFd >= 0) { pPeer->peerFd = -1; void *pConn = pPeer->pConn; - if (pConn != NULL) syncFreeTcpConn(pPeer->pConn); + if (pConn != NULL) { + syncFreeTcpConn(pPeer->pConn); + pPeer->pConn = NULL; + } } } @@ -997,17 +998,24 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len); + int32_t code = 0; if (nodeRole == TAOS_SYNC_ROLE_SLAVE) { // nodeVersion = pHead->version; - (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); + code = (*pNode->writeToCacheFp)(pNode->vgId, pHead, TAOS_QTYPE_FWD, NULL); } else { if (nodeSStatus != TAOS_SYNC_STATUS_INIT) { - syncSaveIntoBuffer(pPeer, pHead); + code = syncSaveIntoBuffer(pPeer, pHead); } else { sError("%s, forward discarded since sstatus:%s, hver:%" PRIu64, pPeer->id, syncStatus[nodeSStatus], pHead->version); + code = -1; } } + + if (code != 0) { + sError("%s, failed to process fwd msg, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len); + syncRestartConnection(pPeer); + } } static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) { diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ea72760568..cd97b2a9d6 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2861,12 +2861,6 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta if 
(pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows; } - - SColumnInfo* pColInfo = taosArrayGet(pHandle->pColumns, i); - if (pColInfo->type == TSDB_DATA_TYPE_TIMESTAMP) { - pHandle->statis[i].min = pBlockInfo->compBlock->keyFirst; - pHandle->statis[i].max = pBlockInfo->compBlock->keyLast; - } } int64_t elapsed = taosGetTimestampUs() - stime; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index fc600c8260..4fc7c510b0 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -176,6 +176,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_ID, "Table name too long") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_NAME, "Table does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_TABLE_TYPE, "Invalid table type in tsdb") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TAGS, "Too many tags") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_COLUMNS, "Too many columns") TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOO_MANY_TIMESERIES, "Too many time series") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NOT_SUPER_TABLE, "Not super table") // operation only available for super table TAOS_DEFINE_ERROR(TSDB_CODE_MND_COL_NAME_TOO_LONG, "Tag name too long") diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index 4aa07196a7..d770a38e37 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -37,6 +37,7 @@ extern int32_t vDebugFlag; typedef struct { int32_t vgId; // global vnode group ID int32_t refCount; // reference count + int64_t queuedWMsgSize; int32_t queuedWMsg; int32_t queuedRMsg; int32_t flowctrlLevel; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 69d94aff87..0921c5ce48 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -99,8 +99,13 @@ int32_t vnodeSync(int32_t vgId) { return TSDB_CODE_VND_INVALID_VGROUP_ID; } - if (pVnode->role != TAOS_SYNC_ROLE_MASTER) { + if (pVnode->role == TAOS_SYNC_ROLE_SLAVE) { vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + + pVnode->version = 0; + pVnode->fversion = 0; + walResetVersion(pVnode->wal, pVnode->fversion); + syncRecover(pVnode->sync); } @@ -227,9 +232,28 @@ int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) { return code; } +static void vnodeFindWalRootDir(int32_t vgId, char *walRootDir) { + char vnodeDir[TSDB_FILENAME_LEN] = "\0"; + snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d/wal", vgId); + + TDIR *tdir = tfsOpendir(vnodeDir); + if (!tdir) return; + + const TFILE *tfile = tfsReaddir(tdir); + if (!tfile) { + tfsClosedir(tdir); + return; + } + + sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(tfile->level, tfile->id), vgId); + + tfsClosedir(tdir); +} + int32_t vnodeOpen(int32_t vgId) { char temp[TSDB_FILENAME_LEN * 3]; char rootDir[TSDB_FILENAME_LEN * 2]; + char walRootDir[TSDB_FILENAME_LEN * 2] = {0}; snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); @@ -316,7 +340,21 @@ int32_t vnodeOpen(int32_t vgId) { } } - sprintf(temp, "%s/wal", rootDir); + // walRootDir for wal & syncInfo.path (not empty dir of /vnode/vnode{pVnode->vgId}/wal) + vnodeFindWalRootDir(pVnode->vgId, walRootDir); + if (walRootDir[0] == 0) { + int level = -1, id = -1; + + tfsAllocDisk(TFS_PRIMARY_LEVEL, &level, &id); + if (level < 0 || id < 0) { + vnodeCleanUp(pVnode); + return terrno; + } + + sprintf(walRootDir, "%s/vnode/vnode%d", TFS_DISK_PATH(level, id), vgId); + } + + sprintf(temp, "%s/wal", 
walRootDir); pVnode->walCfg.vgId = pVnode->vgId; pVnode->wal = walOpen(temp, &pVnode->walCfg); if (pVnode->wal == NULL) { @@ -348,7 +386,7 @@ int32_t vnodeOpen(int32_t vgId) { pVnode->events = NULL; - vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); + vDebug("vgId:%d, vnode is opened in %s - %s, pVnode:%p", pVnode->vgId, rootDir, walRootDir, pVnode); vnodeAddIntoHash(pVnode); @@ -356,7 +394,7 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; syncInfo.syncCfg = pVnode->syncCfg; - tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN); + tstrncpy(syncInfo.path, walRootDir, TSDB_FILENAME_LEN); syncInfo.getWalInfoFp = vnodeGetWalInfo; syncInfo.writeToCacheFp = vnodeWriteToCache; syncInfo.confirmForward = vnodeConfirmForard; diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 0836ade77f..2448fada50 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -208,6 +208,7 @@ static void vnodeBuildNoResultQueryRsp(SRspRet *pRet) { pRsp->completed = true; } + static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { void * pCont = pRead->pCont; int32_t contLen = pRead->contLen; @@ -226,7 +227,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { if (contLen != 0) { qinfo_t pQInfo = NULL; - uint64_t qId = 0; + uint64_t qId = genQueryId(); code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId); SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); @@ -239,7 +240,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { // current connect is broken if (code == TSDB_CODE_SUCCESS) { - handle = qRegisterQInfo(pVnode->qMgmt, qId, (uint64_t)pQInfo); + handle = qRegisterQInfo(pVnode->qMgmt, qId, pQInfo); if (handle == NULL) { // failed to register qhandle pRsp->code = terrno; terrno = 0; diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index a0be52db7a..aab685e678 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -25,6 +25,7 @@ #include "vnodeStatus.h" #define MAX_QUEUED_MSG_NUM 100000 +#define MAX_QUEUED_MSG_SIZE 1024*1024*1024 //1GB extern void * tsDnodeTmr; static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *); @@ -269,6 +270,13 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } } + if (tsAvailDataDirGB <= tsMinimalDataDirGB) { + vError("vgId:%d, failed to write into vwqueue since no diskspace, avail:%fGB", pVnode->vgId, tsAvailDataDirGB); + taosFreeQitem(pWrite); + vnodeRelease(pVnode); + return TSDB_CODE_VND_NO_DISKSPACE; + } + if (!vnodeInReadyOrUpdatingStatus(pVnode)) { vError("vgId:%d, failed to write into vwqueue, vstatus is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], pVnode->refCount, pVnode); @@ -278,14 +286,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); - if (queued > MAX_QUEUED_MSG_NUM) { + int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + + if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) { int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; if (ms > 100) ms = 100; vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms); taosMsleep(ms); } - vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg); + 
vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d size:%" PRId64, pVnode->vgId, pVnode->refCount, + pVnode->queuedWMsg, pVnode->queuedWMsgSize); taosWriteQitem(pVnode->wqueue, pWrite->qtype, pWrite); return TSDB_CODE_SUCCESS; @@ -308,7 +319,10 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { SVnodeObj *pVnode = vparam; int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); - vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d", pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued); + int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + + vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite, + pWrite->rpcMsg.ahandle, queued, queuedSize); taosFreeQitem(pWrite); vnodeRelease(pVnode); @@ -344,7 +358,9 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { SVnodeObj *pVnode = pWrite->pVnode; if (pWrite->qtype != TAOS_QTYPE_RPC) return 0; - if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0; + if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->queuedWMsgSize < MAX_QUEUED_MSG_SIZE && + pVnode->flowctrlLevel <= 0) + return 0; if (tsEnableFlowCtrl == 0) { int32_t ms = (int32_t)pow(2, pVnode->flowctrlLevel + 2); diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 39ce2657aa..55ab9b031b 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -104,7 +104,7 @@ int32_t walAlter(void *handle, SWalCfg *pCfg) { pWal->level = pCfg->walLevel; pWal->fsyncPeriod = pCfg->fsyncPeriod; - pWal->fsyncSeq = pCfg->fsyncPeriod % 1000; + pWal->fsyncSeq = pCfg->fsyncPeriod / 1000; if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1; return TSDB_CODE_SUCCESS; diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index 178a0446c3..afd0b25338 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -227,11 +227,11 @@ pipeline { } } - post { + post { success { emailext ( - subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' + subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' SUCCESS", + body: """ @@ -247,29 +247,29 @@ pipeline {
-<li>构建名称>>分支:${PROJECT_NAME}</li>
+<li>构建名称>>分支:${env.BRANCH_NAME}</li>
 <li>构建结果: Successful</li>
 <li>构建编号:${BUILD_NUMBER}</li>
-<li>触发用户:${CAUSE}</li>
-<li>变更概要:${CHANGES}</li>
+<li>触发用户:${env.CHANGE_AUTHOR}</li>
+<li>提交信息:${env.CHANGE_TITLE}</li>
 <li>构建地址:${BUILD_URL}</li>
 <li>构建日志:${BUILD_URL}console</li>
-<li>变更集:${JELLY_SCRIPT}</li>
+
- ''', + """, to: "yqliu@taosdata.com,pxiao@taosdata.com", from: "support@taosdata.com" ) } failure { emailext ( - subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' + subject: "PR-result: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]' FAIL", + body: """ @@ -285,21 +285,21 @@ pipeline {
-<li>构建名称>>分支:${PROJECT_NAME}</li>
-<li>构建结果: Successful</li>
+<li>构建名称>>分支:${env.BRANCH_NAME}</li>
+<li>构建结果: Failure</li>
 <li>构建编号:${BUILD_NUMBER}</li>
-<li>触发用户:${CAUSE}</li>
-<li>变更概要:${CHANGES}</li>
+<li>触发用户:${env.CHANGE_AUTHOR}</li>
+<li>提交信息:${env.CHANGE_TITLE}</li>
 <li>构建地址:${BUILD_URL}</li>
 <li>构建日志:${BUILD_URL}console</li>
-<li>变更集:${JELLY_SCRIPT}</li>
+
- ''', + """, to: "yqliu@taosdata.com,pxiao@taosdata.com", from: "support@taosdata.com" ) diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs index 205269501d..2c150341f6 100644 --- a/tests/examples/C#/taosdemo/TDengineDriver.cs +++ b/tests/examples/C#/taosdemo/TDengineDriver.cs @@ -31,7 +31,11 @@ namespace TDengineDriver TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes TSDB_DATA_TYPE_BINARY = 8, // string TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes } enum TDengineInitOption @@ -53,15 +57,23 @@ namespace TDengineDriver switch ((TDengineDataType)type) { case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; + return "BOOL"; case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "BYTE"; + return "TINYINT"; case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; + return "SMALLINT"; case TDengineDataType.TSDB_DATA_TYPE_INT: return "INT"; case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; case TDengineDataType.TSDB_DATA_TYPE_FLOAT: return "FLOAT"; case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs index 2d78418e0a..4fc7232ad2 100644 --- a/tests/examples/C#/taosdemo/taosdemo.cs +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -63,7 +63,7 @@ namespace TDengineDriver static void HelpPrint(string arg, string desc) { string indent = " "; - Console.WriteLine("{0}{1}", indent, arg.PadRight(25)+desc); + Console.WriteLine("{0}{1}", indent, arg.PadRight(25) + desc); } static void PrintHelp(String[] argv) @@ -142,33 +142,33 @@ namespace TDengineDriver verbose = this.GetArgumentAsFlag(argv, "-v", true); debug = this.GetArgumentAsFlag(argv, "-g", true); - VerbosePrint ("###################################################################\n"); - VerbosePrintFormat ("# Server IP: {0}\n", host); - VerbosePrintFormat ("# User: {0}\n", user); - VerbosePrintFormat ("# Password: {0}\n", password); - VerbosePrintFormat ("# Number of Columns per record: {0}\n", colsPerRecord); - VerbosePrintFormat ("# Number of Threads: {0}\n", numOfThreads); - VerbosePrintFormat ("# Number of Tables: {0}\n", numOfTables); - VerbosePrintFormat ("# Number of records per Table: {0}\n", recordsPerTable); - VerbosePrintFormat ("# Records/Request: {0}\n", recordsPerRequest); - VerbosePrintFormat ("# Database name: {0}\n", dbName); - VerbosePrintFormat ("# Replica: {0}\n", replica); - VerbosePrintFormat ("# Use STable: {0}\n", useStable); - VerbosePrintFormat ("# Table prefix: {0}\n", tablePrefix); + VerbosePrint("###################################################################\n"); + VerbosePrintFormat("# Server IP: {0}\n", host); + VerbosePrintFormat("# User: {0}\n", user); + VerbosePrintFormat("# Password: {0}\n", password); + VerbosePrintFormat("# Number of Columns per record: {0}\n", colsPerRecord); + VerbosePrintFormat("# Number of Threads: {0}\n", numOfThreads); + VerbosePrintFormat("# Number of Tables: {0}\n", 
numOfTables); + VerbosePrintFormat("# Number of records per Table: {0}\n", recordsPerTable); + VerbosePrintFormat("# Records/Request: {0}\n", recordsPerRequest); + VerbosePrintFormat("# Database name: {0}\n", dbName); + VerbosePrintFormat("# Replica: {0}\n", replica); + VerbosePrintFormat("# Use STable: {0}\n", useStable); + VerbosePrintFormat("# Table prefix: {0}\n", tablePrefix); if (useStable == true) { VerbosePrintFormat("# STable prefix: {0}\n", stablePrefix); } - VerbosePrintFormat ("# Data order: {0}\n", order); - VerbosePrintFormat ("# Data out of order rate: {0}\n", rateOfOutorder); - VerbosePrintFormat ("# Delete method: {0}\n", methodOfDelete); - VerbosePrintFormat ("# Query command: {0}\n", query); - VerbosePrintFormat ("# Query Mode: {0}\n", queryMode); - VerbosePrintFormat ("# Insert Only: {0}\n", isInsertOnly); - VerbosePrintFormat ("# Verbose output {0}\n", verbose); - VerbosePrintFormat ("# Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt")); + VerbosePrintFormat("# Data order: {0}\n", order); + VerbosePrintFormat("# Data out of order rate: {0}\n", rateOfOutorder); + VerbosePrintFormat("# Delete method: {0}\n", methodOfDelete); + VerbosePrintFormat("# Query command: {0}\n", query); + VerbosePrintFormat("# Query Mode: {0}\n", queryMode); + VerbosePrintFormat("# Insert Only: {0}\n", isInsertOnly); + VerbosePrintFormat("# Verbose output {0}\n", verbose); + VerbosePrintFormat("# Test time: {0}\n", DateTime.Now.ToString("h:mm:ss tt")); - VerbosePrint ("###################################################################\n"); + VerbosePrint("###################################################################\n"); if (skipReadKey == false) { @@ -385,7 +385,7 @@ namespace TDengineDriver public void CreateDb() { StringBuilder sql = new StringBuilder(); - sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica); + sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica).Append(" keep 36500"); IntPtr res = TDengine.Query(this.conn, sql.ToString()); if (res != IntPtr.Zero) { @@ -406,7 +406,7 @@ namespace TDengineDriver sql.Clear(); sql.Append("CREATE TABLE IF NOT EXISTS "). Append(this.dbName).Append(".").Append(this.stablePrefix). 
- Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10)) tags(t1 int)"); + Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned) tags(t1 int)"); IntPtr res = TDengine.Query(this.conn, sql.ToString()); if (res != IntPtr.Zero) { @@ -523,7 +523,7 @@ namespace TDengineDriver int offset = IntPtr.Size * fields; IntPtr data = Marshal.ReadIntPtr(rowdata, offset); - builder.Append("---"); + builder.Append(" | "); if (data == IntPtr.Zero) { @@ -538,7 +538,7 @@ namespace TDengineDriver builder.Append(v1); break; case TDengineDataType.TSDB_DATA_TYPE_TINYINT: - byte v2 = Marshal.ReadByte(data); + sbyte v2 = (sbyte)Marshal.ReadByte(data); builder.Append(v2); break; case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: @@ -573,9 +573,25 @@ namespace TDengineDriver string v10 = Marshal.PtrToStringAnsi(data); builder.Append(v10); break; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + byte v11 = Marshal.ReadByte(data); + builder.Append(v11); + break; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + ushort v12 = (ushort)Marshal.ReadInt16(data); + builder.Append(v12); + break; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + uint v13 = (uint)Marshal.ReadInt32(data); + builder.Append(v13); + break; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + ulong v14 = (ulong)Marshal.ReadInt64(data); + builder.Append(v14); + break; } } - builder.Append("---"); + builder.Append(" | "); VerbosePrint(builder.ToString() + "\n"); builder.Clear(); @@ -642,8 +658,8 @@ namespace TDengineDriver tester.ExecuteQuery(); watch.Stop(); elapsedMs = watch.Elapsed.TotalMilliseconds; - Console.WriteLine("C# taosdemo: Spent {0} seconds to query {1} records.\n", - elapsedMs/1000, + Console.WriteLine("C# taosdemo: Spent {0} seconds to query {1} records.\n", + elapsedMs / 1000, tester.recordsPerTable * tester.numOfTables ); } @@ -712,9 +728,9 @@ namespace TDengineDriver int m = now.Minute; int s = now.Second; - long baseTimestamp = 1609430400000; // 2021/01/01 0:0:0 + long baseTimestamp = -16094340000; // 1969-06-29 01:21:00 VerbosePrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s); - long beginTimestamp = baseTimestamp + ((h*60 + m) * 60 + s) * 1000; + long beginTimestamp = baseTimestamp + ((h * 60 + m) * 60 + s) * 1000; Random random = new Random(); long rowsInserted = 0; @@ -755,11 +771,16 @@ namespace TDengineDriver sql.Append("(") .Append(writeTimeStamp) - .Append(", 1, 2, 3,") - .Append(i + batch) - .Append(", 5, 6, 7, 'abc', 'def')"); + .Append(", 1, -2, -3,") + .Append(i + batch - 127) + .Append(", -5, -6, -7, 'abc', 'def', 254, 65534,") + .Append(4294967294 - (uint)i - (uint)batch) + .Append(",") + .Append(18446744073709551614 - (ulong)i - (ulong)batch) + .Append(")"); } + VerbosePrint(sql.ToString() + "\n"); IntPtr res = TDengine.Query(this.conn, sql.ToString()); if (res == IntPtr.Zero) { @@ -837,7 +858,7 @@ namespace TDengineDriver } else { - sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10))"); + sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10), v10 tinyint unsigned, v11 smallint unsigned, v12 int unsigned, v13 bigint unsigned)"); } IntPtr res = TDengine.Query(this.conn, sql.ToString()); if (res != IntPtr.Zero) diff 
--git a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json index 35c7773175..7578083d33 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json +++ b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json @@ -36,8 +36,6 @@ "insert_mode": "taosc", "insert_rate": 0, "insert_rows": 100, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "interlace_rows": 3, "max_sql_len": 1024, "disorder_ratio": 0, diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index d711ce22c1..f2a96dd825 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -28,7 +28,8 @@ int points = 5; int numOfTables = 3; -int tablesProcessed = 0; +int tablesInsertProcessed = 0; +int tablesSelectProcessed = 0; int64_t st, et; typedef struct { @@ -134,6 +135,9 @@ int main(int argc, char *argv[]) gettimeofday(&systemTime, NULL); st = systemTime.tv_sec * 1000000 + systemTime.tv_usec; + tablesInsertProcessed = 0; + tablesSelectProcessed = 0; + for (i = 0; iname); - tablesProcessed++; - if (tablesProcessed >= numOfTables) { + tablesInsertProcessed++; + if (tablesInsertProcessed >= numOfTables) { gettimeofday(&systemTime, NULL); et = systemTime.tv_sec * 1000000 + systemTime.tv_usec; printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables); @@ -251,15 +263,17 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows) //taos_free_result(tres); printf("%d rows data retrieved from %s\n", pTable->rowsRetrieved, pTable->name); - tablesProcessed++; - if (tablesProcessed >= numOfTables) { + tablesSelectProcessed++; + if (tablesSelectProcessed >= numOfTables) { gettimeofday(&systemTime, NULL); et = systemTime.tv_sec * 1000000 + systemTime.tv_usec; printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables); } + + taos_free_result(tres); } - taos_free_result(tres); + } void taos_select_call_back(void *param, TAOS_RES *tres, int code) @@ -276,6 +290,4 @@ void taos_select_call_back(void *param, TAOS_RES *tres, int code) taos_cleanup(); exit(1); } - - taos_free_result(tres); } diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index e074e64966..0b1cd7b5d2 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -66,7 +66,7 @@ int main(int argc, char *argv[]) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } - for (int i = 0; i < 4000000; i++) { + for (int i = 0; i < 100; i++) { Test(taos, qstr, i); } taos_close(taos); diff --git a/tests/examples/lua/README.md b/tests/examples/lua/README.md index dd9c9d0787..32d6a4cace 100644 --- a/tests/examples/lua/README.md +++ b/tests/examples/lua/README.md @@ -19,6 +19,10 @@ Run lua sample: lua test.lua ``` +## Run performance test: +``` +time lua benchmark.lua +``` ## OpenResty Dependencies - OpenResty: ``` diff --git a/tests/examples/lua/benchmark.lua b/tests/examples/lua/benchmark.lua new file mode 100644 index 0000000000..900e7891d8 --- /dev/null +++ b/tests/examples/lua/benchmark.lua @@ -0,0 +1,67 @@ +local driver = require "luaconnector" + +local config = { + password = "taosdata", + host = "127.0.0.1", + port = 6030, + database = "", + user = "root", + + max_packet_size = 1024 * 1024 +} + +local conn +local res = driver.connect(config) +if res.code ~=0 then + print("connect--- failed: "..res.error) + return +else + conn = res.conn + print("connect--- pass.") +end + +local res = driver.query(conn,"drop 
database if exists demo") + +res = driver.query(conn,"create database demo") +if res.code ~=0 then + print("create db--- failed: "..res.error) + return +else + print("create db--- pass.") +end + +res = driver.query(conn,"use demo") +if res.code ~=0 then + print("select db--- failed: "..res.error) + return +else + print("select db--- pass.") +end + +res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") +if res.code ~=0 then + print("create table---failed: "..res.error) + return +else + print("create table--- pass.") +end + +local base = 1617330000000 +local index =0 +local count = 100000 +local t +while( index < count ) +do + t = base + index + local q=string.format([[insert into m1 values (%d,0,'robotspace')]],t) +res = driver.query(conn,q) +if res.code ~=0 then + print("insert records failed: "..res.error) + return +else + +end + index = index+1 +end +print(string.format([["Done. %d records has been stored."]],count)) +driver.close(conn) diff --git a/tests/examples/lua/build.sh b/tests/examples/lua/build.sh index 8018a3b0d8..cbd47bdfd2 100755 --- a/tests/examples/lua/build.sh +++ b/tests/examples/lua/build.sh @@ -1,2 +1,2 @@ -gcc lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos +gcc -std=c99 lua_connector.c -fPIC -shared -o luaconnector.so -Wall -ltaos diff --git a/tests/examples/lua/lua51/build.sh b/tests/examples/lua/lua51/build.sh index da2981bf7d..3b52ed1448 100755 --- a/tests/examples/lua/lua51/build.sh +++ b/tests/examples/lua/lua51/build.sh @@ -1,2 +1,2 @@ -gcc lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos +gcc -std=c99 lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index 920d2cdc35..8078ed2665 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -23,7 +23,7 @@ static int l_connect(lua_State *L){ luaL_checktype(L, 1, LUA_TTABLE); - lua_getfield(L,-1,"host"); + lua_getfield(L,1,"host"); if (lua_isstring(L,-1)){ host = lua_tostring(L, -1); // printf("host = %s\n", host); diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 8b9fa1a546..0325f552b1 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -40,8 +40,8 @@ function buildTDengine { git remote update > /dev/null git reset --hard HEAD - git checkout develop - REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop` + git checkout master + REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` LOCAL_COMMIT=`git rev-parse --short @` echo " LOCAL: $LOCAL_COMMIT" @@ -73,6 +73,9 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT + + python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT + } diff --git a/tests/pytest/cluster/clusterEnvSetup/insert.json b/tests/pytest/cluster/clusterEnvSetup/insert.json index 4548ef74e6..2f3cf0f0d9 100644 --- a/tests/pytest/cluster/clusterEnvSetup/insert.json +++ b/tests/pytest/cluster/clusterEnvSetup/insert.json @@ -37,8 +37,6 @@ "insert_mode": "taosc", "insert_rate": 0, "insert_rows": 100000, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 1, "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh 
index f426fbbcec..e5918792f4 100755 --- a/tests/pytest/concurrent_inquiry.sh +++ b/tests/pytest/concurrent_inquiry.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash # This is the script for us to try to cause the TDengine server or client to crash # diff --git a/tests/pytest/crash_gen/README.md b/tests/pytest/crash_gen/README.md index 6788ab1a63..8cdddd5b57 100644 --- a/tests/pytest/crash_gen/README.md +++ b/tests/pytest/crash_gen/README.md @@ -6,6 +6,25 @@ To effectively test and debug our TDengine product, we have developed a simple t exercise various functions of the system in a randomized fashion, hoping to expose maximum number of problems, hopefully without a pre-determined scenario. +# Features + +This tool can run as a test client with the following features: + +1. Any number of concurrent threads +1. Any number of test steps/loops +1. Auto-create and write to multiple databases +1. Ignore specific error codes +1. Write small or large data blocks +1. Auto-generate out-of-sequence data, if needed +1. Verify the result of write operations +1. Concurrent writing to a shadow database for later data verification +1. User specified number of replicas to use, against clusters + +This tool can also be used to start a TDengine service, either in stand-alone mode or +cluster mode. The features include: + +1. User specified number of D-Nodes to create/use. + # Preparation To run this tool, please ensure the followed preparation work is done first. @@ -16,7 +35,7 @@ To run this tool, please ensure the followed preparation work is done first. Ubuntu 20.04LTS as our own development environment, and suggest you also use such an environment if possible. -# Simple Execution +# Simple Execution as Client Test Tool To run the tool with the simplest method, follow the steps below: @@ -28,19 +47,21 @@ To run the tool with the simplest method, follow the steps below: That's it! -# Running Clusters +# Running Server-side Clusters This tool also makes it easy to test/verify the clustering capabilities of TDengine. You can start a cluster quite easily with the following command: ``` $ cd tests/pytest/ -$ ./crash_gen.sh -e -o 3 +$ rm -rf ../../build/cluster_dnode_?; ./crash_gen.sh -e -o 3 # first part optional ``` The `-e` option above tells the tool to start the service, and do not run any tests, while the `-o 3` option tells the tool to start 3 DNodes and join them together in a cluster. -Obviously you can adjust the the number here. +Obviously you can adjust the number here. The `rm -rf` command line is optional +to clean up previous cluster data, so that we can start from a clean state with no data +at all.
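+For example, with the 3-DNode cluster above running, you can point a test client at it from a second terminal. The invocation below is only an illustrative sketch (the thread/step/replica values are arbitrary; see the full option list under `-h` below):
+```
+$ cd tests/pytest/
+$ ./crash_gen.sh -t 10 -s 100 -i 3
+```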
## Behind the Scenes @@ -89,8 +110,9 @@ The exhaustive features of the tool is available through the `-h` option: ``` $ ./crash_gen.sh -h -usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] [-i MAX_REPLICAS] [-l] [-n] [-o NUM_DNODES] [-p] [-r] - [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-x] +usage: crash_gen_bootstrap.py [-h] [-a] [-b MAX_DBS] [-c CONNECTOR_TYPE] [-d] [-e] [-g IGNORE_ERRORS] + [-i NUM_REPLICAS] [-k] [-l] [-m] [-n] + [-o NUM_DNODES] [-p] [-r] [-s MAX_STEPS] [-t NUM_THREADS] [-v] [-w] [-x] TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below) --------------------------------------------------------------------- @@ -109,11 +131,14 @@ optional arguments: -e, --run-tdengine Run TDengine service in foreground (default: false) -g IGNORE_ERRORS, --ignore-errors IGNORE_ERRORS Ignore error codes, comma separated, 0x supported (default: None) - -i MAX_REPLICAS, --max-replicas MAX_REPLICAS - Maximum number of replicas to use, when testing against clusters. (default: 1) + -i NUM_REPLICAS, --num-replicas NUM_REPLICAS + Number (fixed) of replicas to use, when testing against clusters. (default: 1) + -k, --track-memory-leaks + Use Valgrind tool to track memory leaks (default: false) -l, --larger-data Write larger amount of data during write operations (default: false) + -m, --mix-oos-data Mix out-of-sequence data into the test data stream (default: true) -n, --dynamic-db-table-names - Use non-fixed names for dbs/tables, useful for multi-instance executions (default: false) + Use non-fixed names for dbs/tables, for -b, useful for multi-instance executions (default: false) -o NUM_DNODES, --num-dnodes NUM_DNODES Number of Dnodes to initialize, used with -e option. (default: 1) -p, --per-thread-db-connection @@ -124,6 +149,7 @@ optional arguments: -t NUM_THREADS, --num-threads NUM_THREADS Number of threads to run (default: 10) -v, --verify-data Verify data written in a number of places by reading back (default: false) + -w, --use-shadow-db Use a shaddow database to verify data integrity (default: false) -x, --continue-on-exception Continue execution after encountering unexpected/disallowed errors/exceptions (default: false) ``` diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 0fe94bad1d..44295e8bee 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -1574,9 +1574,9 @@ class TaskCreateDb(StateTransitionTask): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): # was: self.execWtSql(wt, "create database db") repStr = "" - if gConfig.max_replicas != 1: + if gConfig.num_replicas != 1: # numReplica = Dice.throw(gConfig.max_replicas) + 1 # 1,2 ... N - numReplica = gConfig.max_replicas # fixed, always + numReplica = gConfig.num_replicas # fixed, always repStr = "replica {}".format(numReplica) updatePostfix = "update 1" if gConfig.verify_data else "" # allow update only when "verify data" is active dbName = self._db.getName() @@ -2394,11 +2394,16 @@ class MainExec: help='Ignore error codes, comma separated, 0x supported (default: None)') parser.add_argument( '-i', - '--max-replicas', + '--num-replicas', action='store', default=1, type=int, - help='Maximum number of replicas to use, when testing against clusters. (default: 1)') + help='Number (fixed) of replicas to use, when testing against clusters. 
(default: 1)') + parser.add_argument( + '-k', + '--track-memory-leaks', + action='store_true', + help='Use Valgrind tool to track memory leaks (default: false)') parser.add_argument( '-l', '--larger-data', diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py index ae6f8d5d3a..cdbf2db4da 100644 --- a/tests/pytest/crash_gen/service_manager.py +++ b/tests/pytest/crash_gen/service_manager.py @@ -19,6 +19,7 @@ from queue import Queue, Empty from .misc import Logging, Status, CrashGenError, Dice, Helper, Progress from .db import DbConn, DbTarget +import crash_gen.settings class TdeInstance(): """ @@ -132,6 +133,7 @@ keep 36500 walLevel 1 # # maxConnections 100 +quorum 2 """ cfgContent = cfgTemplate.format_map(cfgValues) f = open(cfgFile, "w") @@ -164,7 +166,12 @@ walLevel 1 return "127.0.0.1" def getServiceCmdLine(self): # to start the instance - return [self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + cmdLine = [] + if crash_gen.settings.gConfig.track_memory_leaks: + Logging.info("Invoking VALGRIND on service...") + cmdLine = ['valgrind', '--leak-check=yes'] + cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() + return cmdLine def _getDnodes(self, dbc): dbc.query("show dnodes") @@ -202,7 +209,7 @@ walLevel 1 self.generateCfgFile() # service side generates config file, client does not self.rotateLogs() - self._smThread.start(self.getServiceCmdLine()) + self._smThread.start(self.getServiceCmdLine(), self.getLogDir()) # May raise exceptions def stop(self): self._smThread.stop() @@ -225,7 +232,7 @@ class TdeSubProcess: # RET_SUCCESS = -4 def __init__(self): - self.subProcess = None + self.subProcess = None # type: subprocess.Popen # if tInst is None: # raise CrashGenError("Empty instance not allowed in TdeSubProcess") # self._tInst = tInst # Default create at ServiceManagerThread @@ -263,7 +270,7 @@ class TdeSubProcess: # print("Starting TDengine with env: ", myEnv.items()) # print("Starting TDengine via Shell: {}".format(cmdLineStr)) - useShell = True + useShell = True # Needed to pass environments into it self.subProcess = subprocess.Popen( # ' '.join(cmdLine) if useShell else cmdLine, # shell=useShell, @@ -276,12 +283,12 @@ class TdeSubProcess: env=myEnv ) # had text=True, which interferred with reading EOF - STOP_SIGNAL = signal.SIGKILL # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? + STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm def stop(self): """ - Stop a sub process, and try to return a meaningful return code. 
+ Stop a sub process; DO NOT return anything, all conditions are handled INSIDE. Common POSIX signal values (from man -7 signal): SIGHUP 1 """ if not self.subProcess: Logging.error("Sub process already stopped") - return # -1 + return retCode = self.subProcess.poll() # ret -N means killed with signal N, otherwise it's from exit(N) if retCode: # valid return code, process ended - retCode = -retCode # only if valid + # retCode = -retCode # only if valid Logging.warning("TSP.stop(): process ended itself") self.subProcess = None - return retCode + return # process still alive, let's interrupt it - Logging.info("Terminate running process, send SIG_{} and wait...".format(self.STOP_SIGNAL)) - # sub process should end, then IPC queue should end, causing IO thread to end - topSubProc = psutil.Process(self.subProcess.pid) - for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False - child.send_signal(self.STOP_SIGNAL) - time.sleep(0.2) # 200 ms - # topSubProc.send_signal(sig) # now kill the main sub process (likely the Shell) + self._stopForSure(self.subProcess, self.STOP_SIGNAL) # success if no exception + self.subProcess = None - self.subProcess.send_signal(self.STOP_SIGNAL) # main sub process (likely the Shell) - self.subProcess.wait(20) - retCode = self.subProcess.returncode # should always be there - # May throw subprocess.TimeoutExpired exception above, therefore - # The process is guranteed to have ended by now - self.subProcess = None - if retCode == self.SIG_KILL_RETCODE: - Logging.info("TSP.stop(): sub proc KILLED, as expected") - elif retCode == (- self.STOP_SIGNAL): - Logging.info("TSP.stop(), sub process STOPPED, as expected") - elif retCode != 0: # != (- signal.SIGINT): - Logging.error("TSP.stop(): Failed to stop sub proc properly w/ SIG {}, retCode={}".format( - self.STOP_SIGNAL, retCode)) - else: - Logging.info("TSP.stop(): sub proc successfully terminated with SIG {}".format(self.STOP_SIGNAL)) - return - retCode + # sub process should end, then IPC queue should end, causing IO thread to end + + @classmethod + def _stopForSure(cls, proc: subprocess.Popen, sig: int): + ''' + Stop a process and all sub processes with a signal, and SIGKILL if necessary + ''' + def doKillTdService(proc: subprocess.Popen, sig: int): + Logging.info("Killing sub-sub process {} with signal {}".format(proc.pid, sig)) + proc.send_signal(sig) + try: + retCode = proc.wait(20) + if (- retCode) == signal.SIGSEGV: # Crashed + Logging.warning("Process {} CRASHED, please check CORE file!".format(proc.pid)) + elif (- retCode) == sig : + Logging.info("TD service terminated with expected return code {}".format(sig)) + else: + Logging.warning("TD service terminated, EXPECTING ret code {}, got {}".format(sig, -retCode)) + return True # terminated successfully + except subprocess.TimeoutExpired as err: + Logging.warning("Failed to kill sub-sub process {} with signal {}".format(proc.pid, sig)) + return False # failed to terminate + + + def doKillChild(child: psutil.Process, sig: int): + Logging.info("Killing sub-sub process {} with signal {}".format(child.pid, sig)) + child.send_signal(sig) + try: + retCode = child.wait(20) + if (- retCode) == signal.SIGSEGV: # Crashed + Logging.warning("Process {} CRASHED, please check CORE file!".format(child.pid)) + elif (- retCode) == sig : + Logging.info("Sub-sub process terminated with expected return code {}".format(sig)) + else: + Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(sig,
-retCode)) + return True # terminated successfully + except psutil.TimeoutExpired as err: + Logging.warning("Failed to kill sub-sub process {} with signal {}".format(child.pid, sig)) + return False # did not terminate + + def doKill(proc: subprocess.Popen, sig: int): + pid = proc.pid + try: + topSubProc = psutil.Process(pid) + for child in topSubProc.children(recursive=True): # or parent.children() for recursive=False + Logging.warning("Unexpected child to be killed") + doKillChild(child, sig) + except psutil.NoSuchProcess as err: + Logging.info("Process not found, can't kill, pid = {}".format(pid)) + + return doKillTdService(proc, sig) + # TODO: re-examine if we need to kill the top process, which is always the SHELL for now + # try: + # proc.wait(1) # SHELL process here, may throw subprocess.TimeoutExpired exception + # # expRetCode = self.SIG_KILL_RETCODE if sig==signal.SIGKILL else (-sig) + # # if retCode == expRetCode: + # # Logging.info("Process terminated with expected return code {}".format(retCode)) + # # else: + # # Logging.warning("Process terminated, EXPECTING ret code {}, got {}".format(expRetCode, retCode)) + # # return True # success + # except subprocess.TimeoutExpired as err: + # Logging.warning("Failed to kill process {} with signal {}".format(pid, sig)) + # return False # failed to kill + + def softKill(proc, sig): + return doKill(proc, sig) + + def hardKill(proc): + return doKill(proc, signal.SIGKILL) + + + + pid = proc.pid + Logging.info("Terminate running processes under {}, with SIG #{} and wait...".format(pid, sig)) + if softKill(proc, sig): + return# success + if sig != signal.SIGKILL: # really was soft above + if hardKill(proc): + return + raise CrashGenError("Failed to stop process, pid={}".format(pid)) class ServiceManager: PAUSE_BETWEEN_IPC_CHECK = 1.2 # seconds between checks on STDOUT of sub process @@ -560,7 +626,8 @@ class ServiceManagerThread: # self._tInstNum = tInstNum # instance serial number in cluster, ZERO based # self._tInst = tInst or TdeInstance() # Need an instance - self._thread = None # The actual thread, # type: threading.Thread + self._thread = None # The actual thread, # type: threading.Thread + self._thread2 = None # watching stderr self._status = Status(Status.STATUS_STOPPED) # The status of the underlying service, actually. def __repr__(self): @@ -568,11 +635,20 @@ class ServiceManagerThread: self.getStatus(), self._tdeSubProcess) def getStatus(self): + ''' + Get the status of the process being managed. (misnomer alert!) + ''' return self._status # Start the thread (with sub process), and wait for the sub service # to become fully operational - def start(self, cmdLine): + def start(self, cmdLine : str, logDir: str): + ''' + Request the manager thread to start a new sub process, and manage it. 
+ + :param cmdLine: the command line to invoke + :param logDir: the logging directory, to hold stdout/stderr files + ''' if self._thread: raise RuntimeError("Unexpected _thread") if self._tdeSubProcess: @@ -582,20 +658,30 @@ class ServiceManagerThread: self._status.set(Status.STATUS_STARTING) self._tdeSubProcess = TdeSubProcess() - self._tdeSubProcess.start(cmdLine) + self._tdeSubProcess.start(cmdLine) # TODO: verify process is running self._ipcQueue = Queue() self._thread = threading.Thread( # First thread captures server OUTPUT target=self.svcOutputReader, - args=(self._tdeSubProcess.getStdOut(), self._ipcQueue)) + args=(self._tdeSubProcess.getStdOut(), self._ipcQueue, logDir)) self._thread.daemon = True # thread dies with the program self._thread.start() + time.sleep(0.01) + if not self._thread.is_alive(): # What happened? + Logging.info("Failed to start process to monitor STDOUT") + self.stop() + raise CrashGenError("Failed to start thread to monitor STDOUT") + Logging.info("Successfully started process to monitor STDOUT") self._thread2 = threading.Thread( # 2nd thread captures server ERRORs target=self.svcErrorReader, - args=(self._tdeSubProcess.getStdErr(), self._ipcQueue)) + args=(self._tdeSubProcess.getStdErr(), self._ipcQueue, logDir)) self._thread2.daemon = True # thread dies with the program self._thread2.start() + time.sleep(0.01) + if not self._thread2.is_alive(): + self.stop() + raise CrashGenError("Failed to start thread to monitor STDERR") # wait for service to start for i in range(0, 100): @@ -643,7 +729,7 @@ class ServiceManagerThread: Logging.info("Service already stopped") return if self.getStatus().isStopping(): - Logging.info("Service is already being stopped") + Logging.info("Service is already being stopped, pid: {}".format(self._tdeSubProcess.getPid())) return # Linux will send Control-C generated SIGINT to the TDengine process # already, ref: @@ -653,14 +739,14 @@ class ServiceManagerThread: self._status.set(Status.STATUS_STOPPING) # retCode = self._tdeSubProcess.stop() - try: - retCode = self._tdeSubProcess.stop() - # print("Attempted to stop sub process, got return code: {}".format(retCode)) - if retCode == signal.SIGSEGV : # SGV - Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") - except subprocess.TimeoutExpired as err: - Logging.info("Time out waiting for TDengine service process to exit") - else: + # try: + # retCode = self._tdeSubProcess.stop() + # # print("Attempted to stop sub process, got return code: {}".format(retCode)) + # if retCode == signal.SIGSEGV : # SGV + # Logging.error("[[--ERROR--]]: TDengine service SEGV fault (check core file!)") + # except subprocess.TimeoutExpired as err: + # Logging.info("Time out waiting for TDengine service process to exit") + if not self._tdeSubProcess.stop(): # everything within if self._tdeSubProcess.isRunning(): # still running, should now never happen Logging.error("FAILED to stop sub process, it is still running...
pid = {}".format( self._tdeSubProcess.getPid())) @@ -683,16 +769,18 @@ class ServiceManagerThread: raise RuntimeError( "SMT.Join(): Unexpected status: {}".format(self._status)) - if self._thread: - self._thread.join() - self._thread = None - self._status.set(Status.STATUS_STOPPED) - # STD ERR thread - self._thread2.join() - self._thread2 = None + if self._thread or self._thread2 : + if self._thread: + self._thread.join() + self._thread = None + if self._thread2: # STD ERR thread + self._thread2.join() + self._thread2 = None else: print("Joining empty thread, doing nothing") + self._status.set(Status.STATUS_STOPPED) + def _trimQueue(self, targetSize): if targetSize <= 0: return # do nothing @@ -739,11 +827,22 @@ class ServiceManagerThread: print(pBar, end="", flush=True) print('\b\b\b\b', end="", flush=True) - def svcOutputReader(self, out: IO, queue): + def svcOutputReader(self, out: IO, queue, logDir: str): + ''' + The infinite routine that processes the STDOUT stream for the sub process being managed. + + :param out: the IO stream object used to fetch the data from + :param queue: the queue where we dump the roughly parsed line-by-line data + :param logDir: where we should dump a verbatim output file + ''' + os.makedirs(logDir, exist_ok=True) + logFile = os.path.join(logDir,'stdout.log') + fOut = open(logFile, 'wb') # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python # print("This is the svcOutput Reader...") # for line in out : for line in iter(out.readline, b''): + fOut.write(line) # print("Finished reading a line: {}".format(line)) # print("Adding item to queue...") try: @@ -772,10 +871,16 @@ class ServiceManagerThread: # queue.put(line) # meaning sub process must have died Logging.info("EOF for TDengine STDOUT: {}".format(self)) - out.close() + out.close() # Close the stream + fOut.close() # Close the output file - def svcErrorReader(self, err: IO, queue): + def svcErrorReader(self, err: IO, queue, logDir: str): + os.makedirs(logDir, exist_ok=True) + logFile = os.path.join(logDir,'stderr.log') + fErr = open(logFile, 'wb') for line in iter(err.readline, b''): + fErr.write(line) Logging.info("TDengine STDERR: {}".format(line)) Logging.info("EOF for TDengine STDERR: {}".format(self)) - err.close() \ No newline at end of file + err.close() + fErr.close() \ No newline at end of file diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 123858b3db..5eb5403395 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17247,3 +17247,88 @@ fun:_PyEval_EvalFrameDefault fun:_PyEval_EvalCodeWithName } +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyEval_EvalCodeWithName + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + 
fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:PyEval_EvalCode + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + obj:/usr/bin/python3.8 + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/lib/python3/dist-packages/_cffi_backend.cpython-38-x86_64-linux-gnu.so + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyObject_CallMethod + fun:PyInit__openssl + fun:_PyImport_LoadDynamicModuleWithSpec + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault +} diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 7ff41b13a6..e7e0586636 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -22,6 +22,7 @@ python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py python3 ./test.py -f insert/before_1970.py python3 bug2265.py +python3 ./test.py -f insert/bug3654.py #table python3 ./test.py -f table/alter_wal0.py @@ -216,8 +217,8 @@ python3 ./test.py -f query/floatCompare.py python3 ./test.py -f query/query1970YearsAf.py python3 ./test.py -f query/bug3351.py python3 ./test.py -f query/bug3375.py - - +python3 ./test.py -f query/queryJoin10tables.py +python3 ./test.py -f query/queryStddevWithGroupby.py #stream python3 ./test.py -f stream/metric_1.py @@ -257,6 +258,8 @@ python3 test.py -f subscribe/singlemeter.py #python3 test.py -f subscribe/stability.py python3 test.py -f subscribe/supertable.py +# topic +python3 ./test.py -f topic/topicQuery.py #======================p3-end=============== #======================p4-start=============== diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh index 55c10639d7..01cc62aaf8 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -36,16 +36,11 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log | do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + if [ "$defiMemError" -gt 0 ]; then cat valgrind.err echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 - elif [ "$defiMemError" -gt 1013 ];then #add for azure - cat valgrind.err - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! 
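The handle_crash_gen_val_log.sh change collapses the old two-branch threshold (0 < n < 1013 versus n > 1013, a special case once added for Azure) into a single "any definite leak fails" rule. A hedged Python equivalent of the same policy, for reference:

```python
import re
import sys

def check_definitely_lost(valgrind_log_text):
    # Match lines such as "definitely lost: 1,013 bytes in 4 blocks".
    for m in re.finditer(r'definitely lost:\s*([\d,]+) bytes', valgrind_log_text):
        lost = int(m.group(1).replace(',', ''))  # strip thousands separators, as the script does
        if lost > 0:
            sys.exit(8)  # same exit code the shell script uses
```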
## ${NC}" - exit 8 fi fi done diff --git a/tests/pytest/handle_taosd_val_log.sh b/tests/pytest/handle_taosd_val_log.sh index 6b7f669efe..829bc68225 100755 --- a/tests/pytest/handle_taosd_val_log.sh +++ b/tests/pytest/handle_taosd_val_log.sh @@ -21,7 +21,7 @@ rm -rf /var/lib/taos/* nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR & sleep 20 cd - -./crash_gen.sh -p -t 10 -s 200 +./crash_gen.sh -p -t 10 -s 1000 ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term while true do @@ -53,16 +53,11 @@ for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk do defiMemError=(${defiMemError//,/}) if [ -n "$defiMemError" ]; then - if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + if [ "$defiMemError" -gt 0 ]; then cat $VALGRIND_ERR echo -e "${RED} ## Memory errors number valgrind reports \ Definitely lost is $defiMemError. More than our threshold! ## ${NC}" exit 8 - elif [ "$defiMemError" -gt 1013 ];then #add for azure - cat $VALGRIND_ERR - echo -e "${RED} ## Memory errors number valgrind reports \ - Definitely lost is $defiMemError. More than our threshold! ## ${NC}" - exit 8 fi fi done diff --git a/tests/pytest/import_merge/importDataLastS.py b/tests/pytest/import_merge/importDataLastS.py index 2c1559d290..4e5701deb1 100644 --- a/tests/pytest/import_merge/importDataLastS.py +++ b/tests/pytest/import_merge/importDataLastS.py @@ -54,8 +54,8 @@ class TDTestCase: tdSql.execute(" ".join(sqlcmd)) tdLog.info("================= step3") - tdSql.query('select * from tb1') - tdSql.checkRows(205) + tdSql.query('select count(*) from tb1') + tdSql.checkData(0, 0, 205) tdLog.info("================= step4") tdDnodes.stop(1) @@ -71,8 +71,8 @@ class TDTestCase: tdSql.execute(" ".join(sqlcmd)) tdLog.info("================= step6") - tdSql.query('select * from tb1') - tdSql.checkRows(250) + tdSql.query('select count(*) from tb1') + tdSql.checkData(0, 0, 250) def stop(self): tdSql.close() diff --git a/tests/pytest/insert/basic.py b/tests/pytest/insert/basic.py index dcb5834d55..f23f38651a 100644 --- a/tests/pytest/insert/basic.py +++ b/tests/pytest/insert/basic.py @@ -43,6 +43,9 @@ class TDTestCase: tdSql.query("select * from tb") tdSql.checkRows(insertRows + 4) + # test case for https://jira.taosdata.com:18080/browse/TD-3716: + tdSql.error("insert into tb(now, 1)") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/insert/boundary2.py b/tests/pytest/insert/boundary2.py index 99784b7cd9..8a6fd1e6a1 100644 --- a/tests/pytest/insert/boundary2.py +++ b/tests/pytest/insert/boundary2.py @@ -50,14 +50,16 @@ class TDTestCase: sql += "'%s')" % self.get_random_string(22) tdSql.execute(sql % (self.ts + i)) - tdSql.query("select * from stb") - tdSql.checkRows(4096) + time.sleep(10) + tdSql.query("select count(*) from stb") + tdSql.checkData(0, 0, 4096) tdDnodes.stop(1) tdDnodes.start(1) - - tdSql.query("select * from stb") - tdSql.checkRows(4096) + + time.sleep(1) + tdSql.query("select count(*) from stb") + tdSql.checkData(0, 0, 4096) endTime = time.time() diff --git a/tests/pytest/insert/bug3654.py b/tests/pytest/insert/bug3654.py new file mode 100644 index 0000000000..41bedf771b --- /dev/null +++ b/tests/pytest/insert/bug3654.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
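importDataLastS.py (and boundary2.py below) stop pulling every row back through the client just to count them; a server-side count(*) checked with checkData does the same verification much more cheaply. The same idea with the raw Python connector, where the connection parameters are illustrative defaults:

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
cursor = conn.cursor()
cursor.execute("select count(*) from db.tb1")  # aggregate on the server side
assert cursor.fetchall()[0][0] == 205          # instead of fetching 205 rows and counting them
```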
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys + +from util.log import * +from util.sql import * +from util.cases import * + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + tdSql.execute("drop database if exists db") + tdSql.execute("create database db keep 36500,36500,36500") + tdSql.execute("use db") + tdSql.execute("create stable if not exists stb1 (ts timestamp, c1 int) tags(t11 int)") + tdSql.execute("create table t10 using stb1 tags(1)") + gapdate = (datetime.datetime.now() - datetime.datetime(1970, 1, 1, 8, 0, 0)).days + + tdLog.printNoPrefix("==========step2:insert data") + tdSql.execute(f"insert into t10 values (now-{gapdate}d, 1)") + tdSql.execute(f"insert into t10 values (now-{gapdate + 1}d, 2)") + + tdLog.printNoPrefix("==========step3:query") + tdSql.query("select * from t10 where c1=1") + tdSql.checkRows(1) + tdSql.query("select * from t10 where c1=2") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/perfbenchmark/bug3433.py b/tests/pytest/perfbenchmark/bug3433.py index 80e9cce0dc..e4480df6b6 100644 --- a/tests/pytest/perfbenchmark/bug3433.py +++ b/tests/pytest/perfbenchmark/bug3433.py @@ -91,8 +91,6 @@ class TDTestCase: "data_source": "rand", "insert_mode": "taosc", "insert_rows": 1, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, @@ -135,8 +133,6 @@ class TDTestCase: "data_source": "rand", "insert_mode": "taosc", "insert_rows": 1, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, diff --git a/tests/pytest/perfbenchmark/joinPerformance.py b/tests/pytest/perfbenchmark/joinPerformance.py new file mode 100644 index 0000000000..acd4f88c9b --- /dev/null +++ b/tests/pytest/perfbenchmark/joinPerformance.py @@ -0,0 +1,355 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
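bug3654.py reaches rows that straddle the Unix epoch by counting whole days from "now" back to 1970-01-01 08:00 local time (UTC+8). The test itself appears to pick up datetime through the wildcard util imports; a standalone sketch of the same arithmetic with an explicit import:

```python
import datetime

# Whole days between now and the local-time epoch boundary (UTC+8).
gapdate = (datetime.datetime.now() - datetime.datetime(1970, 1, 1, 8, 0, 0)).days
# "now - gapdate days" lands on or just after the epoch;
# "now - (gapdate + 1) days" lands just before it.
```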
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import os +import json +import argparse +import subprocess +import datetime +import re + +from multiprocessing import cpu_count +# from util.log import * +# from util.sql import * +# from util.cases import * +# from util.dnodes import * + +class JoinPerf: + + def __init__(self, clearCache, dbName, keep): + self.clearCache = clearCache + self.dbname = dbName + self.drop = "yes" + self.keep = keep + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taosperf" + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + + # def init(self, conn, logSql): + # tdLog.debug(f"start to excute {__file__}") + # tdSql.init(conn.cursor()) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + return self.getBuildPath() + "/sim/dnode1/cfg" + + # def initConnection(self): + # return self.getCfgDir() + + # def connectDB(self): + # self.conn = taos.connect( + # self.host, + # self.user, + # self.password, + # self.getCfgDir()) + # return self.conn.cursor() + + def dbinfocfg(self) -> dict: + return { + "name": self.dbname, + "drop": self.drop, + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": self.keep, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + # def column_tag_count(self, **column_tag) -> list : + # return [{"type": type, "count": count} for type, count in column_tag.items()] + + def type_check(func): + def wrapper(self, **kwargs): + num_types = ["int", "float", "bigint", "tinyint", "smallint", "double"] + str_types = ["binary", "nchar"] + for k ,v in kwargs.items(): + if k.lower() not in num_types and k.lower() not in str_types: + return f"args {k} type error, not allowed" + elif not isinstance(v, (int, list, tuple)): + return f"value {v} type error, not allowed" + elif k.lower() in num_types and not isinstance(v, int): + return f"arg {v} takes 1 positional argument must be type int " + elif isinstance(v, (list,tuple)) and len(v) > 2: + return f"arg {v} takes from 1 to 2 positional arguments but more than 2 were given " + elif isinstance(v,(list,tuple)) and [ False for _ in v if not isinstance(_, int) ]: + return f"arg {v} takes from 1 to 2 positional arguments must be type int " + else: + pass + return func(self, **kwargs) + return wrapper + + @type_check + def column_tag_count(self, **column_tag) -> list : + init_column_tag = [] + for k, v in column_tag.items(): + if re.search(k, "int, float, bigint, tinyint, smallint, double", re.IGNORECASE): + init_column_tag.append({"type": k, "count": v}) + elif re.search(k, "binary, nchar", re.IGNORECASE): + if isinstance(v, int): + init_column_tag.append({"type": k, "count": v, "len":8}) + elif len(v) 
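The type_check decorator screens the column/tag keyword arguments before column_tag_count turns them into taosdemo column specs; invalid input comes back as an error string rather than an exception. Assuming a running server and a constructed JoinPerf instance jp, the behavior looks like:

```python
jp = JoinPerf(clearCache=False, dbName="db", keep=36500)

jp.column_tag_count(INT=2, BINARY=[16, 20])
# -> [{'type': 'INT', 'count': 2}, {'type': 'BINARY', 'count': 16, 'len': 20}]

jp.column_tag_count(INT="two")
# -> "value two type error, not allowed"  (returned as a string, not raised)
```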
== 1: + init_column_tag.append({"type": k, "count": v[0], "len": 8}) + else: + init_column_tag.append({"type": k, "count": v[0], "len": v[1]}) + return init_column_tag + + def stbcfg(self, stb: str, child_tab_count: int, prechildtab: str, columns: dict, tags: dict) -> dict: + return { + "name": stb, + "child_table_exists": "no", + "childtable_count": child_tab_count, + "childtable_prefix": prechildtab, + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 50, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": "1969-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": self.column_tag_count(**columns), + "tags": self.column_tag_count(**tags) + } + + def createcfg(self,db: dict, stbs: list) -> dict: + return { + "filetype": "insert", + "cfgdir": self.config, + "host": self.host, + "port": 6030, + "user": self.user, + "password": self.password, + "thread_count": cpu_count(), + "thread_count_create_tbl": cpu_count(), + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": db, + "super_tables": stbs + }] + } + + def createinsertfile(self,db: dict, stbs: list) -> str: + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_create_table = f"/tmp/insert_{date}.json" + + with open(file_create_table, 'w') as f: + json.dump(self.createcfg(db, stbs), f) + + return file_create_table + + def querysqls(self, sql: str) -> list: + return [{"sql":sql,"result":""}] + + def querycfg(self, sql: str) -> dict: + return { + "filetype": "query", + "cfgdir": self.config, + "host": self.host, + "port": 6030, + "user": self.user, + "password": self.password, + "confirm_parameter_prompt": "yes", + "databases": "db", + "specified_table_query": { + "query_interval": 0, + "concurrent": cpu_count(), + "sqls": self.querysqls(sql) + } + } + + def createqueryfile(self, sql: str): + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_query_table = f"/tmp/query_{date}.json" + + with open(file_query_table,"w") as f: + json.dump(self.querycfg(sql), f) + + return file_query_table + + def taosdemotable(self, filepath, resultfile="/dev/null"): + taosdemopath = self.getBuildPath() + "/debug/build/bin" + with open(filepath,"r") as f: + filetype = json.load(f)["filetype"] + if filetype == "insert": + taosdemo_table_cmd = f"{taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1" + else: + taosdemo_table_cmd = f"yes | {taosdemopath}/taosdemo -f {filepath} > {resultfile} 2>&1" + _ = subprocess.check_output(taosdemo_table_cmd, shell=True).decode("utf-8") + + def droptmpfile(self): + drop_file_cmd = "rm -f /tmp/query_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f querySystemInfo-*" + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f /tmp/insert_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + + def run(self): + print("========== join on table schema performance ==========") + if self.clearCache == True: + # must be root permission + subprocess.check_output("echo 3 > /proc/sys/vm/drop_caches") + + # cfg = { + # 'enableRecordSql': '1' + # } + # tdDnodes.deploy(1, cfg) + + # 
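querycfg and taosdemotable together perform a simple round trip: serialize a taosdemo "query" job to a JSON file under /tmp, then feed it to the binary, auto-answering the confirmation prompt with yes. A condensed sketch under the assumption that taosdemo is on the PATH (the class resolves the real build path instead):

```python
import json
import subprocess

cfg = {"filetype": "query", "databases": "db",
       "specified_table_query": {"query_interval": 0, "concurrent": 4,
                                 "sqls": [{"sql": "select * from t00,t01 where t00.ts=t01.ts",
                                           "result": ""}]}}
with open("/tmp/query_demo.json", "w") as f:
    json.dump(cfg, f)

# "yes |" auto-answers confirm_parameter_prompt, as taosdemotable() does for query jobs.
subprocess.check_output("yes | taosdemo -f /tmp/query_demo.json > /dev/null 2>&1", shell=True)
```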
tdLog.printNoPrefix("==========step1: create 1024 columns on different data type==========") + # db = self.dbinfocfg() + # stblist = [] + # # the supertable config for each type in column_tag_types + # column_tag_types = ["INT","FLOAT","BIGINT","TINYINT","SMALLINT","DOUBLE","BINARY","NCHAR"] + # for i in range(len(column_tag_types)): + # tagtype = { + # "INT": 0, + # "FLOAT": 0, + # "BIGINT": 0, + # "TINYINT": 0, + # "SMALLINT": 0, + # "DOUBLE": 0, + # "BINARY": 0, + # "NCHAR": 0 + # } + # columntype = tagtype.copy() + # tagtype["INT"] = 2 + # if column_tag_types[i] == "BINARY": + # columntype[column_tag_types[i]] = [509, 10] + # elif column_tag_types[i] == "NCHAR": + # columntype[column_tag_types[i]] = [350, 10] + # else: + # columntype[column_tag_types[i]] = 1021 + # supertable = self.stbcfg( + # stb=f"stb{i}", + # child_tab_count=2, + # prechildtab=f"t{i}", + # columns=columntype, + # tags=tagtype + # ) + # stblist.append(supertable) + # self.taosdemotable(self.createinsertfile(db=db, stbs=stblist)) + + # tdLog.printNoPrefix("==========step2: execute query operation==========") + # tdLog.printNoPrefix("==========execute query operation==========") + sqls = { + "nchar":"select * from t00,t01 where t00.ts=t01.ts" + } + for type, sql in sqls.items(): + result_file = f"/tmp/queryResult_{type}.log" + self.taosdemotable(self.createqueryfile(sql), resultfile=result_file) + if result_file: + # tdLog.printNoPrefix(f"execute type {type} sql, the sql is: {sql}") + print(f"execute type {type} sql, the sql is: {sql}") + max_sql_time_cmd = f''' + grep Spent {result_file} | awk 'NR==1{{max=$7;next}}{{max=max>$7?max:$7}}END{{print "Max=",max,"s"}}' + ''' + max_sql_time = subprocess.check_output(max_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {max_sql_time}") + print(f"type: {type} sql time : {max_sql_time}") + + min_sql_time_cmd = f''' + grep Spent {result_file} | awk 'NR==1{{min=$7;next}}{{min=min<$7?min:$7}}END{{print "Min=",min,"s"}}' + ''' + min_sql_time = subprocess.check_output(min_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {min_sql_time}") + print(f"type: {type} sql time : {min_sql_time}") + + avg_sql_time_cmd = f''' + grep Spent {result_file} |awk '{{sum+=$7}}END{{print "Average=",sum/NR,"s"}}' + ''' + avg_sql_time = subprocess.check_output(avg_sql_time_cmd, shell=True).decode("UTF-8") + # tdLog.printNoPrefix(f"type: {type} sql time : {avg_sql_time}") + print(f"type: {type} sql time : {avg_sql_time}") + + # tdLog.printNoPrefix(f"==========type {type} sql is over==========") + + # tdLog.printNoPrefix("==========query operation is over==========") + + self.droptmpfile() + # tdLog.printNoPrefix("==========tmp file has been deleted==========") + + # def stop(self): + # tdSql.close() + # tdLog.success(f"{__file__} successfully executed") + +# tdCases.addLinux(__file__, TDTestCase()) +# tdCases.addWindows(__file__, TDTestCase()) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '-r', + '--remove-cache', + action='store_true', + default=False, + help='clear cache before query (default: False)') + parser.add_argument( + '-d', + '--database-name', + action='store', + default='db', + type=str, + help='Database name to be created (default: db)') + parser.add_argument( + '-k', + '--keep-time', + action='store', + default=36500, + type=int, + help='Database keep parameters (default: 36500)') + + args = parser.parse_args() + jointest = JoinPerf(args.remove_cache, 
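The three awk one-liners reduce the "Spent ... s" lines in the result file to the maximum, minimum, and average query time. An equivalent reduction in Python, assuming (as the awk scripts do) that the elapsed seconds sit in the seventh whitespace-separated field and at least one such line exists:

```python
def spent_stats(result_file):
    times = []
    with open(result_file) as f:
        for line in f:
            if "Spent" in line:
                times.append(float(line.split()[6]))  # awk's $7 is 0-based index 6
    return max(times), min(times), sum(times) / len(times)
```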
args.database_name, args.keep_time) + jointest.run() \ No newline at end of file diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index e66a79f5e6..93404afd59 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -82,8 +82,6 @@ class TDTestCase: "data_source": "rand", "insert_mode": "taosc", "insert_rows": 5000, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "rows_per_tbl": 1, "max_sql_len": 65480, "disorder_ratio": 0, @@ -255,4 +253,4 @@ class TDTestCase: tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryJoin10tables.py b/tests/pytest/query/queryJoin10tables.py new file mode 100644 index 0000000000..01a7397d44 --- /dev/null +++ b/tests/pytest/query/queryJoin10tables.py @@ -0,0 +1,201 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def createtable(self): + + # create stbles + tdSql.execute("create table if not exists stb1 (ts timestamp, c1 int) tags(t11 int, t12 int)") + tdSql.execute("create table if not exists stb2 (ts timestamp, c2 int) tags(t21 int, t22 int)") + tdSql.execute("create table if not exists stb3 (ts timestamp, c3 int) tags(t31 int, t32 int)") + tdSql.execute("create table if not exists stb4 (ts timestamp, c4 int) tags(t41 int, t42 int)") + tdSql.execute("create table if not exists stb5 (ts timestamp, c5 int) tags(t51 int, t52 int)") + tdSql.execute("create table if not exists stb6 (ts timestamp, c6 int) tags(t61 int, t62 int)") + tdSql.execute("create table if not exists stb7 (ts timestamp, c7 int) tags(t71 int, t72 int)") + tdSql.execute("create table if not exists stb8 (ts timestamp, c8 int) tags(t81 int, t82 int)") + tdSql.execute("create table if not exists stb9 (ts timestamp, c9 int) tags(t91 int, t92 int)") + tdSql.execute("create table if not exists stb10 (ts timestamp, c10 int) tags(t101 int, t102 int)") + tdSql.execute("create table if not exists stb11 (ts timestamp, c11 int) tags(t111 int, t112 int)") + + # create normal tables + tdSql.execute("create table t10 using stb1 tags(0, 9)") + tdSql.execute("create table t11 using stb1 tags(1, 8)") + tdSql.execute("create table t12 using stb1 tags(2, 7)") + tdSql.execute("create table t13 using stb1 tags(3, 6)") + tdSql.execute("create table t14 using stb1 tags(4, 5)") + tdSql.execute("create table t15 using stb1 tags(5, 4)") + tdSql.execute("create table t16 using stb1 tags(6, 3)") + tdSql.execute("create table t17 using stb1 tags(7, 2)") + tdSql.execute("create table t18 using stb1 tags(8, 1)") + tdSql.execute("create table t19 using stb1 tags(9, 0)") + tdSql.execute("create table t110 using stb1 tags(10, 10)") + + tdSql.execute("create table t20 using stb2 
tags(0, 9)") + tdSql.execute("create table t21 using stb2 tags(1, 8)") + tdSql.execute("create table t22 using stb2 tags(2, 7)") + + tdSql.execute("create table t30 using stb3 tags(0, 9)") + tdSql.execute("create table t31 using stb3 tags(1, 8)") + tdSql.execute("create table t32 using stb3 tags(2, 7)") + + def inserttable(self): + for i in range(100): + if i<60: + tdSql.execute(f"insert into t20 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t21 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t22 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t30 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t31 values('2020-10-01 00:00:{i}.000', {i})") + tdSql.execute(f"insert into t32 values('2020-10-01 00:00:{i}.000', {i})") + else: + tdSql.execute(f"insert into t20 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t21 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t22 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t30 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t31 values('2020-10-01 00:01:{i-60}.000', {i})") + tdSql.execute(f"insert into t32 values('2020-10-01 00:01:{i-60}.000', {i})") + for j in range(11): + if i<60: + tdSql.execute(f"insert into t1{j} values('2020-10-01 00:00:{i}.000', {i})") + else: + tdSql.execute(f"insert into t1{j} values('2020-10-01 00:01:{i-60}.000', {i})") + + def queryjointable(self): + tdSql.error( + '''select from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts''' + ) + tdSql.error("select * from t10 where t10.ts=t11.ts") + tdSql.error("select * from where t10.ts=t11.ts") + tdSql.error("select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19") + tdSql.error("select * from stb1, stb2, stb3 where stb1.ts=stb2.ts and stb1.ts=stb3.ts") + tdSql.error("select * from stb1, stb2, stb3 where stb1.t11=stb2.t21 and stb1.t11=stb3.t31") + tdSql.error("select * from stb1, stb2, stb3") + tdSql.error( + '''select * from stb1 + join stb2 on stb1.ts=stb2.ts and stb1.t11=stb2.t21 + join stb3 on stb1.ts=stb3.ts and stb1.t11=stb3.t31''' + ) + tdSql.error("select * from t10 join t11 on t10.ts=t11.ts join t12 on t11.ts=t12.ts") + tdSql.query( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11 =stb3.t31''' + ) + tdSql.checkRows(300) + tdSql.query("select * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.checkRows(100) + tdSql.error("selec * from t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * form t11,t12,t13 where t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts <> t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts != t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts or t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.c1=t12.c2 and t11.c1=t13.c3") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.c3 and t11.c1=t13.ts") + tdSql.error("select ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from 
t11,t12,t13 when t11.ts=ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and ts>100") + tdSql.error("select * from t11,t12,stb1 when t11.ts=t12.ts and t11.ts=stb1.ts") + tdSql.error("select t14.ts from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts1") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t14.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts") + tdSql.error("select * from t11,t12,t13 when t11.ts=t12.ts and t11.ts=t13.ts and t11.c1=t13.c3") + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.ts=t20.ts''' + ) + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19,t20 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts''' + ) + tdSql.error( + '''select * from t10,t11,t12,t13,t14,t15,t16,t17,t18,t19 + where t10.ts=t11.ts and t10.ts=t12.ts and t10.ts=t13.ts and t10.ts=t14.ts and t10.ts=t15.ts + and t10.ts=t16.ts and t10.ts=t17.ts and t10.ts=t18.ts and t10.ts=t19.ts and t10.c1=t19.c1''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 + and stb1.t12=stb3=t32''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10,stb11 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts + and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.ts=stb11.ts + and stb1.t11=stb2.t21 and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 + and stb1.t11=stb6.t61 and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 + and stb1.t11=stb10.t101 and stb1.t11=stb11.t111''' + ) + tdSql.error( + '''select * from stb1,stb2,stb3,stb4,stb5,stb6,stb7,stb8,stb9,stb10 + where stb1.ts=stb2.ts and stb1.ts=stb3.ts and stb1.ts=stb4.ts and stb1.ts=stb5.ts and stb1.ts=stb6.ts + and stb1.ts=stb7.ts and stb1.ts=stb8.ts and stb1.ts=stb9.ts and stb1.ts=stb10.ts and stb1.t11=stb2.t21 + and stb1.t11=stb3.t31 and stb1.t11=stb4.t41 and stb1.t11=stb5.t51 and stb1.t11=stb6.t61 + and stb1.t11=stb7.t71 and stb1.t11=stb8.t81 and stb1.t11=stb9.t91 and stb1.t11=stb10.t101 + and stb1.t12=stb11.t102''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.createtable() + + tdLog.printNoPrefix("==========step2:insert data") + self.inserttable() + + tdLog.printNoPrefix("==========step3:query timestamp type") + self.queryjointable() + + # after wal and sync, check again + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.printNoPrefix("==========step4:query again after wal") + self.queryjointable() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, 
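Both new query tests re-run their full assertion set after a dnode stop/start, so results are validated from WAL-recovered data rather than only the in-memory cache. The recurring pattern, lifted from the run() methods:

```python
from util.sql import tdSql
from util.dnodes import tdDnodes

tdSql.query("show dnodes")
index = tdSql.getData(0, 0)  # id of the first dnode
tdDnodes.stop(index)
tdDnodes.start(index)
# ... re-run the same queries and assertions here ...
```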
TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/queryStddevWithGroupby.py b/tests/pytest/query/queryStddevWithGroupby.py new file mode 100644 index 0000000000..aee88ca9c5 --- /dev/null +++ b/tests/pytest/query/queryStddevWithGroupby.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def querysqls(self): + tdSql.query("select stddev(c1) from t10 group by c1") + tdSql.checkRows(6) + tdSql.checkData(0, 0, 0) + tdSql.checkData(1, 0, 0) + tdSql.checkData(2, 0, 0) + tdSql.checkData(3, 0, 0) + tdSql.checkData(4, 0, 0) + tdSql.checkData(5, 0, 0) + tdSql.query("select stddev(c2) from t10") + tdSql.checkData(0, 0, 0.5) + + def run(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + tdSql.execute("create stable stb1 (ts timestamp , c1 int ,c2 float) tags(t1 int)") + tdSql.execute("create table t10 using stb1 tags(1)") + tdSql.execute("insert into t10 values ('1969-12-31 00:00:00.000', 2,1)") + tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3,1)") + tdSql.execute("insert into t10 values (0, 4,1)") + tdSql.execute("insert into t10 values (now-18725d, 1,2)") + tdSql.execute("insert into t10 values ('2021-04-06 00:00:00.000', 5,2)") + tdSql.execute("insert into t10 values (now+1d,6,2)") + + tdLog.printNoPrefix("==========step2:query and check") + self.querysqls() + + tdLog.printNoPrefix("==========step3:after wal,check again") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + self.querysqls() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/tools/insert-interlace.json b/tests/pytest/tools/insert-interlace.json index d4767ad064..a5c545d159 100644 --- a/tests/pytest/tools/insert-interlace.json +++ b/tests/pytest/tools/insert-interlace.json @@ -41,7 +41,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 250, - "multi_thread_write_one_tbl": "no", "interlace_rows": 80, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json index 8c3b39fb0b..9220bc1d17 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json @@ -40,8 +40,6 @@ "data_source": "rand", "insert_mode": "taosc", "insert_rows": 0, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git 
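The expected values in queryStddevWithGroupby.py are easy to verify by hand: grouping by c1 leaves one distinct value per group, so each group's stddev is 0, and stddev(c2) over the whole table is the population standard deviation of the six inserted c2 values (1, 1, 1, 2, 2, 2):

```python
import statistics

# mean = 1.5, population variance = 0.25, so stddev = 0.5, matching checkData(0, 0, 0.5)
assert statistics.pstdev([1, 1, 1, 2, 2, 2]) == 0.5
```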
a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json index a9efa7c31c..164d4fe8be 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 33, "childtable_offset": 33, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset.json b/tests/pytest/tools/insert-tblimit-tboffset.json index f3d3e864ba..0b8e0bd6c5 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset.json +++ b/tests/pytest/tools/insert-tblimit-tboffset.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 33, "childtable_offset": 33, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit-tboffset0.json b/tests/pytest/tools/insert-tblimit-tboffset0.json index 6bf3783cb2..55d9e19055 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset0.json +++ b/tests/pytest/tools/insert-tblimit-tboffset0.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 20, "childtable_offset": 0, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/insert-tblimit1-tboffset.json b/tests/pytest/tools/insert-tblimit1-tboffset.json index 292902093d..3a88665661 100644 --- a/tests/pytest/tools/insert-tblimit1-tboffset.json +++ b/tests/pytest/tools/insert-tblimit1-tboffset.json @@ -42,8 +42,6 @@ "insert_rows": 1000, "childtable_limit": 1, "childtable_offset": 50, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/taosdemo-sampledata.json b/tests/pytest/tools/taosdemo-sampledata.json index d543b7e086..ce1c68b393 100644 --- a/tests/pytest/tools/taosdemo-sampledata.json +++ b/tests/pytest/tools/taosdemo-sampledata.json @@ -22,8 +22,6 @@ "insert_mode": "taosc", "insert_rate": 0, "insert_rows": 20, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, "max_sql_len": 1048000, "timestamp_step": 1000, "start_timestamp": "2020-1-1 00:00:00", diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 1156cc2484..b50180e2b3 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -49,9 +49,7 @@ class taosdemoPerformace: "batch_create_tbl_num": 10, "insert_mode": "taosc", "insert_rows": 100000, - "multi_thread_write_one_tbl": "no", - "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 100, + "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -159,4 +157,4 @@ if __name__ == '__main__': perftest = taosdemoPerformace(args.commit_id, args.database_name) perftest.insertData() - perftest.createTablesAndStoreData() \ No newline at end of file + perftest.createTablesAndStoreData() diff --git a/tests/pytest/topic/topicQuery.py b/tests/pytest/topic/topicQuery.py new file mode 100644 index 0000000000..1ee3c3a427 --- /dev/null +++ b/tests/pytest/topic/topicQuery.py @@ -0,0 +1,91 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. 
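Across the tools/*.json fixtures, the deprecated "multi_thread_write_one_tbl" and "number_of_tbl_in_one_sql" knobs are dropped, and in taosdemoPerformance.py "rows_per_tbl" becomes "interlace_rows". A sketch of the same mechanical migration, should any remaining config need it:

```python
import json

def migrate(cfg_path):
    with open(cfg_path) as f:
        cfg = json.load(f)
    for stb in cfg.get("databases", [{}])[0].get("super_tables", []):
        stb.pop("multi_thread_write_one_tbl", None)   # deprecated knob
        stb.pop("number_of_tbl_in_one_sql", None)     # deprecated knob
        if "rows_per_tbl" in stb:
            stb["interlace_rows"] = stb.pop("rows_per_tbl")  # renamed key
    with open(cfg_path, "w") as f:
        json.dump(cfg, f, indent=2)
```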
+# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + + def run(self): + tdSql.prepare() + + # test case for https://jira.taosdata.com:18080/browse/TD-3679 + print("==============step1") + tdSql.execute( + "create topic tq_test partitions 10") + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(0, %d, 'aaaa')" % self.ts) + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(1, %d, 'aaaa')" % (self.ts + 1)) + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(2, %d, 'aaaa')" % (self.ts + 2)) + tdSql.execute( + "insert into tq_test.p1(off, ts, content) values(3, %d, 'aaaa')" % (self.ts + 3)) + + print("==============step2") + + tdSql.query("select * from tq_test.p1") + tdSql.checkRows(4) + + tdSql.query("select * from tq_test.p1 where ts >= %d" % self.ts) + tdSql.checkRows(4) + + tdSql.query("select * from tq_test.p1 where ts > %d" % self.ts) + tdSql.checkRows(3) + + tdSql.query("select * from tq_test.p1 where ts = %d" % self.ts) + tdSql.checkRows(1) + + + tdSql.execute("use db") + tdSql.execute("create table test(ts timestamp, start timestamp, value int)") + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts, self.ts)) + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 1, self.ts + 1)) + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 2, self.ts + 2)) + tdSql.execute("insert into test values(%d, %d, 1)" % (self.ts + 3, self.ts + 3)) + + tdSql.query("select * from test") + tdSql.checkRows(4) + + tdSql.query("select * from test where ts >= %d" % self.ts) + tdSql.checkRows(4) + + tdSql.query("select * from test where ts > %d" % self.ts) + tdSql.checkRows(3) + + tdSql.query("select * from test where ts = %d" % self.ts) + tdSql.checkRows(1) + + tdSql.query("select * from test where start >= %d" % self.ts) + tdSql.checkRows(4) + + tdSql.query("select * from test where start > %d" % self.ts) + tdSql.checkRows(3) + + tdSql.query("select * from test where start = %d" % self.ts) + tdSql.checkRows(1) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index dd7331054c..124e76e85c 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -220,6 +220,7 @@ sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1; sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1; sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1; sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1; +sql_error select ts from group_tb0 group by c1; #===========================interval=====not support====================== sql_error select count(*), c1 from group_tb0 where 
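topicQuery.py covers TD-3679: a topic behaves like a database whose partitions (p1..pN) are tables with (off, ts, content) columns, and the test checks that ts-range filters behave the same there as on an ordinary table. The setup distilled from the test:

```python
from util.sql import tdSql

ts = 1538548685000
tdSql.execute("create topic tq_test partitions 10")
tdSql.execute("insert into tq_test.p1(off, ts, content) values(0, %d, 'aaaa')" % ts)
tdSql.query("select * from tq_test.p1 where ts >= %d" % ts)  # range filter on a partition
```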
c1<20 interval(1y) group by c1; diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim new file mode 100644 index 0000000000..2e95664d31 --- /dev/null +++ b/tests/script/general/parser/having.sim @@ -0,0 +1,1841 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10)) + +sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1"); +sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2"); +sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3"); +sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4"); + +sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true ,"1","1") +sql insert into tb1 values (now-150s,1,1.0,1.0,1,1,1,false,"1","1") +sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true ,"2","2") +sql insert into tb1 values (now-50s ,2,2.0,2.0,2,2,2,false,"2","2") +sql insert into tb1 values (now ,3,3.0,3.0,3,3,3,true ,"3","3") +sql insert into tb1 values (now+50s ,3,3.0,3.0,3,3,3,false,"3","3") +sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true ,"4","4") +sql insert into tb1 values (now+150s,4,4.0,4.0,4,4,4,false,"4","4") + + +sql select count(*),f1 from st2 group by f1 having count(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + + + + +sql select count(*),f1 from st2 group by f1 having count(*) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + + +sql select count(*),f1 from st2 group by f1 having count(f2) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 2 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 2 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 2 then + return -1 +endi +if $data31 != 4 then + return -1 +endi + +sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; + +sql select last(f1) from st2 group by f1 having count(f2) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1 then + return -1 +endi +if $data10 != 2 then + return -1 +endi +if $data20 != 3 then + return -1 +endi +if $data30 != 4 then + return -1 +endi + +sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; + +sql_error select top(f1,2) from st2 group by f1 having count(f2) > 0; + +sql_error select top(f1,2) from st2 group by f1 having avg(f1) > 0; + +sql select avg(f1),count(f1) from st2 group by f1 having avg(f1) > 2; +if $rows != 2 then 
+ return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi + + +sql select avg(f1),count(f1) from st2 group by f1 having avg(f1) > 2 and sum(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having avg(f1) > 2 and sum(f1) > 0; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having avg(f1) > 2; +if $rows != 2 then + return -1 +endi +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 6 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 2 and sum(f1) < 6; +if $rows != 1 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi + + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having 1 <= sum(f1) and 5 >= sum(f1); +if $rows != 2 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by tbname having twa(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 2.500000000 then + return -1 +endi +if $data01 != 8 then + return -1 +endi +if $data02 != 20 then + return -1 +endi +if $data04 != tb1 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by f1 having twa(f1) > 0; + +sql select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by tbname having sum(f1) > 0; +if $rows != 1 then + return -1 +endi +if $data00 != 2.500000000 then + return -1 +endi +if $data01 != 8 then + return -1 +endi +if $data02 != 20 then + return -1 +endi +if $data04 != tb1 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from st2 group by f1 having sum(f1) > 0; + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 0; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if 
$data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +###########and issue +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 and sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 or sum(f1) > 1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 or sum(f1) > 4; +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +############or issue +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having sum(f1) > 3 or avg(f1) > 4; +if $rows != 0 then + return -1 +endi + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having (sum(f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql_error select avg(f1),count(f1),sum(f1) from st2 
group by f1 having (sum(*) > 3); + +sql select avg(f1),count(f1),sum(f1) from st2 group by f1 having (sum(st2.f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1) from st2 group by f1 having (sum(st2.f1) > 3); +if $rows != 3 then + return -1 +endi +if $data00 != 2.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 4 then + return -1 +endi +if $data10 != 3.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 6 then + return -1 +endi +if $data20 != 4.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 8 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1),stddev(f1) from st2 group by f1; +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data04 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data14 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data24 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi +if $data34 != 0.000000000 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having (stddev(st2.f1) > 3); +if $rows != 0 then + return -1 +endi + +sql select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having (stddev(st2.f1) < 1); +if $rows != 4 then + return -1 +endi +if $data00 != 1.000000000 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 2 then + return -1 +endi +if $data03 != 0.000000000 then + return -1 +endi +if $data10 != 2.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data12 != 4 then + return -1 +endi +if $data13 != 0.000000000 then + return -1 +endi +if $data20 != 3.000000000 then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data22 != 6 then + return -1 +endi +if $data23 != 0.000000000 then + return -1 +endi +if $data30 != 4.000000000 then + return -1 +endi +if $data31 != 2 then + return -1 +endi +if $data32 != 8 then + return -1 +endi +if $data33 != 0.000000000 then + return -1 +endi + + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having (LEASTSQUARES(f1) < 1); + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having LEASTSQUARES(f1) < 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having LEASTSQUARES(f1,1,1) < 1; + +sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having 
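Taken together, the having.sim cases establish a consistent rule set: HAVING may reference any plain aggregate (count, sum, avg, min, max, first, last, stddev, spread, apercentile) whether or not it appears in the select list, twa is allowed only under group by tbname, and selection or row functions (top, bottom, percentile, last_row, diff, leastsquares) as well as arithmetic or logical expressions between aggregates are rejected. Distilled to one accepted/rejected pair in the Python framework's idiom:

```python
from util.sql import tdSql

tdSql.query("select avg(f1) from st2 group by f1 having sum(f1) > 3")    # plain aggregate: accepted
tdSql.error("select avg(f1) from st2 group by f1 having top(f1,1) > 1")  # selection function: rejected
```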
LEASTSQUARES(f1,1,1) > 2;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from st2 group by f1 having LEASTSQUARES(f1,1,1) > 2;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from st2 group by f1 having sum(f1) > 2;
+
+sql select avg(f1),count(st2.*),sum(f1),stddev(f1) from st2 group by f1 having min(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1) from st2 group by f1 having min(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 3 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 4 then
+  return -1
+endi
+
+sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1) from st2 group by f1 having max(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 3 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 4 then
+  return -1
+endi
+
+sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1) from st2 group by f1 having max(f1) != 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+if $data05 != 1 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 3 then
+  return -1
+endi
+if $data15 != 3 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 4 then
+  return -1
+endi
+if $data25 != 4 then
+  return -1
+endi
+
+sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1) from st2 group by f1 having first(f1) != 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+if $data05 != 1 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 3 then
+  return -1
+endi
+if $data15 != 3 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 4 then
+  return -1
+endi
+if $data25 != 4 then
+  return -1
+endi
+
+
+
+sql select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1) from st2 group by f1 having first(f1) != 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+if $data05 != 1 then
+  return -1
+endi
+if $data06 != 1 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 3 then
+  return -1
+endi
+if $data15 != 3 then
+  return -1
+endi
+if $data16 != 3 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 4 then
+  return -1
+endi
+if $data25 != 4 then
+  return -1
+endi
+if $data26 != 4 then
+  return -1
+endi
+
+
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having top(f1,1);
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having top(f1,1) > 1;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from st2 group by f1 having bottom(f1,1) > 1;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from st2 group by f1 having bottom(f1,1) > 1;
+
+sql_error select avg(f1),count(st2.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from st2 group by f1 having sum(f1) > 1;
+
+sql_error select PERCENTILE(f1) from st2 group by f1 having sum(f1) > 1;
+
+sql_error select PERCENTILE(f1,20) from st2 group by f1 having sum(f1) > 1;
+
+sql select aPERCENTILE(f1,20) from st2 group by f1 having sum(f1) > 1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 50;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 3;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,3) < 3;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+
+sql_error select aPERCENTILE(f1,20) from st2 group by f1 having apercentile(1) > 1;
+
+sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from st2 group by f1 having apercentile(1) > 1;
+
+sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from st2 group by f1 having apercentile(f1,1) > 1;
+
+sql_error select sum(f1) from st2 group by f1 having last_row(f1) > 1;
+
+sql_error select avg(f1) from st2 group by f1 having diff(f1) > 0;
+
+sql_error select avg(f1),diff(f1) from st2 group by f1 having avg(f1) > 0;
+
+sql_error select avg(f1),diff(f1) from st2 group by f1 having spread(f2) > 0;
+
+sql select avg(f1) from st2 group by f1 having spread(f2) > 0;
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1) from st2 group by f1 having spread(f2) = 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f2) from st2 group by f1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) = 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+if $data32 != 0.000000000 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) != 0;
+if $rows != 0 then
+  return -1
+endi
+
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + 1 > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + 1;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) - sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) * sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) / sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) 0 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) + 0 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) - f1 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) - id1 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > id1 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > id1 and sum(f1) > 1;
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) > 2 and sum(f1) > 1;
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) = 0 and sum(f1) > 1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+if $data32 != 0.000000000 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by f1 having spread(f1) = 0 and avg(f1) > 1;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by c1 having spread(f1) = 0 and avg(f1) > 1;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(id1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(f1) > id1;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1),avg(id1) from st2 group by id1 having avg(f1) > id1;
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(f1) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.500000000 then
+  return -1
+endi
+if $data01 != 3.000000000 then
+  return -1
+endi
+if $data02 != 3.000000000 then
+  return -1
+endi
+if $data03 != 3.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 group by id1 having avg(f1) < 2;
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f1 > 0 group by f1 having avg(f1) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+if $data32 != 0.000000000 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f1 > 2 group by f1 having avg(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 2 group by f1 having avg(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f3 > 2 group by f1 having avg(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f1) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(ts) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f7) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f8) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having avg(f9) > 0;
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having count(f9) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having last(f9) > 0;
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having last(f2) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 3 group by f1 having last(f3) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f3) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f4) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f5) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by f1 having last(f6) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+
+sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f2 from st2 where f2 > 1 group by f1 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by f1 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by f1,f2 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by f1,id1 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,st2.f1),f1,f6 from st2 where f2 > 1 group by id1 having last(f6) > 0;
+
+sql select avg(f1),spread(f1,f2,st2.f1) from st2 where f2 > 1 group by id1 having last(f6) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2.000000000 then
+  return -1
+endi
+if $data02 != 2.000000000 then
+  return -1
+endi
+if $data03 != 2.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+
+sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0;
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim
new file mode 100644
index 0000000000..1f29a63b91
--- /dev/null
+++ b/tests/script/general/parser/having_child.sim
@@ -0,0 +1,1891 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 0
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+$db = testdb
+
+sql create database $db
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10))
+
+sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1");
+sql create table tb2 using st2 tags (2,2.0,"2",2.0,2,2,"2");
+sql create table tb3 using st2 tags (3,3.0,"3",3.0,3,3,"3");
+sql create table tb4 using st2 tags (4,4.0,"4",4.0,4,4,"4");
+
+sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true ,"1","1")
+sql insert into tb1 values (now-150s,1,1.0,1.0,1,1,1,false,"1","1")
+sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true ,"2","2")
+sql insert into tb1 values (now-50s ,2,2.0,2.0,2,2,2,false,"2","2")
+sql insert into tb1 values (now     ,3,3.0,3.0,3,3,3,true ,"3","3")
+sql insert into tb1 values (now+50s ,3,3.0,3.0,3,3,3,false,"3","3")
+sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true ,"4","4")
+sql insert into tb1 values (now+150s,4,4.0,4.0,4,4,4,false,"4","4")
+
+
+sql select count(*),f1 from tb1 group by f1 having count(f1) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 2 then
+  return -1
+endi
+if $data01 != 1 then
+  return -1
+endi
+if $data10 != 2 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data20 != 2 then
+  return -1
+endi
+if $data21 != 3 then
+  return -1
+endi
+if $data30 != 2 then
+  return -1
+endi
+if $data31 != 4 then
+  return -1
+endi
+
+
+sql select count(*),f1 from tb1 group by f1 having count(*) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 2 then
+  return -1
+endi
+if $data01 != 1 then
+  return -1
+endi
+if $data10 != 2 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data20 != 2 then
+  return -1
+endi
+if $data21 != 3 then
+  return -1
+endi
+if $data30 != 2 then
+  return -1
+endi
+if $data31 != 4 then
+  return -1
+endi
+
+
+sql select count(*),f1 from tb1 group by f1 having count(f2) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 2 then
+  return -1
+endi
+if $data01 != 1 then
+  return -1
+endi
+if $data10 != 2 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data20 != 2 then
+  return -1
+endi
+if $data21 != 3 then
+  return -1
+endi
+if $data30 != 2 then
+  return -1
+endi
+if $data31 != 4 then
+  return -1
+endi
+
+sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
+
+sql select last(f1) from tb1 group by f1 having count(f2) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1 then
+  return -1
+endi
+if $data10 != 2 then
+  return -1
+endi
+if $data20 != 3 then
+  return -1
+endi
+if $data30 != 4 then
+  return -1
+endi
+
+sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
+
+sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
+
+sql_error select top(f1,2) from tb1 group by f1 having avg(f1) > 0;
+
+sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+
+
+sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 6 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 2 then
+  return -1
+endi
+if $data32 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 2 and sum(f1) < 6;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having 1 <= sum(f1) and 5 >= sum(f1);
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+
+sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having twa(f1) > 0;
+
+sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 8 then
+  return -1
+endi
+if $data03 != 4.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having sum(f1) > 0;
+
+sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data03 != 2.000000000 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 6 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 2 then
+  return -1
+endi
+if $data32 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+
+###########and issue
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 and sum(f1) > 1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 6 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 2 then
+  return -1
+endi
+if $data32 != 8 then
+  return -1
+endi
+
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 6 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 2 then
+  return -1
+endi
+if $data32 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 4;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+
+############or issue
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or avg(f1) > 4;
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(f1) > 3);
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+
+sql_error select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(*) > 3);
+
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3);
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3);
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),stddev(f1) from tb1 group by f1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 6 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 2 then
+  return -1
+endi
+if $data32 != 8 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+if $data34 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) > 3);
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) < 1);
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
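+# (the remaining rows of the stddev(tb1.f1) < 1 result are verified below)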
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 4 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 6 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 2 then
+  return -1
+endi
+if $data32 != 8 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (LEASTSQUARES(f1) < 1);
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having LEASTSQUARES(f1) < 1;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) < 1;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) > 2;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) > 2;
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having sum(f1) > 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 4 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having min(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having min(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 3 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 4 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having max(f1) > 2;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 6 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 3 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 8 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 4 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having max(f1) != 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+if $data05 != 1 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 3 then
+  return -1
+endi
+if $data15 != 3 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 4 then
+  return -1
+endi
+if $data25 != 4 then
+  return -1
+endi
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having first(f1) != 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+if $data05 != 1 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 3 then
+  return -1
+endi
+if $data15 != 3 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 4 then
+  return -1
+endi
+if $data25 != 4 then
+  return -1
+endi
+
+
+
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1) from tb1 group by f1 having first(f1) != 2;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 2 then
+  return -1
+endi
+if $data02 != 2 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data04 != 1 then
+  return -1
+endi
+if $data05 != 1 then
+  return -1
+endi
+if $data06 != 1 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 2 then
+  return -1
+endi
+if $data12 != 6 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data14 != 3 then
+  return -1
+endi
+if $data15 != 3 then
+  return -1
+endi
+if $data16 != 3 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 2 then
+  return -1
+endi
+if $data22 != 8 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data24 != 4 then
+  return -1
+endi
+if $data25 != 4 then
+  return -1
+endi
+if $data26 != 4 then
+  return -1
+endi
+
+
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having top(f1,1);
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having top(f1,1) > 1;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1) from tb1 group by f1 having bottom(f1,1) > 1;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from tb1 group by f1 having bottom(f1,1) > 1;
+
+sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1),last(f1),top(f1,1),bottom(f1,1) from tb1 group by f1 having sum(f1) > 1;
+
+sql_error select PERCENTILE(f1) from tb1 group by f1 having sum(f1) > 1;
+
+sql select PERCENTILE(f1,20) from tb1 group by f1 having sum(f1) = 4;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 50;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 3;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,3) < 3;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+
+sql_error select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(1) > 1;
+
+sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from tb1 group by f1 having apercentile(1) > 1;
+
+sql_error select aPERCENTILE(f1,20),LAST_ROW(f1) from tb1 group by f1 having apercentile(f1,1) > 1;
+
+sql_error select sum(f1) from tb1 group by f1 having last_row(f1) > 1;
+
+sql_error select avg(f1) from tb1 group by f1 having diff(f1) > 0;
+
+sql_error select avg(f1),diff(f1) from tb1 group by f1 having avg(f1) > 0;
+
+sql_error select avg(f1),diff(f1) from tb1 group by f1 having spread(f2) > 0;
+
+sql select avg(f1) from tb1 group by f1 having spread(f2) > 0;
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1) from tb1 group by f1 having spread(f2) = 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f2) from tb1 group by f1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+if $data32 != 0.000000000 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) != 0;
+if $rows != 0 then
+  return -1
+endi
+
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + 1 > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + 1;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) - sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) * sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) / sum(f1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) 0 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) + 0 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) - f1 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) - id1 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > id1 and sum(f1);
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > id1 and sum(f1) > 1;
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > 2 and sum(f1) > 1;
+if $rows != 0 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and sum(f1) > 1;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+if $data32 != 0.000000000 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and avg(f1) > 1;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by c1 having spread(f1) = 0 and avg(f1) > 1;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(id1) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > id1;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1),avg(id1) from tb1 group by id1 having avg(f1) > id1;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > 0;
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) > 0 and avg(f1) = 3;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) < 0 and avg(f1) = 3;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) < 2;
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 0 group by f1 having avg(f1) > 0;
+if $rows != 4 then
+  return -1
+endi
+if $data00 != 1.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 2.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 3.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+if $data30 != 4.000000000 then
+  return -1
+endi
+if $data31 != 0.000000000 then
+  return -1
+endi
+if $data32 != 0.000000000 then
+  return -1
+endi
+if $data33 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 2 group by f1 having avg(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 2 group by f1 having avg(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f3 > 2 group by f1 having avg(f1) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 3.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 4.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f1) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(ts) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f7) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f8) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f9) > 0;
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having count(f9) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f9) > 0;
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f2) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f3) > 0;
+if $rows != 1 then
+  return -1
+endi
+if $data00 != 4.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f3) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f4) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f5) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f6) > 0;
+if $rows != 3 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+if $data20 != 4.000000000 then
+  return -1
+endi
+if $data21 != 0.000000000 then
+  return -1
+endi
+if $data22 != 0.000000000 then
+  return -1
+endi
+if $data23 != 0.000000000 then
+  return -1
+endi
+
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f2 from tb1 where f2 > 1 group by f1 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by f1 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by f1,f2 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by f1,id1 having last(f6) > 0;
+
+sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by id1 having last(f6) > 0;
+
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 and f2 < 4 group by f1 having last(f6) > 0;
+if $rows != 2 then
+  return -1
+endi
+if $data00 != 2.000000000 then
+  return -1
+endi
+if $data01 != 0.000000000 then
+  return -1
+endi
+if $data02 != 0.000000000 then
+  return -1
+endi
+if $data03 != 0.000000000 then
+  return -1
+endi
+if $data10 != 3.000000000 then
+  return -1
+endi
+if $data11 != 0.000000000 then
+  return -1
+endi
+if $data12 != 0.000000000 then
+  return -1
+endi
+if $data13 != 0.000000000 then
+  return -1
+endi
+
+sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0;
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/test-all.sh b/tests/test-all.sh
index 3c8aed7d18..47e5de6aa0 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -80,6 +80,7 @@ function runSimCaseOneByOne {
     fi
   done < $1
 }
+
 function runSimCaseOneByOnefq {
   start=`sed -n "/$1-start/=" jenkins/basic.txt`
@@ -155,6 +156,7 @@ function runPyCaseOneByOne {
     fi
   done < $1
 }
+
 function runPyCaseOneByOnefq() {
   cd $tests_dir/pytest
   if [[ $1 =~ full ]] ; then
@@ -202,13 +204,37 @@ function runPyCaseOneByOnefq() {
   rm -rf ../../sim/case.log
 }
+
+######################
+# main entry
+######################
+
+unameOut="$(uname -s)"
+case "${unameOut}" in
+  Linux*)   OS=Linux;;
+  Darwin*)  OS=Darwin;;
+  CYGWIN*)  OS=Windows;;
+  *)        OS=Unknown;;
+esac
+
+case "${OS}" in
+  Linux*)   TAOSLIB=libtaos.so;;
+  Darwin*)  TAOSLIB=libtaos.dylib;;
+  Windows*) TAOSLIB=taos.dll;;
+  Unknown)  TAOSLIB="UNKNOWN:${unameOut}";;
+esac
+
+echo TAOSLIB is ${TAOSLIB}
+
 totalFailed=0
 totalPyFailed=0
 totalJDBCFailed=0
 totalUnitFailed=0
 totalExampleFailed=0
-corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
+if [ "${OS}" == "Linux" ]; then
+  corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
+fi
+
 if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
   echo "### run TSIM test case ###"
   cd $tests_dir/script
@@ -290,11 +316,11 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != "
   fi
   TOP_DIR=`pwd`
-  TAOSLIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1`
+  TAOSLIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1`
   if [[ "$TAOSLIB_DIR" == *"$IN_TDINTERNAL"* ]]; then
-    LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4,5`
+    LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4,5`
   else
-    LIB_DIR=`find . -name "libtaos.so"|grep -w lib|head -n1|cut -d '/' --fields=2,3,4`
+    LIB_DIR=`find . -name "${TAOSLIB}"|grep -w lib|head -n1|cut -d '/' -f 2,3,4`
   fi
   export LD_LIBRARY_PATH=$TOP_DIR/$LIB_DIR:$LD_LIBRARY_PATH
@@ -490,6 +516,15 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" !=
     echo "asyncdemo pass"
     totalExamplePass=`expr $totalExamplePass + 1`
   fi
+
+  ./demo 127.0.0.1 > /dev/null 2>&1
+  if [ $? != "0" ]; then
+    echo "demo failed"
+    totalExampleFailed=`expr $totalExampleFailed + 1`
+  else
+    echo "demo pass"
+    totalExamplePass=`expr $totalExamplePass + 1`
+  fi
 
   if [ "$totalExamplePass" -gt "0" ]; then
     echo -e "\n${GREEN} ### Total $totalExamplePass examples succeed! ### ${NC}"
@@ -499,7 +534,9 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" !=
     echo -e "\n${RED} ### Total $totalExampleFailed examples failed! ### ${NC}"
   fi
 
-  dohavecore 1
+  if [ "${OS}" == "Linux" ]; then
+    dohavecore 1
+  fi
 fi