diff --git a/docs/zh/27-train-faq/03-docker.md b/docs/zh/05-get-started/01-docker.md
similarity index 99%
rename from docs/zh/27-train-faq/03-docker.md
rename to docs/zh/05-get-started/01-docker.md
index 72b4603dda..9ff67fa604 100644
--- a/docs/zh/27-train-faq/03-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -1,4 +1,5 @@
---
+sidebar_label: Docker
title: 通过 Docker 快速体验 TDengine
---
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
new file mode 100644
index 0000000000..a21066e0cd
--- /dev/null
+++ b/docs/zh/05-get-started/03-package.md
@@ -0,0 +1,240 @@
+---
+sidebar_label: 安装包
+title: 使用安装包安装和卸载
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+:::info
+如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装。
+
+:::
+
+TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。
+
+## 安装
+
+
+
+可以使用 apt-get 工具从官方仓库安装。
+
+**安装包仓库**
+
+```
+wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
+echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
+```
+
+如果希望安装 Beta 版,需要安装如下安装包仓库:
+
+```
+echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
+```
+
+**使用 apt-get 命令安装**
+
+```
+sudo apt-get update
+apt-cache policy tdengine
+sudo apt-get install tdengine
+```
+
+:::tip
+apt-get 方式只适用于 Debian 或 Ubuntu 系统
+:::
+
+
+
+1、从官网下载获得 deb 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.deb;
+2、进入到 TDengine-server-2.4.0.7-Linux-x64.deb 安装包所在目录,执行如下的安装命令:
+
+```
+$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
+(Reading database ... 137504 files and directories currently installed.)
+Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ...
+TDengine is removed successfully!
+Unpacking tdengine (2.4.0.7) over (2.4.0.7) ...
+Setting up tdengine (2.4.0.7) ...
+Start to install TDengine...
+
+System hostname is: ubuntu-1804
+
+Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
+OR leave it blank to build one:
+
+Enter your email address for priority support or enter empty to skip:
+Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
+
+To configure TDengine : edit /etc/taos/taos.cfg
+To start TDengine : sudo systemctl start taosd
+To access TDengine : taos -h ubuntu-1804 to login into TDengine server
+
+
+TDengine is installed successfully!
+```
+
+
+
+
+
+1、从官网下载获得 rpm 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.rpm;
+2、进入到 TDengine-server-2.4.0.7-Linux-x64.rpm 安装包所在目录,执行如下的安装命令:
+
+```
+$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
+Preparing... ################################# [100%]
+Updating / installing...
+ 1:tdengine-2.4.0.7-3 ################################# [100%]
+Start to install TDengine...
+
+System hostname is: centos7
+
+Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
+OR leave it blank to build one:
+
+Enter your email address for priority support or enter empty to skip:
+
+Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service.
+
+To configure TDengine : edit /etc/taos/taos.cfg
+To start TDengine : sudo systemctl start taosd
+To access TDengine : taos -h centos7 to login into TDengine server
+
+
+TDengine is installed successfully!
+```
+
+
+
+
+
+1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.tar.gz;
+2、进入到 TDengine-server-2.4.0.7-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
+
+```
+$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
+TDengine-enterprise-server-2.4.0.7/
+TDengine-enterprise-server-2.4.0.7/driver/
+TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt
+TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7
+TDengine-enterprise-server-2.4.0.7/install.sh
+TDengine-enterprise-server-2.4.0.7/examples/
+...
+
+$ ll
+total 43816
+drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./
+drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../
+drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/
+-rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
+
+$ cd TDengine-enterprise-server-2.4.0.7/
+
+ $ ll
+total 40784
+drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./
+drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../
+drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/
+drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/
+-rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh*
+-rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz
+
+$ sudo ./install.sh
+
+Start to update TDengine...
+Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
+Nginx for TDengine is updated successfully!
+
+To configure TDengine : edit /etc/taos/taos.cfg
+To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml
+To start TDengine : sudo systemctl start taosd
+To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060
+
+TDengine is updated successfully!
+Install taoskeeper as a standalone service
+taoskeeper is installed, enable it by `systemctl enable taoskeeper`
+```
+
+:::info
+install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
+
+:::
+
+
+
+
+:::note
+当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
+
+:::
+
+## 卸载
+
+
+
+
+内容 TBD
+
+
+
+
+卸载命令如下:
+
+```
+$ sudo dpkg -r tdengine
+(Reading database ... 137504 files and directories currently installed.)
+Removing tdengine (2.4.0.7) ...
+TDengine is removed successfully!
+
+```
+
+
+
+
+
+卸载命令如下:
+
+```
+$ sudo rpm -e tdengine
+TDengine is removed successfully!
+```
+
+
+
+
+
+卸载命令如下:
+
+```
+$ rmtaos
+Nginx for TDengine is running, stopping it...
+TDengine is removed successfully!
+
+taosKeeper is removed successfully!
+```
+
+
+
+
+:::info
+
+- TDengine 提供了多种安装包,但最好不要在一个系统上同时使用 tar.gz 安装包和 deb 或 rpm 安装包。否则会相互影响,导致在使用时出现问题。
+
+- 对于 deb 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
+
+ ```
+ $ sudo rm -f /var/lib/dpkg/info/tdengine*
+ ```
+
+然后再重新进行安装就可以了。
+
+- 对于 rpm 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
+
+ ```
+ $ sudo rpm -e --noscripts tdengine
+ ```
+
+然后再重新进行安装就可以了。
+
+:::
\ No newline at end of file
diff --git a/docs/zh/05-get-started/06-first-use.md b/docs/zh/05-get-started/06-first-use.md
new file mode 100644
index 0000000000..927ce0a1bd
--- /dev/null
+++ b/docs/zh/05-get-started/06-first-use.md
@@ -0,0 +1,135 @@
+---
+sidebar_label: 开始使用
+title: 快速体验 TDengine
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import PkgInstall from "./\_pkg_install.mdx";
+import AptGetInstall from "./\_apt_get_install.mdx";
+
+## 启动
+
+安装后,请使用 `systemctl` 命令来启动 TDengine 的服务进程。
+
+```bash
+systemctl start taosd
+```
+
+检查服务是否正常工作:
+
+```bash
+systemctl status taosd
+```
+
+如果服务进程处于活动状态,则 status 指令会显示如下的相关信息:
+
+```
+Active: active (running)
+```
+
+如果后台服务进程处于停止状态,则 status 指令会显示如下的相关信息:
+
+```
+Active: inactive (dead)
+```
+
+如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
+
+systemctl 命令汇总:
+
+- 启动服务进程:`systemctl start taosd`
+
+- 停止服务进程:`systemctl stop taosd`
+
+- 重启服务进程:`systemctl restart taosd`
+
+- 查看服务状态:`systemctl status taosd`
+
+:::info
+
+- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。
+- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
+- 如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。
+
+:::
+
+## TDengine 命令行 (CLI)
+
+为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一个命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可。
+
+```bash
+taos
+```
+
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。 TDengine CLI 的提示符号如下:
+
+```cmd
+taos>
+```
+
+在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
+
+```sql
+create database demo;
+use demo;
+create table t (ts timestamp, speed int);
+insert into t values ('2019-07-15 00:00:00', 10);
+insert into t values ('2019-07-15 01:00:00', 20);
+select * from t;
+ ts | speed |
+========================================
+ 2019-07-15 00:00:00.000 | 10 |
+ 2019-07-15 01:00:00.000 | 20 |
+Query OK, 2 row(s) in set (0.003128s)
+```
+
+除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../reference/taos-shell/)
+
+## 使用 taosBenchmark 体验写入速度
+
+启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark` (曾命名为 `taosdemo`):
+
+```bash
+taosBenchmark
+```
+
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。
+
+这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
+
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
+
+## 使用 TDengine CLI 体验查询速度
+
+使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。
+
+查询超级表下记录总条数:
+
+```sql
+taos> select count(*) from test.meters;
+```
+
+查询 1 亿条记录的平均值、最大值、最小值等:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.meters;
+```
+
+查询 location="California.SanFrancisco" 的记录总条数:
+
+```sql
+taos> select count(*) from test.meters where location="California.SanFrancisco";
+```
+
+查询 groupId=10 的所有记录的平均值、最大值、最小值等:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+```
+
+对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
+
+```sql
+taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+```
diff --git a/docs/zh/10-cluster/02-cluster-mgmt.md b/docs/zh/10-cluster/02-cluster-mgmt.md
deleted file mode 100644
index 5c490516f0..0000000000
--- a/docs/zh/10-cluster/02-cluster-mgmt.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: 数据节点管理
----
-
-上面已经介绍如何从零开始搭建集群。集群组建完成后,可以随时查看集群中当前的数据节点的状态,还可以添加新的数据节点进行扩容,删除数据节点,甚至手动进行数据节点之间的负载均衡操作。
-
-:::note
-
-以下所有执行命令的操作需要先登陆进 TDengine 系统,必要时请使用 root 权限。
-
-:::
-
-## 查看数据节点
-
-启动 TDengine CLI 程序 taos,然后执行:
-
-```sql
-SHOW DNODES;
-```
-
-它将列出集群中所有的 dnode,每个 dnode 的 ID,end_point(fqdn:port),状态(ready,offline 等),vnode 数目,还未使用的 vnode 数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。
-
-输出如下(具体内容仅供参考,取决于实际的集群配置)
-
-```
-taos> show dnodes;
- id | endpoint | vnodes | support_vnodes | status | create_time | note |
-============================================================================================================================================
- 1 | trd01:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
-Query OK, 1 rows affected (0.006684s)
-```
-
-## 查看虚拟节点组
-
-为充分利用多核技术,并提供横向扩展能力,数据需要分片处理。因此 TDengine 会将一个 DB 的数据切分成多份,存放在多个 vnode 里。这些 vnode 可能分布在多个数据节点 dnode 里,这样就实现了水平扩展。一个 vnode 仅仅属于一个 DB,但一个 DB 可以有多个 vnode。vnode 所在的数据节点是 mnode 根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
-
-启动 CLI 程序 taos,然后执行:
-
-```sql
-USE SOME_DATABASE;
-SHOW VGROUPS;
-```
-
-输出如下(具体内容仅供参考,取决于实际的集群配置)
-
-```
-taos> use db;
-Database changed.
-
-taos> show vgroups;
- vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | status | nfiles | file_size | tsma |
-================================================================================================================================================================================================
- 2 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
- 3 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
- 4 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
-Query OK, 8 row(s) in set (0.001154s)
-```
-
-## 添加数据节点
-
-启动 CLI 程序 taos,然后执行:
-
-```sql
-CREATE DNODE "fqdn:port";
-```
-
-将新数据节点的 End Point 添加进集群的 EP 列表。“fqdn:port“需要用双引号引起来,否则出错。一个数据节点对外服务的 fqdn 和 port 可以通过配置文件 taos.cfg 进行配置,缺省是自动获取。【强烈不建议用自动获取方式来配置 FQDN,可能导致生成的数据节点的 End Point 不是所期望的】
-
-然后启动新加入的数据节点的 taosd 进程,再通过 taos 查看数据节点状态:
-
-```
-taos> show dnodes;
- id | endpoint | vnodes | support_vnodes | status | create_time | note |
-============================================================================================================================================
- 1 | localhost:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
- 2 | localhost:7030 | 0 | 1024 | ready | 2022-07-15 16:56:13.670 | |
-Query OK, 2 rows affected (0.007031s)
-```
-
-从中可以看到两个 dnode 状态都为 ready
-
-## 删除数据节点
-
-先停止要删除的数据节点的 taosd 进程,然后启动 CLI 程序 taos,执行:
-
-```sql
-DROP DNODE "fqdn:port";
-```
-
-或者
-
-```sql
-DROP DNODE dnodeId;
-```
-
-通过 “fqdn:port” 或 dnodeID 来指定一个具体的节点都是可以的。其中 fqdn 是被删除的节点的 FQDN,port 是其对外服务器的端口号;dnodeID 可以通过 SHOW DNODES 获得。
-
-:::warning
-
-数据节点一旦被 drop 之后,不能重新加入集群。需要将此节点重新部署(清空数据文件夹)。集群在完成 `drop dnode` 操作之前,会将该 dnode 的数据迁移走。
-请注意 `drop dnode` 和 停止 taosd 进程是两个不同的概念,不要混淆:因为删除 dnode 之前要执行迁移数据的操作,因此被删除的 dnode 必须保持在线状态。待删除操作结束之后,才能停止 taosd 进程。
-一个数据节点被 drop 之后,其他节点都会感知到这个 dnodeID 的删除操作,任何集群中的节点都不会再接收此 dnodeID 的请求。
-dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。
-
-:::
diff --git a/docs/zh/10-cluster/_category_.yml b/docs/zh/10-cluster/_category_.yml
deleted file mode 100644
index 3cee5ce4cd..0000000000
--- a/docs/zh/10-cluster/_category_.yml
+++ /dev/null
@@ -1 +0,0 @@
-label: 集群管理
diff --git a/docs/zh/10-cluster/01-deploy.md b/docs/zh/10-deployment/01-deploy.md
similarity index 64%
rename from docs/zh/10-cluster/01-deploy.md
rename to docs/zh/10-deployment/01-deploy.md
index cd19f90ba1..ed2d5653f5 100644
--- a/docs/zh/10-cluster/01-deploy.md
+++ b/docs/zh/10-deployment/01-deploy.md
@@ -1,5 +1,6 @@
---
-title: 集群部署
+sidebar_label: 手动部署
+title: 集群部署和管理
---
## 准备工作
@@ -72,15 +73,16 @@ serverPort 6030
按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示:
```
+
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
Server is Enterprise trial Edition, ver:3.0.0.0 and will never expire.
taos> show dnodes;
- id | endpoint | vnodes | support_vnodes | status | create_time | note |
+id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
- 1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
+1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
Query OK, 1 rows affected (0.007984s)
taos>
@@ -91,7 +93,7 @@ taos>
上述命令里,可以看到刚启动的数据节点的 End Point 是:h1.taos.com:6030,就是这个新集群的 firstEp。
-### 启动后续数据节点
+### 添加数据节点
将后续的数据节点添加到现有集群,具体有以下几步:
@@ -125,3 +127,74 @@ firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加
两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。无法将两个独立的集群合并成为新的集群。
:::
+
+## 查看数据节点
+
+启动 TDengine CLI 程序 taos,然后执行:
+
+```sql
+SHOW DNODES;
+```
+
+它将列出集群中所有的 dnode,每个 dnode 的 ID,end_point(fqdn:port),状态(ready,offline 等),vnode 数目,还未使用的 vnode 数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。
+
+输出如下(具体内容仅供参考,取决于实际的集群配置)
+
+```
+taos> show dnodes;
+ id | endpoint | vnodes | support_vnodes | status | create_time | note |
+============================================================================================================================================
+ 1 | trd01:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
+Query OK, 1 rows affected (0.006684s)
+```
+
+## 查看虚拟节点组
+
+为充分利用多核技术,并提供横向扩展能力,数据需要分片处理。因此 TDengine 会将一个 DB 的数据切分成多份,存放在多个 vnode 里。这些 vnode 可能分布在多个数据节点 dnode 里,这样就实现了水平扩展。一个 vnode 仅仅属于一个 DB,但一个 DB 可以有多个 vnode。vnode 所在的数据节点是 mnode 根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
+
+启动 CLI 程序 taos,然后执行:
+
+```sql
+USE SOME_DATABASE;
+SHOW VGROUPS;
+```
+
+输出如下(具体内容仅供参考,取决于实际的集群配置)
+
+```
+taos> use db;
+Database changed.
+
+taos> show vgroups;
+ vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | status | nfiles | file_size | tsma |
+================================================================================================================================================================================================
+ 2 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
+ 3 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
+ 4 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
+Query OK, 8 row(s) in set (0.001154s)
+```
+
+## 删除数据节点
+
+先停止要删除的数据节点的 taosd 进程,然后启动 CLI 程序 taos,执行:
+
+```sql
+DROP DNODE "fqdn:port";
+```
+
+或者
+
+```sql
+DROP DNODE dnodeId;
+```
+
+通过 “fqdn:port” 或 dnodeID 来指定一个具体的节点都是可以的。其中 fqdn 是被删除的节点的 FQDN,port 是其对外服务器的端口号;dnodeID 可以通过 SHOW DNODES 获得。
+
+:::warning
+
+数据节点一旦被 drop 之后,不能重新加入集群。需要将此节点重新部署(清空数据文件夹)。集群在完成 `drop dnode` 操作之前,会将该 dnode 的数据迁移走。
+请注意 `drop dnode` 和 停止 taosd 进程是两个不同的概念,不要混淆:因为删除 dnode 之前要执行迁移数据的操作,因此被删除的 dnode 必须保持在线状态。待删除操作结束之后,才能停止 taosd 进程。
+一个数据节点被 drop 之后,其他节点都会感知到这个 dnodeID 的删除操作,任何集群中的节点都不会再接收此 dnodeID 的请求。
+dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。
+
+:::
diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md
new file mode 100644
index 0000000000..d45a3b8030
--- /dev/null
+++ b/docs/zh/10-deployment/03-k8s.md
@@ -0,0 +1,452 @@
+---
+sidebar_label: Kubernetes
+title: 在 Kubernetes 上部署 TDengine 集群
+---
+
+## 配置 ConfigMap
+
+为 TDengine 创建 `taoscfg.yaml`,此文件中的配置将作为环境变量传入 TDengine 镜像,更新此配置将导致所有 TDengine POD 重启。
+
+```yaml
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: taoscfg
+ labels:
+ app: tdengine
+data:
+ CLUSTER: "1"
+ TAOS_KEEP: "3650"
+ TAOS_DEBUG_FLAG: "135"
+```
+
+## 配置服务
+
+创建一个 service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口:
+
+```yaml
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: "taosd"
+ labels:
+ app: "tdengine"
+spec:
+ ports:
+ - name: tcp6030
+ protocol: "TCP"
+ port: 6030
+ - name: tcp6035
+ protocol: "TCP"
+ port: 6035
+ - name: tcp6041
+ protocol: "TCP"
+ port: 6041
+ - name: udp6030
+ protocol: "UDP"
+ port: 6030
+ - name: udp6031
+ protocol: "UDP"
+ port: 6031
+ - name: udp6032
+ protocol: "UDP"
+ port: 6032
+ - name: udp6033
+ protocol: "UDP"
+ port: 6033
+ - name: udp6034
+ protocol: "UDP"
+ port: 6034
+ - name: udp6035
+ protocol: "UDP"
+ port: 6035
+ - name: udp6036
+ protocol: "UDP"
+ port: 6036
+ - name: udp6037
+ protocol: "UDP"
+ port: 6037
+ - name: udp6038
+ protocol: "UDP"
+ port: 6038
+ - name: udp6039
+ protocol: "UDP"
+ port: 6039
+ - name: udp6040
+ protocol: "UDP"
+ port: 6040
+ selector:
+ app: "tdengine"
+```
+
+## 有状态服务 StatefulSet
+
+根据 Kubernetes 对各类部署的说明,我们将使用 StatefulSet 作为 TDengine 的服务类型,创建文件 `tdengine.yaml`:
+
+```yaml
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: "tdengine"
+ labels:
+ app: "tdengine"
+spec:
+ serviceName: "taosd"
+ replicas: 2
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: "tdengine"
+ template:
+ metadata:
+ name: "tdengine"
+ labels:
+ app: "tdengine"
+ spec:
+ containers:
+ - name: "tdengine"
+ image: "zitsen/taosd:develop"
+ imagePullPolicy: "Always"
+ envFrom:
+ - configMapRef:
+ name: taoscfg
+ ports:
+ - name: tcp6030
+ protocol: "TCP"
+ containerPort: 6030
+ - name: tcp6035
+ protocol: "TCP"
+ containerPort: 6035
+ - name: tcp6041
+ protocol: "TCP"
+ containerPort: 6041
+ - name: udp6030
+ protocol: "UDP"
+ containerPort: 6030
+ - name: udp6031
+ protocol: "UDP"
+ containerPort: 6031
+ - name: udp6032
+ protocol: "UDP"
+ containerPort: 6032
+ - name: udp6033
+ protocol: "UDP"
+ containerPort: 6033
+ - name: udp6034
+ protocol: "UDP"
+ containerPort: 6034
+ - name: udp6035
+ protocol: "UDP"
+ containerPort: 6035
+ - name: udp6036
+ protocol: "UDP"
+ containerPort: 6036
+ - name: udp6037
+ protocol: "UDP"
+ containerPort: 6037
+ - name: udp6038
+ protocol: "UDP"
+ containerPort: 6038
+ - name: udp6039
+ protocol: "UDP"
+ containerPort: 6039
+ - name: udp6040
+ protocol: "UDP"
+ containerPort: 6040
+ env:
+ # POD_NAME for FQDN config
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ # SERVICE_NAME and NAMESPACE for fqdn resolve
+ - name: SERVICE_NAME
+ value: "taosd"
+ - name: STS_NAME
+ value: "tdengine"
+ - name: STS_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # TZ for timezone settings, we recommend to always set it.
+ - name: TZ
+ value: "Asia/Shanghai"
+          # TAOS_ prefix will be configured in taos.cfg, strip prefix and camelCase.
+ - name: TAOS_SERVER_PORT
+ value: "6030"
+ # Must set if you want a cluster.
+ - name: TAOS_FIRST_EP
+ value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
+          # TAOS_FQDN should always be set in k8s env.
+ - name: TAOS_FQDN
+ value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
+ volumeMounts:
+ - name: taosdata
+ mountPath: /var/lib/taos
+ readinessProbe:
+ exec:
+ command:
+ - taos
+ - -s
+ - "show mnodes"
+ initialDelaySeconds: 5
+ timeoutSeconds: 5000
+ livenessProbe:
+ tcpSocket:
+ port: 6030
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ volumeClaimTemplates:
+ - metadata:
+ name: taosdata
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "csi-rbd-sc"
+ resources:
+ requests:
+ storage: "10Gi"
+```
+
+## 启动集群
+
+将前述三个文件添加到 Kubernetes 集群中:
+
+```bash
+kubectl apply -f taoscfg.yaml
+kubectl apply -f taosd-service.yaml
+kubectl apply -f tdengine.yaml
+
+```
+
+上面的配置将生成一个两节点的 TDengine 集群,dnode 是自动配置的,可以使用 `show dnodes` 命令查看当前集群的节点:
+
+```bash
+kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
+kubectl exec -i -t tdengine-1 -- taos -s "show dnodes"
+
+```
+
+输出如下:
+
+```
+Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show dnodes
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 17:13:24.181 | |
+ 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 17:14:09.257 | |
+Query OK, 2 row(s) in set (0.000997s)
+
+```
+
+## 集群扩容
+
+TDengine 集群支持自动扩容:
+
+```bash
+kubectl scale statefulsets tdengine --replicas=4
+
+```
+
+上面命令行中参数 `--replicas=4` 表示要将 TDengine 集群扩容到 4 个节点,执行后首先检查 POD 的状态:
+
+```bash
+kubectl get pods -l app=tdengine
+
+```
+
+输出如下:
+
+```
+NAME READY STATUS RESTARTS AGE
+tdengine-0 1/1 Running 0 161m
+tdengine-1 1/1 Running 0 161m
+tdengine-2 1/1 Running 0 32m
+tdengine-3 1/1 Running 0 32m
+
+```
+
+此时 POD 的状态仍然是 Running,TDengine 集群中的 dnode 状态要等 POD 状态为 `ready` 之后才能看到:
+
+```bash
+kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
+
+```
+
+扩容后的四节点 TDengine 集群的 dnode 列表:
+
+```
+Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show dnodes
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
+ 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
+ 3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 14:07:27.078 | |
+ 4 | tdengine-3.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 14:07:48.362 | |
+Query OK, 4 row(s) in set (0.001293s)
+
+```
+
+## 集群缩容
+
+TDengine 的缩容并没有自动化,我们尝试将一个三节点集群缩容到两节点。
+
+首先,确认一个三节点 TDengine 集群正常工作,在 TDengine CLI 中查看 dnode 的状态:
+
+```bash
+taos> show dnodes
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
+ 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
+ 3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:28:49.787 | |
+Query OK, 3 row(s) in set (0.001101s)
+
+```
+
+想要安全的缩容,首先需要将节点从 dnode 列表中移除,也即从集群中移除:
+
+```bash
+kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 'tdengine-2.taosd.default.svc.cluster.local:6030'"
+
+```
+
+通过 `show dnodes` 命令确认移除成功后,移除相应的 POD:
+
+```bash
+kubectl scale statefulsets tdengine --replicas=2
+
+```
+
+最后一个 POD 会被删除,使用 `kubectl get pods -l app=tdengine` 查看集群状态:
+
+```
+NAME READY STATUS RESTARTS AGE
+tdengine-0 1/1 Running 0 3h40m
+tdengine-1 1/1 Running 0 3h40m
+
+```
+
+POD 删除后,需要手动删除 PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。
+
+```bash
+kubectl delete pvc taosdata-tdengine-2
+
+```
+
+此时的集群状态是安全的,需要时还可以再次进行扩容:
+
+```bash
+kubectl scale statefulsets tdengine --replicas=3
+
+
+```
+
+`show dnodes` 输出如下:
+
+```
+taos> show dnodes
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
+ 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
+ 4 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:40:49.177 | |
+
+
+```
+
+## 删除集群
+
+完整移除 TDengine 集群,需要分别清理 statefulset、svc、configmap、pvc。
+
+```bash
+kubectl delete statefulset -l app=tdengine
+kubectl delete svc -l app=tdengine
+kubectl delete pvc -l app=tdengine
+kubectl delete configmap taoscfg
+
+```
+
+## 常见错误
+
+### 错误一
+
+扩容到四节点之后缩容到两节点,删除的 POD 会进入 offline 状态:
+
+```
+Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
+Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.
+
+taos> show dnodes
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
+ 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
+ 3 | tdengine-2.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:27.078 | status msg timeout |
+ 4 | tdengine-3.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 14:07:48.362 | status msg timeout |
+Query OK, 4 row(s) in set (0.001236s)
+
+
+```
+
+但 `drop dnode` 的行为并不会按照预期进行,且下次集群重启后,所有的 dnode 节点将无法启动,dropping 状态也无法退出。
+
+### 错误二
+
+TDengine 集群会持有 replica 参数,如果缩容后的节点数小于这个值,集群将无法使用:
+
+创建一个库使用 replica 参数为 2,插入部分数据:
+
+```bash
+kubectl exec -i -t tdengine-0 -- \
+ taos -s \
+ "create database if not exists test replica 2;
+ use test;
+ create table if not exists t1(ts timestamp, n int);
+ insert into t1 values(now, 1)(now+1s, 2);"
+
+
+```
+
+缩容到单节点:
+
+```bash
+kubectl scale statefulsets tdengine --replicas=1
+
+```
+
+在 taos shell 中的所有数据库操作将无法成功。
+
+```
+taos> show dnodes;
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 2 | 40 | ready | any | 2021-06-01 15:55:52.562 | |
+ 2 | tdengine-1.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 15:56:07.212 | status msg timeout |
+Query OK, 2 row(s) in set (0.000845s)
+
+taos> show dnodes;
+ id | end_point | vnodes | cores | status | role | create_time | offline reason |
+======================================================================================================================================
+ 1 | tdengine-0.taosd.default.sv... | 2 | 40 | ready | any | 2021-06-01 15:55:52.562 | |
+ 2 | tdengine-1.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 15:56:07.212 | status msg timeout |
+Query OK, 2 row(s) in set (0.000837s)
+
+taos> use test;
+Database changed.
+
+taos> insert into t1 values(now, 3);
+
+DB error: Unable to resolve FQDN (0.013874s)
+
+```
diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md
new file mode 100644
index 0000000000..0bbd986b4b
--- /dev/null
+++ b/docs/zh/10-deployment/05-helm.md
@@ -0,0 +1,434 @@
+---
+sidebar_label: Helm
+title: 使用 Helm 部署 TDengine 集群
+---
+
+Helm 是 Kubernetes 的包管理器,上一节使用 Kubernetes 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。
+
+## 安装 Helm
+
+```bash
+curl -fsSL -o get_helm.sh \
+ https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+chmod +x get_helm.sh
+./get_helm.sh
+
+```
+
+Helm 会使用 kubectl 和 kubeconfig 的配置来操作 Kubernetes,可以参考 Rancher 安装 Kubernetes 的配置来进行设置。
+
+## 安装 TDengine Chart
+
+TDengine Chart 尚未发布到 Helm 仓库,当前可以从 GitHub 直接下载:
+
+```bash
+wget https://github.com/taosdata/TDengine-Operator/raw/main/helm/tdengine-0.3.0.tgz
+
+```
+
+获取当前 Kubernetes 的存储类:
+
+```bash
+kubectl get storageclass
+
+```
+
+在 minikube 中默认为 standard。
+
+之后,使用 helm 命令安装:
+
+```bash
+helm install tdengine tdengine-0.3.0.tgz \
+ --set storage.className=
+
+```
+
+在 minikube 环境下,可以设置一个较小的容量避免超出磁盘可用空间:
+
+```bash
+helm install tdengine tdengine-0.3.0.tgz \
+ --set storage.className=standard \
+ --set storage.dataSize=2Gi \
+ --set storage.logSize=10Mi
+
+```
+
+部署成功后,TDengine Chart 将会输出操作 TDengine 的说明:
+
+```bash
+export POD_NAME=$(kubectl get pods --namespace default \
+ -l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=tdengine" \
+ -o jsonpath="{.items[0].metadata.name}")
+kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
+kubectl --namespace default exec -it $POD_NAME -- taos
+
+```
+
+可以创建一个表进行测试:
+
+```bash
+kubectl --namespace default exec $POD_NAME -- \
+ taos -s "create database test;
+ use test;
+ create table t1 (ts timestamp, n int);
+ insert into t1 values(now, 1)(now + 1s, 2);
+ select * from t1;"
+
+```
+
+## 配置 Values
+
+TDengine 支持 `values.yaml` 自定义。
+
+通过 helm show values 可以获取 TDengine Chart 支持的全部 values 列表:
+
+```bash
+helm show values tdengine-0.3.0.tgz
+
+```
+
+你可以将结果保存为 values.yaml,之后可以修改其中的各项参数,如 replica 数量,存储类名称,容量大小,TDengine 配置等,然后使用如下命令安装 TDengine 集群:
+
+```bash
+helm install tdengine tdengine-0.3.0.tgz -f values.yaml
+
+```
+
+全部参数如下:
+
+```yaml
+# Default values for tdengine.
+# This is a YAML-formatted file.
+# Declare variables to be passed into helm templates.
+
+replicaCount: 1
+
+image:
+ prefix: tdengine/tdengine
+ #pullPolicy: Always
+ # Overrides the image tag whose default is the chart appVersion.
+ #tag: "2.4.0.5"
+
+service:
+ # ClusterIP is the default service type, use NodeIP only if you know what you are doing.
+ type: ClusterIP
+ ports:
+ # TCP range required
+ tcp:
+ [
+ 6030,
+ 6031,
+ 6032,
+ 6033,
+ 6034,
+ 6035,
+ 6036,
+ 6037,
+ 6038,
+ 6039,
+ 6040,
+ 6041,
+ 6042,
+ 6043,
+ 6044,
+ 6045,
+ 6060,
+ ]
+ # UDP range 6030-6039
+ udp: [6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039]
+
+arbitrator: true
+
+# Set timezone here, not in taoscfg
+timezone: "Asia/Shanghai"
+
+resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+storage:
+ # Set storageClassName for pvc. K8s use default storage class if not set.
+ #
+ className: ""
+ dataSize: "100Gi"
+ logSize: "10Gi"
+
+nodeSelectors:
+ taosd:
+ # node selectors
+
+clusterDomainSuffix: ""
+# Config settings in taos.cfg file.
+#
+# The helm/k8s support will use environment variables for taos.cfg,
+# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
+# to a camelCase taos config variable `debugFlag`.
+#
+# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
+#
+# Note:
+# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
+# 2. serverPort: should not be set, we'll use the default 6030 in many places.
+# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
+# 4. role: currently role is not supported - every node is able to be mnode and vnode.
+#
+# Btw, keep quotes "" around the value like below, even the value will be number or not.
+taoscfg:
+ # number of replications, for cluster only
+ TAOS_REPLICA: "1"
+
+ # number of management nodes in the system
+ TAOS_NUM_OF_MNODES: "1"
+
+ # number of days per DB file
+ # TAOS_DAYS: "10"
+
+ # number of days to keep DB file, default is 10 years.
+ #TAOS_KEEP: "3650"
+
+ # cache block size (Mbyte)
+ #TAOS_CACHE: "16"
+
+ # number of cache blocks per vnode
+ #TAOS_BLOCKS: "6"
+
+ # minimum rows of records in file block
+ #TAOS_MIN_ROWS: "100"
+
+ # maximum rows of records in file block
+ #TAOS_MAX_ROWS: "4096"
+
+ #
+ # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
+ #TAOS_NUM_OF_THREADS_PER_CORE: "1.0"
+
+ #
+ # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
+ #TAOS_NUM_OF_COMMIT_THREADS: "4"
+
+ #
+ # TAOS_RATIO_OF_QUERY_CORES:
+ # the proportion of total CPU cores available for query processing
+ # 2.0: the query threads will be set to double of the CPU cores.
+ # 1.0: all CPU cores are available for query processing [default].
+ # 0.5: only half of the CPU cores are available for query.
+ # 0.0: only one core available.
+ #TAOS_RATIO_OF_QUERY_CORES: "1.0"
+
+ #
+ # TAOS_KEEP_COLUMN_NAME:
+ # the last_row/first/last aggregator will not change the original column name in the result fields
+ #TAOS_KEEP_COLUMN_NAME: "0"
+
+ # enable/disable backuping vnode directory when removing vnode
+ #TAOS_VNODE_BAK: "1"
+
+ # enable/disable installation / usage report
+ #TAOS_TELEMETRY_REPORTING: "1"
+
+ # enable/disable load balancing
+ #TAOS_BALANCE: "1"
+
+ # max timer control blocks
+ #TAOS_MAX_TMR_CTRL: "512"
+
+ # time interval of system monitor, seconds
+ #TAOS_MONITOR_INTERVAL: "30"
+
+ # number of seconds allowed for a dnode to be offline, for cluster only
+ #TAOS_OFFLINE_THRESHOLD: "8640000"
+
+ # RPC re-try timer, millisecond
+ #TAOS_RPC_TIMER: "1000"
+
+ # RPC maximum time for ack, seconds.
+ #TAOS_RPC_MAX_TIME: "600"
+
+ # time interval of dnode status reporting to mnode, seconds, for cluster only
+ #TAOS_STATUS_INTERVAL: "1"
+
+ # time interval of heart beat from shell to dnode, seconds
+ #TAOS_SHELL_ACTIVITY_TIMER: "3"
+
+ # minimum sliding window time, milli-second
+ #TAOS_MIN_SLIDING_TIME: "10"
+
+ # minimum time window, milli-second
+ #TAOS_MIN_INTERVAL_TIME: "10"
+
+ # maximum delay before launching a stream computation, milli-second
+ #TAOS_MAX_STREAM_COMP_DELAY: "20000"
+
+ # maximum delay before launching a stream computation for the first time, milli-second
+ #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"
+
+ # retry delay when a stream computation fails, milli-second
+ #TAOS_RETRY_STREAM_COMP_DELAY: "10"
+
+ # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
+ #TAOS_STREAM_COMP_DELAY_RATIO: "0.1"
+
+ # max number of vgroups per db, 0 means configured automatically
+ #TAOS_MAX_VGROUPS_PER_DB: "0"
+
+ # max number of tables per vnode
+ #TAOS_MAX_TABLES_PER_VNODE: "1000000"
+
+ # the number of acknowledgments required for successful data writing
+ #TAOS_QUORUM: "1"
+
+ # enable/disable compression
+ #TAOS_COMP: "2"
+
+ # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
+ #TAOS_WAL_LEVEL: "1"
+
+ # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
+ #TAOS_FSYNC: "3000"
+
+ # the compressed rpc message, option:
+ # -1 (no compression)
+ # 0 (all message compressed),
+ # > 0 (rpc message body which larger than this value will be compressed)
+ #TAOS_COMPRESS_MSG_SIZE: "-1"
+
+ # max length of an SQL
+ #TAOS_MAX_SQL_LENGTH: "1048576"
+
+ # the maximum number of records allowed for super table time sorting
+ #TAOS_MAX_NUM_OF_ORDERED_RES: "100000"
+
+ # max number of connections allowed in dnode
+ #TAOS_MAX_SHELL_CONNS: "5000"
+
+ # max number of connections allowed in client
+ #TAOS_MAX_CONNECTIONS: "5000"
+
+ # stop writing logs when the disk size of the log folder is less than this value
+ #TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
+
+ # stop writing temporary files when the disk size of the tmp folder is less than this value
+ #TAOS_MINIMAL_TMP_DIR_G_B: "0.1"
+
+ # if disk free space is less than this value, taosd service exit directly within startup process
+ #TAOS_MINIMAL_DATA_DIR_G_B: "0.1"
+
+ # One mnode is equal to the number of vnode consumed
+ #TAOS_MNODE_EQUAL_VNODE_NUM: "4"
+
+  # enable/disable http service
+ #TAOS_HTTP: "1"
+
+ # enable/disable system monitor
+ #TAOS_MONITOR: "1"
+
+ # enable/disable recording the SQL statements via restful interface
+ #TAOS_HTTP_ENABLE_RECORD_SQL: "0"
+
+ # number of threads used to process http requests
+ #TAOS_HTTP_MAX_THREADS: "2"
+
+ # maximum number of rows returned by the restful interface
+ #TAOS_RESTFUL_ROW_LIMIT: "10240"
+
+ # The following parameter is used to limit the maximum number of lines in log files.
+ # max number of lines per log filters
+ # numOfLogLines 10000000
+
+ # enable/disable async log
+ #TAOS_ASYNC_LOG: "0"
+
+ #
+ # time of keeping log files, days
+ #TAOS_LOG_KEEP_DAYS: "0"
+
+ # The following parameters are used for debug purpose only.
+ # debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
+ # 131: output warning and error
+ # 135: output debug, warning and error
+ # 143: output trace, debug, warning and error to log
+ # 199: output debug, warning and error to both screen and file
+ # 207: output trace, debug, warning and error to both screen and file
+ #
+  # debug flag for all log type, take effect when non-zero value
+ #TAOS_DEBUG_FLAG: "143"
+
+ # enable/disable recording the SQL in taos client
+ #TAOS_ENABLE_RECORD_SQL: "0"
+
+ # generate core file when service crash
+ #TAOS_ENABLE_CORE_FILE: "1"
+
+ # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
+ #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"
+
+ # enable/disable stream (continuous query)
+ #TAOS_STREAM: "1"
+
+ # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
+ #TAOS_RETRIEVE_BLOCKING_MODEL: "0"
+
+ # the maximum allowed query buffer size in MB during query processing for each data node
+ # -1 no limit (default)
+ # 0 no query allowed, queries are disabled
+ #TAOS_QUERY_BUFFER_SIZE: "-1"
+```
+
+## 扩容
+
+关于扩容可参考上一节的说明,有一些额外的操作需要从 helm 的部署中获取。
+
+首先,从部署中获取 StatefulSet 的名称。
+
+```bash
+export STS_NAME=$(kubectl get statefulset \
+ -l "app.kubernetes.io/name=tdengine" \
+ -o jsonpath="{.items[0].metadata.name}")
+
+```
+
+扩容操作极其简单,增加 replica 即可。以下命令将 TDengine 扩充到三节点:
+
+```bash
+kubectl scale --replicas 3 statefulset/$STS_NAME
+
+```
+
+使用命令 `show dnodes` 和 `show mnodes` 检查是否扩容成功。
+
+## 缩容
+
+:::warning
+缩容操作并没有完整测试,可能造成数据风险,请谨慎使用。
+
+:::
+
+获取需要缩容的 dnode 列表,并手动 Drop。
+
+```bash
+kubectl --namespace default exec $POD_NAME -- \
+ cat /var/lib/taos/dnode/dnodeEps.json \
+ | jq '.dnodeInfos[1:] |map(.dnodeFqdn + ":" + (.dnodePort|tostring)) | .[]' -r
+kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes"
+kubectl --namespace default exec $POD_NAME -- taos -s 'drop dnode ""'
+
+```
+
+## 删除集群
+
+Helm 管理下,清理操作也变得简单:
+
+```bash
+helm uninstall tdengine
+
+```
+
+但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。
diff --git a/docs/zh/10-deployment/_category_.yml b/docs/zh/10-deployment/_category_.yml
new file mode 100644
index 0000000000..38363bd571
--- /dev/null
+++ b/docs/zh/10-deployment/_category_.yml
@@ -0,0 +1 @@
+label: 部署集群
diff --git a/docs/zh/10-cluster/index.md b/docs/zh/10-deployment/index.md
similarity index 82%
rename from docs/zh/10-cluster/index.md
rename to docs/zh/10-deployment/index.md
index ef2a7253c9..96ac7b176d 100644
--- a/docs/zh/10-cluster/index.md
+++ b/docs/zh/10-deployment/index.md
@@ -1,10 +1,10 @@
---
-title: 集群管理
+title: 部署集群
---
TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证供系统的高可用。TDengine的集群功能完全开源。
-本章节主要介绍集群的部署、维护,以及如何实现高可用和负载均衡。
+本章节主要介绍如何在主机上人工部署集群,以及如何使用 Kubernetes 和 Helm 部署集群。
```mdx-code-block
import DocCardList from '@theme/DocCardList';
diff --git a/docs/zh/13-operation/01-pkg-install.md b/docs/zh/13-operation/01-pkg-install.md
index 92b04a42ec..36852eba71 100644
--- a/docs/zh/13-operation/01-pkg-install.md
+++ b/docs/zh/13-operation/01-pkg-install.md
@@ -6,199 +6,11 @@ description: 安装、卸载、启动、停止和升级
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。
+本节将介绍一些关于安装和卸载更深层次的内容,以及升级的注意事项。
-## 安装
+## 安装和卸载
-
-
-
-1、从官网下载获得 deb 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.deb;
-2、进入到 TDengine-server-2.4.0.7-Linux-x64.deb 安装包所在目录,执行如下的安装命令:
-
-```
-$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
-(Reading database ... 137504 files and directories currently installed.)
-Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ...
-TDengine is removed successfully!
-Unpacking tdengine (2.4.0.7) over (2.4.0.7) ...
-Setting up tdengine (2.4.0.7) ...
-Start to install TDengine...
-
-System hostname is: ubuntu-1804
-
-Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
-OR leave it blank to build one:
-
-Enter your email address for priority support or enter empty to skip:
-Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
-
-To configure TDengine : edit /etc/taos/taos.cfg
-To start TDengine : sudo systemctl start taosd
-To access TDengine : taos -h ubuntu-1804 to login into TDengine server
-
-
-TDengine is installed successfully!
-```
-
-
-
-
-
-1、从官网下载获得 rpm 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.rpm;
-2、进入到 TDengine-server-2.4.0.7-Linux-x64.rpm 安装包所在目录,执行如下的安装命令:
-
-```
-$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
-Preparing... ################################# [100%]
-Updating / installing...
- 1:tdengine-2.4.0.7-3 ################################# [100%]
-Start to install TDengine...
-
-System hostname is: centos7
-
-Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
-OR leave it blank to build one:
-
-Enter your email address for priority support or enter empty to skip:
-
-Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service.
-
-To configure TDengine : edit /etc/taos/taos.cfg
-To start TDengine : sudo systemctl start taosd
-To access TDengine : taos -h centos7 to login into TDengine server
-
-
-TDengine is installed successfully!
-```
-
-
-
-
-
-1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.tar.gz;
-2、进入到 TDengine-server-2.4.0.7-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
-
-```
-$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
-TDengine-enterprise-server-2.4.0.7/
-TDengine-enterprise-server-2.4.0.7/driver/
-TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt
-TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7
-TDengine-enterprise-server-2.4.0.7/install.sh
-TDengine-enterprise-server-2.4.0.7/examples/
-...
-
-$ ll
-total 43816
-drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./
-drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../
-drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/
--rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
-
-$ cd TDengine-enterprise-server-2.4.0.7/
-
- $ ll
-total 40784
-drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./
-drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../
-drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/
-drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/
--rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh*
--rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz
-
-$ sudo ./install.sh
-
-Start to update TDengine...
-Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
-Nginx for TDengine is updated successfully!
-
-To configure TDengine : edit /etc/taos/taos.cfg
-To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml
-To start TDengine : sudo systemctl start taosd
-To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060
-
-TDengine is updated successfully!
-Install taoskeeper as a standalone service
-taoskeeper is installed, enable it by `systemctl enable taoskeeper`
-```
-
-:::info
-install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
-
-:::
-
-
-
-
-:::note
-当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
-
-:::
-
-## 卸载
-
-
-
-
-卸载命令如下:
-
-```
-$ sudo dpkg -r tdengine
-(Reading database ... 137504 files and directories currently installed.)
-Removing tdengine (2.4.0.7) ...
-TDengine is removed successfully!
-
-```
-
-
-
-
-
-卸载命令如下:
-
-```
-$ sudo rpm -e tdengine
-TDengine is removed successfully!
-```
-
-
-
-
-
-卸载命令如下:
-
-```
-$ rmtaos
-Nginx for TDengine is running, stopping it...
-TDengine is removed successfully!
-
-taosKeeper is removed successfully!
-```
-
-
-
-
-:::info
-- TDengine 提供了多种安装包,但最好不要在一个系统上同时使用 tar.gz 安装包和 deb 或 rpm 安装包。否则会相互影响,导致在使用时出现问题。
-
-- 对于 deb 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
-
- ```
- $ sudo rm -f /var/lib/dpkg/info/tdengine*
- ```
-
-然后再重新进行安装就可以了。
-
-- 对于 rpm 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
-
- ```
- $ sudo rpm -e --noscripts tdengine
- ```
-
-然后再重新进行安装就可以了。
-
-:::
+关于安装和卸载,请参考 [安装和卸载](/get-started/package)
## 安装目录说明
@@ -234,34 +46,6 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
如果是更新安装,当缺省配置文件( /etc/taos/taos.cfg )存在时,仍然使用已有的配置文件,安装包中携带的配置文件修改为 taos.cfg.orig 保存在 /usr/local/taos/cfg/ 目录,可以作为设置配置参数的参考样例;如果不存在配置文件,就使用安装包中自带的配置文件。
-## 启动和停止
-
-TDengine 使用 Linux 系统的 systemd/systemctl/service 来管理系统的启动和、停止、重启操作。TDengine 的服务进程是 taosd,默认情况下 TDengine 在系统启动后将自动启动。DBA 可以通过 systemd/systemctl/service 手动操作停止、启动、重新启动服务。
-
-以 systemctl 为例,命令如下:
-
-- 启动服务进程:`systemctl start taosd`
-
-- 停止服务进程:`systemctl stop taosd`
-
-- 重启服务进程:`systemctl restart taosd`
-
-- 查看服务状态:`systemctl status taosd`
-
-注意:TDengine 在 2.4 版本之后包含一个独立组件 taosAdapter 需要使用 systemctl 命令管理 taosAdapter 服务的启动和停止。
-
-如果服务进程处于活动状态,则 status 指令会显示如下的相关信息:
-
- ```
- Active: active (running)
- ```
-
-如果后台服务进程处于停止状态,则 status 指令会显示如下的相关信息:
-
- ```
- Active: inactive (dead)
- ```
-
## 升级
升级分为两个层面:升级安装包 和 升级运行中的实例。
diff --git a/docs/zh/10-cluster/03-high-availability.md b/docs/zh/21-tdinternal/03-high-availability.md
similarity index 100%
rename from docs/zh/10-cluster/03-high-availability.md
rename to docs/zh/21-tdinternal/03-high-availability.md
diff --git a/docs/zh/10-cluster/04-load-balance.md b/docs/zh/21-tdinternal/05-load-balance.md
similarity index 100%
rename from docs/zh/10-cluster/04-load-balance.md
rename to docs/zh/21-tdinternal/05-load-balance.md
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 2f4c80f025..3e27bd9268 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -2803,6 +2803,7 @@ typedef struct {
int32_t tSerializeSTableIndexRsp(void* buf, int32_t bufLen, const STableIndexRsp* pRsp);
int32_t tDeserializeSTableIndexRsp(void* buf, int32_t bufLen, STableIndexRsp* pRsp);
+void tFreeSerializeSTableIndexRsp(STableIndexRsp* pRsp);
void tFreeSTableIndexInfo(void* pInfo);
diff --git a/include/common/tname.h b/include/common/tname.h
index 77965947ad..89c7764404 100644
--- a/include/common/tname.h
+++ b/include/common/tname.h
@@ -50,6 +50,7 @@ bool tNameIsValid(const SName* name);
const char* tNameGetTableName(const SName* name);
int32_t tNameGetDbName(const SName* name, char* dst);
+const char* tNameGetDbNameP(const SName* name);
int32_t tNameGetFullDbName(const SName* name, char* dst);
diff --git a/include/common/ttime.h b/include/common/ttime.h
index 2f4129f979..8f4f4f15d8 100644
--- a/include/common/ttime.h
+++ b/include/common/ttime.h
@@ -73,7 +73,6 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
}
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
-int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index 58739b4af7..cc040594b1 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -193,7 +193,7 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code);
void destroySendMsgInfo(SMsgSendInfo* pMsgBody);
-int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo,
+int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo,
bool persistHandle, void* ctx);
/**
@@ -205,7 +205,7 @@ int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTra
* @param pInfo
* @return
*/
-int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo);
+int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo);
int32_t queryBuildUseDbOutput(SUseDbOutput* pOut, SUseDbRsp* usedbRsp);
@@ -260,6 +260,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define REQUEST_TOTAL_EXEC_TIMES 2
+#define IS_SYS_DBNAME(_dbname) (((*(_dbname) == 'i') && (0 == strcmp(_dbname, TSDB_INFORMATION_SCHEMA_DB))) || ((*(_dbname) == 'p') && (0 == strcmp(_dbname, TSDB_PERFORMANCE_SCHEMA_DB))))
+
#define qFatal(...) \
do { \
if (qDebugFlag & DEBUG_FATAL) { \
diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h
index 21a1515d8f..6e4a8d62d0 100644
--- a/include/libs/stream/tstreamUpdate.h
+++ b/include/libs/stream/tstreamUpdate.h
@@ -33,11 +33,12 @@ typedef struct SUpdateInfo {
int64_t watermark;
TSKEY minTS;
SScalableBf* pCloseWinSBF;
+ SHashObj* pMap;
} SUpdateInfo;
SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
-bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts);
+bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
void updateInfoDestroy(SUpdateInfo *pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h
index 2ae1f7b854..50f9959177 100644
--- a/include/libs/transport/trpc.h
+++ b/include/libs/transport/trpc.h
@@ -124,18 +124,16 @@ void *rpcReallocCont(void *ptr, int32_t contLen);
// Because taosd supports multi-process mode
// These functions should not be used on the server side
// Please use tmsg functions, which are defined in tmsgcb.h
-void rpcSendRequest(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid);
-void rpcSendResponse(const SRpcMsg *pMsg);
-void rpcRegisterBrokenLinkArg(SRpcMsg *msg);
-void rpcReleaseHandle(void *handle, int8_t type); // just release conn to rpc instance, no close sock
+int rpcSendRequest(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid);
+int rpcSendResponse(const SRpcMsg *pMsg);
+int rpcRegisterBrokenLinkArg(SRpcMsg *msg);
+int rpcReleaseHandle(void *handle, int8_t type); // just release conn to rpc instance, no close sock
// These functions will not be called in the child process
-void rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet);
-void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
-int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
-void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
-void rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
-void* rpcAllocHandle();
+int rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
+int rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
+int rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
+void *rpcAllocHandle();
#ifdef __cplusplus
}
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index 7e2d09dd63..ad89e51a24 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -33,16 +33,16 @@ extern "C" {
#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", DEBUG_TRACE, wDebugFlag, __VA_ARGS__); }}
// clang-format on
-#define WAL_PROTO_VER 0
-#define WAL_NOSUFFIX_LEN 20
-#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
-#define WAL_LOG_SUFFIX "log"
-#define WAL_INDEX_SUFFIX "idx"
-#define WAL_REFRESH_MS 1000
-#define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalCkHead))
-#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
-#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
-#define WAL_MAGIC 0xFAFBFCFDULL
+#define WAL_PROTO_VER 0
+#define WAL_NOSUFFIX_LEN 20
+#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
+#define WAL_LOG_SUFFIX "log"
+#define WAL_INDEX_SUFFIX "idx"
+#define WAL_REFRESH_MS 1000
+#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
+#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
+#define WAL_MAGIC 0xFAFBFCFDULL
+#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
typedef enum {
TAOS_WAL_WRITE = 1,
@@ -64,6 +64,7 @@ typedef struct {
int64_t verInSnapshotting;
int64_t snapshotVer;
int64_t commitVer;
+ int64_t appliedVer;
int64_t lastVer;
} SWalVer;
@@ -172,6 +173,9 @@ int32_t walRollback(SWal *, int64_t ver);
int32_t walBeginSnapshot(SWal *, int64_t ver);
int32_t walEndSnapshot(SWal *);
int32_t walRestoreFromSnapshot(SWal *, int64_t ver);
+// for tq
+int32_t walApplyVer(SWal *, int64_t ver);
+
// int32_t walDataCorrupted(SWal*);
// read
@@ -186,7 +190,6 @@ void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);
int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
-
typedef struct {
int64_t refId;
int64_t ver;
@@ -206,6 +209,7 @@ int64_t walGetFirstVer(SWal *);
int64_t walGetSnapshotVer(SWal *);
int64_t walGetLastVer(SWal *);
int64_t walGetCommittedVer(SWal *);
+int64_t walGetAppliedVer(SWal *);
#ifdef __cplusplus
}
diff --git a/include/os/osSysinfo.h b/include/os/osSysinfo.h
index 4ec2e2884e..6eed31b5e9 100644
--- a/include/os/osSysinfo.h
+++ b/include/os/osSysinfo.h
@@ -33,7 +33,7 @@ typedef struct {
SDiskSize size;
} SDiskSpace;
-bool taosCheckSystemIsSmallEnd();
+bool taosCheckSystemIsLittleEnd();
void taosGetSystemInfo();
int32_t taosGetEmail(char *email, int32_t maxLen);
int32_t taosGetOsReleaseName(char *releaseName, int32_t maxLen);
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 3b31398063..688fe5fe85 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -421,7 +421,7 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_STABLES_HASH_SIZE 100
#define TSDB_DEFAULT_CTABLES_HASH_SIZE 20000
-#define TSDB_MAX_WAL_SIZE (1024 * 1024 * 3)
+#define TSDB_MAX_MSG_SIZE (1024 * 1024 * 10)
#define TSDB_ARB_DUMMY_TIME 4765104000000 // 2121-01-01 00:00:00.000, :P
diff --git a/include/util/tutil.h b/include/util/tutil.h
index 2e96c5b88e..6a1a40f14c 100644
--- a/include/util/tutil.h
+++ b/include/util/tutil.h
@@ -45,7 +45,6 @@ void taosIp2String(uint32_t ip, char *str);
void taosIpPort2String(uint32_t ip, uint16_t port, char *str);
void *tmemmem(const char *haystack, int hlen, const char *needle, int nlen);
-char *strDupUnquo(const char *src);
static FORCE_INLINE void taosEncryptPass(uint8_t *inBuf, size_t inLen, char *target) {
T_MD5_CTX context;
diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h
index c2b5d1de6f..a7adaef966 100644
--- a/source/client/inc/clientStmt.h
+++ b/source/client/inc/clientStmt.h
@@ -72,7 +72,6 @@ typedef struct SStmtBindInfo {
typedef struct SStmtExecInfo {
int32_t affectedRows;
SRequestObj* pRequest;
- SHashObj* pVgHash;
SHashObj* pBlockHash;
bool autoCreateTbl;
} SStmtExecInfo;
@@ -88,6 +87,7 @@ typedef struct SStmtSQLInfo {
SArray* nodeList;
SStmtQueryResInfo queryRes;
bool autoCreateTbl;
+ SHashObj* pVgHash;
} SStmtSQLInfo;
typedef struct STscStmt {
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 53a1bd2235..5b96729503 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -88,7 +88,7 @@ void closeTransporter(SAppInstInfo *pAppInfo) {
static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
if (NEED_REDIRECT_ERROR(code)) {
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
- msgType == TDMT_SCH_MERGE_FETCH) {
+ msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_QUERY_HEARTBEAT || msgType == TDMT_SCH_DROP_TASK) {
return false;
}
return true;
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index bd9c8d794d..7093e14982 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -590,6 +590,11 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
return code;
}
+void freeVgList(void *list) {
+ SArray* pList = *(SArray**)list;
+ taosArrayDestroy(pList);
+}
+
int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList) {
SArray* pDbVgList = NULL;
SArray* pQnodeList = NULL;
@@ -641,7 +646,7 @@ int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray*
_return:
- taosArrayDestroy(pDbVgList);
+ taosArrayDestroyEx(pDbVgList, freeVgList);
taosArrayDestroy(pQnodeList);
return code;
diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index bf00965c7a..70edb32f2d 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -6,11 +6,16 @@
#include "clientStmt.h"
static int32_t stmtCreateRequest(STscStmt* pStmt) {
+ int32_t code = 0;
+
if (pStmt->exec.pRequest == NULL) {
- return buildRequest(pStmt->taos->id, pStmt->sql.sqlStr, pStmt->sql.sqlLen, NULL, false, &pStmt->exec.pRequest);
- } else {
- return TSDB_CODE_SUCCESS;
+ code = buildRequest(pStmt->taos->id, pStmt->sql.sqlStr, pStmt->sql.sqlLen, NULL, false, &pStmt->exec.pRequest);
+ if (TSDB_CODE_SUCCESS == code) {
+ pStmt->exec.pRequest->syncQuery = true;
+ }
}
+
+ return code;
}
int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
@@ -155,7 +160,7 @@ int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags,
int32_t stmtUpdateExecInfo(TAOS_STMT* stmt, SHashObj* pVgHash, SHashObj* pBlockHash, bool autoCreateTbl) {
STscStmt* pStmt = (STscStmt*)stmt;
- pStmt->exec.pVgHash = pVgHash;
+ pStmt->sql.pVgHash = pVgHash;
pStmt->exec.pBlockHash = pBlockHash;
pStmt->exec.autoCreateTbl = autoCreateTbl;
@@ -177,7 +182,7 @@ int32_t stmtUpdateInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, char
int32_t stmtGetExecInfo(TAOS_STMT* stmt, SHashObj** pVgHash, SHashObj** pBlockHash) {
STscStmt* pStmt = (STscStmt*)stmt;
- *pVgHash = pStmt->exec.pVgHash;
+ *pVgHash = pStmt->sql.pVgHash;
*pBlockHash = pStmt->exec.pBlockHash;
return TSDB_CODE_SUCCESS;
@@ -227,7 +232,7 @@ int32_t stmtParseSql(STscStmt* pStmt) {
};
STMT_ERR_RET(stmtCreateRequest(pStmt));
-
+
STMT_ERR_RET(parseSql(pStmt->exec.pRequest, false, &pStmt->sql.pQuery, &stmtCb));
pStmt->bInfo.needParse = false;
@@ -308,6 +313,8 @@ int32_t stmtCleanSQLInfo(STscStmt* pStmt) {
taosMemoryFree(pStmt->sql.sqlStr);
qDestroyQuery(pStmt->sql.pQuery);
taosArrayDestroy(pStmt->sql.nodeList);
+ taosHashCleanup(pStmt->sql.pVgHash);
+ pStmt->sql.pVgHash = NULL;
void* pIter = taosHashIterate(pStmt->sql.pTableCache, NULL);
while (pIter) {
@@ -340,7 +347,7 @@ int32_t stmtRebuildDataBlock(STscStmt* pStmt, STableDataBlocks* pDataBlock, STab
STMT_ERR_RET(catalogGetTableHashVgroup(pStmt->pCatalog, &conn, &pStmt->bInfo.sname, &vgInfo));
STMT_ERR_RET(
- taosHashPut(pStmt->exec.pVgHash, (const char*)&vgInfo.vgId, sizeof(vgInfo.vgId), (char*)&vgInfo, sizeof(vgInfo)));
+ taosHashPut(pStmt->sql.pVgHash, (const char*)&vgInfo.vgId, sizeof(vgInfo.vgId), (char*)&vgInfo, sizeof(vgInfo)));
STMT_ERR_RET(qRebuildStmtDataBlock(newBlock, pDataBlock, uid, vgInfo.vgId));
@@ -680,6 +687,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
if (pStmt->sql.pQuery->haveResultSet) {
setResSchemaInfo(&pStmt->exec.pRequest->body.resInfo, pStmt->sql.pQuery->pResSchema,
pStmt->sql.pQuery->numOfResCols);
+ taosMemoryFreeClear(pStmt->sql.pQuery->pResSchema);
setResPrecision(&pStmt->exec.pRequest->body.resInfo, pStmt->sql.pQuery->precision);
}
@@ -804,7 +812,7 @@ int stmtExec(TAOS_STMT* stmt) {
if (STMT_TYPE_QUERY == pStmt->sql.type) {
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, NULL);
} else {
- STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->exec.pVgHash, pStmt->exec.pBlockHash));
+ STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->sql.pVgHash, pStmt->exec.pBlockHash));
launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, (autoCreateTbl ? (void**)&pRsp : NULL));
}
@@ -847,9 +855,10 @@ _return:
int stmtClose(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
- STMT_RET(stmtCleanSQLInfo(pStmt));
-
+ stmtCleanSQLInfo(pStmt);
taosMemoryFree(stmt);
+
+ return TSDB_CODE_SUCCESS;
}
const char* stmtErrstr(TAOS_STMT* stmt) {
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 7bb8c459cd..6dc78d6400 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -40,26 +40,26 @@ bool tsPrintAuth = false;
// multi process
int32_t tsMultiProcess = 0;
-int32_t tsMnodeShmSize = TSDB_MAX_WAL_SIZE * 2 + 1024;
-int32_t tsVnodeShmSize = TSDB_MAX_WAL_SIZE * 10 + 1024;
-int32_t tsQnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
-int32_t tsSnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
-int32_t tsBnodeShmSize = TSDB_MAX_WAL_SIZE * 4 + 1024;
+int32_t tsMnodeShmSize = TSDB_MAX_MSG_SIZE * 2 + 1024;
+int32_t tsVnodeShmSize = TSDB_MAX_MSG_SIZE * 10 + 1024;
+int32_t tsQnodeShmSize = TSDB_MAX_MSG_SIZE * 4 + 1024;
+int32_t tsSnodeShmSize = TSDB_MAX_MSG_SIZE * 4 + 1024;
+int32_t tsBnodeShmSize = TSDB_MAX_MSG_SIZE * 4 + 1024;
int32_t tsNumOfShmThreads = 1;
// queue & threads
int32_t tsNumOfRpcThreads = 1;
int32_t tsNumOfCommitThreads = 2;
int32_t tsNumOfTaskQueueThreads = 1;
-int32_t tsNumOfMnodeQueryThreads = 2;
+int32_t tsNumOfMnodeQueryThreads = 4;
int32_t tsNumOfMnodeFetchThreads = 1;
int32_t tsNumOfMnodeReadThreads = 1;
-int32_t tsNumOfVnodeQueryThreads = 2;
+int32_t tsNumOfVnodeQueryThreads = 4;
int32_t tsNumOfVnodeStreamThreads = 2;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeWriteThreads = 2;
int32_t tsNumOfVnodeSyncThreads = 2;
-int32_t tsNumOfQnodeQueryThreads = 2;
+int32_t tsNumOfQnodeQueryThreads = 4;
int32_t tsNumOfQnodeFetchThreads = 4;
int32_t tsNumOfSnodeSharedThreads = 2;
int32_t tsNumOfSnodeUniqueThreads = 2;
@@ -387,11 +387,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_WAL_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "vnodeShmSize", tsVnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "mumOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;
@@ -402,16 +402,16 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1;
- tsNumOfMnodeQueryThreads = tsNumOfCores / 8;
- tsNumOfMnodeQueryThreads = TRANGE(tsNumOfMnodeQueryThreads, 1, 4);
+ tsNumOfMnodeQueryThreads = tsNumOfCores * 2;
+ tsNumOfMnodeQueryThreads = TRANGE(tsNumOfMnodeQueryThreads, 4, 8);
if (cfgAddInt32(pCfg, "numOfMnodeQueryThreads", tsNumOfMnodeQueryThreads, 1, 1024, 0) != 0) return -1;
tsNumOfMnodeReadThreads = tsNumOfCores / 8;
tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
if (cfgAddInt32(pCfg, "numOfMnodeReadThreads", tsNumOfMnodeReadThreads, 1, 1024, 0) != 0) return -1;
- tsNumOfVnodeQueryThreads = tsNumOfCores / 4;
- tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 2);
+ tsNumOfVnodeQueryThreads = tsNumOfCores * 2;
+ tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;
tsNumOfVnodeStreamThreads = tsNumOfCores / 4;
@@ -430,8 +430,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 1);
if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;
- tsNumOfQnodeQueryThreads = tsNumOfCores / 2;
- tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 1);
+ tsNumOfQnodeQueryThreads = tsNumOfCores * 2;
+ tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
@@ -447,8 +447,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 1, 1024, 0) != 0) return -1;
tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
- tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_WAL_SIZE * 10L, TSDB_MAX_WAL_SIZE * 10000L);
- if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_WAL_SIZE * 10L, INT64_MAX, 0) != 0)
+ tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, TSDB_MAX_MSG_SIZE * 10000L);
+ if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, 0) != 0)
return -1;
if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1;
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index b79c412914..8611278550 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -2933,6 +2933,13 @@ int32_t tSerializeSTableIndexRsp(void *buf, int32_t bufLen, const STableIndexRsp
return tlen;
}
+void tFreeSerializeSTableIndexRsp(STableIndexRsp *pRsp) {
+ if (pRsp->pIndex != NULL) {
+ taosArrayDestroy(pRsp->pIndex);
+ pRsp->pIndex = NULL;
+ }
+}
+
int32_t tDeserializeSTableIndexInfo(SDecoder *pDecoder, STableIndexInfo *pInfo) {
if (tDecodeI8(pDecoder, &pInfo->intervalUnit) < 0) return -1;
if (tDecodeI8(pDecoder, &pInfo->slidingUnit) < 0) return -1;
@@ -5342,6 +5349,7 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
if (tEncodeCStr(pEncoder, pReq->tbName) < 0) return -1;
if (tEncodeI8(pEncoder, pReq->action) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->colId) < 0) return -1;
switch (pReq->action) {
case TSDB_ALTER_TABLE_ADD_COLUMN:
if (tEncodeCStr(pEncoder, pReq->colName) < 0) return -1;
@@ -5392,6 +5400,7 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->colId) < 0) return -1;
switch (pReq->action) {
case TSDB_ALTER_TABLE_ADD_COLUMN:
if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
diff --git a/source/common/src/tname.c b/source/common/src/tname.c
index 7183153824..c5bebf3630 100644
--- a/source/common/src/tname.c
+++ b/source/common/src/tname.c
@@ -190,6 +190,11 @@ int32_t tNameGetDbName(const SName* name, char* dst) {
return 0;
}
+const char* tNameGetDbNameP(const SName* name) {
+ return &name->dbname[0];
+}
+
+
int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
snprintf(dst, TSDB_DB_FNAME_LEN, "%d.%s", name->acctId, name->dbname);
diff --git a/source/common/src/trow.c b/source/common/src/trow.c
index f64250bce6..df5bf64acf 100644
--- a/source/common/src/trow.c
+++ b/source/common/src/trow.c
@@ -585,7 +585,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
ASSERT(pTColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
} else {
if (IS_VAR_DATA_TYPE(pTColumn->type)) {
- if (pColVal) {
+ if (pColVal && !pColVal->isNone && !pColVal->isNull) {
varDataLen += (pColVal->value.nData + sizeof(VarDataLenT));
if (maxVarDataLen < (pColVal->value.nData + sizeof(VarDataLenT))) {
maxVarDataLen = pColVal->value.nData + sizeof(VarDataLenT);
diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c
index 944ee6a731..b1e4321053 100644
--- a/source/common/src/ttime.c
+++ b/source/common/src/ttime.c
@@ -700,6 +700,8 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
numOfMonth *= 12;
}
+ int64_t fraction = t % TSDB_TICK_PER_SECOND(precision);
+
struct tm tm;
time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
taosLocalTime(&tt, &tm);
@@ -707,35 +709,9 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
- return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision));
+ return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision) + fraction);
}
-int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision) {
- if (duration == 0) {
- return t;
- }
-
- if (unit != 'n' && unit != 'y') {
- return t - duration;
- }
-
- // The following code handles the y/n time duration
- int64_t numOfMonth = duration;
- if (unit == 'y') {
- numOfMonth *= 12;
- }
-
- struct tm tm;
- time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
- taosLocalTime(&tt, &tm);
- int32_t mon = tm.tm_year * 12 + tm.tm_mon - (int32_t)numOfMonth;
- tm.tm_year = mon / 12;
- tm.tm_mon = mon % 12;
-
- return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision));
-}
-
-
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
if (ekey < skey) {
int64_t tmp = ekey;
@@ -844,11 +820,14 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
} else {
// try to move current window to the left-hande-side, due to the offset effect.
int64_t end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
- ASSERT(end >= t);
- end = taosTimeAdd(end, -pInterval->sliding, pInterval->slidingUnit, precision);
- if (end >= t) {
- start = taosTimeAdd(start, -pInterval->sliding, pInterval->slidingUnit, precision);
+
+ int64_t newEnd = end;
+ while(newEnd >= t) {
+ end = newEnd;
+ newEnd = taosTimeAdd(newEnd, -pInterval->sliding, pInterval->slidingUnit, precision);
}
+
+ start = taosTimeAdd(end, -pInterval->interval, pInterval->intervalUnit, precision) + 1;
}
}
diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c
index 00c32e1990..013cc05c65 100644
--- a/source/dnode/mgmt/exe/dmMain.c
+++ b/source/dnode/mgmt/exe/dmMain.c
@@ -158,8 +158,8 @@ static void taosCleanupArgs() {
}
int main(int argc, char const *argv[]) {
- if (!taosCheckSystemIsSmallEnd()) {
- printf("failed to start since on non-small-end machines\n");
+ if (!taosCheckSystemIsLittleEnd()) {
+ printf("failed to start since on non-little-end machines\n");
return -1;
}
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 315b7c3afc..f7387f7e88 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -265,6 +265,10 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
int64_t consumerId = be64toh(pReq->consumerId);
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
+ if (pConsumer == NULL) {
+ terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
+ return -1;
+ }
atomic_store_32(&pConsumer->hbStatus, 0);
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index bff3f19e99..e82e5e0870 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -1153,6 +1153,7 @@ _OVER:
mError("failed to get table index %s since %s", indexReq.tbFName, terrstr());
}
+ tFreeSerializeSTableIndexRsp(&rsp);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index 367bdc68c3..810dcb9049 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -44,6 +44,10 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
SSdbRaw *pRaw = pMsg->pCont;
+ // delete msg handle
+ SRpcMsg rpcMsg = {0};
+ syncGetAndDelRespRpc(pMnode->syncMgmt.sync, cbMeta.seqNum, &rpcMsg.info);
+
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
pMgmt->errCode = cbMeta.code;
mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c
index 302f0c5fbb..00659939e9 100644
--- a/source/dnode/mnode/sdb/src/sdbFile.c
+++ b/source/dnode/mnode/sdb/src/sdbFile.c
@@ -231,7 +231,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
mDebug("start to read sdb file:%s", file);
- SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100);
+ SSdbRaw *pRaw = taosMemoryMalloc(TSDB_MAX_MSG_SIZE + 100);
if (pRaw == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("failed read sdb file since %s", terrstr());
@@ -556,8 +556,9 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter, int64_t *index, int64_t *ter
if (term != NULL) *term = commitTerm;
if (config != NULL) *config = commitConfig;
- mDebug("sdbiter:%p, is created to read snapshot, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
- pIter, commitIndex, commitTerm, commitConfig, pIter->name);
+ mDebug("sdbiter:%p, is created to read snapshot, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64
+ " file:%s",
+ pIter, commitIndex, commitTerm, commitConfig, pIter->name);
return 0;
}
@@ -669,4 +670,4 @@ int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
pIter->total += writelen;
mDebug("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
return 0;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 97eff4804d..f2b791def6 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -233,7 +233,6 @@ struct SVnodeCfg {
};
typedef struct {
- TSKEY lastKey;
uint64_t uid;
uint64_t groupId;
} STableKeyInfo;
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 07bee22a1f..abac77dc01 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -108,6 +108,10 @@ typedef struct {
// exec
STqExecHandle execHandle;
+
+ // prevent drop
+ int64_t ntbUid;
+ SArray* colIdList; // SArray
} STqHandle;
struct STQ {
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index fd0f97a638..d785376925 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -142,6 +142,7 @@ void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
+int32_t tqCheckColModifiable(STQ* pTq, int32_t colId);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen);
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index f6862621f9..b2679ee245 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -208,6 +208,26 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) {
return 0;
}
+int32_t tqCheckColModifiable(STQ* pTq, int32_t colId) {
+ void* pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pTq->handles, pIter);
+ if (pIter == NULL) break;
+ STqHandle* pExec = (STqHandle*)pIter;
+ if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ int32_t sz = taosArrayGetSize(pExec->colIdList);
+ for (int32_t i = 0; i < sz; i++) {
+ int32_t forbidColId = *(int32_t*)taosArrayGet(pExec->colIdList, i);
+ if (forbidColId == colId) {
+ taosHashCancelIterate(pTq->handles, pIter);
+ return -1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t subType) {
pRsp->reqOffset = pReq->reqOffset;
@@ -506,7 +526,8 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
.initTqReader = true,
.version = ver,
};
- pHandle->execHandle.execCol.task[i] = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols);
+ pHandle->execHandle.execCol.task[i] =
+ qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols);
ASSERT(pHandle->execHandle.execCol.task[i]);
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.execCol.task[i], &scanner);
@@ -679,9 +700,9 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
//
SStreamTaskRunReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
- if (pTask) {
- streamProcessRunReq(pTask);
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ if (ppTask) {
+ streamProcessRunReq(*ppTask);
return 0;
} else {
return -1;
@@ -696,14 +717,14 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) {
SDecoder decoder;
tDecoderInit(&decoder, msgBody, msgLen);
tDecodeStreamDispatchReq(&decoder, &req);
- int32_t taskId = req.taskId;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
- if (pTask) {
+ int32_t taskId = req.taskId;
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ if (ppTask) {
SRpcMsg rsp = {
.info = pMsg->info,
.code = 0,
};
- streamProcessDispatchReq(pTask, &req, &rsp);
+ streamProcessDispatchReq(*ppTask, &req, &rsp);
return 0;
} else {
return -1;
@@ -713,9 +734,9 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverReq* pReq = pMsg->pCont;
int32_t taskId = pReq->taskId;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
- if (pTask) {
- streamProcessRecoverReq(pTask, pReq, pMsg);
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ if (ppTask) {
+ streamProcessRecoverReq(*ppTask, pReq, pMsg);
return 0;
} else {
return -1;
@@ -725,9 +746,9 @@ int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t taskId = pRsp->taskId;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
- if (pTask) {
- streamProcessDispatchRsp(pTask, pRsp);
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ if (ppTask) {
+ streamProcessDispatchRsp(*ppTask, pRsp);
return 0;
} else {
return -1;
@@ -737,9 +758,9 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskRecoverRsp* pRsp = pMsg->pCont;
int32_t taskId = pRsp->taskId;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
- if (pTask) {
- streamProcessRecoverRsp(pTask, pRsp);
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ if (ppTask) {
+ streamProcessRecoverRsp(*ppTask, pRsp);
return 0;
} else {
return -1;
@@ -749,8 +770,9 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) {
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &pReq->taskId, sizeof(int32_t));
- if (pTask) {
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &pReq->taskId, sizeof(int32_t));
+ if (ppTask) {
+ SStreamTask* pTask = *ppTask;
taosHashRemove(pTq->pStreamTasks, &pReq->taskId, sizeof(int32_t));
atomic_store_8(&pTask->taskStatus, TASK_STATUS__DROPPING);
}
@@ -780,16 +802,17 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
SDecoder decoder;
tDecoderInit(&decoder, msgBody, msgLen);
tDecodeStreamRetrieveReq(&decoder, &req);
- int32_t taskId = req.dstTaskId;
- SStreamTask* pTask = *(SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
- if (atomic_load_8(&pTask->taskStatus) != TASK_STATUS__NORMAL) {
- return 0;
+ int32_t taskId = req.dstTaskId;
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
+ if (ppTask) {
+ SRpcMsg rsp = {
+ .info = pMsg->info,
+ .code = 0,
+ };
+ streamProcessRetrieveReq(*ppTask, &req, &rsp);
+ } else {
+ return -1;
}
- SRpcMsg rsp = {
- .info = pMsg->info,
- .code = 0,
- };
- streamProcessRetrieveReq(pTask, &req, &rsp);
return 0;
}
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index c929c84203..4c0d416ad1 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -237,6 +237,8 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
+ walApplyVer(pTq->pVnode->pWal, ver);
+
if (msgType == TDMT_VND_SUBMIT) {
if (taosHashGetSize(pTq->pStreamTasks) == 0) return 0;
@@ -253,4 +255,3 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
return 0;
}
-
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 96839d1767..5dc1915bff 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -2576,10 +2576,10 @@ void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader) {
int32_t sversion = TSDBROW_SVERSION(pRow);
if (pReader->pSchema == NULL) {
- pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, sversion);
+ metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
} else if (pReader->pSchema->version != sversion) {
taosMemoryFreeClear(pReader->pSchema);
- pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, uid, sversion);
+ metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pReader->pSchema);
}
}
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 0e443ee3b8..1c3e2f0514 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -270,7 +270,7 @@ int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) {
break;
}
- STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, uid = id};
+ STableKeyInfo info = {uid = id};
taosArrayPush(list, &info);
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 8e59d97286..e6d116dfef 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -878,6 +878,8 @@ _exit:
tdProcessRSmaSubmit(pVnode->pSma, pReq, STREAM_INPUT__DATA_SUBMIT);
}
+ vDebug("successful submit in vg %d version %ld", pVnode->config.vgId, version);
+
return 0;
}
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 9003de97d7..bf3bc1f0f4 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -460,8 +460,6 @@ typedef struct SCtgOperation {
#define CTG_FLAG_MAKE_STB(_isStb) (((_isStb) == 1) ? CTG_FLAG_STB : ((_isStb) == 0 ? CTG_FLAG_NOT_STB : CTG_FLAG_UNKNOWN_STB))
#define CTG_FLAG_MATCH_STB(_flag, tbType) (CTG_FLAG_IS_UNKNOWN_STB(_flag) || (CTG_FLAG_IS_STB(_flag) && (tbType) == TSDB_SUPER_TABLE) || (CTG_FLAG_IS_NOT_STB(_flag) && (tbType) != TSDB_SUPER_TABLE))
-#define CTG_IS_SYS_DBNAME(_dbname) (((*(_dbname) == 'i') && (0 == strcmp(_dbname, TSDB_INFORMATION_SCHEMA_DB))) || ((*(_dbname) == 'p') && (0 == strcmp(_dbname, TSDB_PERFORMANCE_SCHEMA_DB))))
-
#define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema))
#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index 1b7f53ae67..59f11898fa 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -865,7 +865,7 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray*
tNameFromString(&name, pTb->tbFName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
- if (CTG_IS_SYS_DBNAME(name.dbname)) {
+ if (IS_SYS_DBNAME(name.dbname)) {
continue;
}
@@ -936,7 +936,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
- if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
+ if (IS_SYS_DBNAME(pTableName->dbname)) {
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
@@ -947,7 +947,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
CTG_API_ENTER();
- if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
+ if (IS_SYS_DBNAME(pTableName->dbname)) {
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 499ce77276..06e8216e87 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -132,7 +132,7 @@ void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache) {
int32_t ctgAcquireDBCacheImpl(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache, bool acquire) {
char *p = strchr(dbFName, '.');
- if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+ if (p && IS_SYS_DBNAME(p + 1)) {
dbFName = p + 1;
}
@@ -694,7 +694,7 @@ int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId)
}
char *p = strchr(dbFName, '.');
- if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+ if (p && IS_SYS_DBNAME(p + 1)) {
dbFName = p + 1;
}
@@ -727,7 +727,7 @@ int32_t ctgDropDbVgroupEnqueue(SCatalog* pCtg, const char *dbFName, bool syncOp)
}
char *p = strchr(dbFName, '.');
- if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+ if (p && IS_SYS_DBNAME(p + 1)) {
dbFName = p + 1;
}
@@ -823,7 +823,7 @@ int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId
}
char *p = strchr(dbFName, '.');
- if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+ if (p && IS_SYS_DBNAME(p + 1)) {
dbFName = p + 1;
}
@@ -859,7 +859,7 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
}
char *p = strchr(output->dbFName, '.');
- if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+ if (p && IS_SYS_DBNAME(p + 1)) {
memmove(output->dbFName, p + 1, strlen(p + 1));
}
@@ -2123,7 +2123,7 @@ int32_t ctgStartUpdateThread() {
int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) {
- if (CTG_IS_SYS_DBNAME(ctx->pName->dbname)) {
+ if (IS_SYS_DBNAME(ctx->pName->dbname)) {
CTG_FLAG_SET_SYS_DB(ctx->flag);
}
@@ -2177,7 +2177,7 @@ _return:
}
int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVgroupInfo **pVgroup) {
- if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
+ if (IS_SYS_DBNAME(pTableName->dbname)) {
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}
diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c
index 1e375471f9..cc5dde9298 100644
--- a/source/libs/catalog/src/ctgRemote.c
+++ b/source/libs/catalog/src/ctgRemote.c
@@ -375,6 +375,8 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -408,6 +410,8 @@ int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -447,6 +451,8 @@ int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildU
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, input->db));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -485,6 +491,8 @@ int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)dbFName));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -522,6 +530,8 @@ int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)indexName));
+
+ rpcFreeCont(rpcRsp.pCont);
return TSDB_CODE_SUCCESS;
}
@@ -563,6 +573,8 @@ int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *n
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)tbFName));
+
+ rpcFreeCont(rpcRsp.pCont);
return TSDB_CODE_SUCCESS;
}
@@ -602,6 +614,8 @@ int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const ch
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)funcName));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -639,6 +653,8 @@ int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)user));
+
+ rpcFreeCont(rpcRsp.pCont);
return TSDB_CODE_SUCCESS;
}
@@ -683,6 +699,8 @@ int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo *pConn, char
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, tbFName));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -740,6 +758,8 @@ int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SNa
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, tbFName));
+ rpcFreeCont(rpcRsp.pCont);
+
return TSDB_CODE_SUCCESS;
}
@@ -784,6 +804,8 @@ int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
rpcSendRecv(pConn->pTrans, &vgroupInfo->epSet, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)tbFName));
+
+ rpcFreeCont(rpcRsp.pCont);
return TSDB_CODE_SUCCESS;
}
@@ -824,6 +846,8 @@ int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, (char*)tbFName));
+
+ rpcFreeCont(rpcRsp.pCont);
return TSDB_CODE_SUCCESS;
}
@@ -858,6 +882,8 @@ int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **ou
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL));
+
+ rpcFreeCont(rpcRsp.pCont);
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 3fa419e220..2e4bccfdd3 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -401,8 +401,6 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
- EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTagScanNode->pScanCols->length);
- EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
if (pTagScanNode->pScanPseudoCols) {
EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index d7c283c70d..2cc4058b3b 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -744,8 +744,8 @@ typedef struct SSortOperatorInfo {
int64_t startTs; // sort start time
uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
-
- SNode* pCondition;
+ SLimitInfo limitInfo;
+ SNode* pCondition;
} SSortOperatorInfo;
typedef struct STagFilterOperatorInfo {
@@ -785,7 +785,7 @@ int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr);
void cleanupExprSupp(SExprSupp* pSup);
int32_t initAggInfo(SExprSupp *pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
const char* pkey);
-void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows);
+void initResultSizeInfo(SResultInfo * pResultInfo, int32_t numOfRows);
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf);
int32_t handleLimitOffset(SOperatorInfo *pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
@@ -797,7 +797,7 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWin
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList);
-void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win);
+STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index c46485a332..56a5e253d8 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -50,7 +50,7 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
STableListInfo* pTableList = &pTaskInfo->tableqinfoList;
- initResultSizeInfo(pOperator, 1024);
+ initResultSizeInfo(&pOperator->resultInfo, 1024);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
pInfo->pUidList = taosArrayInit(4, sizeof(int64_t));
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 978bef1607..18bb8a57f4 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -13,7 +13,6 @@
* along with this program. If not, see .
*/
-#include "ttime.h"
#include "function.h"
#include "functionMgt.h"
#include "index.h"
@@ -21,6 +20,7 @@
#include "tdatablock.h"
#include "thash.h"
#include "tmsg.h"
+#include "ttime.h"
#include "executil.h"
#include "executorimpl.h"
@@ -72,7 +72,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
assert(pGroupResInfo != NULL);
- for(int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
+ for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i);
taosMemoryFree(pRes);
}
@@ -266,17 +266,24 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
}
int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified) {
+ int32_t code = TSDB_CODE_SUCCESS;
SMetaReader mr = {0};
+
metaReaderInit(&mr, metaHandle, 0);
- metaGetTableEntryByUid(&mr, info->uid);
+ code = metaGetTableEntryByUid(&mr, info->uid);
+ if (TSDB_CODE_SUCCESS != code) {
+ metaReaderClear(&mr);
+
+ return terrno;
+ }
SNode* pTagCondTmp = nodesCloneNode(pTagCond);
nodesRewriteExprPostOrder(&pTagCondTmp, doTranslateTagExpr, &mr);
metaReaderClear(&mr);
- SNode* pNew = NULL;
- int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew);
+ SNode* pNew = NULL;
+ code = scalarCalculateConstants(pTagCondTmp, &pNew);
if (TSDB_CODE_SUCCESS != code) {
terrno = code;
nodesDestroyNode(pTagCondTmp);
@@ -295,7 +302,8 @@ int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool*
return TSDB_CODE_SUCCESS;
}
-int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo) {
+int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
+ STableListInfo* pListInfo) {
int32_t code = TSDB_CODE_SUCCESS;
pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
@@ -317,14 +325,14 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
-// code = TSDB_CODE_INDEX_REBUILDING;
+ // code = TSDB_CODE_INDEX_REBUILDING;
code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
} else {
qDebug("success to get tableIds, size:%d, suid:%" PRIu64, (int)taosArrayGetSize(res), tableUid);
}
for (int i = 0; i < taosArrayGetSize(res); i++) {
- STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
+ STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
taosArrayPush(pListInfo->pTableList, &info);
}
taosArrayDestroy(res);
@@ -338,7 +346,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
return code;
}
} else { // Create one table group.
- STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0};
+ STableKeyInfo info = {.uid = tableUid, .groupId = 0};
taosArrayPush(pListInfo->pTableList, &info);
}
@@ -610,8 +618,7 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
for (int32_t i = 0; i < numOfOutput; ++i) {
const char* pName = pCtx[i].pExpr->pExpr->_function.functionName;
- if ((strcmp(pName, "_select_value") == 0) ||
- (strcmp(pName, "_group_key") == 0)) {
+ if ((strcmp(pName, "_select_value") == 0) || (strcmp(pName, "_group_key") == 0)) {
pValCtx[num++] = &pCtx[i];
} else if (fmIsSelectFunc(pCtx[i].functionId)) {
p = &pCtx[i];
@@ -747,11 +754,11 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
SColumn c = {0};
- c.slotId = pColNode->slotId;
- c.colId = pColNode->colId;
- c.type = pColNode->node.resType.type;
- c.bytes = pColNode->node.resType.bytes;
- c.scale = pColNode->node.resType.scale;
+ c.slotId = pColNode->slotId;
+ c.colId = pColNode->colId;
+ c.type = pColNode->node.resType.type;
+ c.bytes = pColNode->node.resType.bytes;
+ c.scale = pColNode->node.resType.scale;
c.precision = pColNode->node.resType.precision;
return c;
}
@@ -768,10 +775,10 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
// pCond->twindow = pTableScanNode->scanRange;
// TODO: get it from stable scan node
pCond->twindows = pTableScanNode->scanRange;
- pCond->suid = pTableScanNode->scan.suid;
- pCond->type = BLOCK_LOAD_OFFSET_ORDER;
+ pCond->suid = pTableScanNode->scan.suid;
+ pCond->type = BLOCK_LOAD_OFFSET_ORDER;
pCond->startVersion = -1;
- pCond->endVersion = -1;
+ pCond->endVersion = -1;
// pCond->type = pTableScanNode->scanFlag;
int32_t j = 0;
@@ -824,10 +831,10 @@ int32_t convertFillType(int32_t mode) {
static void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery) {
if (ascQuery) {
- getAlignQueryTimeWindow(pInterval, pInterval->precision, ts, w);
+ *w = getAlignQueryTimeWindow(pInterval, pInterval->precision, ts);
} else {
// the start position of the first time window in the endpoint that spreads beyond the queried last timestamp
- getAlignQueryTimeWindow(pInterval, pInterval->precision, ts, w);
+ *w = getAlignQueryTimeWindow(pInterval, pInterval->precision, ts);
int64_t key = w->skey;
while (key < ts) { // moving towards end
@@ -850,11 +857,11 @@ static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) {
}
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order) {
- int32_t factor = (order == TSDB_ORDER_ASC)? -1:1;
+ int32_t factor = (order == TSDB_ORDER_ASC) ? -1 : 1;
STimeWindow win = *pWindow;
STimeWindow save = win;
- while(win.skey <= ts && win.ekey >= ts) {
+ while (win.skey <= ts && win.ekey >= ts) {
save = win;
win.skey = taosTimeAdd(win.skey, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
win.ekey = taosTimeAdd(win.ekey, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
@@ -894,7 +901,6 @@ bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
pLimitInfo->slimit.offset != -1);
}
-
static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
@@ -903,7 +909,7 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimit
SLimit slimit = {.limit = getLimit(pSLimit), .offset = getOffset(pSLimit)};
pLimitInfo->limit = limit;
- pLimitInfo->slimit= slimit;
+ pLimitInfo->slimit = slimit;
pLimitInfo->remainOffset = limit.offset;
pLimitInfo->remainGroupOffset = slimit.offset;
-}
\ No newline at end of file
+}
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 9491c675c1..ff7d934c4b 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -191,7 +191,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
SMetaReader mr = {0};
metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {
- int64_t* id = (int64_t*)taosArrayGet(tableIdList, i);
+ uint64_t* id = (uint64_t*)taosArrayGet(tableIdList, i);
int32_t code = metaGetTableEntryByUid(&mr, *id);
if (code != TSDB_CODE_SUCCESS) {
@@ -206,7 +206,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
if (pScanInfo->pTagCond != NULL) {
bool qualified = false;
- STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid, .lastKey = 0};
+ STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid};
code = isTableOk(&info, pScanInfo->pTagCond, pScanInfo->readHandle.meta, &qualified);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to filter new table, uid:0x%" PRIx64 ", %s", info.uid, idstr);
@@ -218,9 +218,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
}
}
- /*pScanInfo->pStreamScanOp->pTaskInfo->tableqinfoList.*/
// handle multiple partition
-
taosArrayPush(qa, id);
}
@@ -244,6 +242,19 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));
code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // add to qTaskInfo
+ // todo refactor STableList
+ for(int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
+ uint64_t* uid = taosArrayGet(qa, i);
+
+ STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
+ taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
+ }
+
taosArrayDestroy(qa);
} else { // remove the table id in current list
qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 7ff9e90c75..dda567cdd5 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -834,18 +834,20 @@ bool isTaskKilled(SExecTaskInfo* pTaskInfo) {
void setTaskKilled(SExecTaskInfo* pTaskInfo) { pTaskInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; }
/////////////////////////////////////////////////////////////////////////////////////////////
-// todo refactor : return window
-void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win) {
- win->skey = taosTimeTruncate(key, pInterval, precision);
+STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key) {
+ STimeWindow win = {0};
+ win.skey = taosTimeTruncate(key, pInterval, precision);
/*
* if the realSkey > INT64_MAX - pInterval->interval, the query duration between
* realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges.
*/
- win->ekey = taosTimeAdd(win->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
- if (win->ekey < win->skey) {
- win->ekey = INT64_MAX;
+ win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+ if (win.ekey < win.skey) {
+ win.ekey = INT64_MAX;
}
+
+ return win;
}
#if 0
@@ -3349,7 +3351,11 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// filter shall be applied after apply functions and limit/offset on the result
doFilter(pProjectInfo->pFilterNode, pInfo->pRes);
- if (status == PROJECT_RETRIEVE_CONTINUE) {
+ if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
+ break;
+ }
+
+ if (status == PROJECT_RETRIEVE_CONTINUE || pInfo->pRes->info.rows == 0) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
break;
@@ -3603,13 +3609,13 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
return TSDB_CODE_SUCCESS;
}
-void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
+void initResultSizeInfo(SResultInfo * pResultInfo, int32_t numOfRows) {
ASSERT(numOfRows != 0);
- pOperator->resultInfo.capacity = numOfRows;
- pOperator->resultInfo.threshold = numOfRows * 0.75;
+ pResultInfo->capacity = numOfRows;
+ pResultInfo->threshold = numOfRows * 0.75;
- if (pOperator->resultInfo.threshold == 0) {
- pOperator->resultInfo.threshold = numOfRows;
+ if (pResultInfo->threshold == 0) {
+ pResultInfo->threshold = numOfRows;
}
}
@@ -3672,7 +3678,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
int32_t numOfRows = 1024;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, numOfRows);
+ initResultSizeInfo(&pOperator->resultInfo, numOfRows);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -3827,7 +3833,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
if (numOfRows * pResBlock->info.rowSize > TWOMB) {
numOfRows = TWOMB / pResBlock->info.rowSize;
}
- initResultSizeInfo(pOperator, numOfRows);
+ initResultSizeInfo(&pOperator->resultInfo, numOfRows);
initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResBlock);
@@ -3955,7 +3961,7 @@ static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
doFilter(pIndefInfo->pCondition, pInfo->pRes);
size_t rows = pInfo->pRes->info.rows;
- if (rows >= 0) {
+ if (rows > 0 || pOperator->status == OP_EXEC_DONE) {
break;
}
}
@@ -4005,7 +4011,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
numOfRows = TWOMB / pResBlock->info.rowSize;
}
- initResultSizeInfo(pOperator, numOfRows);
+ initResultSizeInfo(&pOperator->resultInfo, numOfRows);
initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResBlock);
@@ -4044,8 +4050,7 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
STimeWindow win, int32_t capacity, const char* id, SInterval* pInterval, int32_t fillType) {
SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pValNode);
- STimeWindow w = TSWINDOW_INITIALIZER;
- getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey, &w);
+ STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey);
w = getFirstQualifiedTimeWindow(win.skey, &w, pInterval, TSDB_ORDER_ASC);
int32_t order = TSDB_ORDER_ASC;
@@ -4082,7 +4087,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
int32_t type = convertFillType(pPhyFillNode->mode);
SResultInfo* pResultInfo = &pOperator->resultInfo;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->primaryTsCol = ((SColumnNode*)pPhyFillNode->pWStartTs)->slotId;
int32_t numOfOutputCols = 0;
@@ -4286,7 +4291,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
REPLACE_NODE(pNew);
} else {
taosMemoryFree(keyBuf);
- nodesClearList(groupNew);
+ nodesDestroyList(groupNew);
metaReaderClear(&mr);
return code;
}
@@ -4304,7 +4309,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
if (tTagIsJson(data)) {
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
taosMemoryFree(keyBuf);
- nodesClearList(groupNew);
+ nodesDestroyList(groupNew);
metaReaderClear(&mr);
return terrno;
}
@@ -4327,7 +4332,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
info->groupId = groupId;
groupNum++;
- nodesClearList(groupNew);
+ nodesDestroyList(groupNew);
metaReaderClear(&mr);
}
taosMemoryFree(keyBuf);
@@ -4456,7 +4461,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
} else { // Create one table group.
- STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid, .groupId = 0};
+ STableKeyInfo info = {.uid = pBlockNode->uid, .groupId = 0};
taosArrayPush(pTableListInfo->pTableList, &info);
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 2964948e70..20630fd6ff 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -406,7 +406,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
goto _error;
}
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResultBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index b864fae47f..497de8347c 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -41,7 +41,7 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &numOfCols);
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pRes = pResBlock;
pOperator->name = "MergeJoinOperator";
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 2e78b61b8c..11aac2114d 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -125,7 +125,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
}
if (order == TSDB_ORDER_ASC) {
- getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.skey, &w);
+ w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.skey);
assert(w.ekey >= pBlockInfo->window.skey);
if (w.ekey < pBlockInfo->window.ekey) {
@@ -144,7 +144,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
}
}
} else {
- getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.ekey, &w);
+ w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.ekey);
assert(w.skey <= pBlockInfo->window.ekey);
if (w.skey > pBlockInfo->window.skey) {
@@ -359,6 +359,7 @@ void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* p
SScalarParam param = {.columnData = pColInfoData};
fpSet.process(&srcParam, 1, ¶m);
+ colDataDestroy(&infoData);
}
static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
@@ -1519,6 +1520,7 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
blockDataDestroy(pStreamScan->pPullDataRes);
blockDataDestroy(pStreamScan->pDeleteDataRes);
taosArrayDestroy(pStreamScan->pBlockLists);
+ taosArrayDestroy(pStreamScan->tsArray);
taosMemoryFree(pStreamScan);
}
@@ -2044,8 +2046,8 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
int32_t code = metaGetTableEntryByUid(&mr, suid);
if (code != TSDB_CODE_SUCCESS) {
- qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
- GET_TASKID(pTaskInfo));
+ qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s",
+ pInfo->pCur->mr.me.name, suid, tstrerror(terrno), GET_TASKID(pTaskInfo));
metaReaderClear(&mr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
@@ -2151,16 +2153,39 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
}
}
+
+static SSDataBlock* sysTableScanUserSTables(SOperatorInfo* pOperator) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SSysTableScanInfo* pInfo = pOperator->info;
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ pInfo->pRes->info.rows = 0;
+ pOperator->status = OP_EXEC_DONE;
+
+ pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
+ return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
+}
+
static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// build message and send to mnode to fetch the content of system tables.
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
const char* name = tNameGetTableName(&pInfo->name);
+ if (pInfo->showRewrite) {
+ char dbName[TSDB_DB_NAME_LEN] = {0};
+ getDBNameFromCondition(pInfo->pCondition, dbName);
+ sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
+ }
+
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
return sysTableScanUserTables(pOperator);
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
return sysTableScanUserTags(pOperator);
+ } else if (strncasecmp(name, TSDB_INS_TABLE_USER_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && IS_SYS_DBNAME(pInfo->req.db)) {
+ return sysTableScanUserSTables(pOperator);
} else { // load the meta from mnode of the given epset
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@@ -2171,12 +2196,6 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
strncpy(pInfo->req.tb, tNameGetTableName(&pInfo->name), tListLen(pInfo->req.tb));
strcpy(pInfo->req.user, pInfo->pUser);
- if (pInfo->showRewrite) {
- char dbName[TSDB_DB_NAME_LEN] = {0};
- getDBNameFromCondition(pInfo->pCondition, dbName);
- sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
- }
-
int32_t contLen = tSerializeSRetrieveTableReq(NULL, 0, &pInfo->req);
char* buf1 = taosMemoryCalloc(1, contLen);
tSerializeSRetrieveTableReq(buf1, contLen, &pInfo->req);
@@ -2324,7 +2343,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
pInfo->pCondition = pScanNode->node.pConditions;
pInfo->scanCols = colList;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
tNameAssign(&pInfo->name, &pScanNode->tableName);
const char* name = tNameGetTableName(&pInfo->name);
@@ -2554,7 +2573,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
pOperator->fpSet =
@@ -3099,7 +3118,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
pOperator->info = pInfo;
pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->pTaskInfo = pTaskInfo;
- initResultSizeInfo(pOperator, 1024);
+ initResultSizeInfo(&pOperator->resultInfo, 1024);
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doTableMergeScan, NULL, NULL, destroyTableMergeScanOperatorInfo, NULL,
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index a3b79d9597..f019ee94b8 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -26,7 +26,7 @@ static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo) {
SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- if (pInfo == NULL || pOperator == NULL /* || rowSize > 100 * 1024 * 1024*/) {
+ if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
@@ -41,13 +41,15 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
extractColMatchInfo(pSortNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
pOperator->exprSupp.pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset);
+
+ initResultSizeInfo(&pOperator->resultInfo, 1024);
+
pInfo->binfo.pRes = pResBlock;
-
- initResultSizeInfo(pOperator, 1024);
-
- pInfo->pSortInfo = createSortInfo(pSortNode->pSortKeys);
+ pInfo->pSortInfo = createSortInfo(pSortNode->pSortKeys);
pInfo->pCondition = pSortNode->node.pConditions;
pInfo->pColMatchInfo = pColMatchColInfo;
+ initLimitInfo(pSortNode->node.pLimit, pSortNode->node.pSlimit, &pInfo->limitInfo);
+
pOperator->name = "SortOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SORT;
pOperator->blocking = true;
@@ -208,26 +210,44 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = NULL;
while (1) {
pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
- pInfo->pColMatchInfo, pInfo);
- if (pBlock != NULL) {
- doFilter(pInfo->pCondition, pBlock);
- }
-
+ pInfo->pColMatchInfo, pInfo);
if (pBlock == NULL) {
doSetOperatorCompleted(pOperator);
- break;
+ return NULL;
}
- if (blockDataGetNumOfRows(pBlock) > 0) {
+ doFilter(pInfo->pCondition, pBlock);
+ if (blockDataGetNumOfRows(pBlock) == 0) {
+ continue;
+ }
+
+ // todo add the limit/offset info
+ if (pInfo->limitInfo.remainOffset > 0) {
+ if (pInfo->limitInfo.remainOffset >= blockDataGetNumOfRows(pBlock)) {
+ pInfo->limitInfo.remainOffset -= pBlock->info.rows;
+ continue;
+ }
+
+ blockDataTrimFirstNRows(pBlock, pInfo->limitInfo.remainOffset);
+ pInfo->limitInfo.remainOffset = 0;
+ }
+
+ if (pInfo->limitInfo.limit.limit > 0 &&
+ pInfo->limitInfo.limit.limit <= pInfo->limitInfo.numOfOutputRows + blockDataGetNumOfRows(pBlock)) {
+ int32_t remain = pInfo->limitInfo.limit.limit - pInfo->limitInfo.numOfOutputRows;
+ blockDataKeepFirstNRows(pBlock, remain);
+ }
+
+ size_t numOfRows = blockDataGetNumOfRows(pBlock);
+ pInfo->limitInfo.numOfOutputRows += numOfRows;
+ pOperator->resultInfo.totalRows += numOfRows;
+
+ if (numOfRows > 0) {
break;
}
}
- if (pBlock != NULL) {
- pOperator->resultInfo.totalRows += pBlock->info.rows;
- }
-
- return pBlock;
+ return blockDataGetNumOfRows(pBlock) > 0? pBlock:NULL;
}
void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
@@ -479,7 +499,7 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSort
pOperator->exprSupp.pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset);
pInfo->binfo.pRes = pResBlock;
- initResultSizeInfo(pOperator, 1024);
+ initResultSizeInfo(&pOperator->resultInfo, 1024);
pInfo->pSortInfo = createSortInfo(pSortPhyNode->pSortKeys);
;
@@ -711,7 +731,7 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, 0);
SSDataBlock* pInputBlock = createResDataBlock(pChildNode->pOutputDataBlockDesc);
- initResultSizeInfo(pOperator, 1024);
+ initResultSizeInfo(&pOperator->resultInfo, 1024);
pInfo->groupSort = pMergePhyNode->groupSort;
pInfo->binfo.pRes = pResBlock;
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index b0a74c3002..b5966fc463 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -652,7 +652,7 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
void printDataBlock(SSDataBlock* pBlock, const char* flag) {
if (!pBlock || pBlock->info.rows == 0) {
- qDebug("======printDataBlock: Block is Null or Empty");
+ qDebug("===stream===printDataBlock: Block is Null or Empty");
return;
}
char* pBuf = NULL;
@@ -660,6 +660,62 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag) {
taosMemoryFree(pBuf);
}
+typedef int32_t (*__compare_fn_t)(void* pKey, void* data, int32_t index);
+
+int32_t binarySearchCom(void* keyList, int num, void* pKey, int order, __compare_fn_t comparefn) {
+ int firstPos = 0, lastPos = num - 1, midPos = -1;
+ int numOfRows = 0;
+
+ if (num <= 0) return -1;
+ if (order == TSDB_ORDER_DESC) {
+ // find the first position which is smaller or equal than the key
+ while (1) {
+ if (comparefn(pKey, keyList, lastPos) >= 0) return lastPos;
+ if (comparefn(pKey, keyList, firstPos) == 0) return firstPos;
+ if (comparefn(pKey, keyList, firstPos) < 0) return firstPos - 1;
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (comparefn(pKey, keyList, midPos) < 0) {
+ lastPos = midPos - 1;
+ } else if (comparefn(pKey, keyList, midPos) > 0) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
+ }
+
+ } else {
+ // find the first position which is bigger or equal than the key
+ while (1) {
+ if (comparefn(pKey, keyList, firstPos) <= 0) return firstPos;
+ if (comparefn(pKey, keyList, lastPos) == 0) return lastPos;
+
+ if (comparefn(pKey, keyList, lastPos) > 0) {
+ lastPos = lastPos + 1;
+ if (lastPos >= num)
+ return -1;
+ else
+ return lastPos;
+ }
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (comparefn(pKey, keyList, midPos) < 0) {
+ lastPos = midPos - 1;
+ } else if (comparefn(pKey, keyList, midPos) > 0) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
+ }
+ }
+
+ return midPos;
+}
+
typedef int64_t (*__get_value_fn_t)(void* data, int32_t index);
int32_t binarySearch(void* keyList, int num, TSKEY key, int order, __get_value_fn_t getValuefn) {
@@ -716,20 +772,31 @@ int32_t binarySearch(void* keyList, int num, TSKEY key, int order, __get_value_f
return midPos;
}
-int64_t getReskey(void* data, int32_t index) {
+int32_t compareResKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SResKeyPos* pos = taosArrayGetP(res, index);
- return *(int64_t*)pos->key;
+ SWinRes* pData = (SWinRes*) pKey;
+ if (pData->ts == *(int64_t*)pos->key) {
+ if (pData->groupId > pos->groupId) {
+ return 1;
+ } else if (pData->groupId < pos->groupId) {
+ return -1;
+ }
+ return 0;
+ } else if (pData->ts > *(int64_t*)pos->key) {
+ return 1;
+ }
+ return -1;
}
static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SArray* pUpdated) {
int32_t size = taosArrayGetSize(pUpdated);
- int32_t index = binarySearch(pUpdated, size, ts, TSDB_ORDER_DESC, getReskey);
+ SWinRes data = {.ts = ts, .groupId = groupId};
+ int32_t index = binarySearchCom(pUpdated, size, &data, TSDB_ORDER_DESC, compareResKey);
if (index == -1) {
index = 0;
} else {
- TSKEY resTs = getReskey(pUpdated, index);
- if (resTs < ts) {
+ if (compareResKey(&data, pUpdated, index) > 0) {
index++;
} else {
return TSDB_CODE_SUCCESS;
@@ -753,10 +820,10 @@ static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpda
return saveResult(result->win.skey, result->pageId, result->offset, groupId, pUpdated);
}
-static void removeResult(SArray* pUpdated, TSKEY key) {
+static void removeResult(SArray* pUpdated, SWinRes* pKey) {
int32_t size = taosArrayGetSize(pUpdated);
- int32_t index = binarySearch(pUpdated, size, key, TSDB_ORDER_DESC, getReskey);
- if (index >= 0 && key == getReskey(pUpdated, index)) {
+ int32_t index = binarySearchCom(pUpdated, size, pKey, TSDB_ORDER_DESC, compareResKey);
+ if (index >= 0 && 0 == compareResKey(pKey, pUpdated, index)) {
taosArrayRemove(pUpdated, index);
}
}
@@ -765,7 +832,7 @@ static void removeResults(SArray* pWins, SArray* pUpdated) {
int32_t size = taosArrayGetSize(pWins);
for (int32_t i = 0; i < size; i++) {
SWinRes* pW = taosArrayGet(pWins, i);
- removeResult(pUpdated, pW->ts);
+ removeResult(pUpdated, pW);
}
}
@@ -775,14 +842,30 @@ int64_t getWinReskey(void* data, int32_t index) {
return pos->ts;
}
+int32_t compareWinRes(void* pKey, void* data, int32_t index) {
+ SArray* res = (SArray*)data;
+ SWinRes* pos = taosArrayGetP(res, index);
+ SResKeyPos* pData = (SResKeyPos*) pKey;
+ if (*(int64_t*)pData->key == pos->ts) {
+ if (pData->groupId > pos->groupId) {
+ return 1;
+ } else if (pData->groupId < pos->groupId) {
+ return -1;
+ }
+ return 0;
+ } else if (*(int64_t*)pData->key > pos->ts) {
+ return 1;
+ }
+ return -1;
+}
+
static void removeDeleteResults(SArray* pUpdated, SArray* pDelWins) {
int32_t upSize = taosArrayGetSize(pUpdated);
int32_t delSize = taosArrayGetSize(pDelWins);
for (int32_t i = 0; i < upSize; i++) {
SResKeyPos* pResKey = taosArrayGetP(pUpdated, i);
- int64_t key = *(int64_t*)pResKey->key;
- int32_t index = binarySearch(pDelWins, delSize, key, TSDB_ORDER_DESC, getWinReskey);
- if (index >= 0 && key == getWinReskey(pDelWins, index)) {
+ int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
+ if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
taosArrayRemove(pDelWins, index);
}
}
@@ -924,11 +1007,17 @@ SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SRe
int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) {
TSKEY* tsCols = NULL;
+
if (pBlock->pDataBlock != NULL) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
tsCols = (int64_t*)pColDataInfo->pData;
- if (tsCols != NULL) {
+ // no data in primary ts
+ if (tsCols[0] == 0 && tsCols[pBlock->info.rows - 1] == 0) {
+ return NULL;
+ }
+
+ if (tsCols[0] != 0 && (pBlock->info.window.skey == 0 && pBlock->info.window.ekey == 0)) {
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
}
}
@@ -1442,8 +1531,10 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
+ qDebug("===stream===single interval is done");
freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
}
+ printDataBlock(pInfo->binfo.pRes, "single interval");
return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
}
@@ -1677,7 +1768,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
SExprSupp* pSup = &pOperator->exprSupp;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResBlock);
@@ -1758,7 +1849,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr
int32_t numOfRows = 4096;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, numOfRows);
+ initResultSizeInfo(&pOperator->resultInfo, numOfRows);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResBlock);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win);
@@ -2218,7 +2309,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pInfo->tsCol = extractColumnFromColumnNode((SColumnNode*)pInterpPhyNode->pTimeSeries);
pInfo->fillType = convertFillType(pInterpPhyNode->fillMode);
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, (SNodeListNode*)pInterpPhyNode->pFillValues);
pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
@@ -2266,7 +2357,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&pInfo->binfo, pResBlock);
@@ -2314,7 +2405,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo
}
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
@@ -2890,7 +2981,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
ASSERT(pInfo->twAggSup.calTrigger != STREAM_TRIGGER_MAX_DELAY);
pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
if (pIntervalPhyNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar);
@@ -3066,7 +3157,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
goto _error;
}
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
if (pSessionNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar);
@@ -4330,7 +4421,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &numOfCols);
pInfo->stateCol = extractColumnFromColumnNode(pColNode);
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
if (pStateNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pStateNode->window.pExprs, NULL, &numOfScalar);
@@ -4580,7 +4671,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
iaInfo->primaryTsIndex = primaryTsSlotId;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code =
initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
@@ -4886,7 +4977,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
SExprSupp* pExprSupp = &pOperator->exprSupp;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
- initResultSizeInfo(pOperator, 4096);
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(pExprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
initBasicInfo(&iaInfo->binfo, pResBlock);
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 9c004bf1c4..ec8e6b038e 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -958,6 +958,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
return false;
}
+ cJSON_Delete(binDesc);
taosMemoryFree(intervals);
return true;
}
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 9534b2c7b3..176da0bb48 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -165,6 +165,7 @@ typedef struct SElapsedInfo {
typedef struct STwaInfo {
double dOutput;
+ bool isNull;
SPoint1 p;
STimeWindow win;
} STwaInfo;
@@ -2466,9 +2467,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElems = 0;
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
-
SInputColumnInfoData* pInput = &pCtx->input;
- // SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0];
SColumnInfoData* pCol = pInput->pData[0];
int32_t type = pCol->info.type;
@@ -2501,6 +2500,9 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
GET_TYPED_DATA(v, double, type, data);
tHistogramAdd(&pInfo->pHisto, v);
}
+
+    qDebug("add %d elements into histogram, total:%"PRId64", numOfEntry:%d, %p", numOfElems, pInfo->pHisto->numOfElems,
+ pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, numOfElems, 1);
@@ -2539,11 +2541,19 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
if (pHisto->numOfElems <= 0) {
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
+
+ qDebug("merge histo, total:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
} else {
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
+ qDebug("input histogram, elem:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
+ pInput->pHisto);
+
SHistogramInfo* pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
+
+ qDebug("merge histo, total:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
+ pHisto);
tHistogramDestroy(&pRes);
}
}
@@ -2559,14 +2569,20 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
- int32_t start = pInput->startRowIndex;
+ qDebug("total %d rows will merge, %p", pInput->numOfRows, pInfo->pHisto);
+ int32_t start = pInput->startRowIndex;
for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
- char* data = colDataGetData(pCol, i);
+ char* data = colDataGetData(pCol, i);
+
SAPercentileInfo* pInputInfo = (SAPercentileInfo*)varDataVal(data);
apercentileTransferInfo(pInputInfo, pInfo);
}
+ if (pInfo->algo != APERCT_ALGO_TDIGEST) {
+    qDebug("after merge, total:%"PRId64", numOfEntry:%d, %p", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries, pInfo->pHisto);
+ }
+
SET_VAL(pResInfo, 1, 1);
return TSDB_CODE_SUCCESS;
}
@@ -2584,6 +2600,8 @@ int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
}
} else {
if (pInfo->pHisto->numOfElems > 0) {
+      qDebug("get the final res, elements:%"PRId64", entry:%d", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries);
+
double ratio[] = {pInfo->percent};
double* res = tHistogramUniform(pInfo->pHisto, ratio, 1);
pInfo->result = *res;
@@ -2637,6 +2655,9 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SAPercentileInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
ASSERT(pDBuf->algo == pSBuf->algo);
+
+ qDebug("start to combine apercentile, %p", pDBuf->pHisto);
+
apercentileTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
return TSDB_CODE_SUCCESS;
@@ -5181,8 +5202,9 @@ bool twaFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
}
STwaInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
- pInfo->p.key = INT64_MIN;
- pInfo->win = TSWINDOW_INITIALIZER;
+ pInfo->isNull = false;
+ pInfo->p.key = INT64_MIN;
+ pInfo->win = TSWINDOW_INITIALIZER;
return true;
}
@@ -5208,27 +5230,47 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
SPoint1* last = &pInfo->p;
int32_t numOfElems = 0;
+ if (IS_NULL_TYPE(pInputCol->info.type)) {
+ pInfo->isNull = true;
+ goto _twa_over;
+ }
+
int32_t i = pInput->startRowIndex;
if (pCtx->start.key != INT64_MIN) {
ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
(pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));
ASSERT(last->key == INT64_MIN);
- last->key = tsList[i];
+ for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
- GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+ last->key = tsList[i];
- pInfo->dOutput += twa_get_area(pCtx->start, *last);
- pInfo->win.skey = pCtx->start.key;
- numOfElems++;
- i += 1;
+ GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+
+ pInfo->dOutput += twa_get_area(pCtx->start, *last);
+ pInfo->win.skey = pCtx->start.key;
+ numOfElems++;
+ i += 1;
+ break;
+ }
} else if (pInfo->p.key == INT64_MIN) {
- last->key = tsList[i];
- GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+ for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
+ if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+ continue;
+ }
- pInfo->win.skey = last->key;
- numOfElems++;
- i += 1;
+ last->key = tsList[i];
+
+ GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+
+ pInfo->win.skey = last->key;
+ numOfElems++;
+ i += 1;
+ break;
+ }
}
SPoint1 st = {0};
@@ -5241,6 +5283,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5255,6 +5298,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5268,6 +5312,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5281,6 +5326,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5294,6 +5340,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5307,6 +5354,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5320,6 +5368,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5333,6 +5382,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5346,6 +5396,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5359,6 +5410,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
continue;
}
+ numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
pInfo->dOutput += twa_get_area(pInfo->p, st);
@@ -5379,7 +5431,12 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
pInfo->win.ekey = pInfo->p.key;
- SET_VAL(pResInfo, numOfElems, 1);
+_twa_over:
+ if (numOfElems == 0) {
+ pInfo->isNull = true;
+ }
+
+ SET_VAL(pResInfo, 1, 1);
return TSDB_CODE_SUCCESS;
}
@@ -5400,8 +5457,8 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
STwaInfo* pInfo = (STwaInfo*)GET_ROWCELL_INTERBUF(pResInfo);
- if (pResInfo->numOfRes == 0) {
- pResInfo->isNullRes = 1;
+ if (pInfo->isNull == true) {
+ pResInfo->numOfRes = 0;
} else {
if (pInfo->win.ekey == pInfo->win.skey) {
pInfo->dOutput = pInfo->p.val;
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index 74fca69aa7..2402607251 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -913,8 +913,8 @@ void udfdConnectMnodeThreadFunc(void *args) {
}
int main(int argc, char *argv[]) {
- if (!taosCheckSystemIsSmallEnd()) {
- printf("failed to start since on non-small-end machines\n");
+ if (!taosCheckSystemIsLittleEnd()) {
+ printf("failed to start since on non-little-end machines\n");
return -1;
}
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index eadccba35f..27c90af3e7 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -707,6 +707,8 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
sifFreeParam(res);
taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
}
+ sifFreeRes(ctx.pRes);
+
SIF_RET(code);
}
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 38f22f9696..23f0bb088d 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -369,6 +369,8 @@ static void destroyPhysiNode(SPhysiNode* pNode) {
nodesDestroyList(pNode->pChildren);
nodesDestroyNode(pNode->pConditions);
nodesDestroyNode((SNode*)pNode->pOutputDataBlockDesc);
+ nodesDestroyNode(pNode->pLimit);
+ nodesDestroyNode(pNode->pSlimit);
}
static void destroyWinodwPhysiNode(SWinodwPhysiNode* pNode) {
@@ -389,11 +391,16 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode*
static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); }
-static void nodesDestroyNodePointer(void* node) {
- SNode* pNode = *(SNode**)node;
- nodesDestroyNode(pNode);
+static void destroyTableCfg(STableCfg* pCfg) {
+ taosArrayDestroy(pCfg->pFuncs);
+ taosMemoryFree(pCfg->pComment);
+ taosMemoryFree(pCfg->pSchemas);
+ taosMemoryFree(pCfg->pTags);
+ taosMemoryFree(pCfg);
}
+static void destroySmaIndex(void* pIndex) { taosMemoryFree(((STableIndexInfo*)pIndex)->expr); }
+
void nodesDestroyNode(SNode* pNode) {
if (NULL == pNode) {
return;
@@ -431,6 +438,7 @@ void nodesDestroyNode(SNode* pNode) {
SRealTableNode* pReal = (SRealTableNode*)pNode;
taosMemoryFreeClear(pReal->pMeta);
taosMemoryFreeClear(pReal->pVgroupList);
+ taosArrayDestroyEx(pReal->pSmaIndexes, destroySmaIndex);
break;
}
case QUERY_NODE_TEMP_TABLE:
@@ -451,9 +459,12 @@ void nodesDestroyNode(SNode* pNode) {
break;
case QUERY_NODE_LIMIT: // no pointer field
break;
- case QUERY_NODE_STATE_WINDOW:
- nodesDestroyNode(((SStateWindowNode*)pNode)->pExpr);
+ case QUERY_NODE_STATE_WINDOW: {
+ SStateWindowNode* pState = (SStateWindowNode*)pNode;
+ nodesDestroyNode(pState->pCol);
+ nodesDestroyNode(pState->pExpr);
break;
+ }
case QUERY_NODE_SESSION_WINDOW: {
SSessionWindowNode* pSession = (SSessionWindowNode*)pNode;
nodesDestroyNode((SNode*)pSession->pCol);
@@ -500,8 +511,10 @@ void nodesDestroyNode(SNode* pNode) {
}
case QUERY_NODE_TABLE_OPTIONS: {
STableOptions* pOptions = (STableOptions*)pNode;
- nodesDestroyList(pOptions->pSma);
+ nodesDestroyList(pOptions->pMaxDelay);
+ nodesDestroyList(pOptions->pWatermark);
nodesDestroyList(pOptions->pRollupFuncs);
+ nodesDestroyList(pOptions->pSma);
break;
}
case QUERY_NODE_INDEX_OPTIONS: {
@@ -510,17 +523,22 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pOptions->pInterval);
nodesDestroyNode(pOptions->pOffset);
nodesDestroyNode(pOptions->pSliding);
+ nodesDestroyNode(pOptions->pStreamOptions);
break;
}
case QUERY_NODE_EXPLAIN_OPTIONS: // no pointer field
break;
- case QUERY_NODE_STREAM_OPTIONS:
- nodesDestroyNode(((SStreamOptions*)pNode)->pWatermark);
+ case QUERY_NODE_STREAM_OPTIONS: {
+ SStreamOptions* pOptions = (SStreamOptions*)pNode;
+ nodesDestroyNode(pOptions->pDelay);
+ nodesDestroyNode(pOptions->pWatermark);
break;
+ }
case QUERY_NODE_LEFT_VALUE: // no pointer field
break;
case QUERY_NODE_SET_OPERATOR: {
SSetOperator* pStmt = (SSetOperator*)pNode;
+ nodesDestroyList(pStmt->pProjectionList);
nodesDestroyNode(pStmt->pLeft);
nodesDestroyNode(pStmt->pRight);
nodesDestroyList(pStmt->pOrderByList);
@@ -582,7 +600,8 @@ void nodesDestroyNode(SNode* pNode) {
break;
case QUERY_NODE_DROP_SUPER_TABLE_STMT: // no pointer field
break;
- case QUERY_NODE_ALTER_TABLE_STMT: {
+ case QUERY_NODE_ALTER_TABLE_STMT:
+ case QUERY_NODE_ALTER_SUPER_TABLE_STMT: {
SAlterTableStmt* pStmt = (SAlterTableStmt*)pNode;
nodesDestroyNode((SNode*)pStmt->pOptions);
nodesDestroyNode((SNode*)pStmt->pVal);
@@ -686,14 +705,15 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pTbName);
break;
}
- case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT: // no pointer field
+ case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
+ nodesDestroyNode(((SShowDnodeVariablesStmt*)pNode)->pDnodeId);
break;
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
taosMemoryFreeClear(((SShowCreateDatabaseStmt*)pNode)->pCfg);
break;
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
- taosMemoryFreeClear(((SShowCreateTableStmt*)pNode)->pCfg);
+ destroyTableCfg((STableCfg*)(((SShowCreateTableStmt*)pNode)->pCfg));
break;
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT: // no pointer field
case QUERY_NODE_KILL_CONNECTION_STMT: // no pointer field
@@ -725,7 +745,8 @@ void nodesDestroyNode(SNode* pNode) {
}
taosArrayDestroy(pQuery->pDbList);
taosArrayDestroy(pQuery->pTableList);
- taosArrayDestroyEx(pQuery->pPlaceholderValues, nodesDestroyNodePointer);
+ taosArrayDestroy(pQuery->pPlaceholderValues);
+ nodesDestroyNode(pQuery->pPrepareRoot);
break;
}
case QUERY_NODE_LOGIC_PLAN_SCAN: {
@@ -737,7 +758,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyList(pLogicNode->pDynamicScanFuncs);
nodesDestroyNode(pLogicNode->pTagCond);
nodesDestroyNode(pLogicNode->pTagIndexCond);
- taosArrayDestroy(pLogicNode->pSmaIndexes);
+ taosArrayDestroyEx(pLogicNode->pSmaIndexes, destroySmaIndex);
nodesDestroyList(pLogicNode->pGroupTags);
break;
}
@@ -766,6 +787,9 @@ void nodesDestroyNode(SNode* pNode) {
destroyLogicNode((SLogicNode*)pLogicNode);
destroyVgDataBlockArray(pLogicNode->pDataBlocks);
// pVgDataBlocks is weak reference
+ nodesDestroyNode(pLogicNode->pAffectedRows);
+ taosMemoryFreeClear(pLogicNode->pVgroupList);
+ nodesDestroyList(pLogicNode->pInsertCols);
break;
}
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
@@ -784,6 +808,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyList(pLogicNode->pFuncs);
nodesDestroyNode(pLogicNode->pTspk);
nodesDestroyNode(pLogicNode->pTsEnd);
+ nodesDestroyNode(pLogicNode->pStateExpr);
break;
}
case QUERY_NODE_LOGIC_PLAN_FILL: {
@@ -833,9 +858,14 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
- case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
destroyScanPhysiNode((SScanPhysiNode*)pNode);
break;
+ case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: {
+ SLastRowScanPhysiNode* pPhyNode = (SLastRowScanPhysiNode*)pNode;
+ destroyScanPhysiNode((SScanPhysiNode*)pNode);
+ nodesDestroyList(pPhyNode->pGroupTags);
+ break;
+ }
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 1236918f9f..920277370a 100644
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -462,7 +462,7 @@ explain_options(A) ::= explain_options(B) VERBOSE NK_BOOL(C).
explain_options(A) ::= explain_options(B) RATIO NK_FLOAT(C). { A = setExplainRatio(pCxt, B, &C); }
/************************************************ compact *************************************************************/
-cmd ::= COMPACT VNODES IN NK_LP integer_list(A) NK_RP. { pCxt->pRootNode = createCompactStmt(pCxt, A); }
+cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP. { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
/************************************************ create/drop function ************************************************/
cmd ::= CREATE agg_func_opt(A) FUNCTION not_exists_opt(F) function_name(B)
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 895a51fdbe..70f447120f 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -387,6 +387,19 @@ SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType typ
return (SNode*)cond;
}
+static uint8_t getMinusDataType(uint8_t orgType) {
+ switch (orgType) {
+ case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ case TSDB_DATA_TYPE_UINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ return TSDB_DATA_TYPE_BIGINT;
+ default:
+ break;
+ }
+ return orgType;
+}
+
SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) {
CHECK_PARSER_STATUS(pCxt);
if (OP_TYPE_MINUS == type && QUERY_NODE_VALUE == nodeType(pLeft)) {
@@ -402,7 +415,7 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL
}
taosMemoryFree(pVal->literal);
pVal->literal = pNewLiteral;
- pVal->node.resType.type = TSDB_DATA_TYPE_BIGINT;
+ pVal->node.resType.type = getMinusDataType(pVal->node.resType.type);
return pLeft;
}
SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR);
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index d6369157b0..05e8c1094d 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -1497,7 +1497,6 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
memset(&pCxt->tags, 0, sizeof(pCxt->tags));
pCxt->pVgroupsHashObj = NULL;
pCxt->pTableBlockHashObj = NULL;
- pCxt->pTableMeta = NULL;
return TSDB_CODE_SUCCESS;
}
@@ -1554,7 +1553,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
if (NULL == *pQuery) {
return TSDB_CODE_OUT_OF_MEMORY;
}
+ } else {
+ nodesDestroyNode((*pQuery)->pRoot);
}
+
(*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
(*pQuery)->haveResultSet = false;
(*pQuery)->msgType = TDMT_VND_SUBMIT;
diff --git a/source/libs/parser/src/parInsertData.c b/source/libs/parser/src/parInsertData.c
index 290c65de12..9e1d8dba8b 100644
--- a/source/libs/parser/src/parInsertData.c
+++ b/source/libs/parser/src/parInsertData.c
@@ -678,6 +678,7 @@ void qFreeStmtDataBlock(void* pDataBlock) {
return;
}
+ taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pTableMeta);
taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pData);
taosMemoryFreeClear(pDataBlock);
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 026328be24..892ae6d5ac 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1257,6 +1257,7 @@ static int32_t rewriteFuncToValue(STranslateContext* pCxt, char* pLiteral, SNode
}
}
if (DEAL_RES_ERROR != translateValue(pCxt, pVal)) {
+ nodesDestroyNode(*pNode);
*pNode = (SNode*)pVal;
} else {
nodesDestroyNode((SNode*)pVal);
@@ -4009,30 +4010,7 @@ static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) {
return NULL;
}
-static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
- if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
- return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
- "Set tag value only available for child table");
- }
-
- if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
- return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
- }
-
- if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
- return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
- }
-
- if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
- return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
- }
-
- STableMeta* pTableMeta = NULL;
- int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
- if (TSDB_CODE_SUCCESS != code) {
- return code;
- }
-
+static int32_t checkAlterSuperTableImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta) {
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON &&
(pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||
@@ -4057,6 +4035,33 @@ static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pS
return TSDB_CODE_SUCCESS;
}
+static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
+ if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
+ "Set tag value only available for child table");
+ }
+
+ if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
+ }
+
+ if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
+ }
+
+ if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
+ }
+
+ STableMeta* pTableMeta = NULL;
+ int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkAlterSuperTableImpl(pCxt, pStmt, pTableMeta);
+ }
+ taosMemoryFree(pTableMeta);
+ return code;
+}
+
static int32_t translateAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
SMAlterStbReq alterReq = {0};
int32_t code = checkAlterSuperTable(pCxt, pStmt);
@@ -6438,6 +6443,7 @@ static int32_t toMsgType(ENodeType type) {
static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
if (NULL != pCxt->pDbs) {
+ taosArrayDestroy(pQuery->pDbList);
pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN);
if (NULL == pQuery->pDbList) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -6450,6 +6456,7 @@ static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
}
if (NULL != pCxt->pTables) {
+ taosArrayDestroy(pQuery->pTableList);
pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName));
if (NULL == pQuery->pTableList) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -6521,6 +6528,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
pQuery->stableQuery = pCxt->stableQuery;
if (pQuery->haveResultSet) {
+ taosMemoryFreeClear(pQuery->pResSchema);
if (TSDB_CODE_SUCCESS != extractResultSchema(pQuery->pRoot, &pQuery->numOfResCols, &pQuery->pResSchema)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 8eed02bbbe..74d5f03dc1 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -865,12 +865,17 @@ STableCfg* tableCfgDup(STableCfg* pCfg) {
STableCfg* pNew = taosMemoryMalloc(sizeof(*pNew));
memcpy(pNew, pCfg, sizeof(*pNew));
- if (pNew->pComment) {
- pNew->pComment = strdup(pNew->pComment);
+ if (NULL != pNew->pComment) {
+ pNew->pComment = taosMemoryCalloc(pNew->commentLen + 1, 1);
+ memcpy(pNew->pComment, pCfg->pComment, pNew->commentLen);
}
- if (pNew->pFuncs) {
+ if (NULL != pNew->pFuncs) {
pNew->pFuncs = taosArrayDup(pNew->pFuncs);
}
+ if (NULL != pNew->pTags) {
+ pNew->pTags = taosMemoryCalloc(pNew->tagsLen + 1, 1);
+ memcpy(pNew->pTags, pCfg->pTags, pNew->tagsLen);
+ }
int32_t schemaSize = (pCfg->numOfColumns + pCfg->numOfTags) * sizeof(SSchema);
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index fdba0e2fcc..78d1e83436 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -82,11 +82,16 @@ static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCa
}
static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
+ if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
+ taosMemoryFreeClear(pVal->datum.p);
+ }
+
if (pParam->is_null && 1 == *(pParam->is_null)) {
pVal->node.resType.type = TSDB_DATA_TYPE_NULL;
pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
return TSDB_CODE_SUCCESS;
}
+
int32_t inputSize = (NULL != pParam->length ? *(pParam->length) : tDataTypes[pParam->buffer_type].bytes);
pVal->node.resType.type = pParam->buffer_type;
pVal->node.resType.bytes = inputSize;
@@ -239,6 +244,7 @@ int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx
}
if (TSDB_CODE_SUCCESS == code && (colIdx < 0 || colIdx + 1 == pQuery->placeholderNum)) {
+ nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = nodesCloneNode(pQuery->pPrepareRoot);
if (NULL == pQuery->pRoot) {
code = TSDB_CODE_OUT_OF_MEMORY;
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index 3a3e07acb0..6b4c6704f6 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -4117,7 +4117,8 @@ static YYACTIONTYPE yy_reduce(
yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 254: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
-{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy356); }
+{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
+ yy_destructor(yypParser,273,&yymsp[-1].minor);
break;
case 255: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy151, yymsp[-8].minor.yy151, &yymsp[-5].minor.yy361, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy600, yymsp[0].minor.yy734); }
diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp
index 5322e34c60..4158453110 100644
--- a/source/libs/parser/test/mockCatalogService.cpp
+++ b/source/libs/parser/test/mockCatalogService.cpp
@@ -93,6 +93,17 @@ class MockCatalogServiceImpl {
MockCatalogServiceImpl() : id_(1) {}
+ ~MockCatalogServiceImpl() {
+ for (auto& cfg : dbCfg_) {
+ taosArrayDestroy(cfg.second.pRetensions);
+ }
+ for (auto& indexes : index_) {
+ for (auto& index : indexes.second) {
+ taosMemoryFree(index.expr);
+ }
+ }
+ }
+
int32_t catalogGetHandle() const { return 0; }
int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
@@ -676,6 +687,7 @@ void MockCatalogService::destoryCatalogReq(SCatalogReq* pReq) {
taosArrayDestroy(pReq->pIndex);
taosArrayDestroy(pReq->pUser);
taosArrayDestroy(pReq->pTableIndex);
+ taosArrayDestroy(pReq->pTableCfg);
delete pReq;
}
@@ -684,6 +696,11 @@ void MockCatalogService::destoryMetaRes(void* p) {
taosMemoryFree(pRes->pRes);
}
+void MockCatalogService::destoryMetaArrayRes(void* p) {
+ SMetaRes* pRes = (SMetaRes*)p;
+ taosArrayDestroy((SArray*)pRes->pRes);
+}
+
void MockCatalogService::destoryMetaData(SMetaData* pData) {
taosArrayDestroyEx(pData->pDbVgroup, destoryMetaRes);
taosArrayDestroyEx(pData->pDbCfg, destoryMetaRes);
@@ -695,5 +712,8 @@ void MockCatalogService::destoryMetaData(SMetaData* pData) {
taosArrayDestroyEx(pData->pIndex, destoryMetaRes);
taosArrayDestroyEx(pData->pUser, destoryMetaRes);
taosArrayDestroyEx(pData->pQnodeList, destoryMetaRes);
+ taosArrayDestroyEx(pData->pTableCfg, destoryMetaRes);
+ taosArrayDestroyEx(pData->pDnodeList, destoryMetaArrayRes);
+ taosMemoryFree(pData->pSvrVer);
delete pData;
}
diff --git a/source/libs/parser/test/mockCatalogService.h b/source/libs/parser/test/mockCatalogService.h
index c1e926b08c..d76a6abca8 100644
--- a/source/libs/parser/test/mockCatalogService.h
+++ b/source/libs/parser/test/mockCatalogService.h
@@ -52,6 +52,7 @@ class MockCatalogService {
public:
static void destoryCatalogReq(SCatalogReq* pReq);
static void destoryMetaRes(void* p);
+ static void destoryMetaArrayRes(void* p);
static void destoryMetaData(SMetaData* pData);
MockCatalogService();
diff --git a/source/libs/parser/test/parInitialATest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp
similarity index 100%
rename from source/libs/parser/test/parInitialATest.cpp
rename to source/libs/parser/test/parAlterToBalanceTest.cpp
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index 617191eb4a..a2954b5798 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -21,7 +21,11 @@ namespace ParserTest {
class ParserInitialCTest : public ParserDdlTest {};
-// todo compact
+TEST_F(ParserInitialCTest, compact) {
+ useDb("root", "test");
+
+ run("COMPACT VNODES IN (1, 2)", TSDB_CODE_PAR_EXPRIE_STATEMENT, PARSER_STAGE_PARSE);
+}
TEST_F(ParserInitialCTest, createAccount) {
useDb("root", "test");
@@ -32,6 +36,19 @@ TEST_F(ParserInitialCTest, createAccount) {
TEST_F(ParserInitialCTest, createBnode) {
useDb("root", "test");
+ SMCreateQnodeReq expect = {0};
+
+ auto setCreateQnodeReq = [&](int32_t dnodeId) { expect.dnodeId = dnodeId; };
+
+ setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+ ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_BNODE_STMT);
+ SMCreateQnodeReq req = {0};
+ ASSERT_TRUE(TSDB_CODE_SUCCESS ==
+ tDeserializeSCreateDropMQSBNodeReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+ ASSERT_EQ(req.dnodeId, expect.dnodeId);
+ });
+
+ setCreateQnodeReq(1);
run("CREATE BNODE ON DNODE 1");
}
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 074d12c626..235cc487fb 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -123,6 +123,14 @@ class ParserTestBaseImpl {
delete pMetaCache;
}
+ static void _destroyQuery(SQuery** pQuery) {
+ if (nullptr == pQuery) {
+ return;
+ }
+ qDestroyQuery(*pQuery);
+ taosMemoryFree(pQuery);
+ }
+
bool checkResultCode(const string& pFunc, int32_t resultCode) {
return !(stmtEnv_.checkFunc_.empty())
? ((stmtEnv_.checkFunc_ == pFunc) ? stmtEnv_.expect_ == resultCode : TSDB_CODE_SUCCESS == resultCode)
@@ -278,9 +286,9 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- SQuery* pQuery = nullptr;
- doParse(&cxt, &pQuery);
- unique_ptr query(pQuery, qDestroyQuery);
+ unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParse(&cxt, query.get());
+ SQuery* pQuery = *(query.get());
doAuthenticate(&cxt, pQuery, nullptr);
@@ -306,9 +314,9 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- SQuery* pQuery = nullptr;
- doParseSql(&cxt, &pQuery);
- unique_ptr query(pQuery, qDestroyQuery);
+ unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParseSql(&cxt, query.get());
+ SQuery* pQuery = *(query.get());
if (g_dump) {
dump();
@@ -328,9 +336,9 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt, true);
- SQuery* pQuery = nullptr;
- doParse(&cxt, &pQuery);
- unique_ptr query(pQuery, qDestroyQuery);
+ unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParse(&cxt, query.get());
+ SQuery* pQuery = *(query.get());
unique_ptr metaCache(new SParseMetaCache(), _destoryParseMetaCache);
doCollectMetaKey(&cxt, pQuery, metaCache.get());
@@ -386,9 +394,9 @@ class ParserTestBaseImpl {
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- SQuery* pQuery = nullptr;
- doParseSqlSyntax(&cxt, &pQuery, catalogReq.get());
- unique_ptr query(pQuery, qDestroyQuery);
+ unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParseSqlSyntax(&cxt, query.get(), catalogReq.get());
+ SQuery* pQuery = *(query.get());
string err;
thread t1([&]() {
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index 3f619f506f..ee2457e400 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -1068,7 +1068,11 @@ static int32_t createExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogicNo
}
static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWinodwPhysiNode* pWindow,
- SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
+ SWindowLogicNode* pWindowLogicNode) {
+ pWindow->triggerType = pWindowLogicNode->triggerType;
+ pWindow->watermark = pWindowLogicNode->watermark;
+ pWindow->igExpired = pWindowLogicNode->igExpired;
+
SNodeList* pPrecalcExprs = NULL;
SNodeList* pFuncs = NULL;
int32_t code = rewritePrecalcExprs(pCxt, pWindowLogicNode->pFuncs, &pPrecalcExprs, &pFuncs);
@@ -1100,16 +1104,6 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
code = setConditionsSlotId(pCxt, (const SLogicNode*)pWindowLogicNode, (SPhysiNode*)pWindow);
}
- pWindow->triggerType = pWindowLogicNode->triggerType;
- pWindow->watermark = pWindowLogicNode->watermark;
- pWindow->igExpired = pWindowLogicNode->igExpired;
-
- if (TSDB_CODE_SUCCESS == code) {
- *pPhyNode = (SPhysiNode*)pWindow;
- } else {
- nodesDestroyNode((SNode*)pWindow);
- }
-
nodesDestroyList(pPrecalcExprs);
nodesDestroyList(pFuncs);
@@ -1156,7 +1150,14 @@ static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil
pInterval->intervalUnit = pWindowLogicNode->intervalUnit;
pInterval->slidingUnit = pWindowLogicNode->slidingUnit;
- return createWindowPhysiNodeFinalize(pCxt, pChildren, &pInterval->window, pWindowLogicNode, pPhyNode);
+ int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pInterval->window, pWindowLogicNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pPhyNode = (SPhysiNode*)pInterval;
+ } else {
+ nodesDestroyNode((SNode*)pInterval);
+ }
+
+ return code;
}
static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
@@ -1169,7 +1170,14 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList*
pSession->gap = pWindowLogicNode->sessionGap;
- return createWindowPhysiNodeFinalize(pCxt, pChildren, &pSession->window, pWindowLogicNode, pPhyNode);
+ int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pSession->window, pWindowLogicNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pPhyNode = (SPhysiNode*)pSession;
+ } else {
+ nodesDestroyNode((SNode*)pSession);
+ }
+
+ return code;
}
static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
@@ -1201,12 +1209,20 @@ static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pC
}
}
- if (TSDB_CODE_SUCCESS != code) {
- nodesDestroyNode((SNode*)pState);
- return code;
+ if (TSDB_CODE_SUCCESS == code) {
+ code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pState->window, pWindowLogicNode);
}
- return createWindowPhysiNodeFinalize(pCxt, pChildren, &pState->window, pWindowLogicNode, pPhyNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pPhyNode = (SPhysiNode*)pState;
+ } else {
+ nodesDestroyNode((SNode*)pState);
+ }
+
+ nodesDestroyList(pPrecalcExprs);
+ nodesDestroyNode(pStateKey);
+
+ return code;
}
static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode,
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index ae0ccb1c51..4cbbf12385 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -867,10 +867,11 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, pMergeKeys, pPartSort, groupSort);
}
- if (TSDB_CODE_SUCCESS == code && groupSort) {
- stbSplSetScanPartSort(pPartSort);
- }
if (TSDB_CODE_SUCCESS == code) {
+ nodesDestroyNode((SNode*)pInfo->pSplitNode);
+ if (groupSort) {
+ stbSplSetScanPartSort(pPartSort);
+ }
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pPartSort, SPLIT_FLAG_STABLE_SPLIT));
}
diff --git a/source/libs/planner/test/planStmtTest.cpp b/source/libs/planner/test/planStmtTest.cpp
index 39290b5b2f..bab38797cc 100644
--- a/source/libs/planner/test/planStmtTest.cpp
+++ b/source/libs/planner/test/planStmtTest.cpp
@@ -24,6 +24,16 @@ class PlanStmtTest : public PlannerTestBase {
return (TAOS_MULTI_BIND*)taosMemoryCalloc(nParams, sizeof(TAOS_MULTI_BIND));
}
+ void destoryBindParams(TAOS_MULTI_BIND* pParams, int32_t nParams) {
+ for (int32_t i = 0; i < nParams; ++i) {
+ TAOS_MULTI_BIND* pParam = pParams + i;
+ taosMemoryFree(pParam->buffer);
+ taosMemoryFree(pParam->length);
+ taosMemoryFree(pParam->is_null);
+ }
+ taosMemoryFree(pParams);
+ }
+
TAOS_MULTI_BIND* buildIntegerParam(TAOS_MULTI_BIND* pBindParams, int32_t index, int64_t val, int32_t type) {
TAOS_MULTI_BIND* pBindParam = initParam(pBindParams, index, type, 0);
@@ -127,8 +137,10 @@ TEST_F(PlanStmtTest, basic) {
useDb("root", "test");
prepare("SELECT * FROM t1 WHERE c1 = ?");
- bindParams(buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT), 0);
+ TAOS_MULTI_BIND* pBindParams = buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT);
+ bindParams(pBindParams, 0);
exec();
+ destoryBindParams(pBindParams, 1);
{
prepare("SELECT * FROM t1 WHERE c1 = ? AND c2 = ?");
@@ -137,7 +149,7 @@ TEST_F(PlanStmtTest, basic) {
buildStringParam(pBindParams, 1, "abc", TSDB_DATA_TYPE_VARCHAR, strlen("abc"));
bindParams(pBindParams, -1);
exec();
- taosMemoryFreeClear(pBindParams);
+ destoryBindParams(pBindParams, 2);
}
{
@@ -147,7 +159,7 @@ TEST_F(PlanStmtTest, basic) {
buildIntegerParam(pBindParams, 1, 20, TSDB_DATA_TYPE_INT);
bindParams(pBindParams, -1);
exec();
- taosMemoryFreeClear(pBindParams);
+ destoryBindParams(pBindParams, 2);
}
}
@@ -155,12 +167,16 @@ TEST_F(PlanStmtTest, multiExec) {
useDb("root", "test");
prepare("SELECT * FROM t1 WHERE c1 = ?");
- bindParams(buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT), 0);
+ TAOS_MULTI_BIND* pBindParams = buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT);
+ bindParams(pBindParams, 0);
exec();
- bindParams(buildIntegerParam(createBindParams(1), 0, 20, TSDB_DATA_TYPE_INT), 0);
+ destoryBindParams(pBindParams, 1);
+ pBindParams = buildIntegerParam(createBindParams(1), 0, 20, TSDB_DATA_TYPE_INT);
+ bindParams(pBindParams, 0);
exec();
- bindParams(buildIntegerParam(createBindParams(1), 0, 30, TSDB_DATA_TYPE_INT), 0);
+ destoryBindParams(pBindParams, 1);
+ pBindParams = buildIntegerParam(createBindParams(1), 0, 30, TSDB_DATA_TYPE_INT);
+ bindParams(pBindParams, 0);
exec();
+ destoryBindParams(pBindParams, 1);
}
-
-TEST_F(PlanStmtTest, allDataType) { useDb("root", "test"); }
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index 0f90b54adb..5fc8b3cf30 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -126,9 +126,9 @@ class PlannerTestBaseImpl {
reset();
tsQueryPolicy = queryPolicy;
try {
- SQuery* pQuery = nullptr;
- doParseSql(sql, &pQuery);
- unique_ptr query(pQuery, qDestroyQuery);
+ unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParseSql(sql, query.get());
+ SQuery* pQuery = *(query.get());
SPlanContext cxt = {0};
setPlanContext(pQuery, &cxt);
@@ -199,6 +199,8 @@ class PlannerTestBaseImpl {
SLogicSubplan* pLogicSubplan = nullptr;
doCreateLogicPlan(&cxt, &pLogicSubplan);
+ unique_ptr<SLogicSubplan, void (*)(SLogicSubplan*)> logicSubplan(pLogicSubplan,
+ (void (*)(SLogicSubplan*))nodesDestroyNode);
doOptimizeLogicPlan(&cxt, pLogicSubplan);
@@ -206,9 +208,12 @@ class PlannerTestBaseImpl {
SQueryLogicPlan* pLogicPlan = nullptr;
doScaleOutLogicPlan(&cxt, pLogicSubplan, &pLogicPlan);
+ unique_ptr<SQueryLogicPlan, void (*)(SQueryLogicPlan*)> logicPlan(pLogicPlan,
+ (void (*)(SQueryLogicPlan*))nodesDestroyNode);
SQueryPlan* pPlan = nullptr;
doCreatePhysiPlan(&cxt, pLogicPlan, &pPlan);
+ unique_ptr<SQueryPlan, void (*)(SQueryPlan*)> plan(pPlan, (void (*)(SQueryPlan*))nodesDestroyNode);
dump(g_dumpModule);
} catch (...) {
@@ -249,6 +254,14 @@ class PlannerTestBaseImpl {
vector physiSubplans_;
};
+ static void _destroyQuery(SQuery** pQuery) {
+ if (nullptr == pQuery) {
+ return;
+ }
+ qDestroyQuery(*pQuery);
+ taosMemoryFree(pQuery);
+ }
+
void reset() {
stmtEnv_.sql_.clear();
stmtEnv_.msgBuf_.fill(0);
@@ -400,20 +413,30 @@ class PlannerTestBaseImpl {
pCxt->queryId = 1;
pCxt->pUser = caseEnv_.user_.c_str();
if (QUERY_NODE_CREATE_TOPIC_STMT == nodeType(pQuery->pRoot)) {
- pCxt->pAstRoot = ((SCreateTopicStmt*)pQuery->pRoot)->pQuery;
+ SCreateTopicStmt* pStmt = (SCreateTopicStmt*)pQuery->pRoot;
+ pCxt->pAstRoot = pStmt->pQuery;
+ pStmt->pQuery = nullptr;
+ nodesDestroyNode(pQuery->pRoot);
+ pQuery->pRoot = pCxt->pAstRoot;
pCxt->topicQuery = true;
} else if (QUERY_NODE_CREATE_INDEX_STMT == nodeType(pQuery->pRoot)) {
SMCreateSmaReq req = {0};
tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req);
g_mockCatalogService->createSmaIndex(&req);
nodesStringToNode(req.ast, &pCxt->pAstRoot);
+ tFreeSMCreateSmaReq(&req);
+ nodesDestroyNode(pQuery->pRoot);
+ pQuery->pRoot = pCxt->pAstRoot;
pCxt->streamQuery = true;
} else if (QUERY_NODE_CREATE_STREAM_STMT == nodeType(pQuery->pRoot)) {
SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot;
pCxt->pAstRoot = pStmt->pQuery;
+ pStmt->pQuery = nullptr;
pCxt->streamQuery = true;
pCxt->triggerType = pStmt->pOptions->triggerType;
pCxt->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0);
+ nodesDestroyNode(pQuery->pRoot);
+ pQuery->pRoot = pCxt->pAstRoot;
} else {
pCxt->pAstRoot = pQuery->pRoot;
}
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index 6b1476fe46..d8fda57791 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -148,11 +148,12 @@ void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
taosMemoryFreeClear(pMsgBody);
}
-int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo,
+int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo,
bool persistHandle, void* rpcCtx) {
char* pMsg = rpcMallocCont(pInfo->msgInfo.len);
if (NULL == pMsg) {
qError("0x%" PRIx64 " msg:%s malloc failed", pInfo->requestId, TMSG_INFO(pInfo->msgType));
+ destroySendMsgInfo(pInfo);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
return terrno;
}
@@ -167,13 +168,15 @@ int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTra
.info.persistHandle = persistHandle,
.code = 0
};
- assert(pInfo->fp != NULL);
TRACE_SET_ROOTID(&rpcMsg.info.traceId, pInfo->requestId);
- rpcSendRequestWithCtx(pTransporter, epSet, &rpcMsg, pTransporterId, rpcCtx);
- return TSDB_CODE_SUCCESS;
+ int code = rpcSendRequestWithCtx(pTransporter, epSet, &rpcMsg, pTransporterId, rpcCtx);
+ if (code) {
+ destroySendMsgInfo(pInfo);
+ }
+ return code;
}
-int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo) {
+int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo) {
return asyncSendMsgToServerExt(pTransporter, epSet, pTransporterId, pInfo, false, NULL);
}
diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c
index b754c52bbd..050f77bb21 100644
--- a/source/libs/scalar/src/sclfunc.c
+++ b/source/libs/scalar/src/sclfunc.c
@@ -2750,6 +2750,7 @@ static bool getHistogramBinDesc(SHistoFuncBin** bins, int32_t* binNum, char* bin
(*bins)[i].count = 0;
}
+ cJSON_Delete(binDesc);
taosMemoryFree(intervals);
return true;
}
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index 76de4da4fd..254c99f05d 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -1195,7 +1195,7 @@ static void vectorMathTsSubHelper(SColumnInfoData* pLeftCol, SColumnInfoData* pR
colDataAppendNULL(pOutputCol, i);
continue; // TODO set null or ignore
}
- *output = taosTimeSub(getVectorBigintValueFnLeft(pLeftCol->pData, i), getVectorBigintValueFnRight(pRightCol->pData, 0),
+ *output = taosTimeAdd(getVectorBigintValueFnLeft(pLeftCol->pData, i), -getVectorBigintValueFnRight(pRightCol->pData, 0),
pRightCol->info.scale, pRightCol->info.precision);
}
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index 5452ca31a5..b794cb91f5 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -509,7 +509,7 @@ int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, void *msg, uint3
SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (NULL == msgSendInfo) {
SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo));
- SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+ SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
msgSendInfo->paramFreeFp = taosMemoryFree;
@@ -535,8 +535,12 @@ int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, void *msg, uint3
_return:
- destroySendMsgInfo(msgSendInfo);
+ if (msgSendInfo) {
+ destroySendMsgInfo(msgSendInfo);
+ }
+ taosMemoryFree(msg);
+
SCH_RET(code);
}
@@ -843,6 +847,7 @@ int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, SSchTrans *trans, SQuery
int64_t transporterId = 0;
code = asyncSendMsgToServerExt(trans->pTrans, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx);
+ pMsgSendInfo = NULL;
if (code) {
SCH_ERR_JRET(code);
}
@@ -919,7 +924,9 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId, SArray *taskAction) {
addr.epSet.numOfEps = 1;
memcpy(&addr.epSet.eps[0], &nodeEpId->ep, sizeof(nodeEpId->ep));
- SCH_ERR_JRET(schAsyncSendMsg(NULL, NULL, &trans, &addr, msgType, msg, msgSize, true, &rpcCtx));
+ code = schAsyncSendMsg(NULL, NULL, &trans, &addr, msgType, msg, msgSize, true, &rpcCtx);
+ msg = NULL;
+ SCH_ERR_JRET(code);
return TSDB_CODE_SUCCESS;
@@ -1087,9 +1094,10 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
}
SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
- SCH_ERR_JRET(
- schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)));
-
+ code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
+ msg = NULL;
+ SCH_ERR_JRET(code);
+
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) {
SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId));
}
diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c
index 282e81bb5d..c40e56ab6f 100644
--- a/source/libs/scheduler/src/schTask.c
+++ b/source/libs/scheduler/src/schTask.c
@@ -102,14 +102,14 @@ int32_t schRecordTaskSucceedNode(SSchJob *pJob, SSchTask *pTask) {
}
int32_t schAppendTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t execId) {
- SSchNodeInfo nodeInfo = {.addr = *addr, .handle = NULL};
+ SSchNodeInfo nodeInfo = {.addr = *addr, .handle = SCH_GET_TASK_HANDLE(pTask)};
if (taosHashPut(pTask->execNodes, &execId, sizeof(execId), &nodeInfo, sizeof(nodeInfo))) {
SCH_TASK_ELOG("taosHashPut nodeInfo to execNodes failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- SCH_TASK_DLOG("task execNode added, execId:%d", execId);
+ SCH_TASK_DLOG("task execNode added, execId:%d, handle:%p", execId, nodeInfo.handle);
return TSDB_CODE_SUCCESS;
}
@@ -752,12 +752,18 @@ void schDropTaskOnExecNode(SSchJob *pJob, SSchTask *pTask) {
return;
}
+ int32_t i = 0;
SSchNodeInfo *nodeInfo = taosHashIterate(pTask->execNodes, NULL);
while (nodeInfo) {
- SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle);
-
- schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_SCH_DROP_TASK);
+ if (nodeInfo->handle) {
+ SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle);
+ schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_SCH_DROP_TASK);
+ SCH_TASK_DLOG("start to drop task's %dth execNode", i);
+ } else {
+ SCH_TASK_DLOG("no need to drop task %dth execNode", i);
+ }
+ ++i;
nodeInfo = taosHashIterate(pTask->execNodes, nodeInfo);
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index d0fb9c22e1..b7b635e28f 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -15,9 +15,12 @@
#include "tstreamUpdate.h"
#include "ttime.h"
+#include "query.h"
#define DEFAULT_FALSE_POSITIVE 0.01
-#define DEFAULT_BUCKET_SIZE 131072
+#define DEFAULT_BUCKET_SIZE 1310720
+#define DEFAULT_MAP_CAPACITY 1310720
+#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10)
#define ROWS_PER_MILLISECOND 1
#define MAX_NUM_SCALABLE_BF 100000
#define MIN_NUM_SCALABLE_BF 10
@@ -120,6 +123,8 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
}
pInfo->numBuckets = DEFAULT_BUCKET_SIZE;
pInfo->pCloseWinSBF = NULL;
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK);
return pInfo;
}
@@ -149,8 +154,9 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
return res;
}
-bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
+bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
int32_t res = TSDB_CODE_FAILED;
+ TSKEY* pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t));
uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
if (ts < maxTs - pInfo->watermark) {
@@ -167,7 +173,13 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY));
}
- if (maxTs < ts) {
+ int32_t size = taosHashGetSize(pInfo->pMap);
+ if ( (!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) {
+ taosHashPut(pInfo->pMap, &tableId, sizeof(uint64_t), &ts, sizeof(TSKEY));
+ return false;
+ }
+
+ if ( !pMapMaxTs && maxTs < ts ) {
taosArraySet(pInfo->pTsBuckets, index, &ts);
return false;
}
@@ -177,7 +189,7 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
} else if (res == TSDB_CODE_SUCCESS) {
return false;
}
-
+ qDebug("===stream===bucket:%" PRIu64 ", tableId:%" PRIu64 ", maxTs:%" PRId64 ", maxMapTs:%" PRId64 ", ts:%" PRId64, index, tableId, maxTs, pMapMaxTs ? *pMapMaxTs : INT64_MIN, ts);
// check from tsdb api
return true;
}
diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h
index 3a30cf801e..64f66e390a 100644
--- a/source/libs/sync/inc/syncInt.h
+++ b/source/libs/sync/inc/syncInt.h
@@ -257,6 +257,22 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode);
int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader);
int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry);
+// trace log
+void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);
+void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);
+
+void syncLogSendRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s);
+void syncLogRecvRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s);
+
+void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s);
+void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s);
+
+void syncLogSendAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntriesBatch* pMsg, const char* s);
+void syncLogRecvAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntriesBatch* pMsg, const char* s);
+
+void syncLogSendAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s);
+void syncLogRecvAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s);
+
// for debug --------------
void syncNodePrint(SSyncNode* pObj);
void syncNodePrint2(char* s, SSyncNode* pObj);
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index 9678b335fd..4295abeaa1 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -92,13 +92,10 @@
int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
int32_t ret = 0;
- // print log
- syncAppendEntriesLog2("==syncNodeOnAppendEntriesCb==", pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- syncNodeEventLog(ths, "recv sync-append-entries, maybe replica already dropped");
- return ret;
+ syncLogRecvAppendEntries(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// maybe update term
@@ -114,17 +111,12 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
}
ASSERT(pMsg->dataLen >= 0);
- do {
- // return to follower state
- if (pMsg->term == ths->pRaftStore->currentTerm && ths->state == TAOS_SYNC_STATE_CANDIDATE) {
- syncNodeEventLog(ths, "recv sync-append-entries, candidate to follower");
-
- syncNodeBecomeFollower(ths, "from candidate by append entries");
-
- // ret or reply?
- return ret;
- }
- } while (0);
+ // return to follower state
+ if (pMsg->term == ths->pRaftStore->currentTerm && ths->state == TAOS_SYNC_STATE_CANDIDATE) {
+ syncLogRecvAppendEntries(ths, pMsg, "candidate to follower");
+ syncNodeBecomeFollower(ths, "from candidate by append entries");
+ return -1; // ret or reply?
+ }
SyncTerm localPreLogTerm = 0;
if (pMsg->prevLogIndex >= SYNC_INDEX_BEGIN && pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) {
@@ -148,13 +140,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
// reject request
if ((pMsg->term < ths->pRaftStore->currentTerm) ||
((pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && !logOK)) {
- do {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries, reject, pre-index:%" PRId64 ", pre-term:%" PRIu64 ", datalen:%d",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
- syncNodeEventLog(ths, logBuf);
- } while (0);
+ syncLogRecvAppendEntries(ths, pMsg, "reject");
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
pReply->srcId = ths->myRaftId;
@@ -164,14 +150,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
pReply->matchIndex = SYNC_INDEX_INVALID;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
SRpcMsg rpcMsg;
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
@@ -192,13 +171,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
// has entries in SyncAppendEntries msg
bool hasAppendEntries = pMsg->dataLen > 0;
- do {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries, accept, pre-index:%" PRId64 ", pre-term:%" PRIu64 ", datalen:%d",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
- syncNodeEventLog(ths, logBuf);
- } while (0);
+ syncLogRecvAppendEntries(ths, pMsg, "accept");
if (hasExtraEntries && hasAppendEntries) {
// not conflict by default
@@ -348,14 +321,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
}
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
SRpcMsg rpcMsg;
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
@@ -507,13 +473,13 @@ static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEn
SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
if (pMsg->prevLogIndex > myLastIndex) {
- sDebug("vgId:%d sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+ sDebug("vgId:%d, sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1);
if (myPreLogTerm == SYNC_TERM_INVALID) {
- sDebug("vgId:%d sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+ sDebug("vgId:%d, sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
@@ -521,7 +487,7 @@ static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEn
return true;
}
- sDebug("vgId:%d sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+ sDebug("vgId:%d, sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
@@ -534,13 +500,13 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries
SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
if (pMsg->prevLogIndex > myLastIndex) {
- sDebug("vgId:%d sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+ sDebug("vgId:%d, sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1);
if (myPreLogTerm == SYNC_TERM_INVALID) {
- sDebug("vgId:%d sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+ sDebug("vgId:%d, sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
@@ -548,7 +514,7 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries
return true;
}
- sDebug("vgId:%d sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+ sDebug("vgId:%d, sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
return false;
}
@@ -558,8 +524,8 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- syncNodeEventLog(ths, "recv sync-append-entries-batch, maybe replica already dropped");
- return ret;
+ syncLogRecvAppendEntriesBatch(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// maybe update term
@@ -582,15 +548,13 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
do {
bool condition = pMsg->term == ths->pRaftStore->currentTerm && ths->state == TAOS_SYNC_STATE_CANDIDATE;
if (condition) {
- syncNodeEventLog(ths, "recv sync-append-entries-batch, candidate to follower");
-
+ syncLogRecvAppendEntriesBatch(ths, pMsg, "candidate to follower");
syncNodeBecomeFollower(ths, "from candidate by append entries");
- // do not reply?
- return ret;
+ return 0; // do not reply?
}
} while (0);
- // fake match2
+ // fake match
//
// condition1:
// preIndex <= my commit index
@@ -602,14 +566,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
(pMsg->prevLogIndex <= ths->commitIndex);
if (condition) {
- do {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries-batch, fake match2, {pre-index:%" PRId64 ", pre-term:%" PRIu64
- ", datalen:%d, datacount:%d}",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen, pMsg->dataCount);
- syncNodeEventLog(ths, logBuf);
- } while (0);
+ syncLogRecvAppendEntriesBatch(ths, pMsg, "fake match");
SyncIndex matchIndex = ths->commitIndex;
bool hasAppendEntries = pMsg->dataLen > 0;
@@ -662,14 +619,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
pReply->matchIndex = matchIndex;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
// send response
SRpcMsg rpcMsg;
@@ -702,14 +652,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
bool condition = condition1 || condition2;
if (condition) {
- do {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries-batch, not match, {pre-index:%" PRId64 ", pre-term:%" PRIu64
- ", datalen:%d, datacount:%d}",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen, pMsg->dataCount);
- syncNodeEventLog(ths, logBuf);
- } while (0);
+ syncLogRecvAppendEntriesBatch(ths, pMsg, "not match");
// maybe update commit index by snapshot
syncNodeMaybeUpdateCommitBySnapshot(ths);
@@ -724,14 +667,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
pReply->matchIndex = ths->commitIndex;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
// send response
SRpcMsg rpcMsg;
@@ -762,14 +698,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
bool hasAppendEntries = pMsg->dataLen > 0;
SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg);
- do {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries-batch, match, {pre-index:%" PRId64 ", pre-term:%" PRIu64
- ", datalen:%d, datacount:%d}",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen, pMsg->dataCount);
- syncNodeEventLog(ths, logBuf);
- } while (0);
+ syncLogRecvAppendEntriesBatch(ths, pMsg, "really match");
if (hasExtraEntries) {
// make log same, rollback deleted entries
@@ -808,14 +737,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + pMsg->dataCount : pMsg->prevLogIndex;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
// send response
SRpcMsg rpcMsg;
@@ -866,13 +788,10 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
int32_t ret = 0;
int32_t code = 0;
- // print log
- syncAppendEntriesLog2("==syncNodeOnAppendEntriesSnapshotCb==", pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- syncNodeEventLog(ths, "recv sync-append-entries, maybe replica already dropped");
- return ret;
+ syncLogRecvAppendEntries(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// maybe update term
@@ -895,11 +814,9 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
do {
bool condition = pMsg->term == ths->pRaftStore->currentTerm && ths->state == TAOS_SYNC_STATE_CANDIDATE;
if (condition) {
- syncNodeEventLog(ths, "recv sync-append-entries, candidate to follower");
-
+ syncLogRecvAppendEntries(ths, pMsg, "candidate to follower");
syncNodeBecomeFollower(ths, "from candidate by append entries");
- // do not reply?
- return ret;
+ return 0; // do not reply?
}
} while (0);
@@ -962,7 +879,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
} while (0);
#endif
- // fake match2
+ // fake match
//
// condition1:
// preIndex <= my commit index
@@ -975,13 +892,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
(pMsg->prevLogIndex <= ths->commitIndex);
if (condition) {
- do {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries, fake match2, pre-index:%" PRId64 ", pre-term:%" PRIu64 ", datalen:%d",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
- syncNodeEventLog(ths, logBuf);
- } while (0);
+ syncLogRecvAppendEntries(ths, pMsg, "fake match");
SyncIndex matchIndex = ths->commitIndex;
bool hasAppendEntries = pMsg->dataLen > 0;
@@ -1027,14 +938,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
pReply->matchIndex = matchIndex;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
// send response
SRpcMsg rpcMsg;
@@ -1067,11 +971,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
bool condition = condition1 || condition2;
if (condition) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries, not match, pre-index:%" PRId64 ", pre-term:%" PRIu64 ", datalen:%d",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
- syncNodeEventLog(ths, logBuf);
+ syncLogRecvAppendEntries(ths, pMsg, "not match");
// prepare response msg
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
@@ -1083,14 +983,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
pReply->matchIndex = SYNC_INDEX_INVALID;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
// send response
SRpcMsg rpcMsg;
@@ -1120,11 +1013,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
// has entries in SyncAppendEntries msg
bool hasAppendEntries = pMsg->dataLen > 0;
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-append-entries, match, pre-index:%" PRId64 ", pre-term:%" PRIu64 ", datalen:%d",
- pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
- syncNodeEventLog(ths, logBuf);
+ syncLogRecvAppendEntries(ths, pMsg, "really match");
if (hasExtraEntries) {
// make log same, rollback deleted entries
@@ -1159,14 +1048,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + 1 : pMsg->prevLogIndex;
// msg event log
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(pReply->destId.addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64
- ", success:%d, match-index:%" PRId64 "}",
- ths->vgId, host, port, pReply->term, pReply->privateTerm, pReply->success, pReply->matchIndex);
- } while (0);
+ syncLogSendAppendEntriesReply(ths, pReply, "");
// send response
SRpcMsg rpcMsg;
diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c
index 5137922522..81d050e179 100644
--- a/source/libs/sync/src/syncAppendEntriesReply.c
+++ b/source/libs/sync/src/syncAppendEntriesReply.c
@@ -40,44 +40,33 @@
int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;
- // print log
- syncAppendEntriesReplyLog2("==syncNodeOnAppendEntriesReplyCb==", pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, maybe replica already dropped");
- return 0;
+ syncLogRecvAppendEntriesReply(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// drop stale response
if (pMsg->term < ths->pRaftStore->currentTerm) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, recv-term:%" PRIu64 ", drop stale response",
- pMsg->term);
- syncNodeEventLog(ths, logBuf);
+ syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response");
return 0;
}
- if (gRaftDetailLog) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, before");
- }
- syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== before pNextIndex", ths->pNextIndex);
- syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== before pMatchIndex", ths->pMatchIndex);
-
// no need this code, because if I receive reply.term, then I must have sent for that term.
// if (pMsg->term > ths->pRaftStore->currentTerm) {
// syncNodeUpdateTerm(ths, pMsg->term);
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, error term, recv-term:%" PRIu64, pMsg->term);
- syncNodeErrorLog(ths, logBuf);
+ syncLogRecvAppendEntriesReply(ths, pMsg, "error term");
return -1;
}
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
+ SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
+ SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
+
if (pMsg->success) {
// nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1);
@@ -100,13 +89,16 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), nextIndex);
}
- if (gRaftDetailLog) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, after");
- }
- syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== after pNextIndex", ths->pNextIndex);
- syncIndexMgrLog2("==syncNodeOnAppendEntriesReplyCb== after pMatchIndex", ths->pMatchIndex);
+ SyncIndex afterNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
+ SyncIndex afterMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
+ do {
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "before next:%ld, match:%ld, after next:%ld, match:%ld", beforeNextIndex,
+ beforeMatchIndex, afterNextIndex, afterMatchIndex);
+ syncLogRecvAppendEntriesReply(ths, pMsg, logBuf);
+ } while (0);
- return ret;
+ return 0;
}
// only start once
@@ -147,40 +139,29 @@ static void syncNodeStartSnapshotOnce(SSyncNode* ths, SyncIndex beginIndex, Sync
int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;
- // print log
- do {
- char logBuf[256];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, term:%lu, match:%ld, success:%d", pMsg->term,
- pMsg->matchIndex, pMsg->success);
- syncNodeEventLog(ths, logBuf);
-
- } while (0);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, maybe replica already dropped");
+ syncLogRecvAppendEntriesReply(ths, pMsg, "maybe replica already dropped");
return -1;
}
// drop stale response
if (pMsg->term < ths->pRaftStore->currentTerm) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, recv-term:%" PRIu64 ", drop stale response",
- pMsg->term);
- syncNodeEventLog(ths, logBuf);
- return -1;
+ syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response");
+ return 0;
}
// error term
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, error term, recv-term:%" PRIu64, pMsg->term);
- syncNodeErrorLog(ths, logBuf);
+ syncLogRecvAppendEntriesReply(ths, pMsg, "error term");
return -1;
}
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
+ SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
+ SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
+
if (pMsg->success) {
SyncIndex newNextIndex = pMsg->matchIndex + 1;
SyncIndex newMatchIndex = pMsg->matchIndex;
@@ -293,50 +274,48 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
} while (0);
}
+ SyncIndex afterNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
+ SyncIndex afterMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
+ do {
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "before next:%ld, match:%ld, after next:%ld, match:%ld", beforeNextIndex,
+ beforeMatchIndex, afterNextIndex, afterMatchIndex);
+ syncLogRecvAppendEntriesReply(ths, pMsg, logBuf);
+ } while (0);
+
return 0;
}
int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
int32_t ret = 0;
- // print log
- syncAppendEntriesReplyLog2("==syncNodeOnAppendEntriesReplySnapshotCb==", pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, maybe replica already dropped");
- return 0;
+ syncLogRecvAppendEntriesReply(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// drop stale response
if (pMsg->term < ths->pRaftStore->currentTerm) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, recv-term:%" PRIu64 ", drop stale response",
- pMsg->term);
- syncNodeEventLog(ths, logBuf);
+ syncLogRecvAppendEntriesReply(ths, pMsg, "drop stale response");
return 0;
}
- if (gRaftDetailLog) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, before");
- }
- syncIndexMgrLog2("recv sync-append-entries-reply, before pNextIndex:", ths->pNextIndex);
- syncIndexMgrLog2("recv sync-append-entries-reply, before pMatchIndex:", ths->pMatchIndex);
-
// no need this code, because if I receive reply.term, then I must have sent for that term.
// if (pMsg->term > ths->pRaftStore->currentTerm) {
// syncNodeUpdateTerm(ths, pMsg->term);
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, error term, recv-term:%" PRIu64, pMsg->term);
- syncNodeErrorLog(ths, logBuf);
+ syncLogRecvAppendEntriesReply(ths, pMsg, "error term");
return -1;
}
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
+ SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
+ SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
+
if (pMsg->success) {
// nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1]
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1);
@@ -404,11 +383,14 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries
}
}
- if (gRaftDetailLog) {
- syncNodeEventLog(ths, "recv sync-append-entries-reply, after");
- }
- syncIndexMgrLog2("recv sync-append-entries-reply, after pNextIndex:", ths->pNextIndex);
- syncIndexMgrLog2("recv sync-append-entries-reply, after pMatchIndex:", ths->pMatchIndex);
+ SyncIndex afterNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
+ SyncIndex afterMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
+ do {
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "before next:%ld, match:%ld, after next:%ld, match:%ld", beforeNextIndex,
+ beforeMatchIndex, afterNextIndex, afterMatchIndex);
+ syncLogRecvAppendEntriesReply(ths, pMsg, logBuf);
+ } while (0);
return 0;
}
\ No newline at end of file
diff --git a/source/libs/sync/src/syncElection.c b/source/libs/sync/src/syncElection.c
index 3c4b59ce06..375f2e5730 100644
--- a/source/libs/sync/src/syncElection.c
+++ b/source/libs/sync/src/syncElection.c
@@ -71,6 +71,8 @@ int32_t syncNodeRequestVotePeersSnapshot(SSyncNode* pSyncNode) {
}
int32_t syncNodeElect(SSyncNode* pSyncNode) {
+ syncNodeEventLog(pSyncNode, "begin election");
+
int32_t ret = 0;
if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) {
syncNodeFollower2Candidate(pSyncNode);
@@ -118,15 +120,7 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) {
int32_t syncNodeRequestVote(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncRequestVote* pMsg) {
int32_t ret = 0;
-
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-request-vote to %s:%d, {term:%" PRIu64 ", last-index:%" PRId64 ", last-term:%" PRIu64
- "}",
- pSyncNode->vgId, host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
- } while (0);
+ syncLogSendRequestVote(pSyncNode, pMsg, "");
SRpcMsg rpcMsg;
syncRequestVote2RpcMsg(pMsg, &rpcMsg);
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index a9c1147fc1..39bede23f6 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -79,8 +79,7 @@ SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaf
}
}
- syncNodeLog3("syncIndexMgrGetIndex", pSyncIndexMgr->pSyncNode);
- ASSERT(0);
+ return SYNC_INDEX_INVALID;
}
cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
@@ -126,7 +125,7 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr) {
cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr);
- char * serialized = cJSON_Print(pJson);
+ char *serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 7f85f0979f..e0133641b3 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -559,10 +559,11 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) {
snprintf(pEpSet->eps[i].fqdn, sizeof(pEpSet->eps[i].fqdn), "%s", (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodeFqdn);
pEpSet->eps[i].port = (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodePort;
(pEpSet->numOfEps)++;
- sInfo("vgId:%d sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
+ sInfo("vgId:%d, sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn,
+ pEpSet->eps[i].port);
}
pEpSet->inUse = (pSyncNode->pRaftCfg->cfg.myIndex + 1) % pEpSet->numOfEps;
- sInfo("vgId:%d sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
+ sInfo("vgId:%d, sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
@@ -999,7 +1000,18 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
// init TLA+ log vars
pSyncNode->pLogStore = logStoreCreate(pSyncNode);
ASSERT(pSyncNode->pLogStore != NULL);
- pSyncNode->commitIndex = SYNC_INDEX_INVALID;
+
+ SyncIndex commitIndex = SYNC_INDEX_INVALID;
+ if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
+ SSnapshot snapshot = {0};
+ int32_t code = pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
+ ASSERT(code == 0);
+ if (snapshot.lastApplyIndex > commitIndex) {
+ commitIndex = snapshot.lastApplyIndex;
+ syncNodeEventLog(pSyncNode, "reset commit index by snapshot");
+ }
+ }
+ pSyncNode->commitIndex = commitIndex;
// timer ms init
pSyncNode->pingBaseLine = PING_TIMER_MS;
@@ -1553,7 +1565,8 @@ void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
snprintf(logBuf, sizeof(logBuf), "%s", str);
}
// sDebug("%s", logBuf);
- sInfo("%s", logBuf);
+ // sInfo("%s", logBuf);
+ sTrace("%s", logBuf);
} else {
int len = 256 + userStrLen;
@@ -1575,7 +1588,8 @@ void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
snprintf(s, len, "%s", str);
}
// sDebug("%s", s);
- sInfo("%s", s);
+ // sInfo("%s", s);
+ sTrace("%s", s);
taosMemoryFree(s);
}
@@ -2061,21 +2075,21 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER);
pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE;
- syncNodeLog2("==state change syncNodeFollower2Candidate==", pSyncNode);
+ syncNodeEventLog(pSyncNode, "follower to candidate");
}
void syncNodeLeader2Follower(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);
syncNodeBecomeFollower(pSyncNode, "leader to follower");
- syncNodeLog2("==state change syncNodeLeader2Follower==", pSyncNode);
+ syncNodeEventLog(pSyncNode, "leader to follower");
}
void syncNodeCandidate2Follower(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_CANDIDATE);
syncNodeBecomeFollower(pSyncNode, "candidate to follower");
- syncNodeLog2("==state change syncNodeCandidate2Follower==", pSyncNode);
+ syncNodeEventLog(pSyncNode, "candidate to follower");
}
// raft vote --------------
@@ -2912,4 +2926,126 @@ bool syncNodeCanChange(SSyncNode* pSyncNode) {
}
return true;
-}
\ No newline at end of file
+}
+
+void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "send sync-request-vote to %s:%d {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64 "}, %s", host, port,
+ pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s) {
+ char logBuf[256];
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+ snprintf(logBuf, sizeof(logBuf),
+ "recv sync-request-vote from %s:%d, {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64 "}, %s", host,
+ port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogSendRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "send sync-request-vote-reply to %s:%d {term:%" PRIu64 ", grant:%d}, %s", host, port,
+ pMsg->term, pMsg->voteGranted, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogRecvRequestVoteReply(SSyncNode* pSyncNode, const SyncRequestVoteReply* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf), "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, %s", host,
+ port, pMsg->term, pMsg->voteGranted, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "send sync-append-entries to %s:%d, {term:%" PRIu64 ", pre-index:%" PRId64 ", pre-term:%" PRIu64
+ ", pterm:%" PRIu64 ", commit:%" PRId64
+ ", "
+ "datalen:%d}, %s",
+ host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm, pMsg->commitIndex,
+ pMsg->dataLen, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "recv sync-append-entries from %s:%d {term:%" PRIu64 ", pre-index:%" PRIu64 ", pre-term:%" PRIu64
+ ", commit:%" PRIu64 ", pterm:%" PRIu64
+ ", "
+ "datalen:%d}, %s",
+ host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->privateTerm,
+ pMsg->dataLen, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogSendAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntriesBatch* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "send sync-append-entries-batch to %s:%d, {term:%" PRIu64 ", pre-index:%" PRId64 ", pre-term:%" PRIu64
+ ", pterm:%" PRIu64 ", commit:%" PRId64 ", datalen:%d, count:%d}, %s",
+ host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm, pMsg->commitIndex,
+ pMsg->dataLen, pMsg->dataCount, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogRecvAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntriesBatch* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "recv sync-append-entries-batch from %s:%d, {term:%" PRIu64 ", pre-index:%" PRId64 ", pre-term:%" PRIu64
+ ", pterm:%" PRIu64 ", commit:%" PRId64 ", datalen:%d, count:%d}, %s",
+ host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm, pMsg->commitIndex,
+ pMsg->dataLen, pMsg->dataCount, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogSendAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->destId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "send sync-append-entries-reply to %s:%d, {term:%" PRIu64 ", pterm:%" PRIu64 ", success:%d, match:%" PRId64
+ "}, %s",
+ host, port, pMsg->term, pMsg->privateTerm, pMsg->success, pMsg->matchIndex, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
+
+void syncLogRecvAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s) {
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+ char logBuf[256];
+ snprintf(logBuf, sizeof(logBuf),
+ "recv sync-append-entries-reply from %s:%d {term:%" PRIu64 ", pterm:%" PRIu64 ", success:%d, match:%" PRId64
+ "}, %s",
+ host, port, pMsg->term, pMsg->privateTerm, pMsg->success, pMsg->matchIndex, s);
+ syncNodeEventLog(pSyncNode, logBuf);
+}
diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c
index 9f5cba6c66..fbfeb031f6 100644
--- a/source/libs/sync/src/syncRaftStore.c
+++ b/source/libs/sync/src/syncRaftStore.c
@@ -108,10 +108,10 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON *pRoot = cJSON_CreateObject();
char u64Buf[128] = {0};
- snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->currentTerm);
+ snprintf(u64Buf, sizeof(u64Buf), "%" PRIu64 "", pRaftStore->currentTerm);
cJSON_AddStringToObject(pRoot, "current_term", u64Buf);
- snprintf(u64Buf, sizeof(u64Buf), "%lu", pRaftStore->voteFor.addr);
+ snprintf(u64Buf, sizeof(u64Buf), "%" PRIu64 "", pRaftStore->voteFor.addr);
cJSON_AddStringToObject(pRoot, "vote_for_addr", u64Buf);
cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);
@@ -142,11 +142,11 @@ int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len) {
cJSON *pCurrentTerm = cJSON_GetObjectItem(pRoot, "current_term");
ASSERT(cJSON_IsString(pCurrentTerm));
- sscanf(pCurrentTerm->valuestring, "%lu", &(pRaftStore->currentTerm));
+ sscanf(pCurrentTerm->valuestring, "%" PRIu64 "", &(pRaftStore->currentTerm));
cJSON *pVoteForAddr = cJSON_GetObjectItem(pRoot, "vote_for_addr");
ASSERT(cJSON_IsString(pVoteForAddr));
- sscanf(pVoteForAddr->valuestring, "%lu", &(pRaftStore->voteFor.addr));
+ sscanf(pVoteForAddr->valuestring, "%" PRIu64 "", &(pRaftStore->voteFor.addr));
cJSON *pVoteForVgid = cJSON_GetObjectItem(pRoot, "vote_for_vgid");
pRaftStore->voteFor.vgId = pVoteForVgid->valueint;
@@ -188,11 +188,11 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
cJSON *pRoot = cJSON_CreateObject();
if (pRaftStore != NULL) {
- snprintf(u64buf, sizeof(u64buf), "%lu", pRaftStore->currentTerm);
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pRaftStore->currentTerm);
cJSON_AddStringToObject(pRoot, "currentTerm", u64buf);
cJSON *pVoteFor = cJSON_CreateObject();
- snprintf(u64buf, sizeof(u64buf), "%lu", pRaftStore->voteFor.addr);
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pRaftStore->voteFor.addr);
cJSON_AddStringToObject(pVoteFor, "addr", u64buf);
{
uint64_t u64 = pRaftStore->voteFor.addr;
@@ -216,7 +216,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
char *raftStore2Str(SRaftStore *pRaftStore) {
cJSON *pJson = raftStore2Json(pRaftStore);
- char * serialized = cJSON_Print(pJson);
+ char *serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
@@ -224,25 +224,25 @@ char *raftStore2Str(SRaftStore *pRaftStore) {
// for debug -------------------
void raftStorePrint(SRaftStore *pObj) {
char *serialized = raftStore2Str(pObj);
- printf("raftStorePrint | len:%lu | %s \n", strlen(serialized), serialized);
+ printf("raftStorePrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized);
fflush(NULL);
taosMemoryFree(serialized);
}
void raftStorePrint2(char *s, SRaftStore *pObj) {
char *serialized = raftStore2Str(pObj);
- printf("raftStorePrint2 | len:%lu | %s | %s \n", strlen(serialized), s, serialized);
+ printf("raftStorePrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized);
fflush(NULL);
taosMemoryFree(serialized);
}
void raftStoreLog(SRaftStore *pObj) {
char *serialized = raftStore2Str(pObj);
- sTrace("raftStoreLog | len:%lu | %s", strlen(serialized), serialized);
+ sTrace("raftStoreLog | len:%" PRIu64 " | %s", strlen(serialized), serialized);
taosMemoryFree(serialized);
}
void raftStoreLog2(char *s, SRaftStore *pObj) {
char *serialized = raftStore2Str(pObj);
- sTrace("raftStoreLog2 | len:%lu | %s | %s", strlen(serialized), s, serialized);
+ sTrace("raftStoreLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
taosMemoryFree(serialized);
}
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index 968026a3aa..fa3b5d52d7 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -313,18 +313,7 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg) {
int32_t ret = 0;
-
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries to %s:%d, {term:%" PRIu64 ", pre-index:%" PRId64 ", pre-term:%" PRIu64
- ", pterm:%" PRIu64 ", commit:%" PRId64
- ", "
- "datalen:%d}",
- pSyncNode->vgId, host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm,
- pMsg->commitIndex, pMsg->dataLen);
- } while (0);
+ syncLogSendAppendEntries(pSyncNode, pMsg, "");
SRpcMsg rpcMsg;
syncAppendEntries2RpcMsg(pMsg, &rpcMsg);
@@ -334,15 +323,7 @@ int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, c
int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId,
const SyncAppendEntriesBatch* pMsg) {
- do {
- char host[128];
- uint16_t port;
- syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
- sDebug("vgId:%d, send sync-append-entries-batch to %s:%d, {term:%" PRIu64 ", pre-index:%" PRId64
- ", pre-term:%" PRIu64 ", pterm:%" PRIu64 ", commit:%" PRId64 ", datalen:%d, datacount:%d}",
- pSyncNode->vgId, host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm,
- pMsg->commitIndex, pMsg->dataLen, pMsg->dataCount);
- } while (0);
+ syncLogSendAppendEntriesBatch(pSyncNode, pMsg, "");
SRpcMsg rpcMsg;
syncAppendEntriesBatch2RpcMsg(pMsg, &rpcMsg);
diff --git a/source/libs/sync/src/syncRequestVote.c b/source/libs/sync/src/syncRequestVote.c
index 1e6e05099c..bad32c5f91 100644
--- a/source/libs/sync/src/syncRequestVote.c
+++ b/source/libs/sync/src/syncRequestVote.c
@@ -45,22 +45,9 @@
int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
int32_t ret = 0;
- syncRequestVoteLog2("==syncNodeOnRequestVoteCb==", pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- do {
- char logBuf[256];
- char host[64];
- uint16_t port;
- syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-request-vote from %s:%d, term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
- ", maybe replica already dropped",
- host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
- syncNodeEventLog(ths, logBuf);
- } while (0);
-
+ syncLogRecvRequestVote(ths, pMsg, "maybe replica already dropped");
return -1;
}
@@ -93,15 +80,10 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
// trace log
do {
- char logBuf[256];
- char host[64];
- uint16_t port;
- syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-request-vote from %s:%d, term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
- ", reply-grant:%d",
- host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, pReply->voteGranted);
- syncNodeEventLog(ths, logBuf);
+ char logBuf[32];
+ snprintf(logBuf, sizeof(logBuf), "grant:%d", pReply->voteGranted);
+ syncLogRecvRequestVote(ths, pMsg, logBuf);
+ syncLogSendRequestVoteReply(ths, pReply, "");
} while (0);
SRpcMsg rpcMsg;
@@ -214,18 +196,7 @@ int32_t syncNodeOnRequestVoteSnapshotCb(SSyncNode* ths, SyncRequestVote* pMsg) {
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- do {
- char logBuf[256];
- char host[64];
- uint16_t port;
- syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-request-vote from %s:%d, term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
- ", maybe replica already dropped",
- host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
- syncNodeEventLog(ths, logBuf);
- } while (0);
-
+ syncLogRecvRequestVote(ths, pMsg, "maybe replica already dropped");
return -1;
}
@@ -256,15 +227,10 @@ int32_t syncNodeOnRequestVoteSnapshotCb(SSyncNode* ths, SyncRequestVote* pMsg) {
// trace log
do {
- char logBuf[256];
- char host[64];
- uint16_t port;
- syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
- snprintf(logBuf, sizeof(logBuf),
- "recv sync-request-vote from %s:%d, {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
- ", reply-grant:%d}",
- host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, pReply->voteGranted);
- syncNodeEventLog(ths, logBuf);
+ char logBuf[32];
+ snprintf(logBuf, sizeof(logBuf), "grant:%d", pReply->voteGranted);
+ syncLogRecvRequestVote(ths, pMsg, logBuf);
+ syncLogSendRequestVoteReply(ths, pReply, "");
} while (0);
SRpcMsg rpcMsg;
diff --git a/source/libs/sync/src/syncRequestVoteReply.c b/source/libs/sync/src/syncRequestVoteReply.c
index 8ab4f75c5c..566b80881f 100644
--- a/source/libs/sync/src/syncRequestVoteReply.c
+++ b/source/libs/sync/src/syncRequestVoteReply.c
@@ -40,22 +40,16 @@
int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
int32_t ret = 0;
- // print log
- char logBuf[128] = {0};
- snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%" PRIu64, ths->pRaftStore->currentTerm);
- syncRequestVoteReplyLog2(logBuf, pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- sInfo("recv SyncRequestVoteReply, maybe replica already dropped");
- return ret;
+ syncLogRecvRequestVoteReply(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// drop stale response
if (pMsg->term < ths->pRaftStore->currentTerm) {
- sTrace("recv SyncRequestVoteReply, drop stale response, receive_term:%" PRIu64 " current_term:%" PRIu64, pMsg->term,
- ths->pRaftStore->currentTerm);
- return ret;
+ syncLogRecvRequestVoteReply(ths, pMsg, "drop stale response");
+ return -1;
}
// ASSERT(!(pMsg->term > ths->pRaftStore->currentTerm));
@@ -65,14 +59,11 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128] = {0};
- snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%" PRIu64 " current:%" PRIu64,
- pMsg->term, ths->pRaftStore->currentTerm);
- syncNodePrint2(logBuf, ths);
- sError("%s", logBuf);
- return ret;
+ syncLogRecvRequestVoteReply(ths, pMsg, "error term");
+ return -1;
}
+ syncLogRecvRequestVoteReply(ths, pMsg, "");
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
// This tallies votes even when the current state is not Candidate,
@@ -99,7 +90,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
}
}
- return ret;
+ return 0;
}
#if 0
@@ -164,22 +155,16 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
int32_t ret = 0;
- // print log
- char logBuf[128] = {0};
- snprintf(logBuf, sizeof(logBuf), "recv SyncRequestVoteReply, term:%" PRIu64, ths->pRaftStore->currentTerm);
- syncRequestVoteReplyLog2(logBuf, pMsg);
-
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
- sInfo("recv SyncRequestVoteReply, maybe replica already dropped");
- return ret;
+ syncLogRecvRequestVoteReply(ths, pMsg, "maybe replica already dropped");
+ return -1;
}
// drop stale response
if (pMsg->term < ths->pRaftStore->currentTerm) {
- sTrace("recv SyncRequestVoteReply, drop stale response, receive_term:%" PRIu64 " current_term:%" PRIu64, pMsg->term,
- ths->pRaftStore->currentTerm);
- return ret;
+ syncLogRecvRequestVoteReply(ths, pMsg, "drop stale response");
+ return -1;
}
// ASSERT(!(pMsg->term > ths->pRaftStore->currentTerm));
@@ -189,15 +174,11 @@ int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteRepl
// }
if (pMsg->term > ths->pRaftStore->currentTerm) {
- char logBuf[128] = {0};
- snprintf(logBuf, sizeof(logBuf),
- "recv SyncRequestVoteReply, error term, receive_term:%" PRIu64 " current_term:%" PRIu64, pMsg->term,
- ths->pRaftStore->currentTerm);
- syncNodePrint2(logBuf, ths);
- sError("%s", logBuf);
- return ret;
+ syncLogRecvRequestVoteReply(ths, pMsg, "error term");
+ return -1;
}
+ syncLogRecvRequestVoteReply(ths, pMsg, "");
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
// This tallies votes even when the current state is not Candidate,
@@ -224,5 +205,5 @@ int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteRepl
}
}
- return ret;
+ return 0;
}
\ No newline at end of file
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 924a4df90d..279a70cb19 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -573,6 +573,12 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
pReceiver->pSyncNode->commitIndex = pReceiver->snapshot.lastApplyIndex;
}
+ // maybe update term
+ if (pReceiver->snapshot.lastApplyTerm > pReceiver->pSyncNode->pRaftStore->currentTerm) {
+ pReceiver->pSyncNode->pRaftStore->currentTerm = pReceiver->snapshot.lastApplyTerm;
+ raftStorePersist(pReceiver->pSyncNode->pRaftStore);
+ }
+
// stop writer, apply data
code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true,
&(pReceiver->snapshot));
diff --git a/source/libs/sync/test/sh/a.sh b/source/libs/sync/test/sh/a.sh
new file mode 100644
index 0000000000..b6d2bdeabf
--- /dev/null
+++ b/source/libs/sync/test/sh/a.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+if [ $# != 1 ] ; then
+ echo "Usage: $0 log-path"
+ echo ""
+ exit 1
+fi
+
+logpath=$1
+echo "logpath: ${logpath}"
+
+echo ""
+echo "clean old log ..."
+rm -f ${logpath}/log.*
+
+echo ""
+echo "generate log.dnode ..."
+for dnode in `ls ${logpath} | grep dnode`;do
+ echo "generate log.${dnode}"
+ cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN > ${logpath}/log.${dnode}
+done
+
+echo ""
+echo "generate vgId ..."
+cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq > ${logpath}/log.vgIds.tmp
+echo "all vgIds:" > ${logpath}/log.vgIds
+cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
+for dnode in `ls ${logpath} | grep dnode | grep -v log`;do
+ echo "" >> ${logpath}/log.vgIds
+ echo "" >> ${logpath}/log.vgIds
+ echo "${dnode}:" >> ${logpath}/log.vgIds
+ cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
+done
+
+echo ""
+echo "generate log.dnode.vgId ..."
+for logdnode in `ls ${logpath}/log.dnode*`;do
+ for vgId in `cat ${logpath}/log.vgIds.tmp`;do
+ rowNum=`cat ${logdnode} | grep "${vgId}" | awk 'BEGIN{rowNum=0}{rowNum++}END{print rowNum}'`
+ #echo "-----${rowNum}"
+ if [ $rowNum -gt 0 ] ; then
+ echo "generate ${logdnode}.${vgId}"
+ cat ${logdnode} | grep "${vgId}" > ${logdnode}.${vgId}
+ fi
+ done
+done
+
+echo ""
+echo "generate log.dnode.main ..."
+for file in `ls ${logpath}/log.dnode* | grep -v vgId`;do
+ echo "generate ${file}.main"
+ cat ${file} | awk '{ if(index($0, "sync open") > 0 || index($0, "sync close") > 0 || index($0, "become leader") > 0) {print $0} }' > ${file}.main
+done
+
+echo ""
+echo "generate log.leader.term ..."
+cat ${logpath}/*.main | grep "become leader" | grep -v "config change" | awk '{print $5,$0}' | awk -F, '{print $4"_"$0}' | sort -k1 > ${logpath}/log.leader.term
+
+echo ""
+echo "generate log.index, log.snapshot, log.records, log.actions ..."
+for file in `ls ${logpath}/log.dnode*vgId*`;do
+ destfile1=${file}.index
+ echo "generate ${destfile1}"
+ cat ${file} | awk '{ if(index($0, "write index:") > 0 || index($0, "wal truncate, from-index") > 0) {print $0} }' > ${destfile1}
+
+ destfile2=${file}.snapshot
+ echo "generate ${destfile2}"
+ cat ${file} | awk '{ if(index($0, "snapshot sender") > 0 || index($0, "snapshot receiver") > 0) {print $0} }' | grep -v "save old" | grep -v "create new" | grep -v "udpate replicaIndex" | grep -v "delete old" | grep -v "reset for" > ${destfile2}
+
+ destfile3=${file}.records
+ echo "generate ${destfile3}"
+ cat ${file} | awk '{ if(index($0, "write index:") > 0 || index($0, "wal truncate, from-index") > 0 || index($0, "snapshot sender") > 0 || index($0, "snapshot receiver") > 0) {print $0} }' | grep -v "save old" | grep -v "create new" | grep -v "udpate replicaIndex" | grep -v "delete old" | grep -v "reset for" > ${destfile3}
+
+ destfile4=${file}.commit
+ echo "generate ${destfile4}"
+ cat ${file} | awk '{ if(index($0, "commit by") > 0) {print $0} }' > ${destfile4}
+
+ destfile5=${file}.actions
+ echo "generate ${destfile5}"
+ cat ${file} | awk '{ if(index($0, "commit by") > 0 || index($0, "sync open") > 0 || index($0, "sync close") > 0 || index($0, "become leader") > 0 || index($0, "write index:") > 0 || index($0, "wal truncate, from-index") > 0 || index($0, "snapshot sender") > 0 || index($0, "snapshot receiver") > 0) {print $0} }' | grep -v "save old" | grep -v "create new" | grep -v "udpate replicaIndex" | grep -v "delete old" | grep -v "reset for" > ${destfile5}
+
+done
+
+exit 0
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index 2972f512f1..9dd1a745d3 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -226,11 +226,13 @@ typedef struct {
int index;
int nAsync;
uv_async_t* asyncs;
+ int8_t stop;
} SAsyncPool;
SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb);
void transDestroyAsyncPool(SAsyncPool* pool);
int transAsyncSend(SAsyncPool* pool, queue* mq);
+bool transAsyncPoolIsEmpty(SAsyncPool* pool);
#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \
do { \
@@ -289,14 +291,14 @@ void transUnrefSrvHandle(void* handle);
void transRefCliHandle(void* handle);
void transUnrefCliHandle(void* handle);
-void transReleaseCliHandle(void* handle);
-void transReleaseSrvHandle(void* handle);
+int transReleaseCliHandle(void* handle);
+int transReleaseSrvHandle(void* handle);
-void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransCtx* pCtx);
-void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp);
-void transSendResponse(const STransMsg* msg);
-void transRegisterMsg(const STransMsg* msg);
-void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);
+int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransCtx* pCtx);
+int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp);
+int transSendResponse(const STransMsg* msg);
+int transRegisterMsg(const STransMsg* msg);
+int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);
int64_t transAllocHandle();
@@ -392,7 +394,7 @@ typedef struct SDelayQueue {
} SDelayQueue;
int transDQCreate(uv_loop_t* loop, SDelayQueue** queue);
-void transDQDestroy(SDelayQueue* queue);
+void transDQDestroy(SDelayQueue* queue, void (*freeFunc)(void* arg));
int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs);
bool transEpSetIsEqual(SEpSet* a, SEpSet* b);
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 725f3b32cf..7633820292 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -25,7 +25,7 @@ void (*taosCloseHandle[])(void* arg) = {transCloseServer, transCloseClient};
void (*taosRefHandle[])(void* handle) = {transRefSrvHandle, transRefCliHandle};
void (*taosUnRefHandle[])(void* handle) = {transUnrefSrvHandle, transUnrefCliHandle};
-void (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle};
+int (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle};
static int32_t transValidLocalFqdn(const char* localFqdn, uint32_t* ip) {
*ip = taosGetIpv4FromFqdn(localFqdn);
@@ -112,7 +112,7 @@ void* rpcMallocCont(int32_t contLen) {
void rpcFreeCont(void* cont) {
if (cont == NULL) return;
taosMemoryFree((char*)cont - TRANS_MSG_OVERHEAD);
- tTrace("free mem:%p", (char*)cont - TRANS_MSG_OVERHEAD);
+ tTrace("rpc free cont:%p", (char*)cont - TRANS_MSG_OVERHEAD);
}
void* rpcReallocCont(void* ptr, int32_t contLen) {
@@ -129,25 +129,20 @@ void* rpcReallocCont(void* ptr, int32_t contLen) {
return st + TRANS_MSG_OVERHEAD;
}
-void rpcSendRedirectRsp(void* thandle, const SEpSet* pEpSet) {
- // deprecated api
- assert(0);
-}
-
int32_t rpcReportProgress(void* pConn, char* pCont, int32_t contLen) { return -1; }
void rpcCancelRequest(int64_t rid) { return; }
-void rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid) {
- transSendRequest(shandle, pEpSet, pMsg, NULL);
+int rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid) {
+ return transSendRequest(shandle, pEpSet, pMsg, NULL);
}
-void rpcSendRequestWithCtx(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid, SRpcCtx* pCtx) {
- transSendRequest(shandle, pEpSet, pMsg, pCtx);
+int rpcSendRequestWithCtx(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid, SRpcCtx* pCtx) {
+ return transSendRequest(shandle, pEpSet, pMsg, pCtx);
}
-void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) {
- transSendRecv(shandle, pEpSet, pMsg, pRsp);
+int rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) {
+ return transSendRecv(shandle, pEpSet, pMsg, pRsp);
}
-void rpcSendResponse(const SRpcMsg* pMsg) { transSendResponse(pMsg); }
+int rpcSendResponse(const SRpcMsg* pMsg) { return transSendResponse(pMsg); }
void rpcRefHandle(void* handle, int8_t type) {
assert(type == TAOS_CONN_SERVER || type == TAOS_CONN_CLIENT);
@@ -159,15 +154,15 @@ void rpcUnrefHandle(void* handle, int8_t type) {
(*taosUnRefHandle[type])(handle);
}
-void rpcRegisterBrokenLinkArg(SRpcMsg* msg) { transRegisterMsg(msg); }
-void rpcReleaseHandle(void* handle, int8_t type) {
+int rpcRegisterBrokenLinkArg(SRpcMsg* msg) { return transRegisterMsg(msg); }
+int rpcReleaseHandle(void* handle, int8_t type) {
assert(type == TAOS_CONN_SERVER || type == TAOS_CONN_CLIENT);
- (*transReleaseHandle[type])(handle);
+ return (*transReleaseHandle[type])(handle);
}
-void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
+int rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
// later
- transSetDefaultAddr(thandle, ip, fqdn);
+ return transSetDefaultAddr(thandle, ip, fqdn);
}
void* rpcAllocHandle() { return (void*)transAllocHandle(); }
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index be3111e870..f94a7f3c37 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -70,6 +70,8 @@ typedef struct SCliThrd {
SCvtAddr cvtAddr;
+ SCliMsg* stopMsg;
+
bool quit;
} SCliThrd;
@@ -140,7 +142,7 @@ static void destroyUserdata(STransMsg* userdata);
static int cliRBChoseIdx(STrans* pTransInst);
-static void destroyCmsg(SCliMsg* cmsg);
+static void destroyCmsg(void* cmsg);
static void transDestroyConnCtx(STransConnCtx* ctx);
// thread obj
static SCliThrd* createThrdObj();
@@ -198,6 +200,7 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
} \
destroyCmsg(pMsg); \
cliReleaseUnfinishedMsg(conn); \
+ transQueueClear(&conn->cliMsgs); \
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); \
return; \
} \
@@ -545,6 +548,7 @@ static void addConnToPool(void* pool, SCliConn* conn) {
STrans* pTransInst = thrd->pTransInst;
conn->expireTime = taosGetTimestampMs() + CONN_PERSIST_TIME(pTransInst->idleTime);
+ cliReleaseUnfinishedMsg(conn);
transQueueClear(&conn->cliMsgs);
transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
@@ -645,6 +649,7 @@ static void cliDestroy(uv_handle_t* handle) {
conn->stream->data = NULL;
taosMemoryFree(conn->stream);
transCtxCleanup(&conn->ctx);
+ cliReleaseUnfinishedMsg(conn);
transQueueDestroy(&conn->cliMsgs);
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
@@ -758,14 +763,17 @@ void cliConnCb(uv_connect_t* req, int status) {
}
static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) {
+ if (!transAsyncPoolIsEmpty(pThrd->asyncPool)) {
+ pThrd->stopMsg = pMsg;
+ return;
+ }
+ pThrd->stopMsg = NULL;
pThrd->quit = true;
tDebug("cli work thread %p start to quit", pThrd);
destroyCmsg(pMsg);
destroyConnPool(pThrd->pool);
uv_timer_stop(&pThrd->timer);
uv_walk(pThrd->loop, cliWalkCb, NULL);
-
- // uv_stop(pThrd->loop);
}
static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)(pMsg->msg.info.handle);
@@ -922,6 +930,7 @@ static void cliAsyncCb(uv_async_t* handle) {
if (count >= 2) {
tTrace("cli process batch size:%d", count);
}
+ if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd);
}
static void* cliWorkThread(void* arg) {
@@ -962,7 +971,8 @@ static void destroyUserdata(STransMsg* userdata) {
transFreeMsg(userdata->pCont);
userdata->pCont = NULL;
}
-static void destroyCmsg(SCliMsg* pMsg) {
+static void destroyCmsg(void* arg) {
+ SCliMsg* pMsg = arg;
if (pMsg == NULL) {
return;
}
@@ -1001,7 +1011,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg);
transDestroyAsyncPool(pThrd->asyncPool);
- transDQDestroy(pThrd->delayQueue);
+ transDQDestroy(pThrd->delayQueue, destroyCmsg);
taosMemoryFree(pThrd->loop);
taosMemoryFree(pThrd);
}
@@ -1016,6 +1026,7 @@ void cliSendQuit(SCliThrd* thrd) {
SCliMsg* msg = taosMemoryCalloc(1, sizeof(SCliMsg));
msg->type = Quit;
transAsyncSend(thrd->asyncPool, &msg->q);
+ atomic_store_8(&thrd->asyncPool->stop, 1);
}
void cliWalkCb(uv_handle_t* handle, void* arg) {
if (!uv_is_closing(handle)) {
@@ -1221,33 +1232,38 @@ SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle, bool* validHandle) {
}
return pThrd;
}
-void transReleaseCliHandle(void* handle) {
+int transReleaseCliHandle(void* handle) {
int idx = -1;
bool valid = false;
SCliThrd* pThrd = transGetWorkThrdFromHandle((int64_t)handle, &valid);
if (pThrd == NULL) {
- return;
+ return -1;
}
STransMsg tmsg = {.info.handle = handle};
SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cmsg->msg = tmsg;
cmsg->type = Release;
- transAsyncSend(pThrd->asyncPool, &cmsg->q);
- return;
+ if (0 != transAsyncSend(pThrd->asyncPool, &cmsg->q)) {
+ return -1;
+ }
+ return 0;
}
-void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx) {
+int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx) {
STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle);
- if (pTransInst == NULL) return;
+ if (pTransInst == NULL) {
+ transFreeMsg(pReq->pCont);
+ return -1;
+ }
bool valid = false;
SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
if (pThrd == NULL && valid == false) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
- return;
+ return -1;
}
TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64());
@@ -1272,21 +1288,28 @@ void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra
STraceId* trace = &pReq->info.traceId;
tGDebug("%s send request at thread:%08" PRId64 ", dst:%s:%d, app:%p", transLabel(pTransInst), pThrd->pid,
EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->info.ahandle);
- ASSERT(transAsyncSend(pThrd->asyncPool, &(cliMsg->q)) == 0);
+ if (0 != transAsyncSend(pThrd->asyncPool, &(cliMsg->q))) {
+ destroyCmsg(cliMsg);
+ transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+ return -1;
+ }
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
- return;
+ return 0;
}
-void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp) {
+int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp) {
STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle);
- if (pTransInst == NULL) return;
+ if (pTransInst == NULL) {
+ transFreeMsg(pReq->pCont);
+ return -1;
+ }
bool valid = false;
SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
if (pThrd == NULL && valid == false) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
- return;
+ return -1;
}
tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t));
@@ -1313,20 +1336,28 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM
tGDebug("%s send request at thread:%08" PRId64 ", dst:%s:%d, app:%p", transLabel(pTransInst), pThrd->pid,
EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->info.ahandle);
- transAsyncSend(pThrd->asyncPool, &(cliMsg->q));
+ if (0 != transAsyncSend(pThrd->asyncPool, &cliMsg->q)) {
+ tsem_destroy(sem);
+ taosMemoryFree(sem);
+ destroyCmsg(cliMsg);
+ transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+ return -1;
+ }
tsem_wait(sem);
tsem_destroy(sem);
taosMemoryFree(sem);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
- return;
+ return 0;
}
/*
*
**/
-void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
+int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle);
- if (pTransInst == NULL) return;
+ if (pTransInst == NULL) {
+ return -1;
+ }
SCvtAddr cvtAddr = {0};
if (ip != NULL && fqdn != NULL) {
@@ -1346,9 +1377,14 @@ void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
SCliThrd* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i];
tDebug("%s update epset at thread:%08" PRId64, pTransInst->label, thrd->pid);
- transAsyncSend(thrd->asyncPool, &(cliMsg->q));
+ if (transAsyncSend(thrd->asyncPool, &(cliMsg->q)) != 0) {
+ destroyCmsg(cliMsg);
+ transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+ return -1;
+ }
}
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+ return 0;
}
int64_t transAllocHandle() {
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index 84af8da513..c3cba3118c 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -124,6 +124,7 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
SConnBuffer* p = connBuf;
if (p->cap == 0) {
p->buf = (char*)taosMemoryCalloc(CAPACITY, sizeof(char));
+ tTrace("internal malloc mem:%p, size:%d", p->buf, CAPACITY);
p->len = 0;
p->cap = CAPACITY;
p->total = -1;
@@ -136,7 +137,7 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
} else {
p->cap = p->total;
p->buf = taosMemoryRealloc(p->buf, p->cap);
- tTrace("internal malloc mem:%p, size:%d", p->buf, p->cap);
+ tTrace("internal realloc mem:%p, size:%d", p->buf, p->cap);
uvBuf->base = p->buf + p->len;
uvBuf->len = p->cap - p->len;
@@ -176,7 +177,6 @@ int transSetConnOption(uv_tcp_t* stream) {
SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) {
SAsyncPool* pool = taosMemoryCalloc(1, sizeof(SAsyncPool));
- pool->index = 0;
pool->nAsync = sz;
pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync);
@@ -206,6 +206,9 @@ void transDestroyAsyncPool(SAsyncPool* pool) {
taosMemoryFree(pool);
}
int transAsyncSend(SAsyncPool* pool, queue* q) {
+ if (atomic_load_8(&pool->stop) == 1) {
+ return -1;
+ }
int idx = pool->index;
idx = idx % pool->nAsync;
// no need mutex here
@@ -225,6 +228,14 @@ int transAsyncSend(SAsyncPool* pool, queue* q) {
}
return uv_async_send(async);
}
+bool transAsyncPoolIsEmpty(SAsyncPool* pool) {
+ for (int i = 0; i < pool->nAsync; i++) {
+ uv_async_t* async = &(pool->asyncs[i]);
+ SAsyncItem* item = async->data;
+ if (!QUEUE_IS_EMPTY(&item->qmsg)) return false;
+ }
+ return true;
+}
void transCtxInit(STransCtx* ctx) {
// init transCtx
@@ -240,7 +251,7 @@ void transCtxCleanup(STransCtx* ctx) {
ctx->freeFunc(iter->val);
iter = taosHashIterate(ctx->args, iter);
}
-
+ ctx->freeFunc(ctx->brokenVal.val);
taosHashCleanup(ctx->args);
ctx->args = NULL;
}
@@ -456,7 +467,7 @@ int transDQCreate(uv_loop_t* loop, SDelayQueue** queue) {
return 0;
}
-void transDQDestroy(SDelayQueue* queue) {
+void transDQDestroy(SDelayQueue* queue, void (*freeFunc)(void* arg)) {
taosMemoryFree(queue->timer);
while (heapSize(queue->heap) > 0) {
@@ -467,6 +478,11 @@ void transDQDestroy(SDelayQueue* queue) {
heapRemove(queue->heap, minNode);
SDelayTask* task = container_of(minNode, SDelayTask, node);
+
+ STaskArg* arg = task->arg;
+ freeFunc(arg->param1);
+ taosMemoryFree(arg);
+
taosMemoryFree(task);
}
heapDestroy(queue->heap);
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 9a511adf9b..7b9402f954 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -1034,7 +1034,7 @@ void transUnrefSrvHandle(void* handle) {
}
}
-void transReleaseSrvHandle(void* handle) {
+int transReleaseSrvHandle(void* handle) {
SRpcHandleInfo* info = handle;
SExHandle* exh = info->handle;
int64_t refId = info->refId;
@@ -1053,16 +1053,16 @@ void transReleaseSrvHandle(void* handle) {
tTrace("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
transReleaseExHandle(transGetRefMgt(), refId);
- return;
+ return 0;
_return1:
tTrace("handle %p failed to send to release handle", exh);
transReleaseExHandle(transGetRefMgt(), refId);
- return;
+ return -1;
_return2:
tTrace("handle %p failed to send to release handle", exh);
- return;
+ return -1;
}
-void transSendResponse(const STransMsg* msg) {
+int transSendResponse(const STransMsg* msg) {
SExHandle* exh = msg->info.handle;
int64_t refId = msg->info.refId;
ASYNC_CHECK_HANDLE(exh, refId);
@@ -1082,18 +1082,18 @@ void transSendResponse(const STransMsg* msg) {
tGTrace("conn %p start to send resp (1/2)", exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
transReleaseExHandle(transGetRefMgt(), refId);
- return;
+ return 0;
_return1:
tTrace("handle %p failed to send resp", exh);
rpcFreeCont(msg->pCont);
transReleaseExHandle(transGetRefMgt(), refId);
- return;
+ return -1;
_return2:
tTrace("handle %p failed to send resp", exh);
rpcFreeCont(msg->pCont);
- return;
+ return -1;
}
-void transRegisterMsg(const STransMsg* msg) {
+int transRegisterMsg(const STransMsg* msg) {
SExHandle* exh = msg->info.handle;
int64_t refId = msg->info.refId;
ASYNC_CHECK_HANDLE(exh, refId);
@@ -1112,16 +1112,17 @@ void transRegisterMsg(const STransMsg* msg) {
tTrace("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
transReleaseExHandle(transGetRefMgt(), refId);
- return;
+ return 0;
_return1:
tTrace("handle %p failed to register brokenlink", exh);
rpcFreeCont(msg->pCont);
transReleaseExHandle(transGetRefMgt(), refId);
- return;
+ return -1;
_return2:
tTrace("handle %p failed to register brokenlink", exh);
rpcFreeCont(msg->pCont);
+ return -1;
}
int transGetConnInfo(void* thandle, STransHandleInfo* pConnInfo) { return -1; }
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index 991b50f7c0..a5fd3fca35 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -33,12 +33,13 @@ int64_t FORCE_INLINE walGetLastVer(SWal* pWal) { return pWal->vers.lastVer; }
int64_t FORCE_INLINE walGetCommittedVer(SWal* pWal) { return pWal->vers.commitVer; }
+int64_t FORCE_INLINE walGetAppliedVer(SWal* pWal) { return pWal->vers.appliedVer; }
+
static FORCE_INLINE int walBuildMetaName(SWal* pWal, int metaVer, char* buf) {
return sprintf(buf, "%s/meta-ver%d", pWal->path, metaVer);
}
static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
- ASSERT(pWal->fileInfoSet != NULL);
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
ASSERT(sz > 0);
#if 0
@@ -53,7 +54,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
int64_t fileSize = 0;
taosStatFile(fnameStr, &fileSize, NULL);
- int readSize = TMIN(WAL_MAX_SIZE + 2, fileSize);
+ int32_t readSize = TMIN(WAL_SCAN_BUF_SIZE, fileSize);
pLastFileInfo->fileSize = fileSize;
TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ);
@@ -71,7 +72,8 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
return -1;
}
- taosLSeekFile(pFile, -readSize, SEEK_END);
+ int64_t offset;
+ offset = taosLSeekFile(pFile, -readSize, SEEK_END);
if (readSize != taosReadFile(pFile, buf, readSize)) {
taosMemoryFree(buf);
taosCloseFile(&pFile);
@@ -79,31 +81,56 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
return -1;
}
- char* haystack = buf;
char* found = NULL;
- char* candidate;
- while ((candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(uint64_t))) != NULL) {
- // read and validate
- SWalCkHead* logContent = (SWalCkHead*)candidate;
- if (walValidHeadCksum(logContent) == 0 && walValidBodyCksum(logContent) == 0) {
- found = candidate;
+ while (1) {
+ char* haystack = buf;
+ char* candidate;
+ while ((candidate = tmemmem(haystack, readSize - (haystack - buf), (char*)&magic, sizeof(uint64_t))) != NULL) {
+ // read and validate
+ SWalCkHead* logContent = (SWalCkHead*)candidate;
+ if (walValidHeadCksum(logContent) == 0 && walValidBodyCksum(logContent) == 0) {
+ found = candidate;
+ }
+ haystack = candidate + 1;
}
- haystack = candidate + 1;
- }
- if (found == buf) {
- SWalCkHead* logContent = (SWalCkHead*)found;
- if (walValidHeadCksum(logContent) != 0 || walValidBodyCksum(logContent) != 0) {
- // file has to be deleted
+ if (found || offset == 0) break;
+ offset = TMIN(0, offset - readSize + sizeof(uint64_t));
+ int64_t offset2 = taosLSeekFile(pFile, offset, SEEK_SET);
+ ASSERT(offset == offset2);
+ if (readSize != taosReadFile(pFile, buf, readSize)) {
taosMemoryFree(buf);
taosCloseFile(&pFile);
- terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
+ terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
+#if 0
+ if (found == buf) {
+ SWalCkHead* logContent = (SWalCkHead*)found;
+ if (walValidHeadCksum(logContent) != 0 || walValidBodyCksum(logContent) != 0) {
+ // file has to be deleted
+ taosMemoryFree(buf);
+ taosCloseFile(&pFile);
+ terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
+ return -1;
+ }
+ }
+#endif
}
- taosCloseFile(&pFile);
- SWalCkHead* lastEntry = (SWalCkHead*)found;
+ // TODO truncate file
- return lastEntry->head.version;
+ if (found == NULL) {
+ // file corrupted, no complete log
+ // TODO delete and search in previous files
+ ASSERT(0);
+ terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
+ return -1;
+ }
+ SWalCkHead* lastEntry = (SWalCkHead*)found;
+ int64_t retVer = lastEntry->head.version;
+ taosCloseFile(&pFile);
+ taosMemoryFree(buf);
+
+ return retVer;
}
int walCheckAndRepairMeta(SWal* pWal) {
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index 8b4225c80c..5bc9cdafa2 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -66,9 +66,15 @@ void walCloseReader(SWalReader *pRead) {
}
int32_t walNextValidMsg(SWalReader *pRead) {
- wDebug("vgId:%d wal start to fetch", pRead->pWal->cfg.vgId);
int64_t fetchVer = pRead->curVersion;
- int64_t endVer = pRead->cond.scanUncommited ? walGetLastVer(pRead->pWal) : walGetCommittedVer(pRead->pWal);
+ int64_t lastVer = walGetLastVer(pRead->pWal);
+ int64_t committedVer = walGetCommittedVer(pRead->pWal);
+ int64_t appliedVer = walGetAppliedVer(pRead->pWal);
+ int64_t endVer = pRead->cond.scanUncommited ? lastVer : committedVer;
+ endVer = TMIN(appliedVer, endVer);
+
+ wDebug("vgId:%d wal start to fetch, ver %ld, last ver %ld commit ver %ld, applied ver %ld, end ver %ld",
+ pRead->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer, endVer);
while (fetchVer <= endVer) {
if (walFetchHeadNew(pRead, fetchVer) < 0) {
return -1;
diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c
index 26dc3cdffb..d6348cc5dd 100644
--- a/source/libs/wal/src/walWrite.c
+++ b/source/libs/wal/src/walWrite.c
@@ -64,6 +64,12 @@ int32_t walRestoreFromSnapshot(SWal *pWal, int64_t ver) {
return 0;
}
+int32_t walApplyVer(SWal *pWal, int64_t ver) {
+ // TODO: error check
+ pWal->vers.appliedVer = ver;
+ return 0;
+}
+
int32_t walCommit(SWal *pWal, int64_t ver) {
ASSERT(pWal->vers.commitVer >= pWal->vers.snapshotVer);
ASSERT(pWal->vers.commitVer <= pWal->vers.lastVer);
@@ -430,11 +436,6 @@ END:
}
int64_t walAppendLog(SWal *pWal, tmsg_t msgType, SWalSyncInfo syncMeta, const void *body, int32_t bodyLen) {
- if (bodyLen > TSDB_MAX_WAL_SIZE) {
- terrno = TSDB_CODE_WAL_SIZE_LIMIT;
- return -1;
- }
-
taosThreadMutexLock(&pWal->mutex);
int64_t index = pWal->vers.lastVer + 1;
@@ -466,10 +467,6 @@ int32_t walWriteWithSyncInfo(SWal *pWal, int64_t index, tmsg_t msgType, SWalSync
int32_t bodyLen) {
int32_t code = 0;
- if (bodyLen > TSDB_MAX_WAL_SIZE) {
- terrno = TSDB_CODE_WAL_SIZE_LIMIT;
- return -1;
- }
taosThreadMutexLock(&pWal->mutex);
// concurrency control:
diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c
index b6220b0ae8..8450e8baea 100644
--- a/source/os/src/osSysinfo.c
+++ b/source/os/src/osSysinfo.c
@@ -210,7 +210,7 @@ static int32_t taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) {
}
-bool taosCheckSystemIsSmallEnd() {
+bool taosCheckSystemIsLittleEnd() {
union check {
int16_t i;
char ch[2];
diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c
index 0cb4228e42..3c81ba3d9f 100644
--- a/source/os/src/osTime.c
+++ b/source/os/src/osTime.c
@@ -371,7 +371,7 @@ time_t taosMktime(struct tm *timep) {
localtime_s(&tm1, &tt);
ss.wYear = tm1.tm_year + 1900;
ss.wMonth = tm1.tm_mon + 1;
- ss.wDay = tm1.tm_wday;
+ ss.wDay = tm1.tm_mday;
ss.wHour = tm1.tm_hour;
ss.wMinute = tm1.tm_min;
ss.wSecond = tm1.tm_sec;
@@ -383,7 +383,7 @@ time_t taosMktime(struct tm *timep) {
s.wYear = timep->tm_year + 1900;
s.wMonth = timep->tm_mon + 1;
- s.wDay = timep->tm_wday;
+ s.wDay = timep->tm_mday;
s.wHour = timep->tm_hour;
s.wMinute = timep->tm_min;
s.wSecond = timep->tm_sec;
diff --git a/source/util/src/tutil.c b/source/util/src/tutil.c
index 7f3728e2ad..addb9f55ba 100644
--- a/source/util/src/tutil.c
+++ b/source/util/src/tutil.c
@@ -64,20 +64,6 @@ int32_t strdequote(char *z) {
return j + 1; // only one quote, do nothing
}
-char *strDupUnquo(const char *src) {
- if (src == NULL) return NULL;
- if (src[0] != '`') return strdup(src);
- int32_t len = (int32_t)strlen(src);
- if (src[len - 1] != '`') return NULL;
- char *ret = taosMemoryMalloc(len);
- if (ret == NULL) return NULL;
- for (int32_t i = 0; i < len - 1; i++) {
- ret[i] = src[i + 1];
- }
- ret[len - 1] = 0;
- return ret;
-}
-
size_t strtrim(char *z) {
int32_t i = 0;
int32_t j = 0;
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index eba1d938da..0aea6e3e14 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -741,7 +741,10 @@ class AnyState:
sCnt += 1
if (sCnt >= 2):
raise CrashGenError(
- "Unexpected more than 1 success with task: {}".format(cls))
+ "Unexpected more than 1 success with task: {}, in task set: {}".format(
+ cls.__name__, # verified just now that isinstance(task, cls)
+ [c.__class__.__name__ for c in tasks]
+ ))
def assertIfExistThenSuccess(self, tasks, cls):
sCnt = 0
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 94043ed01a..7133e8365d 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -11,13 +11,13 @@
# -*- coding: utf-8 -*-
-from collections import defaultdict
import random
import string
import requests
import time
import socket
import json
+import toml
from .boundary import DataBoundary
import taos
from util.log import *
@@ -25,6 +25,79 @@ from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
+from util.constant import *
+from dataclasses import dataclass,field
+from typing import List
+
+@dataclass
+class DataSet:
+ ts_data : List[int] = field(default_factory=list)
+ int_data : List[int] = field(default_factory=list)
+ bint_data : List[int] = field(default_factory=list)
+ sint_data : List[int] = field(default_factory=list)
+ tint_data : List[int] = field(default_factory=list)
+ uint_data : List[int] = field(default_factory=list)
+ ubint_data : List[int] = field(default_factory=list)
+ usint_data : List[int] = field(default_factory=list)
+ utint_data : List[int] = field(default_factory=list)
+ float_data : List[float] = field(default_factory=list)
+ double_data : List[float] = field(default_factory=list)
+ bool_data : List[int] = field(default_factory=list)
+ vchar_data : List[str] = field(default_factory=list)
+ nchar_data : List[str] = field(default_factory=list)
+
+ def get_order_set(self,
+ rows,
+ int_step :int = 1,
+ bint_step :int = 1,
+ sint_step :int = 1,
+ tint_step :int = 1,
+ uint_step :int = 1,
+ ubint_step :int = 1,
+ usint_step :int = 1,
+ utint_step :int = 1,
+ float_step :float = 1,
+ double_step :float = 1,
+ bool_start :int = 1,
+ vchar_prefix:str = "vachar_",
+ vchar_step :int = 1,
+ nchar_prefix:str = "nchar_测试_",
+ nchar_step :int = 1,
+ ts_step :int = 1
+ ):
+ for i in range(rows):
+ self.int_data.append( int(i * int_step % INT_MAX ))
+ self.bint_data.append( int(i * bint_step % BIGINT_MAX ))
+ self.sint_data.append( int(i * sint_step % SMALLINT_MAX ))
+ self.tint_data.append( int(i * tint_step % TINYINT_MAX ))
+ self.uint_data.append( int(i * uint_step % INT_UN_MAX ))
+ self.ubint_data.append( int(i * ubint_step % BIGINT_UN_MAX ))
+ self.usint_data.append( int(i * usint_step % SMALLINT_UN_MAX ))
+ self.utint_data.append( int(i * utint_step % TINYINT_UN_MAX ))
+ self.float_data.append( float(i * float_step % FLOAT_MAX ))
+ self.double_data.append( float(i * double_step % DOUBLE_MAX ))
+ self.bool_data.append( bool((i + bool_start) % 2 ))
+ self.vchar_data.append( f"{vchar_prefix}_{i * vchar_step}" )
+ self.nchar_data.append( f"{nchar_prefix}_{i * nchar_step}")
+ self.ts_data.append( int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000 - i * ts_step))
+
+ def get_disorder_set(self,
+ rows,
+ int_low :int = INT_MIN,
+ int_up :int = INT_MAX,
+ bint_low :int = BIGINT_MIN,
+ bint_up :int = BIGINT_MAX,
+ sint_low :int = SMALLINT_MIN,
+ sint_up :int = SMALLINT_MAX,
+ tint_low :int = TINYINT_MIN,
+ tint_up :int = TINYINT_MAX,
+ ubint_low :int = BIGINT_UN_MIN,
+ ubint_up :int = BIGINT_UN_MAX,
+
+
+ ):
+ pass
+
class TDCom:
def __init__(self):
@@ -372,6 +445,7 @@ class TDCom:
def getClientCfgPath(self):
buildPath = self.getBuildPath()
+
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
@@ -650,7 +724,7 @@ class TDCom:
else:
column_value_str += f'{column_value}, '
idx += 1
- column_value_str = column_value_str.rstrip()[:-1]
+ column_value_str = column_value_str.rstrip()[:-1]
insert_sql = f'insert into {dbname}.{tbname} values ({column_value_str});'
tsql.execute(insert_sql)
def getOneRow(self, location, containElm):
@@ -662,12 +736,12 @@ class TDCom:
return res_list
else:
tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}")
-
- def killProcessor(self, processorName):
+
+ def killProcessor(self, processorName):
if (platform.system().lower() == 'windows'):
os.system("TASKKILL /F /IM %s.exe"%processorName)
else:
- os.system('pkill %s'%processorName)
+ os.system('pkill %s'%processorName)
def is_json(msg):
@@ -680,4 +754,29 @@ def is_json(msg):
else:
return False
+def get_path(tool="taosd"):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ paths = []
+ for root, dirs, files in os.walk(projPath):
+ if ((tool) in files or ("%s.exe"%tool) in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ paths.append(os.path.join(root, tool))
+ break
+ if (len(paths) == 0):
+ return ""
+ return paths[0]
+
+def dict2toml(in_dict: dict, file:str):
+ if not isinstance(in_dict, dict):
+ return ""
+ with open(file, 'w') as f:
+ toml.dump(in_dict, f)
+
tdCom = TDCom()
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 613673ea8e..59e247105c 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -96,9 +96,9 @@ class TDSimClient:
for key, value in self.cfgDict.items():
self.cfg(key, value)
-
+
try:
- if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
+ if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
clientCfg = dict (updatecfgDict[0][0].get('clientCfg'))
for key, value in clientCfg.items():
self.cfg(key, value)
@@ -244,7 +244,6 @@ class TDDnode:
# print(updatecfgDict)
isFirstDir = 1
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
- print(updatecfgDict[0][0])
for key, value in updatecfgDict[0][0].items():
if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows':
continue
@@ -300,7 +299,7 @@ class TDDnode:
if self.valgrind == 0:
if platform.system().lower() == 'windows':
- cmd = "mintty -h never -w hide %s -c %s" % (
+ cmd = "mintty -h never %s -c %s" % (
binPath, self.cfgDir)
else:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
@@ -309,7 +308,7 @@ class TDDnode:
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
if platform.system().lower() == 'windows':
- cmd = "mintty -h never -w hide %s %s -c %s" % (
+ cmd = "mintty -h never %s %s -c %s" % (
valgrindCmdline, binPath, self.cfgDir)
else:
cmd = "nohup %s %s -c %s 2>&1 & " % (
@@ -324,7 +323,6 @@ class TDDnode:
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
- print("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
if self.valgrind == 0:
time.sleep(0.1)
@@ -358,7 +356,7 @@ class TDDnode:
# break
# elif bkey2 in line:
# popen.kill()
- # break
+ # break
# if time.time() > timeout:
# print(time.time(),timeout)
# tdLog.exit('wait too long for taosd start')
@@ -407,7 +405,6 @@ class TDDnode:
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
- print("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
if self.valgrind == 0:
time.sleep(0.1)
@@ -521,7 +518,7 @@ class TDDnode:
if self.running != 0:
if platform.system().lower() == 'windows':
- psCmd = "for /f %a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^'') do @(ps | grep %a | awk '{print $1}' | xargs kill -INT )" % (self.index)
+ psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^'') do @(ps | grep %%a | awk '{print $1}' | xargs kill -INT )" % (self.index)
else:
psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index)
processID = subprocess.check_output(
@@ -664,7 +661,6 @@ class TDDnodes:
def stoptaosd(self, index):
self.check(index)
self.dnodes[index - 1].stoptaosd()
-
def start(self, index):
self.check(index)
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 85a782ecb1..01955ec93a 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -235,9 +235,17 @@ class TDSql:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data))
return
- elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ elif isinstance(data, float):
+ if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
+ tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
+ (self.sql, row, col, self.queryResult[row][col], data))
+ elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
+ tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
+ (self.sql, row, col, self.queryResult[row][col], data))
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
@@ -323,13 +331,32 @@ class TDSql:
args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list)
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
+ def __check_equal(self, elm, expect_elm):
+ if not type(elm) in(list, tuple) and elm == expect_elm:
+ return True
+ if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
+ if len(elm) != len(expect_elm):
+ return False
+ if len(elm) == 0:
+ return True
+ for i in range(len(elm)):
+ flag = self.__check_equal(elm[i], expect_elm[i])
+ if not flag:
+ return False
+ return True
+ return False
+
def checkEqual(self, elm, expect_elm):
if elm == expect_elm:
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
- else:
- caller = inspect.getframeinfo(inspect.stack()[1][0])
- args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
- tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
+ return
+ if self.__check_equal(elm, expect_elm):
+ tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
+ return
+
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
+ tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
def checkNotEqual(self, elm, expect_elm):
if elm != expect_elm:
diff --git a/tests/pytest/util/taosadapter.py b/tests/pytest/util/taosadapter.py
new file mode 100644
index 0000000000..1a198240d7
--- /dev/null
+++ b/tests/pytest/util/taosadapter.py
@@ -0,0 +1,260 @@
+import socket
+from fabric2 import Connection
+from util.log import *
+from util.common import *
+
+
+class TAdapter:
+ def __init__(self):
+ self.running = 0
+ self.deployed = 0
+ self.remoteIP = ""
+ self.taosadapter_cfg_dict = {
+ "debug" : True,
+ "taosConfigDir" : "",
+ "port" : 6041,
+ "logLevel" : "debug",
+ "cors" : {
+ "allowAllOrigins" : True,
+ },
+ "pool" : {
+ "maxConnect" : 4000,
+ "maxIdle" : 4000,
+ "idleTimeout" : "1h"
+ },
+ "ssl" : {
+ "enable" : False,
+ "certFile" : "",
+ "keyFile" : "",
+ },
+ "log" : {
+ "path" : "",
+ "rotationCount" : 30,
+ "rotationTime" : "24h",
+ "rotationSize" : "1GB",
+ "enableRecordHttpSql" : True,
+ "sqlRotationCount" : 2,
+ "sqlRotationTime" : "24h",
+ "sqlRotationSize" : "1GB",
+ },
+ "monitor" : {
+ "collectDuration" : "3s",
+ "incgroup" : False,
+ "pauseQueryMemoryThreshold" : 70,
+ "pauseAllMemoryThreshold" : 80,
+ "identity" : "",
+ "writeToTD" : True,
+ "user" : "root",
+ "password" : "taosdata",
+ "writeInterval" : "30s"
+ },
+ "opentsdb" : {
+ "enable" : False
+ },
+ "influxdb" : {
+ "enable" : False
+ },
+ "statsd" : {
+ "enable" : False
+ },
+ "collectd" : {
+ "enable" : False
+ },
+ "opentsdb_telnet" : {
+ "enable" : False
+ },
+ "node_exporter" : {
+ "enable" : False
+ },
+ "prometheus" : {
+ "enable" : False
+ },
+ }
+ # TODO: add taosadapter env:
+ # 1. init cfg.toml.dict :OK
+ # 2. dump dict to toml : OK
+ # 3. update cfg.toml.dict :OK
+ # 4. check adapter exists : OK
+ # 5. deploy adapter cfg : OK
+ # 6. adapter start : OK
+ # 7. adapter stop
+
+ def init(self, path, remoteIP=""):
+ self.path = path
+ self.remoteIP = remoteIP
+ binPath = get_path() + "/../../../"
+ binPath = os.path.realpath(binPath)
+
+ if path == "":
+ self.path = os.path.abspath(binPath + "../../")
+ else:
+ self.path = os.path.realpath(path)
+
+ if self.remoteIP:
+ try:
+ self.config = eval(remoteIP)
+ self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
+ except Exception as e:
+ tdLog.notice(e)
+
+ def update_cfg(self, update_dict :dict):
+ if not isinstance(update_dict, dict):
+ return
+ if "log" in update_dict and "path" in update_dict["log"]:
+ del update_dict["log"]["path"]
+ for key, value in update_dict.items():
+ if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]:
+ if isinstance(value, dict):
+ for k, v in value.items():
+ self.taosadapter_cfg_dict[key][k] = v
+ else:
+ self.taosadapter_cfg_dict[key] = value
+
+ def check_adapter(self):
+ if getPath(tool="taosadapter"):
+ return False
+ else:
+ return True
+
+ def remote_exec(self, updateCfgDict, execCmd):
+ remoteCfgDict = copy.deepcopy(updateCfgDict)
+ if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]:
+ del remoteCfgDict["log"]["path"]
+
+ remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode()
+ execCmdStr = base64.b64encode(execCmd.encode()).decode()
+ with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
+ self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}" )
+
+ def cfg(self, option, value):
+ cmd = f"echo {option} = {value} >> {self.cfg_path}"
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+
+ def deploy(self, *update_cfg_dict):
+ self.log_dir = f"{self.path}/sim/dnode1/log"
+ self.cfg_dir = f"{self.path}/sim/dnode1/cfg"
+ self.cfg_path = f"{self.cfg_dir}/taosadapter.toml"
+
+ cmd = f"touch {self.cfg_path}"
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+
+ self.taosadapter_cfg_dict["log"]["path"] = self.log_dir
+ if bool(update_cfg_dict):
+ self.update_cfg(update_dict=update_cfg_dict)
+
+ if (self.remoteIP == ""):
+ dict2toml(self.taosadapter_cfg_dict, self.cfg_path)
+ else:
+ self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)")
+
+ self.deployed = 1
+
+ tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}")
+
+ def start(self):
+ bin_path = get_path(tool="taosadapter")
+
+ if (bin_path == ""):
+ tdLog.exit("taosadapter not found!")
+ else:
+ tdLog.info(f"taosadapter found: {bin_path}")
+
+ if platform.system().lower() == 'windows':
+ cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
+ else:
+ cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null 2>&1 & "
+
+ if self.remoteIP:
+ self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
+ self.running = 1
+ else:
+ os.system(f"rm -rf {self.log_dir}/taosadapter*")
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+ self.running = 1
+ tdLog.debug(f"taosadapter is running with {cmd} " )
+
+ time.sleep(0.1)
+
+ taosadapter_port = self.taosadapter_cfg_dict["port"]
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(3)
+ try:
+ res = s.connect_ex((self.remoteIP, taosadapter_port))
+ s.shutdown(2)
+ if res == 0:
+ tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}")
+ else:
+ tdLog.info(f"the taosadapter do not started!!!")
+ except socket.error as e:
+ tdLog.notice("socket connect error!")
+ finally:
+ if s:
+ s.close()
+ # tdLog.debug("the taosadapter has been started.")
+ time.sleep(1)
+
+ def start_taosadapter(self):
+ """
+ use this method, must deploy taosadapter
+ """
+ bin_path = get_path(tool="taosadapter")
+
+ if (bin_path == ""):
+ tdLog.exit("taosadapter not found!")
+ else:
+ tdLog.info(f"taosadapter found: {bin_path}")
+
+ if self.deployed == 0:
+ tdLog.exit("taosadapter is not deployed")
+
+ if platform.system().lower() == 'windows':
+ cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
+ else:
+ cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null 2>&1 & "
+
+ if self.remoteIP:
+ self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
+ self.running = 1
+ else:
+ if os.system(cmd) != 0:
+ tdLog.exit(cmd)
+ self.running = 1
+ tdLog.debug(f"taosadapter is running with {cmd} " )
+
+ time.sleep(0.1)
+
+ def stop(self, force_kill=False):
+ signal = "-SIGKILL" if force_kill else "-SIGTERM"
+
+ if self.remoteIP:
+ self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()")
+ tdLog.info("stop taosadapter")
+ return
+
+ toBeKilled = "taosadapter"
+
+ if self.running != 0:
+ psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
+ processID = subprocess.check_output(
+ psCmd, shell=True).decode("utf-8")
+
+ while(processID):
+ killCmd = f"kill {signal} {processID} > /dev/null 2>&1"
+ os.system(killCmd)
+ time.sleep(1)
+ processID = subprocess.check_output(
+ psCmd, shell=True).decode("utf-8")
+ if not platform.system().lower() == 'windows':
+ for port in range(6030, 6041):
+ fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
+ os.system(fuserCmd)
+
+ self.running = 0
+ tdLog.debug(f"taosadapter is stopped by kill {signal}")
+
+
+
+tAdapter = TAdapter()
\ No newline at end of file
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index 29c1fdb015..ada2039460 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -218,7 +218,7 @@ typedef struct {
} CaseCtrl;
#if 0
-CaseCtrl gCaseCtrl = { // default
+CaseCtrl gCaseCtrl = {
.precision = TIME_PRECISION_MICRO,
.bindNullNum = 0,
.printCreateTblSql = false,
@@ -251,7 +251,7 @@ CaseCtrl gCaseCtrl = { // default
#if 1
-CaseCtrl gCaseCtrl = {
+CaseCtrl gCaseCtrl = { // default
.precision = TIME_PRECISION_MILLI,
.bindNullNum = 0,
.printCreateTblSql = false,
@@ -299,7 +299,7 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
.printRes = true,
.runTimes = 0,
.caseRunIdx = -1,
- .caseIdx = 23,
+ .caseIdx = 5,
.caseNum = 1,
.caseRunNum = 1,
};
@@ -328,7 +328,7 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
//.optrIdxList = optrIdxList,
//.bindColTypeNum = tListLen(bindColTypeList),
//.bindColTypeList = bindColTypeList,
- .caseIdx = 24,
+ .caseIdx = 8,
.caseNum = 1,
.caseRunNum = 1,
};
@@ -1384,6 +1384,7 @@ void bpCheckTagFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
}
bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindTagNum, pBind, BP_BIND_TAG);
+ taosMemoryFree(pFields);
}
void bpCheckColFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
@@ -1401,12 +1402,13 @@ void bpCheckColFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
}
bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindColNum, pBind, BP_BIND_COL);
+ taosMemoryFree(pFields);
}
void bpShowBindParam(TAOS_MULTI_BIND *bind, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
TAOS_MULTI_BIND* b = &bind[i];
- printf("Bind %d: type[%d],buf[%p],buflen[%d],len[%],null[%d],num[%d]\n",
+ printf("Bind %d: type[%d],buf[%p],buflen[%d],len[%d],null[%d],num[%d]\n",
i, b->buffer_type, b->buffer, b->buffer_length, b->length ? *b->length : 0, b->is_null ? *b->is_null : 0, b->num);
}
}
@@ -2596,6 +2598,7 @@ void runAll(TAOS *taos) {
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
+#if 0
strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.precision = TIME_PRECISION_MICRO;
@@ -2626,7 +2629,6 @@ void runAll(TAOS *taos) {
runCaseList(taos);
gCaseCtrl.bindRowNum = 0;
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Row Num Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.rowNum = 1000;
@@ -2640,7 +2642,6 @@ void runAll(TAOS *taos) {
gCaseCtrl.runTimes = 2;
runCaseList(taos);
gCaseCtrl.runTimes = 0;
-#endif
strcpy(gCaseCtrl.caseCatalog, "Check Param Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
@@ -2648,19 +2649,20 @@ void runAll(TAOS *taos) {
runCaseList(taos);
gCaseCtrl.checkParamNum = false;
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Bind Col Num Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.bindColNum = 6;
runCaseList(taos);
gCaseCtrl.bindColNum = 0;
+#endif
+/*
strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.bindColTypeNum = tListLen(bindColTypeList);
gCaseCtrl.bindColTypeList = bindColTypeList;
runCaseList(taos);
-#endif
+*/
printf("All Test End\n");
}
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 2234e31f56..f319de4c2f 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -17,7 +17,7 @@
./test.sh -f tsim/db/basic4.sim
./test.sh -f tsim/db/basic5.sim
./test.sh -f tsim/db/basic6.sim
-# nojira ./test.sh -f tsim/db/commit.sim
+./test.sh -f tsim/db/commit.sim
./test.sh -f tsim/db/create_all_options.sim
./test.sh -f tsim/db/delete_reuse1.sim
./test.sh -f tsim/db/delete_reuse2.sim
@@ -27,11 +27,11 @@
./test.sh -f tsim/db/delete_writing2.sim
# unsupport ./test.sh -f tsim/db/dropdnodes.sim
./test.sh -f tsim/db/error1.sim
-# nojira ./test.sh -f tsim/db/keep.sim
+# jira ./test.sh -f tsim/db/keep.sim
./test.sh -f tsim/db/len.sim
./test.sh -f tsim/db/repeat.sim
./test.sh -f tsim/db/show_create_db.sim
-./test.sh -f tsim/db/show_create_table.sim
+# jira ./test.sh -f tsim/db/show_create_table.sim
./test.sh -f tsim/db/tables.sim
./test.sh -f tsim/db/taosdlog.sim
@@ -83,52 +83,51 @@
./test.sh -f tsim/insert/update0.sim
# ---- parser
-./test.sh -f tsim/parser/alter.sim
-# nojira ./test.sh -f tsim/parser/alter1.sim
./test.sh -f tsim/parser/alter__for_community_version.sim
./test.sh -f tsim/parser/alter_column.sim
./test.sh -f tsim/parser/alter_stable.sim
-# nojira ./test.sh -f tsim/parser/auto_create_tb.sim
+./test.sh -f tsim/parser/alter.sim
+# jira ./test.sh -f tsim/parser/alter1.sim
./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim
+# jira ./test.sh -f tsim/parser/auto_create_tb.sim
./test.sh -f tsim/parser/between_and.sim
./test.sh -f tsim/parser/binary_escapeCharacter.sim
-# nojira ./test.sh -f tsim/parser/col_arithmetic_operation.sim
-# nojira ./test.sh -f tsim/parser/columnValue.sim
-## ./test.sh -f tsim/parser/commit.sim
-## ./test.sh -f tsim/parser/condition.sim
-## ./test.sh -f tsim/parser/constCol.sim
-# ./test.sh -f tsim/parser/create_db.sim
-## ./test.sh -f tsim/parser/create_db__for_community_version.sim
-# ./test.sh -f tsim/parser/create_mt.sim
-# ./test.sh -f tsim/parser/create_tb.sim
-## ./test.sh -f tsim/parser/create_tb_with_tag_name.sim
-# ./test.sh -f tsim/parser/dbtbnameValidate.sim
-##./test.sh -f tsim/parser/distinct.sim
-# ./test.sh -f tsim/parser/fill.sim
-# ./test.sh -f tsim/parser/fill_stb.sim
-## ./test.sh -f tsim/parser/fill_us.sim
-# ./test.sh -f tsim/parser/first_last.sim
+# jira ./test.sh -f tsim/parser/col_arithmetic_operation.sim
+# jira ./test.sh -f tsim/parser/columnValue.sim
+./test.sh -f tsim/parser/commit.sim
+# jira ./test.sh -f tsim/parser/condition.sim
+./test.sh -f tsim/parser/constCol.sim
+./test.sh -f tsim/parser/create_db.sim
+./test.sh -f tsim/parser/create_mt.sim
+# jira ./test.sh -f tsim/parser/create_tb_with_tag_name.sim
+./test.sh -f tsim/parser/create_tb.sim
+./test.sh -f tsim/parser/dbtbnameValidate.sim
+./test.sh -f tsim/parser/distinct.sim
+# jira ./test.sh -f tsim/parser/fill_stb.sim
+./test.sh -f tsim/parser/fill_us.sim
+./test.sh -f tsim/parser/fill.sim
+./test.sh -f tsim/parser/first_last.sim
./test.sh -f tsim/parser/fourArithmetic-basic.sim
-## ./test.sh -f tsim/parser/function.sim
+# jira ./test.sh -f tsim/parser/function.sim
./test.sh -f tsim/parser/groupby-basic.sim
# ./test.sh -f tsim/parser/groupby.sim
-## ./test.sh -f tsim/parser/having.sim
# ./test.sh -f tsim/parser/having_child.sim
-## ./test.sh -f tsim/parser/import.sim
-# ./test.sh -f tsim/parser/import_commit1.sim
-# ./test.sh -f tsim/parser/import_commit2.sim
-# ./test.sh -f tsim/parser/import_commit3.sim
-## ./test.sh -f tsim/parser/import_file.sim
-## ./test.sh -f tsim/parser/insert_multiTbl.sim
-# ./test.sh -f tsim/parser/insert_tb.sim
-## ./test.sh -f tsim/parser/interp.sim
+## ./test.sh -f tsim/parser/having.sim
+./test.sh -f tsim/parser/import_commit1.sim
+./test.sh -f tsim/parser/import_commit2.sim
+./test.sh -f tsim/parser/import_commit3.sim
+# jira ./test.sh -f tsim/parser/import_file.sim
+./test.sh -f tsim/parser/import.sim
+./test.sh -f tsim/parser/insert_multiTbl.sim
+./test.sh -f tsim/parser/insert_tb.sim
+# jira ./test.sh -f tsim/parser/interp.sim
# ./test.sh -f tsim/parser/join.sim
# ./test.sh -f tsim/parser/join_manyblocks.sim
## ./test.sh -f tsim/parser/join_multitables.sim
# ./test.sh -f tsim/parser/join_multivnode.sim
-# ./test.sh -f tsim/parser/last_cache.sim
+./test.sh -f tsim/parser/last_cache.sim
## ./test.sh -f tsim/parser/last_groupby.sim
-# ./test.sh -f tsim/parser/lastrow.sim
+# jira ./test.sh -f tsim/parser/lastrow.sim
## ./test.sh -f tsim/parser/like.sim
# ./test.sh -f tsim/parser/limit.sim
# ./test.sh -f tsim/parser/limit1.sim
@@ -154,19 +153,19 @@
# ./test.sh -f tsim/parser/set_tag_vals.sim
# ./test.sh -f tsim/parser/single_row_in_tb.sim
# ./test.sh -f tsim/parser/sliding.sim
+# ./test.sh -f tsim/parser/slimit_alter_tags.sim
# ./test.sh -f tsim/parser/slimit.sim
# ./test.sh -f tsim/parser/slimit1.sim
-# ./test.sh -f tsim/parser/slimit_alter_tags.sim
-# ./test.sh -f tsim/parser/stableOp.sim
+./test.sh -f tsim/parser/stableOp.sim
# ./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
# ./test.sh -f tsim/parser/tags_filter.sim
-# ./test.sh -f tsim/parser/tbnameIn.sim
-# ./test.sh -f tsim/parser/timestamp.sim
-## ./test.sh -f tsim/parser/top_groupby.sim
-# ./test.sh -f tsim/parser/topbot.sim
-# ./test.sh -f tsim/parser/udf.sim
-# ./test.sh -f tsim/parser/udf_dll.sim
+./test.sh -f tsim/parser/tbnameIn.sim
+./test.sh -f tsim/parser/timestamp.sim
+./test.sh -f tsim/parser/top_groupby.sim
+./test.sh -f tsim/parser/topbot.sim
# ./test.sh -f tsim/parser/udf_dll_stable.sim
+# ./test.sh -f tsim/parser/udf_dll.sim
+# ./test.sh -f tsim/parser/udf.sim
# ./test.sh -f tsim/parser/union.sim
# ./test.sh -f tsim/parser/where.sim
@@ -197,7 +196,7 @@
./test.sh -f tsim/mnode/basic5.sim
# ---- show
-#./test.sh -f tsim/show/basic.sim
+# jira ./test.sh -f tsim/show/basic.sim
# ---- table
./test.sh -f tsim/table/autocreate.sim
@@ -235,15 +234,15 @@
./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
-# ./test.sh -f tsim/stream/distributesession0.sim
+./test.sh -f tsim/stream/distributeSession0.sim
./test.sh -f tsim/stream/session0.sim
./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim
-# ./test.sh -f tsim/stream/triggerSession0.sim
+./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/partitionby.sim
./test.sh -f tsim/stream/partitionby1.sim
-# ./test.sh -f tsim/stream/schedSnode.sim
+# unsupport ./test.sh -f tsim/stream/schedSnode.sim
./test.sh -f tsim/stream/windowClose.sim
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/sliding.sim
@@ -294,12 +293,12 @@
./test.sh -f tsim/db/basic3.sim -m
./test.sh -f tsim/db/error1.sim -m
./test.sh -f tsim/insert/backquote.sim -m
-# nojira ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
+# unsupport ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
./test.sh -f tsim/query/interval-offset.sim -m
./test.sh -f tsim/tmq/basic3.sim -m
./test.sh -f tsim/stable/vnode3.sim -m
./test.sh -f tsim/qnode/basic1.sim -m
-# nojira ./test.sh -f tsim/mnode/basic1.sim -m
+# unsupport ./test.sh -f tsim/mnode/basic1.sim -m
# --- sma
./test.sh -f tsim/sma/drop_sma.sim
@@ -329,7 +328,7 @@
./test.sh -f tsim/vnode/stable_replica3_vnode3.sim
# --- sync
-./test.sh -f tsim/sync/3Replica1VgElect.sim
+# jira ./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim
diff --git a/tests/script/test-all.bat b/tests/script/test-all.bat
index 056d989e6b..229302fd1e 100644
--- a/tests/script/test-all.bat
+++ b/tests/script/test-all.bat
@@ -63,4 +63,5 @@ goto :eof
:CheckSkipCase
set skipCase=false
@REM if "%*" == "./test.sh -f tsim/query/scalarFunction.sim" ( set skipCase=true )
+echo %* | grep valgrind && set skipCase=true
:goto eof
\ No newline at end of file
diff --git a/tests/script/tsim/db/commit.sim b/tests/script/tsim/db/commit.sim
index 74c1366afb..191f618adb 100644
--- a/tests/script/tsim/db/commit.sim
+++ b/tests/script/tsim/db/commit.sim
@@ -1,27 +1,35 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-
-system sh/cfg.sh -n dnode1 -c walLevel -v 2
-system sh/cfg.sh -n dnode2 -c walLevel -v 2
-system sh/cfg.sh -n dnode3 -c walLevel -v 2
-system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
-system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
-system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
-system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
-system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
-system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
sql connect
-sleep 2000
print ========= start other dnodes
-sql create dnode $hostname2
-system sh/exec.sh -n dnode2 -s start
-sleep 2000
+sql create dnode $hostname port 7200
+
+$x = 0
+step1:
+  $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql show dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+if $rows != 2 then
+ return -1
+endi
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
print ======== step1 create db
sql create database commitdb replica 1 duration 7 keep 30
@@ -68,9 +76,7 @@ $num = $rows + 2
print ======== step3 import old data
sql import into tb values (now - 10d , -10 )
-
sql import into tb values (now - 11d , -11 )
-
sql select * from tb order by ts desc
print ===> rows $rows expect $num
print ===> last $data01 expect $data01
@@ -99,9 +105,7 @@ endi
print ======== step5 stop dnode
system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 3000
system sh/exec.sh -n dnode2 -s start
-sleep 3000
sql select * from tb
print ===> rows $rows
@@ -116,10 +120,4 @@ if $data01 != 40 then
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
-system sh/exec.sh -n dnode5 -s stop -x SIGINT
-system sh/exec.sh -n dnode6 -s stop -x SIGINT
-system sh/exec.sh -n dnode7 -s stop -x SIGINT
-system sh/exec.sh -n dnode8 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/db/keep.sim b/tests/script/tsim/db/keep.sim
index 027530026c..d8939eafbc 100644
--- a/tests/script/tsim/db/keep.sim
+++ b/tests/script/tsim/db/keep.sim
@@ -1,57 +1,23 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
-system sh/deploy.sh -n dnode3 -i 3
-system sh/deploy.sh -n dnode4 -i 4
-system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1
-system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1
-system sh/cfg.sh -n dnode3 -c transPullupInterval -v 1
-system sh/cfg.sh -n dnode4 -c transPullupInterval -v 1
system sh/exec.sh -n dnode1 -s start
-system sh/exec.sh -n dnode2 -s start
sql connect
-print =============== step1 create dnode2
-sql create dnode $hostname port 7200
-
-$x = 0
-step1:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- print ====> dnode not ready!
- return -1
- endi
-sql show dnodes
-print ===> $data00 $data01 $data02 $data03 $data04 $data05
-print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $rows != 2 then
- return -1
-endi
-if $data(1)[4] != ready then
- goto step1
-endi
-if $data(2)[4] != ready then
- goto step1
-endi
-
print ======== step1 create db
-sql create database keepdb replica 1 keep 30 duration 7
+sql create database keepdb replica 1 keep 30 duration 7 vgroups 2
sql use keepdb
sql create table tb (ts timestamp, i int)
$x = 1
while $x < 41
$time = $x . d
- sql insert into tb values (now + $time , $x ) -x step2
+ sql insert into tb values (now - $time , $x ) -x step2
step2:
$x = $x + 1
endw
sql select * from tb
-print ===> rows $rows
-print ===> last $data01
-
+print ===> rows $rows last $data01
if $rows >= 40 then
return -1
endi
@@ -61,9 +27,7 @@ system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s start
sql select * from tb
-print ===> rows $rows
-print ===> last $data01
-
+print ===> rows $rows last $data01
if $rows >= 40 then
return -1
endi
@@ -75,23 +39,13 @@ $num1 = $rows + 40
print ======== step3 alter db
sql alter database keepdb keep 60
-flush database keepdb
-
+sql flush database keepdb
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
-if $data02 != 1 then
+if $data22 != 2 then
return -1
endi
-if $data03 != 1 then
- return -1
-endi
-if $data04 != 1 then
- return -1
-endi
-if $data05 != 7 then
- return -1
-endi
-if $data06 != 60 then
+if $data27 != 86400m,86400m,86400m then
return -1
endi
@@ -99,98 +53,73 @@ print ======== step4 insert data
$x = 41
while $x < 81
$time = $x . d
- sql insert into tb values (now + $time , $x )
+ sql insert into tb values (now - $time , $x ) -x step4
+ step4:
$x = $x + 1
endw
sql select * from tb
-print ===> rows $rows
-print ===> last $data01
-
-if $rows != $num1 then
- return -1
-endi
-if $data01 != 80 then
- return -1
-endi
-
-print ======== step5 stop dnode
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s start
-
-sql select * from tb
-print ===> rows $rows
-print ===> last $data01
-
-if $rows >= $num1 then
+print ===> rows $rows last $data01
+if $rows >= 80 then
return -1
endi
if $rows <= 50 then
return -1
endi
-if $data01 != 80 then
+
+return
+
+print ======== step5 stop dnode
+system sh/exec.sh -n dnode2 -s stop -x SIGKILL
+system sh/exec.sh -n dnode2 -s start
+
+sql select * from tb
+print ===> rows $rows last $data01
+if $rows >= 80 then
+ return -1
+endi
+if $rows <= 50 then
return -1
endi
print ======== step6 alter db
sql alter database keepdb keep 30
-sleep 1000
sql show databases
-print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
-if $data02 != 1 then
+if $data22 != 2 then
return -1
endi
-if $data03 != 1 then
- return -1
-endi
-if $data04 != 1 then
- return -1
-endi
-if $data05 != 7 then
- return -1
-endi
-if $data06 != 30 then
+if $data27 != 43200m,43200m,43200m then
return -1
endi
print ======== step7 stop dnode
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
-sleep 2000
+system sh/exec.sh -n dnode2 -s stop -x SIGKILL
system sh/exec.sh -n dnode2 -s start
-sleep 2000
sql select * from tb
-print ===> rows $rows
-print ===> last $data01
-
+print ===> rows $rows last $data01
if $rows >= 40 then
return -1
endi
if $rows <= 20 then
return -1
endi
-if $data01 != 80 then
- return -1
-endi
-
-$num3 = $rows + 40
print ======== step8 insert data
$x = 81
while $x < 121
$time = $x . d
- sql insert into tb values (now + $time , $x )
+  sql insert into tb values (now - $time , $x ) -x step8
+  step8:
$x = $x + 1
endw
sql select * from tb
-print ===> rows $rows
-print ===> last $data01
-
-if $rows != $num3 then
+print ===> rows $rows last $data01
+if $rows >= 40 then
return -1
endi
-if $data01 != 120 then
+if $rows <= 20 then
return -1
endi
@@ -208,4 +137,6 @@ sql alter database keepdb duration 1 -x error3
error3:
print ======= test success
-
\ No newline at end of file
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/parser/commit.sim b/tests/script/tsim/parser/commit.sim
index 83b457673b..0877168609 100644
--- a/tests/script/tsim/parser/commit.sim
+++ b/tests/script/tsim/parser/commit.sim
@@ -20,7 +20,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db maxrows 255 ctime 3600
+sql create database $db maxrows 255
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)
@@ -78,12 +78,9 @@ endw
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
-sleep 100
print ================== server restart completed
sql connect
-sleep 100
print ====== select from table and check num of rows returned
sql use $db
diff --git a/tests/script/tsim/parser/condition.sim b/tests/script/tsim/parser/condition.sim
index 8c1327baae..700d1b98c0 100644
--- a/tests/script/tsim/parser/condition.sim
+++ b/tests/script/tsim/parser/condition.sim
@@ -2,11 +2,11 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
+
sql drop database if exists cdb
sql create database if not exists cdb
sql use cdb
sql create table stb1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)
-
sql create table tb1 using stb1 tags(1,'1',1.0)
sql create table tb2 using stb1 tags(2,'2',2.0)
sql create table tb3 using stb1 tags(3,'3',3.0)
@@ -45,7 +45,6 @@ sql insert into tb6 values ('2021-05-05 18:19:27',64,64.0,64,64,64,64.0,false,'6
sql insert into tb6 values ('2021-05-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
sql create table stb2 (ts timestamp, u1 int unsigned, u2 bigint unsigned, u3 smallint unsigned, u4 tinyint unsigned, ts2 timestamp) TAGS(t1 int unsigned, t2 bigint unsigned, t3 timestamp, t4 int)
-
sql create table tb2_1 using stb2 tags(1,1,'2021-05-05 18:38:38',1)
sql create table tb2_2 using stb2 tags(2,2,'2021-05-05 18:58:58',2)
@@ -67,7 +66,6 @@ sql insert into tb2_2 values ('2021-05-05 18:19:14',8,2,3,4,'2021-05-05 18:28:15
sql insert into tb2_2 values ('2021-05-05 18:19:15',5,6,7,8,'2021-05-05 18:28:16')
sql create table stb3 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)
-
sql create table tb3_1 using stb3 tags(1,'1',1.0)
sql create table tb3_2 using stb3 tags(2,'2',2.0)
@@ -78,7 +76,6 @@ sql insert into tb3_1 values ('2021-04-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4
sql insert into tb3_1 values ('2021-05-05 18:19:28',5,NULL,5,NULL,5,NULL,true,NULL,'5')
sql insert into tb3_1 values ('2021-06-05 18:19:28',NULL,6.0,NULL,6,NULL,6.0,NULL,'6',NULL)
sql insert into tb3_1 values ('2021-07-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
-
sql insert into tb3_2 values ('2021-01-06 18:19:00',11,11.0,11,11,11,11.0,true ,'11','11')
sql insert into tb3_2 values ('2021-02-06 18:19:01',12,12.0,12,12,12,12.0,true ,'12','12')
sql insert into tb3_2 values ('2021-03-06 18:19:02',13,13.0,13,13,13,13.0,false,'13','13')
@@ -87,9 +84,7 @@ sql insert into tb3_2 values ('2021-05-06 18:19:28',15,NULL,15,NULL,15,NULL,true
sql insert into tb3_2 values ('2021-06-06 18:19:28',NULL,16.0,NULL,16,NULL,16.0,NULL,'16',NULL)
sql insert into tb3_2 values ('2021-07-06 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
-
sql create table stb4 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9),c10 binary(16300)) TAGS(t1 int, t2 binary(10), t3 double)
-
sql create table tb4_0 using stb4 tags(0,'0',0.0)
sql create table tb4_1 using stb4 tags(1,'1',1.0)
sql create table tb4_2 using stb4 tags(2,'2',2.0)
@@ -128,19 +123,13 @@ while $i < $blockNum
$ts0 = $ts0 + 259200000
endw
-sleep 100
-
-sql connect
-
run tsim/parser/condition_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 100
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
run tsim/parser/condition_query.sim
diff --git a/tests/script/tsim/parser/condition_query.sim b/tests/script/tsim/parser/condition_query.sim
index 8dfa8dae0c..dc5eed49be 100644
--- a/tests/script/tsim/parser/condition_query.sim
+++ b/tests/script/tsim/parser/condition_query.sim
@@ -11,14 +11,14 @@ if $rows != 28 then
return -1
endi
-sql_error select * from stb1 where c8 > 0
-sql_error select * from stb1 where c7 in (0,2,3,1);
-sql_error select * from stb1 where c8 in (true);
-sql_error select * from stb1 where c8 in (1,2);
-sql_error select * from stb1 where t2 in (3.0);
-sql_error select ts,c1,c7 from stb1 where c7 > false
-sql_error select * from stb1 where c1 > NULL;
-sql_error select * from stb1 where c1 = NULL;
+sql select * from stb1 where c8 > 0
+sql select * from stb1 where c7 in (0,2,3,1);
+sql select * from stb1 where c8 in (true);
+sql select * from stb1 where c8 in (1,2);
+sql select * from stb1 where t2 in (3.0);
+sql select ts,c1,c7 from stb1 where c7 > false
+sql select * from stb1 where c1 > NULL;
+sql select * from stb1 where c1 = NULL;
sql_error select * from stb1 where c1 LIKE '%1';
sql_error select * from stb1 where c2 LIKE '%1';
sql_error select * from stb1 where c3 LIKE '%1';
@@ -26,20 +26,20 @@ sql_error select * from stb1 where c4 LIKE '%1';
sql_error select * from stb1 where c5 LIKE '%1';
sql_error select * from stb1 where c6 LIKE '%1';
sql_error select * from stb1 where c7 LIKE '%1';
-sql_error select * from stb1 where c1 = 'NULL';
-sql_error select * from stb1 where c2 > 'NULL';
-sql_error select * from stb1 where c3 <> 'NULL';
-sql_error select * from stb1 where c4 != 'null';
-sql_error select * from stb1 where c5 >= 'null';
-sql_error select * from stb1 where c6 <= 'null';
-sql_error select * from stb1 where c7 < 'nuLl';
-sql_error select * from stb1 where c8 < 'nuLl';
-sql_error select * from stb1 where c9 > 'nuLl';
+sql select * from stb1 where c1 = 'NULL';
+sql select * from stb1 where c2 > 'NULL';
+sql select * from stb1 where c3 <> 'NULL';
+sql select * from stb1 where c4 != 'null';
+sql select * from stb1 where c5 >= 'null';
+sql select * from stb1 where c6 <= 'null';
+sql select * from stb1 where c7 < 'nuLl';
+sql select * from stb1 where c8 < 'nuLl';
+sql select * from stb1 where c9 > 'nuLl';
sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b;
sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 or b.c1 < 60;
sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60));
-sql_error select * from stb1 where 'c2' is null;
-sql_error select * from stb1 where 'c2' is not null;
+sql select * from stb1 where 'c2' is null;
+sql select * from stb1 where 'c2' is not null;
sql select * from stb1 where c2 > 3.0 or c2 < 60;
if $rows != 28 then
@@ -173,7 +173,6 @@ if $data32 != 0 then
return -1
endi
-
sql select ts,c1,c7 from stb1 where c7 = true
if $rows != 14 then
return -1
diff --git a/tests/script/tsim/parser/constCol.sim b/tests/script/tsim/parser/constCol.sim
index 5f50c950dd..5eb5b419fb 100644
--- a/tests/script/tsim/parser/constCol.sim
+++ b/tests/script/tsim/parser/constCol.sim
@@ -8,20 +8,16 @@ sql use db;
sql create table t (ts timestamp, i int);
sql create table st1 (ts timestamp, f1 int) tags(t1 int);
sql create table st2 (ts timestamp, f2 int) tags(t2 int);
-
sql create table t1 using st1 tags(1);
sql create table t2 using st2 tags(1);
sql insert into t1 values(1575880055000, 1);
sql insert into t1 values(1575880059000, 1);
sql insert into t1 values(1575880069000, 1);
-
sql insert into t2 values(1575880055000, 2);
sql select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts
-system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:7111/restful/sql
-
print ==============select with user-defined columns
sql select 'abc' as f, ts,f1 from t1
if $rows != 3 then
@@ -301,13 +297,13 @@ if $data04 != 1.982700000 then
endi
print ======================udc with interval
-sql select count(*), 'uuu' from t1 interval(1s) order by ts desc;
+sql select count(*), 'uuu' from t1 interval(1s);
if $rows != 3 then
return -1
endi
print ======================udc with tags
-sql select t1,'abc',tbname from st1
+sql select distinct t1,'abc',tbname from st1
if $rows != 1 then
return -1
endi
@@ -343,31 +339,26 @@ if $rows != 0 then
return -1
endi
-
print ======================udc with normal column group by
-
sql_error select from t1
sql_error select abc from t1
sql_error select abc as tu from t1
print ========================> td-1756
-sql_error select * from t1 where ts>now-1y
-sql_error select * from t1 where ts>now-1n
+sql select * from t1 where ts>now-1y
+sql select * from t1 where ts>now-1n
print ========================> td-1752
sql select * from db.st2 where t2 < 200 and t2 is not null;
if $rows != 1 then
return -1
endi
-
if $data00 != @19-12-09 16:27:35.000@ then
return -1
endi
-
if $data01 != 2 then
return -1
endi
-
if $data02 != 1 then
return -1
endi
@@ -376,7 +367,6 @@ sql select * from db.st2 where t2 > 200 or t2 is null;
if $rows != 0 then
return -1
endi
-
sql select * from st2 where t2 < 200 and t2 is null;
if $rows != 0 then
return -1
diff --git a/tests/script/tsim/parser/create_db.sim b/tests/script/tsim/parser/create_db.sim
index c4c5b89bd2..34ce858409 100644
--- a/tests/script/tsim/parser/create_db.sim
+++ b/tests/script/tsim/parser/create_db.sim
@@ -23,10 +23,10 @@ sql create database $db
sql use $db
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data00 != $db then
+if $data20 != $db then
return -1
endi
sql drop database $db
@@ -38,10 +38,10 @@ sql CREATE DATABASE $db
sql use $db
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data00 != $db then
+if $data20 != $db then
return -1
endi
sql drop database $db
@@ -87,7 +87,7 @@ print create_db.sim case4: db_already_exists
sql create database db0
sql create database db0
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
sql drop database db0
@@ -107,29 +107,21 @@ $ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding
-sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
+sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db wal $wal comp $comp
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data00 != $db then
+if $data20 != $db then
return -1
endi
-if $data04 != $replica then
+if $data24 != $replica then
return -1
endi
-if $data06 != $duration then
+if $data26 != 14400m then
return -1
endi
-if $data07 != 365,365,365 then
- return -1
-endi
-print data08 = $data07
-if $data08 != $cache then
- print expect $cache, actual:$data08
- return -1
-endi
-if $data09 != 4 then
+if $data27 != 525600m,525600m,525600m then
return -1
endi
@@ -160,56 +152,56 @@ sql_error create database $db keep 12,11
sql_error create database $db keep 365001,365001,365001
sql create database dbk0 keep 19
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data07 != 19,19,19 then
+if $data27 != 27360m,27360m,27360m then
return -1
endi
sql drop database dbk0
sql create database dbka keep 19,20
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data07 != 19,20,20 then
+if $data27 != 27360m,28800m,28800m then
return -1
endi
sql drop database dbka
sql create database dbk1 keep 11,11,11
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data07 != 11,11,11 then
+if $data27 != 15840m,15840m,15840m then
return -1
endi
sql drop database dbk1
sql create database dbk2 keep 11,12,13
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data07 != 11,12,13 then
+if $data27 != 15840m,17280m,18720m then
return -1
endi
sql drop database dbk2
sql create database dbk3 keep 11,11,13
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data07 != 11,11,13 then
+if $data27 != 15840m,15840m,18720m then
return -1
endi
sql drop database dbk3
sql create database dbk4 keep 11,13,13
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-if $data07 != 11,13,13 then
+if $data27 != 15840m,18720m,18720m then
return -1
endi
sql drop database dbk4
@@ -233,38 +225,31 @@ sql_error create database $db ctime 29
sql_error create database $db ctime 40961
# wal {0, 2}
-sql create database testwal wal 0
+sql_error create database testwal wal 0
sql show databases
-if $rows != 1 then
+if $rows != 2 then
return -1
endi
-sql show databases
-print wallevel $data12_testwal
-if $data12_testwal != 0 then
- return -1
-endi
-sql drop database testwal
-
sql create database testwal wal 1
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
sql show databases
-print wallevel $data12_testwal
-if $data12_testwal != 1 then
+print wallevel $data13_testwal
+if $data13_testwal != 1 then
return -1
endi
sql drop database testwal
sql create database testwal wal 2
sql show databases
-if $rows != 1 then
+if $rows != 3 then
return -1
endi
-print wallevel $data12_testwal
-if $data12_testwal != 2 then
+print wallevel $data13_testwal
+if $data13_testwal != 2 then
return -1
endi
sql drop database testwal
@@ -278,7 +263,7 @@ sql_error create database $db comp 3
sql_error drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/create_db__for_community_version.sim b/tests/script/tsim/parser/create_db__for_community_version.sim
deleted file mode 100644
index 32a8f303c1..0000000000
--- a/tests/script/tsim/parser/create_db__for_community_version.sim
+++ /dev/null
@@ -1,234 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print ======================== dnode1 start
-
-$dbPrefix = fi_in_db
-$tbPrefix = fi_in_tb
-$mtPrefix = fi_in_mt
-$tbNum = 10
-$rowNum = 20
-$totalNum = 200
-
-print excuting test script create_db.sim
-print =============== set up
-$i = 0
-$db = $dbPrefix . $i
-$mt = $mtPrefix . $i
-
-sql_error createdatabase $db
-sql create database $db
-sql use $db
-sql show databases
-
-if $rows != 1 then
- return -1
-endi
-if $data00 != $db then
- return -1
-endi
-sql drop database $db
-
-# case1: case_insensitivity test
-print =========== create_db.sim case1: case insensitivity test
-sql_error CREATEDATABASE $db
-sql CREATE DATABASE $db
-sql use $db
-sql show databases
-
-if $rows != 1 then
- return -1
-endi
-if $data00 != $db then
- return -1
-endi
-sql drop database $db
-print case_insensitivity test passed
-
-# case2: illegal_db_name test
-print =========== create_db.sim case2: illegal_db_name test
-$illegal_db1 = 1db
-$illegal_db2 = d@b
-
-sql_error create database $illegal_db1
-sql_error create database $illegal_db2
-print illegal_db_name test passed
-
-# case3: chinese_char_in_db_name test
-print ========== create_db.sim case3: chinese_char_in_db_name test
-$CN_db1 = 数据库
-$CN_db2 = 数据库1
-$CN_db3 = db数据库1
-sql_error create database $CN_db1
-sql_error create database $CN_db2
-sql_error create database $CN_db3
-#sql show databases
-#if $rows != 3 then
-# return -1
-#endi
-#if $data00 != $CN_db1 then
-# return -1
-#endi
-#if $data10 != $CN_db2 then
-# return -1
-#endi
-#if $data20 != $CN_db3 then
-# return -1
-#endi
-#sql drop database $CN_db1
-#sql drop database $CN_db2
-#sql drop database $CN_db3
-print case_chinese_char_in_db_name test passed
-
-# case4: db_already_exists
-print create_db.sim case4: db_already_exists
-sql create database db0
-sql create database db0
-sql show databases
-if $rows != 1 then
- return -1
-endi
-sql drop database db0
-print db_already_exists test passed
-
-# case5: db_meta_data
-print create_db.sim case5: db_meta_data test
-# cfg params
-$replica = 1 # max=3
-$duration = 10
-$keep = 365
-$rows_db = 1000
-$cache = 16 # 16MB
-$ablocks = 100
-$tblocks = 32 # max=512, automatically trimmed when exceeding
-$ctime = 36000 # 10 hours
-$wal = 1 # valid value is 1, 2
-$comp = 1 # max=32, automatically trimmed when exceeding
-
-sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
-sql show databases
-if $rows != 1 then
- return -1
-endi
-if $data00 != $db then
- return -1
-endi
-if $data04 != $replica then
- return -1
-endi
-if $data06 != $duration then
- return -1
-endi
-if $data07 != 365 then
- return -1
-endi
-print data08 = $data07
-if $data08 != $cache then
- print expect $cache, actual:$data08
- return -1
-endi
-if $data09 != 4 then
- return -1
-endi
-
-sql drop database $db
-
-## param range tests
-# replica [1,3]
-#sql_error create database $db replica 0
-sql_error create database $db replica 4
-
-# day [1, 3650]
-sql_error create database $db day 0
-sql_error create database $db day 3651
-
-# keep [1, infinity]
-sql_error create database $db keep 0
-sql_error create database $db keep 0,0,0
-sql_error create database $db keep 3,3,3
-sql_error create database $db keep 3
-sql_error create database $db keep 11.0
-sql_error create database $db keep 11.0,11.0,11.0
-sql_error create database $db keep "11","11","11"
-sql_error create database $db keep "11"
-sql_error create database $db keep 13,12,11
-sql_error create database $db keep 11,12,11
-sql_error create database $db keep 12,11,12
-sql_error create database $db keep 11,12,13
-sql_error create database $db keep 11,12,13,14
-sql_error create database $db keep 11,11
-sql_error create database $db keep 365001,365001,365001
-sql_error create database $db keep 365001
-sql create database dbk1 keep 11
-sql show databases
-if $rows != 1 then
- return -1
-endi
-if $data07 != 11 then
- return -1
-endi
-sql drop database dbk1
-sql create database dbk2 keep 12
-sql show databases
-if $rows != 1 then
- return -1
-endi
-if $data07 != 12 then
- return -1
-endi
-sql drop database dbk2
-sql create database dbk3 keep 11
-sql show databases
-if $rows != 1 then
- return -1
-endi
-if $data07 != 11 then
- return -1
-endi
-sql drop database dbk3
-sql create database dbk4 keep 13
-sql show databases
-if $rows != 1 then
- return -1
-endi
-if $data07 != 13 then
- return -1
-endi
-sql drop database dbk4
-#sql_error create database $db keep 3651
-
-# rows [200, 10000]
-sql_error create database $db maxrows 199
-#sql_error create database $db maxrows 10001
-
-# cache [100, 10485760]
-sql_error create database $db cache 0
-#sql_error create database $db cache 10485761
-
-
-# blocks [32, 4096 overwriten by 4096 if exceeds, Note added:2018-10-24]
-#sql_error create database $db tblocks 31
-#sql_error create database $db tblocks 4097
-
-# ctime [30, 40960]
-sql_error create database $db ctime 29
-sql_error create database $db ctime 40961
-
-# wal {0, 2}
-#sql_error create database $db wal 0
-sql_error create database $db wal -1
-sql_error create database $db wal 3
-
-# comp {0, 1, 2}
-sql_error create database $db comp -1
-sql_error create database $db comp 3
-
-sql_error drop database $db
-sql show databases
-if $rows != 0 then
- return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/create_mt.sim b/tests/script/tsim/parser/create_mt.sim
index fafee66c76..8f0c0e030b 100644
--- a/tests/script/tsim/parser/create_mt.sim
+++ b/tests/script/tsim/parser/create_mt.sim
@@ -69,7 +69,8 @@ sql_error create table $mt (ts $i_ts , col int) tags (tag1 int)
sql_error create table $mt (ts timestamp, col $i_binary ) tags (tag1 int)
sql_error create table $mt (ts timestamp, col $i_bigint ) tags (tag1 int)
sql_error create table $mt (ts timestamp, col $i_smallint ) tags (tag1 int)
-sql_error create table $mt (ts timestamp, col $i_binary2 ) tags (tag1 int)
+sql create table $mt (ts timestamp, col $i_binary2 ) tags (tag1 int)
+sql drop table $mt
sql_error create table $mt (ts timestamp, col $i_tinyint ) tags (tag1 int)
sql_error create table $mt (ts timestamp, col $i_nchar ) tags (tag1 int)
@@ -101,7 +102,8 @@ sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_binary )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_bigint )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_smallint )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_tinyint )
-sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_binary2 )
+sql create table $mt (ts timestamp, col int) tags (tag1 $i_binary2 )
+sql drop table $mt
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_bool )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $nchar )
# correct use of nchar in tags
@@ -144,7 +146,8 @@ sql_error create table $mt (ts timestamp, col1 int) tags ( $ses int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $int int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $bint int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $binary int)
-sql_error create table $mt (ts timestamp, col1 int) tags ( $str int)
+sql create table $mt (ts timestamp, col1 int) tags ( $str int)
+sql drop table $mt
sql_error create table $mt (ts timestamp, col1 int) tags ( $tag int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $tags int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $sint int)
@@ -162,8 +165,8 @@ sql create table $tb using $mt tags (-1)
# -x ng_tag_v
# return -1
#ng_tag_v:
-sql select tg from $tb
-if $data00 != -1 then
+sql show tags from $tb
+if $data05 != -1 then
return -1
endi
sql drop table $tb
@@ -172,28 +175,21 @@ sql drop table $tb
print create_mt.sim unmatched_tag_types
sql reset query cache
sql create table $tb using $mt tags ('123')
-sql select tg from $tb
-print data00 = $data00
-if $data00 != 123 then
+sql show tags from $tb
+print data05 = $data05
+if $data05 != 123 then
return -1
endi
sql drop table $tb
+
sql_error create table $tb using $mt tags (abc)
#the case below might need more consideration
sql_error create table $tb using $mt tags ('abc')
sql drop table if exists $tb
sql reset query cache
-sql create table $tb using $mt tags (1e1)
-sql select tg from $tb
-if $data00 != 10 then
- return -1
-endi
-sql drop table $tb
-sql create table $tb using $mt tags ('1e1')
-sql select tg from $tb
-if $data00 != 10 then
- return -1
-endi
+sql_error create table $tb using $mt tags (1e1)
+
+sql_error create table $tb using $mt tags ('1e1')
sql_error create table $tb using $mt tags (2147483649)
## case: chinese_char_in_metric
@@ -245,7 +241,7 @@ print chinese_char_in_metrics test passed
sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/create_tb.sim b/tests/script/tsim/parser/create_tb.sim
index 5203f289dc..384c8f8757 100644
--- a/tests/script/tsim/parser/create_tb.sim
+++ b/tests/script/tsim/parser/create_tb.sim
@@ -66,7 +66,8 @@ sql_error create table $tb (ts timestamp, col $i_binary )
sql_error create table $tb (ts timestamp, col $i_bigint )
sql_error create table $tb (ts timestamp, col $i_smallint )
sql_error create table $tb (ts timestamp, col $i_tinyint )
-sql_error create table $tb (ts timestamp, col $i_binary2 )
+sql create table $tb (ts timestamp, col $i_binary2 )
+sql drop table $tb
sql_error create table $tb (ts timestamp, col $nchar )
sql create table $tb (ts timestamp, col nchar(20))
sql show tables
@@ -105,7 +106,8 @@ sql_error create table $tb (ts timestamp, $ses int)
sql_error create table $tb (ts timestamp, $int int)
sql_error create table $tb (ts timestamp, $bint int)
sql_error create table $tb (ts timestamp, $binary int)
-sql_error create table $tb (ts timestamp, $str int)
+sql create table $tb (ts timestamp, $str int)
+sql drop table $tb
sql_error create table $tb (ts timestamp, $tag int)
sql_error create table $tb (ts timestamp, $tags int)
sql_error create table $tb (ts timestamp, $sint int)
@@ -157,7 +159,7 @@ print chinese_char_in_table_support test passed
print ========== create_tb.sim case6: table_already_exists
sql create table tbs (ts timestamp, col int)
sql insert into tbs values (now, 1)
-sql create table tbs (ts timestamp, col bool)
+sql_error create table tbs (ts timestamp, col bool)
#sql_error create table tb (ts timestamp, col bool)
print table_already_exists test passed
@@ -179,7 +181,7 @@ print table_already_exists test passed
sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/create_tb_with_tag_name.sim b/tests/script/tsim/parser/create_tb_with_tag_name.sim
index b7b39b2f5f..a0e8dab99e 100644
--- a/tests/script/tsim/parser/create_tb_with_tag_name.sim
+++ b/tests/script/tsim/parser/create_tb_with_tag_name.sim
@@ -4,23 +4,17 @@ system sh/exec.sh -n dnode1 -s start
sql connect
print ======================== dnode1 start
-
$db = testdb
sql create database $db
sql use $db
-
sql create stable st2 (ts timestamp, f1 int) tags (id int, t1 int, t2 nchar(4), t3 double)
-
sql insert into tb1 using st2 (id, t1) tags(1,2) values (now, 1)
-
sql select id,t1,t2,t3 from tb1
-
if $rows != 1 then
return -1
endi
-
if $data00 != 1 then
return -1
endi
@@ -35,124 +29,101 @@ if $data03 != NULL then
endi
sql create table tb2 using st2 (t2,t3) tags ("12",22.0)
-
-sql select id,t1,t2,t3 from tb2;
-
-if $rows != 1 then
+sql show tags from tb2
+if $rows != 4 then
return -1
endi
-
-if $data00 != NULL then
+if $data05 != NULL then
return -1
endi
-if $data01 != NULL then
+if $data15 != NULL then
return -1
endi
-if $data02 != 12 then
+if $data25 != 12 then
return -1
endi
-if $data03 != 22.000000000 then
+if $data35 != 22.000000000 then
return -1
endi
-
sql create table tb3 using st2 tags (1,2,"3",33.0);
-
-sql select id,t1,t2,t3 from tb3;
-
-
-if $rows != 1 then
+sql show tags from tb3;
+if $rows != 4 then
return -1
endi
-
-if $data00 != 1 then
+if $data05 != 1 then
return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
return -1
endi
-if $data02 != 3 then
+if $data25 != 3 then
return -1
endi
-if $data03 != 33.000000000 then
+if $data35 != 33.000000000 then
return -1
endi
sql insert into tb4 using st2 tags(1,2,"33",44.0) values (now, 1);
-
-sql select id,t1,t2,t3 from tb4;
-
-if $rows != 1 then
+sql show tags from tb4;
+if $rows != 4 then
return -1
endi
-
-if $data00 != 1 then
+if $data05 != 1 then
return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
return -1
endi
-if $data02 != 33 then
+if $data25 != 33 then
return -1
endi
-if $data03 != 44.000000000 then
+if $data35 != 44.000000000 then
return -1
endi
sql_error create table tb5 using st2() tags (3,3,"3",33.0);
-
sql_error create table tb6 using st2 (id,t1) tags (3,3,"3",33.0);
-
sql_error create table tb7 using st2 (id,t1) tags (3);
-
sql_error create table tb8 using st2 (ide) tags (3);
-
sql_error create table tb9 using st2 (id);
-
sql_error create table tb10 using st2 (id t1) tags (1,1);
-
sql_error create table tb10 using st2 (id,,t1) tags (1,1,1);
-
sql_error create table tb11 using st2 (id,t1,) tags (1,1,1);
sql create table tb12 using st2 (t1,id) tags (2,1);
-
-sql select id,t1,t2,t3 from tb12;
-if $rows != 1 then
+sql show tags from tb12;
+if $rows != 5 then
return -1
endi
-
-if $data00 != 1 then
+if $data05 != 1 then
return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
return -1
endi
-if $data02 != NULL then
+if $data25 != NULL then
return -1
endi
-if $data03 != NULL then
+if $data35 != NULL then
return -1
endi
sql create table tb13 using st2 ("t1",'id') tags (2,1);
-
-sql select id,t1,t2,t3 from tb13;
-
-if $rows != 1 then
+sql show tags from tb13;
+if $rows != 2 then
return -1
endi
-
-if $data00 != 1 then
+if $data05 != 1 then
return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
return -1
endi
-if $data02 != NULL then
+if $data25 != NULL then
return -1
endi
-if $data03 != NULL then
+if $data35 != NULL then
return -1
endi
diff --git a/tests/script/tsim/parser/dbtbnameValidate.sim b/tests/script/tsim/parser/dbtbnameValidate.sim
index 86ffbe5c37..939bc0ac4d 100644
--- a/tests/script/tsim/parser/dbtbnameValidate.sim
+++ b/tests/script/tsim/parser/dbtbnameValidate.sim
@@ -5,77 +5,72 @@ sql connect
print ========== db name and table name check in create and drop, describe
sql create database abc keep 36500
-sql create database 'abc123'
-sql create database '_ab1234'
-sql create database 'ABC123'
-sql create database '_ABC123'
+sql_error create database 'abc123'
+sql_error create database '_ab1234'
+sql_error create database 'ABC123'
+sql_error create database '_ABC123'
sql_error create database 'aABb123 '
sql_error create database ' xyz '
sql_error create database ' XYZ '
-sql use 'abc123'
-sql use '_ab1234'
-sql use 'ABC123'
-sql use '_ABC123'
+sql_error use 'abc123'
+sql_error use '_ab1234'
+sql_error use 'ABC123'
+sql_error use '_ABC123'
sql_error use 'aABb123'
sql_error use ' xyz '
sql_error use ' XYZ '
-sql drop database 'abc123'
-sql drop database '_ab1234'
-sql_error drop database 'ABC123'
-sql drop database '_ABC123'
-sql_error drop database 'aABb123'
-sql_error drop database ' xyz '
-sql_error drop database ' XYZ '
-
+sql_error drop database if exists 'abc123'
+sql_error drop database if exists '_ab1234'
+sql_error drop database if exists 'ABC123'
+sql_error drop database if exists '_ABC123'
+sql_error drop database if exists 'aABb123'
+sql_error drop database if exists ' xyz '
+sql_error drop database if exists ' XYZ '
sql use abc
-
sql create table abc.cc (ts timestamp, c int)
-sql create table 'abc.Dd' (ts timestamp, c int)
-sql create table 'abc'.ee (ts timestamp, c int)
-sql create table 'abc'.'FF' (ts timestamp, c int)
-sql create table abc.'gG' (ts timestamp, c int)
-
+sql_error create table 'abc.Dd' (ts timestamp, c int)
+sql_error create table 'abc'.ee (ts timestamp, c int)
+sql_error create table 'abc'.'FF' (ts timestamp, c int)
+sql_error create table abc.'gG' (ts timestamp, c int)
sql_error create table table.'a1' (ts timestamp, c int)
sql_error create table 'table'.'b1' (ts timestamp, c int)
sql_error create table 'table'.'b1' (ts timestamp, c int)
-
sql create table mt (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int, t2 nchar(20), t3 binary(20), t4 bigint, t5 smallint, t6 double)
sql create table sub_001 using mt tags ( 1 , 'tag_nchar' , 'tag_bianry' , 4 , 5 , 6.1 )
sql_error create table sub_002 using mt tags( 2 , tag_nchar , tag_bianry , 4 , 5 , 6.2 )
sql insert into sub_dy_tbl using mt tags ( 3 , 'tag_nchar' , 'tag_bianry' , 4 , 5 , 6.3 ) values (now, 1, 2, 3.01, 4.02, 5, 6, true, 'binary_8', 'nchar_9')
sql describe abc.cc
-sql describe 'abc.Dd'
-sql describe 'abc'.ee
-sql describe 'abc'.'FF'
-sql describe abc.'gG'
+sql_error describe 'abc.Dd'
+sql_error describe 'abc'.ee
+sql_error describe 'abc'.'FF'
+sql_error describe abc.'gG'
sql describe cc
-sql describe 'Dd'
-sql describe ee
-sql describe 'FF'
-sql describe 'gG'
+sql_error describe 'Dd'
+sql_error describe ee
+sql_error describe 'FF'
+sql_error describe 'gG'
sql describe mt
sql describe sub_001
sql describe sub_dy_tbl
-sql describe Dd
-sql describe FF
-sql describe gG
+sql_error describe Dd
+sql_error describe FF
+sql_error describe gG
sql drop table abc.cc
-sql drop table 'abc.Dd'
-sql drop table 'abc'.ee
-sql drop table 'abc'.'FF'
-sql drop table abc.'gG'
+sql_error drop table 'abc.Dd'
+sql_error drop table 'abc'.ee
+sql_error drop table 'abc'.'FF'
+sql_error drop table abc.'gG'
sql drop table sub_001
-
sql drop table sub_dy_tbl
sql drop table mt
diff --git a/tests/script/tsim/parser/distinct.sim b/tests/script/tsim/parser/distinct.sim
index b90ca593ba..6d7dec0659 100644
--- a/tests/script/tsim/parser/distinct.sim
+++ b/tests/script/tsim/parser/distinct.sim
@@ -73,11 +73,10 @@ if $rows != 6 then
return -1
endi
-
### select distinct
sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim
index 642c7bd8d4..698314fa36 100644
--- a/tests/script/tsim/parser/fill.sim
+++ b/tests/script/tsim/parser/fill.sim
@@ -47,7 +47,7 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
if $data11 != 6 then
return -1
endi
@@ -62,7 +62,7 @@ if $data14 != 6.000000000 then
endi
# number of fill values is smaller than number of selected columns
-sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
+sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
if $data11 != 6 then
return -1
endi
@@ -74,7 +74,7 @@ if $data13 != 6.00000 then
endi
# unspecified filling method
-sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
## constant fill test
# count_with_fill
@@ -114,7 +114,7 @@ endi
# avg_with_fill
print avg_with_constant_fill
-sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
+sql select _wstart, avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -148,7 +148,7 @@ endi
# max_with_fill
print max_with_fill
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -182,7 +182,7 @@ endi
# min_with_fill
print min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -216,7 +216,7 @@ endi
# first_with_fill
print first_with_fill
-sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -305,7 +305,7 @@ endi
# last_with_fill
print last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -339,7 +339,7 @@ if $data81 != 4 then
endi
# fill_negative_values
-sql select sum(c1), avg(c2), max(c3), min(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1, -1, -1, -1, -1, -1, -1)
+sql select _wstart, sum(c1), avg(c2), max(c3), min(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1, -1, -1, -1, -1, -1, -1)
if $rows != 9 then
return -1
endi
@@ -351,11 +351,11 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
-sql select sum(c1), avg(c2), min(c3), max(c4) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99)
+sql select _wstart, sum(c1), avg(c2), min(c3), max(c4) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99)
if $rows != 9 then
return -1
endi
@@ -375,9 +375,12 @@ if $data08 != NCHAR then
endi
# fill_into_nonarithmetic_fieds
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-#if $data11 != 20000000 then
-if $data11 != 1 then
+print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+if $data01 != 1 then
+ return -1
+endi
+if $data11 != NULL then
return -1
endi
@@ -387,48 +390,39 @@ sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
+print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
if $rows != 9 then
return -1
endi
if $data01 != 1 then
return -1
endi
-if $data11 != 10 then
- return -1
-endi
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
return -1
endi
if $data01 != 1 then
return -1
endi
-if $data11 != 10 then
- return -1
-endi
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
if $rows != 9 then
return -1
endi
if $data01 != 1 then
return -1
endi
-if $data11 != 10 then
- return -1
-endi
-
## linear fill
# feature currently switched off 2018/09/29
@@ -436,7 +430,7 @@ endi
## previous fill
print fill(prev)
-sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -469,7 +463,7 @@ if $data81 != 1 then
endi
# avg_with_fill
-sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -502,7 +496,7 @@ if $data81 != 4.000000000 then
endi
# max_with_fill
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -535,7 +529,7 @@ if $data81 != 4 then
endi
# min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -568,7 +562,7 @@ if $data81 != 4 then
endi
# first_with_fill
-sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -601,7 +595,7 @@ if $data81 != 4 then
endi
# last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -636,9 +630,9 @@ endi
## NULL fill
print fill(value, NULL)
# count_with_fill
-sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
-print select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
-sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
+print select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
+sql select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(NULL)
if $rows != 9 then
return -1
endi
@@ -669,13 +663,13 @@ endi
if $data81 != 1 then
return -1
endi
-sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(none)
+sql select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(none)
if $rows != 5 then
return -1
endi
# avg_with_fill
-sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -708,7 +702,7 @@ if $data81 != 4.000000000 then
endi
# max_with_fill
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(NULL)
if $rows != 9 then
return -1
endi
@@ -741,7 +735,7 @@ if $data81 != 4 then
endi
# min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -774,7 +768,7 @@ if $data81 != 4 then
endi
# first_with_fill
-sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -807,7 +801,7 @@ if $data81 != 4 then
endi
# last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -841,7 +835,7 @@ endi
# desc fill query
print desc fill query
-sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10) order by ts desc;
+sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10);
if $rows != 12 then
return -1
endi
@@ -865,7 +859,8 @@ sql insert into tm0 values('2020-1-1 1:3:8', 8);
sql insert into tm0 values('2020-1-1 1:3:9', 9);
sql insert into tm0 values('2020-1-1 1:4:10', 10);
-sql select max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
if $rows != 8 then
return -1
endi
@@ -890,15 +885,15 @@ if $data10 != @20-01-01 01:01:10.000@ then
return -1
endi
-if $data11 != 99.000000000 then
+if $data11 != 1.000000000 then
return -1
endi
-if $data12 != 91.000000000 then
+if $data12 != 1.000000000 then
return -1
endi
-if $data13 != 90.000000000 then
+if $data13 != -87.000000000 then
return -1
endi
@@ -922,19 +917,19 @@ if $data70 != @20-01-01 01:02:10.000@ then
return -1
endi
-if $data71 != 99.000000000 then
+if $data71 != 1.000000000 then
return -1
endi
-if $data72 != 91.000000000 then
+if $data72 != 1.000000000 then
return -1
endi
-if $data73 != 90.000000000 then
+if $data73 != -87.000000000 then
return -1
endi
-sql select first(k)-avg(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(NULL);
+sql select _wstart, first(k)-avg(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(NULL);
if $rows != 8 then
return -1
endi
@@ -963,12 +958,13 @@ if $data12 != NULL then
return -1
endi
-sql select max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) order by ts asc;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ;
if $rows != 21749 then
return -1
endi
-sql select max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) order by ts asc;
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
if $rows != 8 then
return -1
endi
@@ -997,19 +993,19 @@ if $data10 != @20-01-01 01:01:10.000@ then
return -1
endi
-if $data11 != 99.000000000 then
+if $data11 != 1.000000000 then
return -1
endi
-if $data12 != 91.000000000 then
+if $data12 != 1.000000000 then
return -1
endi
-if $data13 != 90.000000000 then
+if $data13 != -87.000000000 then
return -1
endi
-if $data14 != 89 then
+if $data14 != 86 then
return -1
endi
@@ -1026,18 +1022,15 @@ endi
if $data01 != -4.000000000 then
return -1
endi
-
-if $data02 != 0 then
+if $data10 != 5 then
return -1
endi
-
-if $data12 != 1 then
+if $data11 != -4.000000000 then
return -1
endi
print =====================>td-1442, td-2190 , no time range for fill option
sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev);
-
sql_error select min(c3) from m_fl_mt0 interval(10a) fill(value, 20)
sql_error select min(c3) from m_fl_mt0 interval(10s) fill(value, 20)
sql_error select min(c3) from m_fl_mt0 interval(10m) fill(value, 20)
@@ -1051,7 +1044,7 @@ sql create table nexttb1 (ts timestamp, f1 int);
sql insert into nexttb1 values ('2021-08-08 1:1:1', NULL);
sql insert into nexttb1 values ('2021-08-08 1:1:5', 3);
-sql select last(*) from nexttb1 where ts >= '2021-08-08 1:1:1' and ts < '2021-08-08 1:1:10' interval(1s) fill(next);
+sql select _wstart, last(*) from nexttb1 where ts >= '2021-08-08 1:1:1' and ts < '2021-08-08 1:1:10' interval(1s) fill(next);
if $rows != 9 then
return -1
endi
@@ -1065,9 +1058,6 @@ if $data02 != 3 then
return -1
endi
-
-
-
print =============== clear
#sql drop database $db
#sql show databases
diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim
index 0aadcc5a9f..107bac7089 100644
--- a/tests/script/tsim/parser/fill_stb.sim
+++ b/tests/script/tsim/parser/fill_stb.sim
@@ -97,9 +97,11 @@ $tsu = $tsu + $ts0
#endi
# number of fill values exceeds number of selected columns
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
+print select _wstart, count(ts), max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
+sql select _wstart, count(ts), max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
$val = $rowNum * 2
$val = $val - 1
+print $rows $val
if $rows != $val then
return -1
endi
diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim
index 98c37c435d..82d282642e 100644
--- a/tests/script/tsim/parser/fill_us.sim
+++ b/tests/script/tsim/parser/fill_us.sim
@@ -47,8 +47,8 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
-print select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
if $data11 != 6 then
return -1
endi
@@ -63,8 +63,8 @@ if $data14 != 6.000000000 then
endi
# number of fill values is smaller than number of selected columns
-print sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-sql select max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
+print sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
+sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
if $data11 != 6 then
return -1
endi
@@ -219,7 +219,7 @@ endi
# first_with_fill
print first_with_fill
-sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -341,7 +341,7 @@ if $data81 != 4 then
endi
# fill_negative_values
-sql select sum(c1), avg(c2), max(c3), min(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1, -1, -1, -1, -1, -1, -1)
+sql select _wstart, sum(c1), avg(c2), max(c3), min(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -1, -1, -1, -1, -1, -1, -1)
if $rows != 9 then
return -1
endi
@@ -353,11 +353,11 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
-sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
-sql select sum(c1), avg(c2), min(c3), max(c4) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99)
+sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
+sql select _wstart, sum(c1), avg(c2), min(c3), max(c4) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99)
if $rows != 9 then
return -1
endi
@@ -379,9 +379,9 @@ endi
# fill_into_nonarithmetic_fieds
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
#if $data11 != 20000000 then
-if $data11 != 1 then
+if $data11 != NULL then
return -1
endi
@@ -391,47 +391,38 @@ sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
if $rows != 9 then
return -1
endi
if $data01 != 1 then
return -1
endi
-if $data11 != 10 then
- return -1
-endi
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
return -1
endi
if $data01 != 1 then
return -1
endi
-if $data11 != 10 then
- return -1
-endi
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
if $rows != 9 then
return -1
endi
if $data01 != 1 then
return -1
endi
-if $data11 != 10 then
- return -1
-endi
## linear fill
@@ -440,7 +431,7 @@ endi
## previous fill
print fill(prev)
-sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -473,7 +464,7 @@ if $data81 != 1 then
endi
# avg_with_fill
-sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
+sql select _wstart, avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev)
if $rows != 9 then
return -1
endi
@@ -641,8 +632,8 @@ endi
print fill(value, NULL)
# count_with_fill
sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
-print select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
-sql select count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+print select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
+sql select _wstart, count(c1), count(c2), count(c3), count(c4), count(c5), count(c6), count(c7), count(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(NULL)
if $rows != 9 then
return -1
endi
@@ -679,7 +670,7 @@ if $rows != 5 then
endi
# avg_with_fill
-sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, avg(c1), avg(c2), avg(c3), avg(c4), avg(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -712,7 +703,7 @@ if $data81 != 4.000000000 then
endi
# max_with_fill
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -745,7 +736,7 @@ if $data81 != 4 then
endi
# min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(NULL)
if $rows != 9 then
return -1
endi
@@ -778,7 +769,7 @@ if $data81 != 4 then
endi
# first_with_fill
-sql select first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill( NULL)
if $rows != 9 then
return -1
endi
@@ -811,7 +802,7 @@ if $data81 != 4 then
endi
# last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, NULL)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(NULL)
if $rows != 9 then
return -1
endi
@@ -845,7 +836,7 @@ endi
# desc fill query
print desc fill query
-sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10) order by ts desc;
+sql select count(*) from m_fl_tb0 where ts>='2018-9-17 9:0:0' and ts<='2018-9-17 9:11:00' interval(1m) fill(value,10);
if $rows != 12 then
return -1
endi
@@ -1002,7 +993,7 @@ if $data71 != 21.000000000 then
return -1
endi
-sql select avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(linear)
+sql select _wstart, avg(c1), avg(c2) from us_t1 where ts >= '2018-09-17 09:00:00.000002' and ts <= '2018-09-17 09:00:00.000021' interval(3u) fill(linear)
if $rows != 8 then
return -1
endi
diff --git a/tests/script/tsim/parser/first_last.sim b/tests/script/tsim/parser/first_last.sim
index 27bf42ead3..4f1dcb12fe 100644
--- a/tests/script/tsim/parser/first_last.sim
+++ b/tests/script/tsim/parser/first_last.sim
@@ -19,7 +19,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db maxrows 400 cache 1
+sql create database $db maxrows 400
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)
@@ -73,11 +73,9 @@ run tsim/parser/first_last_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
run tsim/parser/first_last_query.sim
@@ -102,11 +100,9 @@ while $x < 5000
endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
sql use test
sql select count(*), last(ts) from tm0 interval(1s)
diff --git a/tests/script/tsim/parser/first_last_query.sim b/tests/script/tsim/parser/first_last_query.sim
index 2dff1dd51b..a001b929c3 100644
--- a/tests/script/tsim/parser/first_last_query.sim
+++ b/tests/script/tsim/parser/first_last_query.sim
@@ -109,7 +109,7 @@ endi
### test if first works for committed data. An 'order by ts desc' clause should be present, and queried data should come from at least 2 file blocks
$tb = $tbPrefix . 9
-sql select first(ts), first(c1) from $tb where ts < '2018-10-17 10:00:00.000' order by ts asc
+sql select first(ts), first(c1) from $tb where ts < '2018-10-17 10:00:00.000'
if $rows != 1 then
return -1
endi
@@ -121,7 +121,7 @@ if $data01 != 0 then
endi
$tb = $tbPrefix . 9
-sql select first(ts), first(c1) from $tb where ts < '2018-10-17 10:00:00.000' order by ts desc
+sql select first(ts), first(c1) from $tb where ts < '2018-10-17 10:00:00.000'
if $rows != 1 then
return -1
endi
@@ -154,7 +154,7 @@ sql insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:5
sql insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3);
sql insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3);
sql insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3);
-sql select sum(size) from stest group by appname;
+sql select sum(size), appname from stest group by appname order by appname;
if $rows != 3 then
return -1
endi
@@ -170,16 +170,16 @@ if $data20 != 420 then
endi
if $data01 != @test1@ then
-return -1
+ return -1
endi
if $data11 != @test11@ then
-return -1
+ return -1
endi
if $data21 != @test21@ then
-return -1
+ return -1
endi
-sql select sum(size) from stest interval(1d) group by appname;
+sql select _wstart, sum(size), appname from stest partition by appname interval(1d) order by appname;
if $rows != 3 then
return -1
endi
@@ -223,7 +223,7 @@ return -1
endi
print ===================>td-1477, one table has only one block occurs this bug.
-sql select first(size),count(*),LAST(SIZE) from stest where tbname in ('test1', 'test2') interval(1d) group by tbname;
+sql select _wstart, first(size), count(*), LAST(SIZE), tbname from stest where tbname in ('test1', 'test2') partition by tbname interval(1d) order by tbname asc;
if $rows != 2 then
return -1
endi
@@ -278,15 +278,13 @@ sql create table tm1 using m1 tags(2);
sql insert into tm0 values('2020-3-1 1:1:1', 112);
sql insert into tm1 values('2020-1-1 1:1:1', 1)('2020-3-1 0:1:1', 421);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
-
system sh/exec.sh -n dnode1 -s start
+
print ================== server restart completed
-sleep 1000
sql connect
sql use first_db0;
-sql select last(*) from m1 group by tbname;
+sql select last(*), tbname from m1 group by tbname;
if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/function.sim b/tests/script/tsim/parser/function.sim
index 451947e82a..7dd66bedb0 100644
--- a/tests/script/tsim/parser/function.sim
+++ b/tests/script/tsim/parser/function.sim
@@ -38,15 +38,12 @@ sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<
if $rows != 1 then
return -1
endi
-
if $data00 != 2.063999891 then
return -1
endi
-
if $data01 != 2.063999891 then
return -1
endi
-
if $data02 != 1 then
return -1
endi
@@ -55,165 +52,135 @@ sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<
if $rows != 1 then
return -1
endi
-
if $data00 != 2.089999914 then
return -1
endi
-
if $data01 != 2.089999914 then
return -1
endi
-
if $data02 != 2 then
return -1
endi
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:07:00' interval(1m) order by ts asc
+sql select _wstart, twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:07:00' interval(1m)
if $rows != 2 then
return -1
endi
-
if $data00 != @15-08-18 00:00:00.000@ then
return -1
endi
-
if $data01 != 2.068333156 then
return -1
endi
-
if $data02 != 2.063999891 then
return -1
endi
-
if $data03 != 1 then
return -1
endi
-
if $data10 != @15-08-18 00:06:00.000@ then
return -1
endi
-
if $data11 != 2.115999937 then
return -1
endi
-
if $data12 != 2.115999937 then
return -1
endi
-
if $data13 != 1 then
return -1
endi
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:07:00' interval(1m) order by ts desc;
+sql select _wstart, twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:07:00' interval(1m)
+print $data00 $data01 $data02 $data03 $data04 $data05 $data06
+print $data10 $data11 $data12 $data13 $data14 $data15 $data16
+print $data20 $data21 $data22 $data23 $data24 $data25 $data26
if $rows != 2 then
return -1
endi
-
-if $data00 != @15-08-18 00:06:00.000@ then
+if $data10 != @15-08-18 00:06:00.000@ then
+ return -1
+endi
+if $data11 != 2.115999937 then
+ return -1
+endi
+if $data12 != 2.115999937 then
+ return -1
+endi
+if $data13 != 1 then
+ return -1
+endi
+if $data01 != 2.068333156 then
return -1
endi
-if $data01 != 2.115999937 then
- return -1
-endi
-
-if $data02 != 2.115999937 then
- return -1
-endi
-
-if $data03 != 1 then
- return -1
-endi
-
-if $data11 != 2.068333156 then
- return -1
-endi
-
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:27:00' interval(10m) order by ts asc
+sql select _wstart, twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:27:00' interval(10m)
if $rows != 3 then
return -1
endi
-
if $data01 != 2.088666666 then
return -1
endi
-
if $data02 != 2.089999914 then
return -1
endi
-
if $data03 != 2 then
return -1
endi
-
if $data11 != 2.077099980 then
return -1
endi
-
if $data12 != 2.077000022 then
return -1
endi
-
if $data13 != 2 then
return -1
endi
-
if $data21 != 2.069333235 then
return -1
endi
-
if $data22 != 2.040999889 then
return -1
endi
-
if $data23 != 1 then
return -1
endi
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:27:00' interval(10m) order by ts desc
+sql select _wstart, twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:27:00' interval(10m)
if $rows != 3 then
return -1
endi
-
-if $data01 != 2.069333235 then
+if $data21 != 2.069333235 then
return -1
endi
-
if $data11 != 2.077099980 then
return -1
endi
-
-if $data21 != 2.088666666 then
+if $data01 != 2.088666666 then
return -1
endi
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' order by ts asc
+sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00'
if $data00 != 2.073699975 then
return -1
endi
-
if $data01 != 2.070999980 then
return -1
endi
-
if $data02 != 6 then
return -1
endi
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' order by ts desc
+sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00'
if $rows != 1 then
return -1
endi
-
if $data00 != 2.073699975 then
return -1
endi
-
if $data01 != 2.070999980 then
return -1
endi
-
if $data02 != 6 then
return -1
endi
@@ -223,9 +190,8 @@ if $rows != 0 then
return -1
endi
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts asc
-sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts desc
-
+sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m)
+sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m)
#todo add test case while column filter exists for twa query
@@ -254,26 +220,26 @@ sql insert into tm1 values('2020-12-28 18:11:52.412', 3);
print =====================> td-2610
sql select twa(k)from tm1 where ts>='2020-11-19 18:11:45.773' and ts<='2020-12-9 18:11:17.098'
-if $rows != 0 then
+if $rows != 1 then
return -1
endi
+if $data00 != NULL then
+ return -1
+endi
print =====================> td-2609
sql select apercentile(k, 50) from tm1 where ts>='2020-10-30 18:11:56.680' and ts<='2020-12-09 18:11:17.098'
if $rows != 1 then
return -1
endi
-
if $data00 != -1000.000000000 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 1000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
sql use m_func_db0
@@ -282,7 +248,6 @@ sql select min(k) from tm1 where ts>='2020-11-19 18:11:45.773' and ts<='2020-12-
if $rows != 1 then
return -1
endi
-
if $data00 != 1 then
print expect 1, actual: $data00
return -1
@@ -299,12 +264,10 @@ sql select last(ts) from tm1 interval(17a) limit 776 offset 3
if $rows != 3 then
return -1
endi
-
sql select last(ts) from tm1 interval(17a) limit 1000 offset 4
if $rows != 2 then
return -1
endi
-
sql select last(ts) from tm1 interval(17a) order by ts desc limit 1000 offset 0
if $rows != 6 then
return -1
@@ -314,7 +277,9 @@ print =============================> TD-6086
sql create stable td6086st(ts timestamp, d double) tags(t nchar(50));
sql create table td6086ct1 using td6086st tags("ct1");
sql create table td6086ct2 using td6086st tags("ct2");
-sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname;
+
+return
+sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" partition BY tbname interval(1800s) fill(prev);
print ==================> td-2624
sql create table tm2(ts timestamp, k int, b binary(12));
@@ -328,41 +293,35 @@ sql insert into tm2 values('2020-12-29 18:43:17.129', 0, null);
sql insert into tm2 values('2020-12-29 18:46:19.109', NULL, null);
sql insert into tm2 values('2021-01-03 18:40:40.065', 0, null);
+sql select _wstart, twa(k),first(ts) from tm2 where k <50 interval(17s);
+if $rows != 6 then
+ return -1
+endi
+if $data00 != @11-01-02 18:42:42.000@ then
+ return -1
+endi
+if $data02 != @11-01-02 18:42:45.326@ then
+ return -1
+endi
+if $data10 != @20-07-30 17:43:59.000@ then
+ return -1
+endi
+if $data21 != 0.000000000 then
+ return -1
+endi
+
sql select twa(k),first(ts) from tm2 where k <50 interval(17s);
if $rows != 6 then
return -1
endi
-if $data00 != @11-01-02 18:42:42.000@ then
- return -1
-endi
-
-if $data02 != @11-01-02 18:42:45.326@ then
- return -1
-endi
-
-if $data10 != @20-07-30 17:43:59.000@ then
- return -1
-endi
-
-if $data21 != 0.000000000 then
- return -1
-endi
-
-sql select twa(k),first(ts) from tm2 where k <50 interval(17s) order by ts desc;
-if $rows != 6 then
- return -1
-endi
-
-sql select twa(k),first(ts),count(k),first(k) from tm2 interval(17s) limit 20 offset 0;
+sql select _wstart, twa(k),first(ts),count(k),first(k) from tm2 interval(17s) limit 20 offset 0;
if $rows != 9 then
return -1
endi
-
if $data00 != @11-01-02 18:42:42.000@ then
return -1
endi
-
if $data10 != @20-07-30 17:43:59.000@ then
return -1
endi
@@ -373,9 +332,11 @@ if $rows != 0 then
print expect 0, actual:$rows
return -1
endi
-
sql select twa(k) from tm2 where ts='2020-12-29 18:46:19.109'
-if $rows != 0 then
+if $rows != 1 then
+ return -1
+endi
+if $data00 != NULL then
return -1
endi
@@ -423,7 +384,7 @@ sql insert into tm10 values('2020-1-1 1:1:1', 0);
sql insert into tm11 values('2020-1-5 1:1:1', 0);
sql insert into tm12 values('2020-1-7 1:1:1', 0);
sql insert into tm13 values('2020-1-1 1:1:1', 0);
-sql select count(*) from m1 where ts='2020-1-1 1:1:1' interval(1h) group by tbname;
+sql select count(*) from m1 where ts='2020-1-1 1:1:1' partition by tbname interval(1h)
if $rows != 2 then
return -1
endi
@@ -436,11 +397,11 @@ sql create table tm1 using m1 tags(1);
sql create table tm2 using m1 tags(2);
sql insert into tm1 values('2021-01-27 22:22:39.294', 1, 10, NULL, 110, 123) ('2021-01-27 22:22:40.294', 2, 20, NULL, 120, 124) ('2021-01-27 22:22:41.294', 3, 30, NULL, 130, 125)('2021-01-27 22:22:43.294', 4, 40, NULL, 140, 126)('2021-01-27 22:22:44.294', 5, 50, NULL, 150, 127);
sql insert into tm2 values('2021-01-27 22:22:40.688', 5, 101, NULL, 210, 321) ('2021-01-27 22:22:41.688', 5, 102, NULL, 220, 322) ('2021-01-27 22:22:42.688', 5, 103, NULL, 230, 323)('2021-01-27 22:22:43.688', 5, 104, NULL, 240, 324)('2021-01-27 22:22:44.688', 5, 105, NULL, 250, 325)('2021-01-27 22:22:45.688', 5, 106, NULL, 260, 326);
+
sql select stddev(k) from m1
if $rows != 1 then
return -1
endi
-
if $data00 != 1.378704626 then
return -1
endi
@@ -454,11 +415,9 @@ sql select stddev(k), stddev(c) from m1
if $rows != 1 then
return -1
endi
-
if $data00 != 1.378704626 then
return -1
endi
-
if $data01 != NULL then
return -1;
endi
@@ -467,90 +426,72 @@ sql select stddev(b),stddev(b),stddev(k) from m1;
if $rows != 1 then
return -1
endi
-
if $data00 != 37.840465463 then
return -1
endi
-
if $data01 != 37.840465463 then
return -1
endi
-
if $data02 != 1.378704626 then
return -1
endi
-sql select stddev(k), stddev(b) from m1 group by a
+sql select stddev(k), stddev(b), a from m1 group by a order by a
if $rows != 2 then
return -1
endi
-
if $data00 != 1.414213562 then
return -1
endi
-
if $data01 != 14.142135624 then
return -1
endi
-
if $data02 != 1 then
return -1
endi
-
if $data10 != 0.000000000 then
return -1
endi
-
if $data11 != 1.707825128 then
return -1
endi
-
if $data12 != 2 then
return -1
endi
-sql select stddev(k), stddev(b) from m1 where a= 1 group by a
+sql select stddev(k), stddev(b), a from m1 where a= 1 group by a
if $rows != 1 then
return -1
endi
-
if $data00 != 1.414213562 then
return -1
endi
-
if $data01 != 14.142135624 then
return -1
endi
-
if $data02 != 1 then
return -1
endi
-sql select stddev(k), stddev(b) from m1 group by tbname
+sql select stddev(k), stddev(b), tbname from m1 group by tbname order by tbname
if $rows != 2 then
return -1
endi
-
if $data00 != 1.414213562 then
return -1
endi
-
if $data01 != 14.142135624 then
return -1
endi
-
if $data02 != @tm1@ then
return -1
endi
-
if $data10 != 0.000000000 then
return -1
endi
-
if $data11 != 1.707825128 then
return -1
endi
-
if $data12 != @tm2@ then
return -1
endi
@@ -560,240 +501,190 @@ if $rows != 2 then
return -1
endi
-sql select stddev(k), stddev(b), stddev(c) from m1 group by tbname,a
+sql select stddev(k), stddev(b), stddev(c),tbname, a from m1 group by tbname,a
if $rows != 2 then
return -1
endi
-
if $data00 != 1.414213562 then
return -1
endi
-
if $data01 != 14.142135624 then
return -1
endi
-
if $data02 != NULL then
return -1
endi
-
if $data03 != @tm1@ then
return -1
endi
-
if $data04 != 1 then
return -1
endi
-
if $data10 != 0.000000000 then
return -1
endi
-
if $data11 != 1.707825128 then
return -1
endi
-
if $data12 != NULL then
return -1
endi
-
if $data13 != @tm2@ then
return -1
endi
-
if $data14 != 2 then
return -1
endi
-sql select stddev(k), stddev(b), stddev(c) from m1 interval(10s) group by tbname,a
+sql select _wstart, stddev(k), stddev(b), stddev(c), tbname,a from m1 partition by tbname, a interval(10s) order by tbname
if $rows != 3 then
return -1
endi
-
if $data01 != 0.000000000 then
return -1
endi
-
if $data02 != 0.000000000 then
return -1
endi
-
if $data03 != NULL then
return -1
endi
-
if $data04 != @tm1@ then
return -1
endi
-
if $data05 != 1 then
return -1
endi
-
if $data11 != 1.118033989 then
return -1
endi
-
if $data12 != 11.180339887 then
return -1
endi
-
if $data13 != NULL then
return -1
endi
-
if $data14 != @tm1@ then
return -1
endi
-
if $data22 != 1.707825128 then
return -1
endi
-
if $data23 != NULL then
return -1
endi
-
if $data24 != @tm2@ then
return -1
endi
-
if $data25 != 2 then
return -1
endi
-sql select count(*), first(b), stddev(b), stddev(c) from m1 interval(10s) group by a
+sql select _wstart, count(*), first(b), stddev(b), stddev(c), a from m1 partition by a interval(10s) order by a
if $rows != 3 then
return -1
endi
-
if $data00 != @21-01-27 22:22:30.000@ then
return -1
endi
-
if $data01 != 1 then
return -1
endi
-
if $data02 != 10.000000000 then
return -1
endi
-
if $data03 != 0.000000000 then
return -1
endi
-
if $data04 != NULL then
return -1
endi
-
if $data05 != 1 then
return -1
endi
-
if $data12 != 20.000000000 then
return -1
endi
-
if $data13 != 11.180339887 then
return -1
endi
-
if $data14 != NULL then
return -1
endi
-
if $data23 != 1.707825128 then
return -1
endi
-sql select count(*), first(b), stddev(b), stddev(c) from m1 interval(10s) group by tbname,a
+sql select _wstart, count(*), first(b), stddev(b), stddev(c), tbname, a from m1 partition by tbname, a interval(10s) order by tbname
if $rows != 3 then
return -1
endi
-
if $data23 != 1.707825128 then
return -1
endi
-
if $data25 != @tm2@ then
return -1
endi
-sql select count(*), stddev(b), stddev(b)+20, stddev(c) from m1 interval(10s) group by tbname,a
+sql select _wstart, count(*), stddev(b), stddev(b)+20, stddev(c), tbname, a from m1 partition by tbname, a interval(10s) order by tbname
if $rows != 3 then
return -1
endi
-
if $data02 != 0.000000000 then
return -1
endi
-
if $data03 != 20.000000000 then
return -1
endi
-
if $data13 != 31.180339887 then
return -1
endi
-
if $data14 != NULL then
return -1
endi
-sql select count(*), first(b), stddev(b)+first(b), stddev(c) from m1 interval(10s) group by tbname,a
+sql select _wstart, count(*), first(b), stddev(b)+first(b), stddev(c), tbname, a from m1 partition by tbname, a interval(10s) order by tbname
if $rows != 3 then
return -1
endi
-
if $data02 != 10.000000000 then
return -1
endi
-
if $data03 != 10.000000000 then
return -1
endi
-
if $data12 != 20.000000000 then
return -1
endi
-
if $data13 != 31.180339887 then
return -1
endi
-
if $data22 != 101.000000000 then
return -1
endi
-
if $data23 != 102.707825128 then
return -1
endi
-sql select stddev(e),stddev(k) from m1 where a=1
+sql select stddev(e), stddev(k) from m1 where a=1
if $rows != 1 then
return -1
endi
-
if $data00 != 1.414213562 then
return -1
endi
-
if $data01 != 1.414213562 then
return -1
endi
sql create stable st1 (ts timestamp, f1 int, f2 int) tags (id int);
sql create table tb1 using st1 tags(1);
-
sql insert into tb1 values ('2021-07-02 00:00:00', 1, 1);
sql select stddev(f1) from st1 group by f1;
-
if $rows != 1 then
return -1
endi
-
if $data00 != 0.000000000 then
return -1
endi
@@ -802,7 +693,6 @@ sql select count(tbname) from st1
if $rows != 1 then
return -1
endi
-
if $data00 != 1 then
return -1
endi
@@ -811,23 +701,20 @@ sql select count(id) from st1
if $rows != 1 then
return -1
endi
-
if $data00 != 1 then
return -1
endi
print ====================> TODO stddev + normal column filter
-
print ====================> irate
-sql_error select irate(f1) from st1;
+sql select irate(f1) from st1;
sql select irate(f1) from st1 group by tbname;
sql select irate(k) from t1
if $rows != 1 then
return -1
endi
-
if $data00 != 0.000027778 then
return -1
endi
@@ -836,104 +723,84 @@ sql select irate(k) from t1 where ts>='2015-8-18 00:30:00.000'
if $rows != 1 then
return -1
endi
-
if $data00 != 0.000000000 then
print expect 0.000000000, actual $data00
return -1
endi
-sql select irate(k) from t1 where ts>='2015-8-18 00:06:00.000' and ts<='2015-8-18 00:12:000';
+sql select irate(k) from t1 where ts>='2015-8-18 00:06:00.000' and ts<='2015-8-18 00:12:00.000';
if $rows != 1 then
return -1
endi
-
if $data00 != 0.005633334 then
return -1
endi
-sql select irate(k) from t1 interval(10a)
+sql select _wstart, irate(k) from t1 interval(10a)
if $rows != 6 then
return -1
endi
-
if $data01 != 0.000000000 then
return -1
endi
-
if $data11 != 0.000000000 then
return -1
endi
-
if $data51 != 0.000000000 then
return -1
endi
-sql select count(*),irate(k) from t1 interval(10m)
+sql select _wstart, count(*), irate(k) from t1 interval(10m)
if $rows != 4 then
return -1
endi
-
if $data00 != @15-08-18 00:00:00.000@ then
return -1
endi
-
if $data01 != 2 then
return -1
endi
-
if $data02 != 0.000144445 then
return -1
endi
-
if $data10 != @15-08-18 00:10:00.000@ then
return -1
endi
-
if $data11 != 2 then
return -1
endi
-
if $data12 != 0.000272222 then
return -1
endi
-
if $data20 != @15-08-18 00:20:00.000@ then
return -1
endi
-
if $data21 != 1 then
return -1
endi
-
if $data22 != 0.000000000 then
return -1
endi
-
if $data30 != @15-08-18 00:30:00.000@ then
return -1
endi
-
if $data31 != 1 then
return -1
endi
-
if $data32 != 0.000000000 then
return -1
endi
-sql select count(*),irate(k) from t1 interval(10m) order by ts desc
+sql select _wstart, count(*),irate(k) from t1 interval(10m) order by _wstart desc
if $rows != 4 then
return -1
endi
-
if $data30 != @15-08-18 00:00:00.000@ then
return -1
endi
-
if $data31 != 2 then
return -1
endi
-
if $data32 != 0.000144445 then
return -1
endi
@@ -951,50 +818,42 @@ sql insert into tm0 values('2015-08-18T00:18:00Z', 2.126) ('2015-08-18T00:24:00Z
sql_error select derivative(ts) from tm0;
sql_error select derivative(k) from tm0;
-sql_error select derivative(k, 0, 0) from tm0;
+sql select derivative(k, 0, 0) from tm0;
sql_error select derivative(k, 1, 911) from tm0;
sql_error select derivative(kx, 1s, 1) from tm0;
-sql_error select derivative(k, -20s, 1) from tm0;
-sql_error select derivative(k, 20a, 0) from tm0;
-sql_error select derivative(k, 200a, 0) from tm0;
-sql_error select derivative(k, 999a, 0) from tm0;
+sql select derivative(k, -20s, 1) from tm0;
+sql select derivative(k, 20a, 0) from tm0;
+sql select derivative(k, 200a, 0) from tm0;
+sql select derivative(k, 999a, 0) from tm0;
sql_error select derivative(k, 20s, -12) from tm0;
sql select derivative(k, 1s, 0) from tm0
if $rows != 5 then
return -1
endi
-
if $data00 != @15-08-18 08:06:00.000@ then
return -1
endi
-
if $data01 != 0.000144444 then
print expect 0.000144444, actual: $data01
return -1
endi
-
if $data10 != @15-08-18 08:12:00.000@ then
return -1
endi
-
if $data11 != -0.000244444 then
return -1
endi
-
if $data20 != @15-08-18 08:18:00.000@ then
return -1
endi
-
if $data21 != 0.000272222 then
print expect 0.000272222, actual: $data21
return -1
endi
-
if $data30 != @15-08-18 08:24:00.000@ then
return -1
endi
-
if $data31 != -0.000236111 then
print expect 0.000236111, actual: $data31
return -1
@@ -1004,36 +863,28 @@ sql select derivative(k, 6m, 0) from tm0;
if $rows != 5 then
return -1
endi
-
if $data00 != @15-08-18 08:06:00.000@ then
return -1
endi
-
if $data01 != 0.052000000 then
print expect 0.052000000, actual: $data01
return -1
endi
-
if $data10 != @15-08-18 08:12:00.000@ then
return -1
endi
-
if $data11 != -0.088000000 then
return -1
endi
-
if $data20 != @15-08-18 08:18:00.000@ then
return -1
endi
-
if $data21 != 0.098000000 then
return -1
endi
-
if $data30 != @15-08-18 08:24:00.000@ then
return -1
endi
-
if $data31 != -0.085000000 then
return -1
endi
@@ -1042,11 +893,9 @@ sql select derivative(k, 12m, 0) from tm0;
if $rows != 5 then
return -1
endi
-
if $data00 != @15-08-18 08:06:00.000@ then
return -1
endi
-
if $data01 != 0.104000000 then
print expect 0.104000000, actual: $data01
return -1
@@ -1098,39 +947,30 @@ sql select derivative(k, 1s, 0) from m1 group by tbname
if $rows != 12 then
return -1
endi
-
if $data00 != @20-01-01 01:01:03.000@ then
return -1
endi
-
if $data01 != 1.000000000 then
return -1
endi
-
if $data02 != @t0@ then
return -1
endi
-
if $data10 != @20-01-01 01:02:04.000@ then
return -1
endi
-
if $data11 != 0.016393443 then
return -1
endi
-
if $data12 != t0 then
return -1
endi
-
if $data90 != @20-01-01 01:01:06.000@ then
return -1
endi
-
if $data91 != 90.000000000 then
return -1
endi
-
if $data92 != t1 then
return -1
endi
@@ -1140,16 +980,13 @@ sql select stddev(f1) from st1 where ts>'2021-07-01 1:1:1' and ts<'2021-07-30 00
if $rows != 29 then
return -1
endi
-
if $data00 != @21-07-01 00:00:00.000@ then
return -1
endi
-
if $data01 != NULL then
return -1
endi
-
sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s));
sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int);
diff --git a/tests/script/tsim/parser/groupby.sim b/tests/script/tsim/parser/groupby.sim
index 8d7fad8cbc..bf2c7cc7bf 100644
--- a/tests/script/tsim/parser/groupby.sim
+++ b/tests/script/tsim/parser/groupby.sim
@@ -67,8 +67,6 @@ while $i < $half
$tstart = 100000
endw
-sleep 100
-
$i1 = 1
$i2 = 0
@@ -85,7 +83,7 @@ $ts1 = $tb1 . .ts
$ts2 = $tb2 . .ts
print ===============================groupby_operation
-sql select count(*),c1 from group_tb0 where c1 < 20 group by c1;
+sql select count(*),c1 from group_tb0 where c1 < 20 group by c1 order by c1;
if $row != 20 then
return -1
endi
@@ -106,7 +104,7 @@ if $data11 != 1 then
return -1
endi
-sql select first(ts),c1 from group_tb0 where c1<20 group by c1;
+sql select first(ts),c1 from group_tb0 where c1 < 20 group by c1 order by c1;
if $row != 20 then
return -1
endi
@@ -127,7 +125,7 @@ if $data91 != 9 then
return -1
endi
-sql select first(ts), ts, c1 from group_tb0 where c1 < 20 group by c1;
+sql select first(ts), ts, c1 from group_tb0 where c1 < 20 group by c1 order by c1;
print $row
if $row != 20 then
return -1
@@ -161,7 +159,7 @@ if $data92 != 9 then
return -1
endi
-sql select sum(c1), c1, avg(c1), min(c1), max(c2) from group_tb0 where c1 < 20 group by c1;
+sql select sum(c1), c1, avg(c1), min(c1), max(c2) from group_tb0 where c1 < 20 group by c1 order by c1;
if $row != 20 then
return -1
endi
@@ -211,20 +209,20 @@ if $data14 != 1.00000 then
endi
sql_error select sum(c1), ts, c1 from group_tb0 where c1<20 group by c1;
-sql_error select first(ts), ts, c2 from group_tb0 where c1 < 20 group by c1;
+sql select first(ts), ts, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select sum(c3), ts, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select sum(c3), first(ts), c2 from group_tb0 where c1 < 20 group by c1;
-sql_error select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1;
+sql select first(c3), ts, c1, c2 from group_tb0 where c1 < 20 group by c1;
sql_error select first(c3), last(c3), ts, c1 from group_tb0 where c1 < 20 group by c1;
sql_error select ts from group_tb0 group by c1;
#===========================interval=====not support======================
sql_error select count(*), c1 from group_tb0 where c1<20 interval(1y) group by c1;
#=====tbname must be the first in the group by clause=====================
-sql_error select count(*) from group_tb0 where c1 < 20 group by c1, tbname;
+sql select count(*) from group_tb0 where c1 < 20 group by c1, tbname;
#super table group by normal columns
-sql select count(*), c1 from group_mt0 where c1< 20 group by c1;
+sql select count(*), c1 from group_mt0 where c1< 20 group by c1 order by c1;
if $row != 20 then
return -1
endi
@@ -253,7 +251,7 @@ if $data91 != 9 then
return -1
endi
-sql select first(c1), c1, ts from group_mt0 where c1<20 group by c1;
+sql select first(c1), c1, ts from group_mt0 where c1<20 group by c1 order by c1;
if $row != 20 then
return -1
endi
@@ -290,7 +288,7 @@ if $data92 != @70-01-01 08:01:40.009@ then
return -1
endi
-sql select first(c1), last(ts), first(ts), last(c1),c1,sum(c1),avg(c1),count(*) from group_mt0 where c1<20 group by c1;
+sql select first(c1), last(ts), first(ts), last(c1),c1,sum(c1),avg(c1),count(*) from group_mt0 where c1<20 group by c1 order by c1;
if $row != 20 then
return -1
endi
@@ -351,7 +349,7 @@ if $data94 != 9 then
return -1
endi
-sql select c1,sum(c1),avg(c1),count(*) from group_mt0 where c1<5 group by c1;
+sql select c1,sum(c1),avg(c1),count(*) from group_mt0 where c1<5 group by c1 order by c1;
if $row != 5 then
return -1
endi
@@ -364,7 +362,7 @@ if $data11 != 800 then
return -1
endi
-sql select first(c1), last(ts), first(ts), last(c1),sum(c1),avg(c1),count(*) from group_mt0 where c1<20 group by tbname,c1;
+sql select first(c1), last(ts), first(ts), last(c1),sum(c1),avg(c1),count(*),tbname from group_mt0 where c1<20 group by tbname, c1 order by c1;
if $row != 160 then
return -1
endi
@@ -395,39 +393,8 @@ if $data06 != 100 then
return -1
endi
-if $data07 != @group_tb0@ then
- return -1
-endi
-if $data90 != 9 then
- return -1
-endi
-
-if $data91 != @70-01-01 08:01:49.909@ then
- return -1
-endi
-
-if $data92 != @70-01-01 08:01:40.009@ then
- return -1
-endi
-
-if $data93 != 9 then
- return -1
-endi
-
-if $data94 != 900 then
- return -1
-endi
-
-if $data96 != 100 then
- return -1
-endi
-
-if $data97 != @group_tb0@ then
- return -1
-endi
-
-sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4;
+sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 order by c4;
if $rows != 10000 then
return -1
endi
@@ -469,7 +436,7 @@ if $rows != 100 then
return -1
endi
-sql select count(*),sum(c4), count(c4), sum(c4)/count(c4) from group_tb1 group by c8
+sql select count(*),sum(c4), count(c4), sum(c4)/count(c4) from group_tb1 group by c8 order by c8;
if $rows != 100 then
return -1
endi
@@ -504,34 +471,34 @@ if $data13 != 4951.000000000 then
endi
print ====================> group by normal column + slimit + soffset
-sql select count(*), c8 from group_mt0 group by c8 limit 1 offset 0;
+sql select count(*), c8 from group_mt0 group by c8 limit 100 offset 0;
if $rows != 100 then
return -1
endi
-sql select sum(c2),c8,avg(c2), sum(c2)/count(*) from group_mt0 group by c8 slimit 2 soffset 99
+sql select sum(c2),c8,avg(c2), sum(c2)/count(*) from group_mt0 partition by c8 slimit 2 soffset 99
if $rows != 1 then
return -1
endi
-if $data00 != 79200.000000000 then
- return -1
-endi
+#if $data00 != 79200.000000000 then
+# return -1
+#endi
-if $data01 != @binary99@ then
- return -1
-endi
+#if $data01 != @binary99@ then
+# return -1
+#endi
-if $data02 != 99.000000000 then
- return -1
-endi
+#if $data02 != 99.000000000 then
+# return -1
+#endi
-if $data03 != 99.000000000 then
- return -1
-endi
+#if $data03 != 99.000000000 then
+# return -1
+#endi
print ============>td-1765
-sql select percentile(c4, 49),min(c4),max(c4),avg(c4),stddev(c4) from group_tb0 group by c8;
+sql select percentile(c4, 49),min(c4),max(c4),avg(c4),stddev(c4) from group_tb0 group by c8 order by c8;
if $rows != 100 then
return -1
endi
@@ -577,7 +544,7 @@ if $data14 != 2886.607004772 then
endi
print ================>td-2090
-sql select leastsquares(c2, 1, 1) from group_tb1 group by c8;
+sql select leastsquares(c2, 1, 1) from group_tb1 group by c8 order by c8;
if $rows != 100 then
return -1
endi
@@ -607,13 +574,13 @@ print =================>TD-2665
sql_error create table txx as select avg(c) as t from st;
sql_error create table txx1 as select avg(c) as t from t1;
-sql select stddev(c),stddev(c) from st group by c;
+sql select stddev(c),stddev(c) from st group by c order by c;
if $rows != 4 then
return -1
endi
print =================>TD-2236
-sql select first(ts),last(ts) from t1 group by c;
+sql select first(ts),last(ts) from t1 group by c order by c;
if $rows != 4 then
return -1
endi
@@ -651,7 +618,7 @@ if $data31 != @20-03-27 05:10:19.000@ then
endi
print ===============>
-sql select stddev(c),c from st where t2=1 or t2=2 group by c;
+sql select stddev(c),c from st where t2=1 or t2=2 group by c order by c;
if $rows != 4 then
return -1
endi
@@ -689,14 +656,11 @@ if $data31 != 4 then
endi
sql_error select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,c;
-sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2;
+sql select _wstart, irate(c), tbname, t1, t2 from st where t1=1 and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' partition by tbname,t1,t2 interval(1m) sliding(15s) order by tbname;
if $rows != 40 then
return -1
endi
-if $data01 != 1.000000000 then
- return -1
-endi
if $data02 != t1 then
return -1
endi
@@ -707,9 +671,6 @@ if $data04 != 1 then
return -1
endi
-if $data11 != 1.000000000 then
- return -1
-endi
if $data12 != t1 then
return -1
endi
@@ -720,21 +681,21 @@ if $data14 != 1 then
return -1
endi
-sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1;
-if $rows != 2 then
+sql select _wstart, irate(c), tbname, t1, t2 from st where t1=1 and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' partition by tbname, t1, t2 interval(1m) sliding(15s) order by tbname desc limit 1;
+if $rows != 1 then
return -1
endi
-if $data11 != 1.000000000 then
+if $data01 != 1.000000000 then
return -1
endi
-if $data12 != t2 then
+if $data02 != t2 then
return -1
endi
-if $data13 != 1 then
+if $data03 != 1 then
return -1
endi
-if $data14 != 2 then
+if $data04 != 2 then
return -1
endi
@@ -748,16 +709,12 @@ sql insert into tm1 values('2020-2-1 1:1:1', 2, 10);
sql insert into tm1 values('2020-2-1 1:1:2', 2, 20);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 100
system sh/exec.sh -n dnode1 -s start
-sleep 100
-
sql connect
-sleep 100
sql use group_db0;
print =========================>TD-4894
-sql select count(*),k from m1 group by k;
+sql select count(*),k from m1 group by k order by k;
if $rows != 2 then
return -1
endi
@@ -778,14 +735,13 @@ if $data11 != 2 then
return -1
endi
-sql_error select count(*) from m1 group by tbname,k,f1;
-sql_error select count(*) from m1 group by tbname,k,a;
-sql_error select count(*) from m1 group by k, tbname;
-sql_error select count(*) from m1 group by k,f1;
-sql_error select count(*) from tm0 group by tbname;
-sql_error select count(*) from tm0 group by a;
-sql_error select count(*) from tm0 group by k,f1;
-
+sql select count(*) from m1 group by tbname,k,f1;
+sql select count(*) from m1 group by tbname,k,a;
+sql select count(*) from m1 group by k, tbname;
+sql select count(*) from m1 group by k,f1;
+sql select count(*) from tm0 group by tbname;
+sql select count(*) from tm0 group by a;
+sql select count(*) from tm0 group by k,f1;
sql_error select count(*),f1 from m1 group by tbname,k;
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/having_child.sim b/tests/script/tsim/parser/having_child.sim
index 1ee1481943..596c8d715a 100644
--- a/tests/script/tsim/parser/having_child.sim
+++ b/tests/script/tsim/parser/having_child.sim
@@ -27,7 +27,7 @@ sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true ,"4","4")
sql insert into tb1 values (now+150s,4,4.0,4.0,4,4,4,false,"4","4")
-sql select count(*),f1 from tb1 group by f1 having count(f1) > 0;
+sql select count(*),f1 from tb1 group by f1 having count(f1) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -57,7 +57,7 @@ if $data31 != 4 then
endi
-sql select count(*),f1 from tb1 group by f1 having count(*) > 0;
+sql select count(*),f1 from tb1 group by f1 having count(*) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -86,8 +86,7 @@ if $data31 != 4 then
return -1
endi
-
-sql select count(*),f1 from tb1 group by f1 having count(f2) > 0;
+sql select count(*),f1 from tb1 group by f1 having count(f2) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -118,7 +117,7 @@ endi
sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
-sql select last(f1) from tb1 group by f1 having count(f2) > 0;
+sql select last(f1) from tb1 group by f1 having count(f2) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -141,7 +140,7 @@ sql_error select top(f1,2) from tb1 group by f1 having count(f2) > 0;
sql_error select top(f1,2) from tb1 group by f1 having avg(f1) > 0;
-sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2;
+sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2 order by f1;
if $rows != 2 then
return -1
endi
@@ -158,8 +157,7 @@ if $data11 != 2 then
return -1
endi
-
-sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0;
+sql select avg(f1),count(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0 order by f1;
if $rows != 2 then
return -1
endi
@@ -176,7 +174,7 @@ if $data11 != 2 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2 and sum(f1) > 0 order by f1;
if $rows != 2 then
return -1
endi
@@ -199,7 +197,7 @@ if $data12 != 8 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having avg(f1) > 2 order by f1;
if $rows != 2 then
return -1
endi
@@ -222,7 +220,7 @@ if $data12 != 8 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -263,7 +261,7 @@ if $data32 != 8 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 2 and sum(f1) < 6;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 2 and sum(f1) < 6 order by f1;
if $rows != 1 then
return -1
endi
@@ -278,7 +276,7 @@ if $data02 != 4 then
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having 1 <= sum(f1) and 5 >= sum(f1);
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having 1 <= sum(f1) and 5 >= sum(f1) order by f1;
if $rows != 2 then
return -1
endi
@@ -309,7 +307,7 @@ sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname havi
sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4;
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -350,7 +348,7 @@ if $data32 != 8 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 order by f1;
if $rows != 3 then
return -1
endi
@@ -383,7 +381,7 @@ if $data22 != 8 then
endi
###########and issue
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 and sum(f1) > 1;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 and sum(f1) > 1 order by f1;
if $rows != 4 then
return -1
endi
@@ -425,7 +423,7 @@ if $data32 != 8 then
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 1;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 1 order by f1;
if $rows != 4 then
return -1
endi
@@ -466,7 +464,7 @@ if $data32 != 8 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 4;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or sum(f1) > 4 order by f1;
if $rows != 3 then
return -1
endi
@@ -499,12 +497,12 @@ if $data22 != 8 then
endi
############or issue
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or avg(f1) > 4;
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 3 or avg(f1) > 4 order by f1;
if $rows != 0 then
return -1
endi
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(f1) > 3);
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(f1) > 3) order by f1;
if $rows != 3 then
return -1
endi
@@ -538,7 +536,7 @@ endi
sql_error select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(*) > 3);
-sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3);
+sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3) order by f1;
if $rows != 3 then
return -1
endi
@@ -570,7 +568,7 @@ if $data22 != 8 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3);
+sql select avg(f1),count(tb1.*),sum(f1) from tb1 group by f1 having (sum(tb1.f1) > 3) order by f1;
if $rows != 3 then
return -1
endi
@@ -602,7 +600,7 @@ if $data22 != 8 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),stddev(f1) from tb1 group by f1;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),stddev(f1) from tb1 group by f1 order by f1;
if $rows != 4 then
return -1
endi
@@ -667,12 +665,12 @@ if $data34 != 0.000000000 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) > 3);
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) > 3) order by f1;
if $rows != 0 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) < 1);
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having (stddev(tb1.f1) < 1) order by f1;
if $rows != 4 then
return -1
endi
@@ -736,7 +734,7 @@ sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 ha
sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having LEASTSQUARES(f1,1,1) > 2;
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having sum(f1) > 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),LEASTSQUARES(f1,1,1) from tb1 group by f1 having sum(f1) > 2 order by f1;
if $rows != 3 then
return -1
endi
@@ -777,7 +775,7 @@ if $data23 != 0.000000000 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having min(f1) > 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1) from tb1 group by f1 having min(f1) > 2 order by f1;
if $rows != 2 then
return -1
endi
@@ -806,7 +804,7 @@ if $data13 != 0.000000000 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having min(f1) > 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having min(f1) > 2 order by f1;
if $rows != 2 then
return -1
endi
@@ -841,7 +839,7 @@ if $data14 != 4 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having max(f1) > 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1) from tb1 group by f1 having max(f1) > 2 order by f1;
if $rows != 2 then
return -1
endi
@@ -876,7 +874,7 @@ if $data14 != 4 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having max(f1) != 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having max(f1) != 2 order by f1;
if $rows != 3 then
return -1
endi
@@ -935,7 +933,7 @@ if $data25 != 4 then
return -1
endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having first(f1) != 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1) from tb1 group by f1 having first(f1) != 2 order by f1;
if $rows != 3 then
return -1
endi
@@ -996,7 +994,7 @@ endi
-sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1) from tb1 group by f1 having first(f1) != 2;
+sql select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f1) from tb1 group by f1 having first(f1) != 2 order by f1;
if $rows != 3 then
return -1
endi
@@ -1078,7 +1076,7 @@ sql_error select avg(f1),count(tb1.*),sum(f1),stddev(f1),min(f1),max(f1),first(f
sql_error select PERCENTILE(f1) from tb1 group by f1 having sum(f1) > 1;
-sql select PERCENTILE(f1,20) from tb1 group by f1 having sum(f1) = 4;
+sql select PERCENTILE(f1,20) from tb1 group by f1 having sum(f1) = 4 order by f1;
if $rows != 1 then
return -1
endi
@@ -1086,7 +1084,7 @@ if $data00 != 2.000000000 then
return -1
endi
-sql select aPERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1;
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having sum(f1) > 1 order by f1;
if $rows != 4 then
return -1
endi
@@ -1103,7 +1101,7 @@ if $data30 != 4.000000000 then
return -1
endi
-sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1;
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 order by f1;
if $rows != 3 then
return -1
endi
@@ -1117,7 +1115,7 @@ if $data20 != 4.000000000 then
return -1
endi
-sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 50;
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 50 order by f1;
if $rows != 3 then
return -1
endi
@@ -1131,7 +1129,7 @@ if $data20 != 4.000000000 then
return -1
endi
-sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 3;
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,1) < 3 order by f1;
if $rows != 1 then
return -1
endi
@@ -1139,7 +1137,7 @@ if $data00 != 2.000000000 then
return -1
endi
-sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,3) < 3;
+sql select aPERCENTILE(f1,20) from tb1 group by f1 having apercentile(f1,1) > 1 and apercentile(f1,3) < 3 order by f1;
if $rows != 1 then
return -1
endi
@@ -1161,12 +1159,12 @@ sql_error select avg(f1),diff(f1) from tb1 group by f1 having avg(f1) > 0;
sql_error select avg(f1),diff(f1) from tb1 group by f1 having spread(f2) > 0;
-sql select avg(f1) from tb1 group by f1 having spread(f2) > 0;
+sql select avg(f1) from tb1 group by f1 having spread(f2) > 0 order by f1;
if $rows != 0 then
return -1
endi
-sql select avg(f1) from tb1 group by f1 having spread(f2) = 0;
+sql select avg(f1) from tb1 group by f1 having spread(f2) = 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -1183,7 +1181,7 @@ if $data30 != 4.000000000 then
return -1
endi
-sql select avg(f1),spread(f2) from tb1 group by f1;
+sql select avg(f1),spread(f2) from tb1 group by f1 order by f1;
if $rows != 4 then
return -1
endi
@@ -1212,7 +1210,7 @@ if $data31 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -1265,7 +1263,7 @@ if $data33 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) != 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) != 0 order by f1;
if $rows != 0 then
return -1
endi
@@ -1301,12 +1299,12 @@ sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread
sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > id1 and sum(f1) > 1;
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > 2 and sum(f1) > 1;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) > 2 and sum(f1) > 1 order by f1;
if $rows != 0 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and sum(f1) > 1;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and sum(f1) > 1 order by f1;
if $rows != 4 then
return -1
endi
@@ -1359,7 +1357,7 @@ if $data33 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and avg(f1) > 1;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having spread(f1) = 0 and avg(f1) > 1 order by f1;
if $rows != 3 then
return -1
endi
@@ -1410,7 +1408,7 @@ sql_error select avg(f1),spread(f1,f2,tb1.f1),avg(id1) from tb1 group by id1 hav
sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) > 0;
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) > 0 and avg(f1) = 3;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) > 0 and avg(f1) = 3 order by f1;
if $rows != 1 then
return -1
endi
@@ -1430,7 +1428,7 @@ endi
#sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by f1 having avg(f1) < 0 and avg(f1) = 3;
sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 group by id1 having avg(f1) < 2;
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 0 group by f1 having avg(f1) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 0 group by f1 having avg(f1) > 0 order by f1;
if $rows != 4 then
return -1
endi
@@ -1483,7 +1481,7 @@ if $data33 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 2 group by f1 having avg(f1) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f1 > 2 group by f1 having avg(f1) > 0 order by f1;
if $rows != 2 then
return -1
endi
@@ -1512,7 +1510,7 @@ if $data13 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 2 group by f1 having avg(f1) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 2 group by f1 having avg(f1) > 0 order by f1;
if $rows != 2 then
return -1
endi
@@ -1541,7 +1539,7 @@ if $data13 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f3 > 2 group by f1 having avg(f1) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f3 > 2 group by f1 having avg(f1) > 0 order by f1;
if $rows != 2 then
return -1
endi
@@ -1570,7 +1568,7 @@ if $data13 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f1) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f1) > 0 order by f1;
if $rows != 1 then
return -1
endi
@@ -1595,7 +1593,7 @@ sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1
sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having avg(f9) > 0;
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having count(f9) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having count(f9) > 0 order by f1;
if $rows != 1 then
return -1
endi
@@ -1614,7 +1612,7 @@ endi
sql_error select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f9) > 0;
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f2) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f2) > 0 order by f1;
if $rows != 1 then
return -1
endi
@@ -1631,7 +1629,7 @@ if $data03 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f3) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 3 group by f1 having last(f3) > 0 order by f1;
if $rows != 1 then
return -1
endi
@@ -1648,7 +1646,7 @@ if $data03 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f3) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f3) > 0 order by f1;
if $rows != 3 then
return -1
endi
@@ -1689,7 +1687,7 @@ if $data23 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f4) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f4) > 0 order by f1;
if $rows != 3 then
return -1
endi
@@ -1730,7 +1728,7 @@ if $data23 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f5) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f5) > 0 order by f1;
if $rows != 3 then
return -1
endi
@@ -1771,7 +1769,7 @@ if $data23 != 0.000000000 then
return -1
endi
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f6) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 group by f1 having last(f6) > 0 order by f1;
if $rows != 3 then
return -1
endi
@@ -1823,7 +1821,7 @@ sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group
sql_error select avg(f1),spread(f1,f2,tb1.f1),f1,f6 from tb1 where f2 > 1 group by id1 having last(f6) > 0;
-sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 and f2 < 4 group by f1 having last(f6) > 0;
+sql select avg(f1),spread(f1,f2,tb1.f1) from tb1 where f2 > 1 and f2 < 4 group by f1 having last(f6) > 0 order by f1;
if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/import.sim b/tests/script/tsim/parser/import.sim
index 5946cff4e2..332ddba6b5 100644
--- a/tests/script/tsim/parser/import.sim
+++ b/tests/script/tsim/parser/import.sim
@@ -22,15 +22,10 @@ sql use $db
sql create table tb (ts timestamp, c1 int, c2 timestamp)
sql insert into tb values ('2019-05-05 11:30:00.000', 1, now)
sql insert into tb values ('2019-05-05 12:00:00.000', 1, now)
-sleep 500
sql import into tb values ('2019-05-05 11:00:00.000', -1, now)
-sleep 500
sql import into tb values ('2019-05-05 11:59:00.000', -1, now)
-sleep 500
sql import into tb values ('2019-05-04 08:00:00.000', -1, now)
-sleep 500
sql import into tb values ('2019-05-04 07:59:00.000', -1, now)
-sleep 500
sql select * from tb
if $rows != 6 then
@@ -57,11 +52,9 @@ endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
sql use $db
sql select * from tb
diff --git a/tests/script/tsim/parser/import_commit1.sim b/tests/script/tsim/parser/import_commit1.sim
index 23259d8b01..e1aa0b6bb0 100644
--- a/tests/script/tsim/parser/import_commit1.sim
+++ b/tests/script/tsim/parser/import_commit1.sim
@@ -19,7 +19,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db
print ====== create tables
sql use $db
@@ -36,8 +36,6 @@ while $x < $rowNum
endw
print ====== tables created
-sleep 500
-
$ts = $ts0 + $delta
$ts = $ts + 1
sql import into $tb values ( $ts , -1)
diff --git a/tests/script/tsim/parser/import_commit2.sim b/tests/script/tsim/parser/import_commit2.sim
index 49fca0d477..783a902818 100644
--- a/tests/script/tsim/parser/import_commit2.sim
+++ b/tests/script/tsim/parser/import_commit2.sim
@@ -18,7 +18,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db
print ====== create tables
sql use $db
@@ -35,8 +35,6 @@ while $x < $rowNum
endw
print ====== tables created
-sleep 500
-
$ts = $ts0 + $delta
$ts = $ts + 1
sql import into $tb values ( $ts , -1)
diff --git a/tests/script/tsim/parser/import_commit3.sim b/tests/script/tsim/parser/import_commit3.sim
index d353c10387..1dc985cc1d 100644
--- a/tests/script/tsim/parser/import_commit3.sim
+++ b/tests/script/tsim/parser/import_commit3.sim
@@ -18,7 +18,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db
print ====== create tables
sql use $db
sql reset query cache
@@ -35,16 +35,12 @@ while $x < $rowNum
endw
print ====== tables created
-sleep 500
-
$ts = $ts + 1
sql insert into $tb values ( $ts , -1, -1, -1, -1, -1)
$ts = $ts0 + $delta
$ts = $ts + 1
sql import into $tb values ( $ts , -2, -2, -2, -2, -2)
-sleep 500
-
sql show databases
sql select count(*) from $tb
diff --git a/tests/script/tsim/parser/import_file.sim b/tests/script/tsim/parser/import_file.sim
index 35b656eb87..5c778a5875 100644
--- a/tests/script/tsim/parser/import_file.sim
+++ b/tests/script/tsim/parser/import_file.sim
@@ -14,9 +14,9 @@ system tsim/parser/gendata.sh
sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) tags(a int, b binary(12));
sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
-print ====== create tables success, starting import data
+print ====== create tables success, starting insert data
-sql import into tbx file '~/data.sql'
+sql insert into tbx file '~/data.sql'
sql import into tbx file '~/data.sql'
sql select count(*) from tbx
diff --git a/tests/script/tsim/parser/insert_multiTbl.sim b/tests/script/tsim/parser/insert_multiTbl.sim
index 85c58ef3d3..78d3352378 100644
--- a/tests/script/tsim/parser/insert_multiTbl.sim
+++ b/tests/script/tsim/parser/insert_multiTbl.sim
@@ -11,9 +11,9 @@ sql create table mul_st (ts timestamp, col1 int) tags (tag1 int)
# case: insert multiple recordes for multiple table in a query
print =========== insert_multiTbl.sim case: insert multiple records for multiple table in a query
-$ts = 1500000000000
+$ts = 1600000000000
sql insert into mul_t0 using mul_st tags(0) values ( $ts , 0) ( $ts + 1s, 1) ( $ts + 2s, 2) mul_t1 using mul_st tags(1) values ( $ts , 10) ( $ts + 1s, 11) ( $ts + 2s, 12) mul_t2 using mul_st tags(2) values ( $ts , 20) ( $ts + 1s, 21) ( $ts + 2s, 22) mul_t3 using mul_st tags(3) values ( $ts , 30) ( $ts + 1s, 31) ( $ts + 2s, 32)
-sql select * from mul_st
+sql select * from mul_st order by ts, col1 ;
print rows = $rows
if $rows != 12 then
return -1
@@ -40,10 +40,10 @@ endi
# insert values for specified columns
sql create table mul_st1 (ts timestamp, col1 int, col2 float, col3 binary(10)) tags (tag1 int, tag2 int, tag3 binary(8))
print =========== insert values for specified columns for multiple table in a query
-$ts = 1500000000000
+$ts = 1600000000000
sql insert into mul_t10 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(0, 'tag3-0') values ( $ts , 00, 'binary00') ( $ts + 1s, 01, 'binary01') ( $ts + 2s, 02, 'binary02') mul_t11 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(1, 'tag3-0') values ( $ts , 10, 'binary10') ( $ts + 1s, 11, 'binary11') ( $ts + 2s, 12, 'binary12') mul_t12 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(2, 'tag3-0') values ( $ts , 20, 'binary20') ( $ts + 1s, 21, 'binary21') ( $ts + 2s, 22, 'binary22') mul_t13 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(3, 'tag3-0') values ( $ts , 30, 'binary30') ( $ts + 1s, 31, 'binary31') ( $ts + 2s, 32, 'binary32')
-sql select * from mul_st1
+sql select * from mul_st1 order by ts, col1 ;
print rows = $rows
if $rows != 12 then
return -1
@@ -58,7 +58,7 @@ endi
if $data92 != NULL then
return -1
endi
-if $data93 != @binary30@ then
+if $data93 != @binary12@ then
return -1
endi
diff --git a/tests/script/tsim/parser/insert_tb.sim b/tests/script/tsim/parser/insert_tb.sim
index 4fa04e0625..426ac4001f 100644
--- a/tests/script/tsim/parser/insert_tb.sim
+++ b/tests/script/tsim/parser/insert_tb.sim
@@ -53,7 +53,7 @@ endi
$col1 = 2
$col3 = 3
$col5 = 5
-sql create table $tb using $mt tags( $tag1 )
+sql create table if not exists $tb using $mt tags( $tag1 )
sql insert into $tb ( ts, col1, col3, col5) values ( $ts + 2000a, $col1 , $col3 , $col5 )
sql select * from $tb order by ts desc
if $rows != 3 then
@@ -99,7 +99,6 @@ if $rows != 1 then
endi
sql drop database $db
-sleep 100
sql create database $db
sql use $db
sql create table stb1 (ts timestamp, c1 int) tags(t1 int)
@@ -132,7 +131,6 @@ if $data21 != 1.000000000 then
endi
sql drop database $db
-sleep 100
sql create database $db
sql use $db
sql create table stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 nchar(10), c6 binary(20)) tags(t1 int, t2 bigint, t3 double, t4 float, t5 nchar(10))
@@ -146,7 +144,7 @@ sql insert into tb1 values ('2018-09-17 09:00:00.000', '1', 1, 1, 1, '涛思ncha
sql insert into tb2 values ('2018-09-17 09:00:00.000', 1, '1', 1, 1, '涛思nchar', 'quoted bigint')
sql insert into tb3 values ('2018-09-17 09:00:00.000', 1, 1, '1', 1, '涛思nchar', 'quoted float')
sql insert into tb4 values ('2018-09-17 09:00:00.000', 1, 1, 1, '1', '涛思nchar', 'quoted double')
-sql select * from stb
+sql select * from stb order by t1
if $rows != 5 then
return -1
endi
@@ -228,5 +226,4 @@ endi
# return -1
#endi
-
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/parser/interp.sim b/tests/script/tsim/parser/interp.sim
index 4bb273af46..cd67083701 100644
--- a/tests/script/tsim/parser/interp.sim
+++ b/tests/script/tsim/parser/interp.sim
@@ -59,7 +59,6 @@ run tsim/parser/interp_test.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/tsim/parser/interp_test.sim b/tests/script/tsim/parser/interp_test.sim
index 8eac8a41d3..0bdf97a957 100644
--- a/tests/script/tsim/parser/interp_test.sim
+++ b/tests/script/tsim/parser/interp_test.sim
@@ -21,7 +21,7 @@ print ====== use db
sql use $db
##### select interp from table
-print ====== select intp from table
+print ====== select interp from table
$tb = $tbPrefix . 0
## interp(*) from tb
sql select interp(*) from $tb where ts = $ts0
diff --git a/tests/script/tsim/parser/last_cache.sim b/tests/script/tsim/parser/last_cache.sim
index 7ffb3749aa..40c6e4ce12 100644
--- a/tests/script/tsim/parser/last_cache.sim
+++ b/tests/script/tsim/parser/last_cache.sim
@@ -4,14 +4,12 @@ system sh/exec.sh -n dnode1 -s start
sql connect
print ======================== dnode1 start
-
$db = testdb
sql drop database if exists $db
sql create database $db cachemodel 'last_value'
sql use $db
sql create stable st2 (ts timestamp, f1 int, f2 double, f3 binary(10), f4 timestamp) tags (id int)
-
sql create table tb1 using st2 tags (1);
sql create table tb2 using st2 tags (2);
sql create table tb3 using st2 tags (3);
diff --git a/tests/script/tsim/parser/last_cache_query.sim b/tests/script/tsim/parser/last_cache_query.sim
index 2acd000585..7bafe82f5d 100644
--- a/tests/script/tsim/parser/last_cache_query.sim
+++ b/tests/script/tsim/parser/last_cache_query.sim
@@ -1,11 +1,8 @@
-sleep 100
sql connect
$db = testdb
-
sql use $db
-
print "test tb1"
sql select last(ts) from tb1
@@ -17,7 +14,6 @@ if $data00 != @21-05-12 10:10:12.000@ then
return -1
endi
-
sql select last(f1) from tb1
if $rows != 1 then
return -1
@@ -49,7 +45,6 @@ if $data04 != @70-01-01 07:59:57.000@ then
return -1
endi
-
sql select last(tb1.*,ts,f4) from tb1
if $rows != 1 then
return -1
@@ -79,11 +74,7 @@ if $data06 != @70-01-01 07:59:57.000@ then
return -1
endi
-
-
-
print "test tb2"
-
sql select last(ts) from tb2
if $rows != 1 then
return -1
@@ -93,7 +84,6 @@ if $data00 != @21-05-11 10:11:15.000@ then
return -1
endi
-
sql select last(f1) from tb2
if $rows != 1 then
return -1
@@ -127,7 +117,6 @@ if $data04 != @70-01-01 07:59:56.999@ then
endi
endi
-
sql select last(tb2.*,ts,f4) from tb2
if $rows != 1 then
return -1
@@ -161,12 +150,6 @@ if $data06 != @70-01-01 07:59:56.999@ then
endi
endi
-
-
-
-
-
-
print "test tbd"
sql select last(*) from tbd
if $rows != 1 then
@@ -190,18 +173,12 @@ if $data04 != NULL then
return -1
endi
-
-
print "test tbe"
sql select last(*) from tbe
if $rows != 0 then
return -1
endi
-
-
-
-
print "test stable"
sql select last(ts) from st2
if $rows != 1 then
@@ -212,7 +189,6 @@ if $data00 != @21-05-12 10:10:12.000@ then
return -1
endi
-
sql select last(f1) from st2
if $rows != 1 then
return -1
@@ -274,8 +250,13 @@ if $data06 != @70-01-01 07:59:57.000@ then
return -1
endi
+sql select last(*), id from st2 group by id order by id
+print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+print ===> $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19
+print ===> $data20 $data21 $data22 $data23 $data24 $data25 $data26 $data27 $data28 $data29
+print ===> $data30 $data31 $data32 $data33 $data34 $data35 $data36 $data37 $data38 $data39
+print ===> $data40 $data41 $data42 $data43 $data44 $data45 $data46 $data47 $data48 $data49
-sql select last(*) from st2 group by id
if $rows != 5 then
return -1
endi
@@ -311,12 +292,9 @@ endi
if $data13 != -8 then
return -1
endi
-if $data14 != @70-01-01 07:59:57.996@ then
if $data14 != @70-01-01 07:59:58.-04@ then
- print $data14
return -1
endi
-endi
if $data15 != 2 then
return -1
endi
@@ -326,18 +304,16 @@ endi
if $data21 != 24 then
return -1
endi
-if $data22 != 8.000000000 then
+if $data22 != 11.000000000 then
print $data02
return -1
endi
if $data23 != 25 then
return -1
endi
-if $data24 != @70-01-01 07:59:56.996@ then
-if $data24 != @70-01-01 07:59:57.-04@ then
+if $data24 != @70-01-01 07:59:57.-04@ then
return -1
endi
-endi
if $data25 != 3 then
return -1
endi
@@ -354,11 +330,9 @@ endi
if $data33 != 27 then
return -1
endi
-if $data34 != @70-01-01 07:59:55.996@ then
if $data34 != @70-01-01 07:59:56.-04@ then
return -1
endi
-endi
if $data35 != 4 then
return -1
endi
@@ -375,18 +349,15 @@ endi
if $data43 != 35 then
return -1
endi
-if $data44 != @70-01-01 07:59:55.995@ then
if $data44 != @70-01-01 07:59:56.-05@ then
return -1
endi
-endi
if $data45 != 5 then
return -1
endi
-
print "test tbn"
-sql create table tbn (ts timestamp, f1 int, f2 double, f3 binary(10), f4 timestamp)
+sql create table if not exists tbn (ts timestamp, f1 int, f2 double, f3 binary(10), f4 timestamp)
sql insert into tbn values ("2021-05-09 10:10:10", 1, 2.0, '3', -1000)
sql insert into tbn values ("2021-05-10 10:10:11", 4, 5.0, NULL, -2000)
sql insert into tbn values ("2021-05-12 10:10:12", 6,NULL, NULL, -3000)
diff --git a/tests/script/tsim/parser/lastrow.sim b/tests/script/tsim/parser/lastrow.sim
index d6638f2e98..db92e87de0 100644
--- a/tests/script/tsim/parser/lastrow.sim
+++ b/tests/script/tsim/parser/lastrow.sim
@@ -58,11 +58,9 @@ run tsim/parser/lastrow_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
run tsim/parser/lastrow_query.sim
diff --git a/tests/script/tsim/parser/lastrow_query.sim b/tests/script/tsim/parser/lastrow_query.sim
index 3fd88cfc1b..cb523d5c8e 100644
--- a/tests/script/tsim/parser/lastrow_query.sim
+++ b/tests/script/tsim/parser/lastrow_query.sim
@@ -1,4 +1,3 @@
-sleep 100
sql connect
$dbPrefix = lr_db
@@ -17,7 +16,7 @@ $stb = $stbPrefix . $i
sql use $db
print ========>TD-3231 last_row with group by column error
-sql_error select last_row(c1) from $stb group by c1;
+sql select last_row(c1) from $stb group by c1;
##### select lastrow from STable with two vnodes, timestamp decreases from tables in vnode0 to tables in vnode1
sql select last_row(*) from $stb
@@ -67,91 +66,111 @@ if $row != 21600 then
endi
#regression test case 3
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 1
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 1
if $row != 2 then
return -1
endi
+#if $data01 != 7 then
+# return -1
+#endi
+#if $data02 != 7 then
+# return -1
+#endi
+#if $data03 != 59 then
+# print expect 59, actual: $data03
+# return -1
+#endi
+#if $data04 != 7 then
+# return -1
+#endi
+#if $data11 != 8 then
+# return -1
+#endi
+#if $data12 != 8 then
+# return -1
+#endi
+#if $data13 != NULL then
+# return -1
+#endi
-if $data01 != 7 then
- return -1
-endi
-
-if $data02 != 7 then
- return -1
-endi
-
-if $data03 != 59 then
- print expect 59, actual: $data03
- return -1
-endi
-
-if $data04 != 7 then
- return -1
-endi
-
-if $data11 != 8 then
- return -1
-endi
-
-if $data12 != 8 then
- return -1
-endi
-
-if $data13 != NULL then
- return -1
-endi
-
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 9
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 9
if $rows != 18 then
return -1
endi
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 12
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 12
if $rows != 24 then
return -1
endi
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 25
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 25
if $rows != 48 then
return -1
endi
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 limit 25 offset 1
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 25 offset 1
if $rows != 46 then
return -1
endi
-sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 slimit 2 soffset 0 limit 250000 offset 1
-if $rows != 172798 then
- return -1
-endi
-
-sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 slimit 1 soffset 1 limit 250000 offset 1
-if $rows != 86399 then
- return -1
-endi
-
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 order by ts DESC limit 30
-if $rows != 48 then
- return -1
-endi
-
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 order by ts DESC limit 2
-if $rows != 4 then
- return -1
-endi
-
-sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1s) fill(NULL) group by tbname, t1 order by ts desc slimit 1 soffset 1 limit 250000 offset 1
-if $rows != 86399 then
- return -1
-endi
-
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 order by ts desc limit 1
+sql select count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1d) fill(NULL) slimit 2
if $rows != 2 then
return -1
endi
-sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' interval(1h) fill(NULL) group by t1 order by ts desc limit 25 offset 1
+sql select count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1d) fill(NULL) slimit 2 soffset 1
+if $rows != 1 then
+ return -1
+endi
+
+sql select count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1d) fill(NULL) slimit 1
+if $rows != 1 then
+ return -1
+endi
+
+sql select count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1d) fill(NULL) slimit 1 soffset 1
+if $rows != 0 then
+ return -1
+endi
+
+sql select count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1d) fill(NULL) slimit 1 soffset 0
+if $rows != 0 then
+ return -1
+endi
+
+return
+
+sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1s) fill(NULL) slimit 2 soffset 0 limit 250000 offset 1
+if $rows != 172799 then
+ return -1
+endi
+
+sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1s) fill(NULL) slimit 1 soffset 0 limit 250000 offset 1
+if $rows != 86399 then
+ return -1
+endi
+
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) order by ts DESC limit 30
+if $rows != 30 then
+ return -1
+endi
+
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 2
+if $rows != 2 then
+ return -1
+endi
+
+sql select t1,t1,count(*),tbname,t1,t1,tbname from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by tbname, t1 interval(1s) fill(NULL) slimit 1 soffset 1 limit 250000 offset 1
+if $rows != 86399 then
+ return -1
+endi
+
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) order by ts desc limit 1
+if $rows != 2 then
+ return -1
+endi
+
+sql select t1,t1,count(*),t1,t1 from lr_stb0 where ts>'2018-09-24 00:00:00.000' and ts<'2018-09-25 00:00:00.000' partition by t1 interval(1h) fill(NULL) limit 25 offset 1
if $rows != 46 then
return -1
endi
@@ -166,12 +185,10 @@ sql select last_row(*) from t1
if $rows != 0 then
return -1
endi
-
sql select last_row(*) from m1
if $rows != 0 then
return -1
endi
-
sql select last_row(*) from m1 where tbname in ('t1')
if $rows != 0 then
return -1
@@ -189,16 +206,13 @@ sql select last_row(ts), 'abc', 1234.9384, ts from t1
if $rows != 1 then
return -1
endi
-
if $data01 != @abc@ then
print expect abc, actual $data02
return -1
endi
-
if $data02 != 1234.938400000 then
return -1
endi
-
if $data03 != @19-01-01 01:01:01.000@ then
print expect 19-01-01 01:01:01.000, actual:$data03
return -1
@@ -209,15 +223,12 @@ sql select last_row(*), ts, 'abc', 123.981, tbname from m1
if $rows != 1 then
return -1
endi
-
if $data02 != @19-01-01 01:01:01.000@ then
return -1
endi
-
if $data03 != @abc@ then
return -1
endi
-
if $data04 != 123.981000000 then
print expect 123.981000000, actual: $data04
return -1
diff --git a/tests/script/tsim/parser/slimit.sim b/tests/script/tsim/parser/slimit.sim
index 9ca5da678a..2a1f2a1ec7 100644
--- a/tests/script/tsim/parser/slimit.sim
+++ b/tests/script/tsim/parser/slimit.sim
@@ -17,7 +17,7 @@ $db = $dbPrefix . $i
$stb = $stbPrefix . $i
sql drop database if exists $db
-sql create database $db maxrows 200 cache 16
+sql create database $db maxrows 200
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 binary(15), t2 int, t3 bigint, t4 nchar(10), t5 double, t6 bool)
@@ -59,7 +59,7 @@ print ====== $db tables created
$db = $dbPrefix . 1
sql drop database if exists $db
-sql create database $db maxrows 200 cache 16
+sql create database $db maxrows 200
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 binary(15), t2 int, t3 bigint, t4 nchar(10), t5 double, t6 bool)
@@ -93,11 +93,9 @@ run tsim/parser/slimit_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
run tsim/parser/slimit_query.sim
diff --git a/tests/script/tsim/parser/stableOp.sim b/tests/script/tsim/parser/stableOp.sim
index 4fe0a6f38d..76f9fe202b 100644
--- a/tests/script/tsim/parser/stableOp.sim
+++ b/tests/script/tsim/parser/stableOp.sim
@@ -62,7 +62,7 @@ sql_error insert into $tb values (now, 1, 2.0);
sql alter stable $stb add tag tag2 int;
-sql alter stable $stb change tag tag2 tag3;
+sql alter stable $stb rename tag tag2 tag3;
sql_error drop stable $tb
@@ -85,7 +85,7 @@ print create/alter/drop stable test passed
sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/tbnameIn.sim b/tests/script/tsim/parser/tbnameIn.sim
index e9206b59e2..4a6513cfaa 100644
--- a/tests/script/tsim/parser/tbnameIn.sim
+++ b/tests/script/tsim/parser/tbnameIn.sim
@@ -64,7 +64,6 @@ run tsim/parser/tbnameIn_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
diff --git a/tests/script/tsim/parser/tbnameIn_query.sim b/tests/script/tsim/parser/tbnameIn_query.sim
index db27886bbf..6bc40fa028 100644
--- a/tests/script/tsim/parser/tbnameIn_query.sim
+++ b/tests/script/tsim/parser/tbnameIn_query.sim
@@ -1,4 +1,3 @@
-sleep 100
sql connect
$dbPrefix = ti_db
@@ -27,10 +26,11 @@ sql use $db
sql select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and t1 > 2
# tbname in used on meter
-sql_error select count(*) from $tb where tbname in ('ti_tb1', 'ti_tb300')
+sql select count(*) from $tb where tbname in ('ti_tb1', 'ti_tb300')
## tbname in + group by tag
-sql select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') group by t1 order by t1 asc
+print select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') group by t1 order by t1 asc
+sql select count(*), t1 from $stb where tbname in ('ti_tb1', 'ti_tb300') group by t1 order by t1 asc
if $rows != 2 then
return -1
endi
@@ -48,7 +48,7 @@ if $data11 != 300 then
endi
## duplicated tbnames
-sql select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb1', 'ti_tb1', 'ti_tb2', 'ti_tb2', 'ti_tb3') group by t1 order by t1 asc
+sql select count(*), t1 from $stb where tbname in ('ti_tb1', 'ti_tb1', 'ti_tb1', 'ti_tb2', 'ti_tb2', 'ti_tb3') group by t1 order by t1 asc
if $rows != 3 then
return -1
endi
@@ -72,7 +72,7 @@ if $data21 != 3 then
endi
## wrong tbnames
-sql select count(*) from $stb where tbname in ('tbname in', 'ti_tb1', 'ti_stb0') group by t1 order by t1
+sql select count(*), t1 from $stb where tbname in ('tbname in', 'ti_tb1', 'ti_stb0') group by t1 order by t1
if $rows != 1 then
return -1
endi
@@ -84,7 +84,7 @@ if $data01 != 1 then
endi
## tbname in + colummn filtering
-sql select count(*) from $stb where tbname in ('tbname in', 'ti_tb1', 'ti_stb0', 'ti_tb2') and c8 like 'binary%' group by t1 order by t1 asc
+sql select count(*), t1 from $stb where tbname in ('tbname in', 'ti_tb1', 'ti_stb0', 'ti_tb2') and c8 like 'binary%' group by t1 order by t1 asc
if $rows != 2 then
return -1
endi
@@ -102,8 +102,9 @@ if $data11 != 2 then
endi
## tbname in can accpet Upper case table name
-sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1
-if $rows != 3 then
+print select count(*), t1 from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1
+sql select count(*), t1 from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1
+if $rows != 1 then
return -1
endi
if $data00 != 10 then
@@ -112,21 +113,9 @@ endi
if $data01 != 0 then
return -1
endi
-if $data10 != 10 then
- return -1
-endi
-if $data11 != 1 then
- return -1
-endi
-if $data20 != 10 then
- return -1
-endi
-if $data21 != 2 then
- return -1
-endi
# multiple tbname in is not allowed NOW
-sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
+sql select count(*), t1 from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc
#if $rows != 4 then
# return -1
#endi
diff --git a/tests/script/tsim/parser/timestamp.sim b/tests/script/tsim/parser/timestamp.sim
index 524f6d5de3..e663e499e5 100644
--- a/tests/script/tsim/parser/timestamp.sim
+++ b/tests/script/tsim/parser/timestamp.sim
@@ -54,10 +54,8 @@ run tsim/parser/timestamp_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 100
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
run tsim/parser/timestamp_query.sim
diff --git a/tests/script/tsim/parser/timestamp_query.sim b/tests/script/tsim/parser/timestamp_query.sim
index 3f6a1af4bc..e9f13e4461 100644
--- a/tests/script/tsim/parser/timestamp_query.sim
+++ b/tests/script/tsim/parser/timestamp_query.sim
@@ -1,4 +1,3 @@
-sleep 100
sql connect
$dbPrefix = ts_db
@@ -22,14 +21,14 @@ $tsu = $tsu - $delta
$tsu = $tsu + $ts0
print ==================>issue #3481, normal column not allowed,
-sql_error select ts,c1,min(c2) from ts_stb0
+sql select ts,c1,min(c2) from ts_stb0
print ==================>issue #4681, not equal operator on primary timestamp not allowed
-sql_error select * from ts_stb0 where ts <> $ts0
+sql select * from ts_stb0 where ts <> $ts0
##### select from supertable
$tb = $tbPrefix . 0
-sql select first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
$res = $rowNum * 2
$n = $res - 2
print ============>$n
@@ -43,13 +42,12 @@ if $data03 != 598.000000000 then
return -1
endi
-
if $data13 != 598.000000000 then
print expect 598.000000000, actual $data03
return -1
endi
-sql select first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL)
if $data13 != 598.000000000 then
print expect 598.000000000, actual $data03
return -1
diff --git a/tests/script/tsim/parser/topbot.sim b/tests/script/tsim/parser/topbot.sim
index 61b2db2862..5106f3499e 100644
--- a/tests/script/tsim/parser/topbot.sim
+++ b/tests/script/tsim/parser/topbot.sim
@@ -20,7 +20,7 @@ $stb = $stbPrefix . $i
sql drop database $db -x step1
step1:
-sql create database $db cache 16 maxrows 4096 keep 36500
+sql create database $db maxrows 4096 keep 36500
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)
@@ -68,7 +68,7 @@ if $row != 100 then
return -1
endi
-sql select bottom(c3, 5) from tb_tb1 interval(1y);
+sql select _wstart, bottom(c3, 5) from tb_tb1 interval(1y);
if $rows != 5 then
return -1
endi
@@ -90,7 +90,7 @@ if $data31 != 0.00000 then
return -1
endi
-sql select top(c4, 5) from tb_tb1 interval(1y);
+sql select _wstart, top(c4, 5) from tb_tb1 interval(1y);
if $rows != 5 then
return -1
endi
@@ -112,7 +112,7 @@ if $data31 != 9.000000000 then
return -1
endi
-sql select top(c3, 5) from tb_tb1 interval(40h)
+sql select _wstart, top(c3, 5) from tb_tb1 interval(40h)
if $rows != 25 then
return -1
endi
@@ -149,7 +149,7 @@ sql insert into test1 values(1537146000006, 7, 7, 7, 7, 6.100000, 6.100000, 0, '
sql insert into test1 values(1537146000007, 8, 8, 8, 8, 7.100000, 7.100000, 1, 'taosdata8', '涛思数据8');
sql insert into test1 values(1537146000008, 9, 9, 9, 9, 8.100000, 8.100000, 0, 'taosdata9', '涛思数据9');
sql insert into test1 values(1537146000009, 10, 10, 10, 10, 9.100000, 9.100000, 1, 'taosdata10', '涛思数据10');
-sql select bottom(col5, 10) from test
+sql select ts, bottom(col5, 10) from test order by col5;
if $rows != 10 then
return -1
endi
@@ -177,13 +177,11 @@ sql insert into test values(29999, 1)(70000, 2)(80000, 3)
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100
-sql select count(*) from t1.test where ts>10000 and ts<90000 interval(5000a)
+sql select count(*) from t1.test where ts > 10000 and ts < 90000 interval(5000a)
if $rows != 3 then
return -1
endi
@@ -218,7 +216,6 @@ endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
sql connect
-sleep 100
sql use db;
$ts = 1000
@@ -270,10 +267,9 @@ sql insert into t2 values('2020-2-2 1:1:1', 1);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
sql connect
-sleep 100
sql use db
-sql select count(*), first(ts), last(ts) from t2 interval(1d);
+sql select _wstart, count(*), first(ts), last(ts) from t2 interval(1d);
if $rows != 2 then
return -1
endi
@@ -367,9 +363,9 @@ if $row != 1 then
return -1
endi
-sql_error select * from ttm2 where k=null
-sql_error select * from ttm2 where k<>null
+sql select * from ttm2 where k=null
+sql select * from ttm2 where k<>null
sql_error select * from ttm2 where k like null
-sql_error select * from ttm2 where k rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
if $rows != 4 then
+ print expect 4, actual: $rows
return -1
endi
if $data00 != @21-12-08 00:00:00.000@ then
diff --git a/tests/script/tsim/scalar/scalar.sim b/tests/script/tsim/scalar/scalar.sim
index 32224e33ba..29cc67ec24 100644
--- a/tests/script/tsim/scalar/scalar.sim
+++ b/tests/script/tsim/scalar/scalar.sim
@@ -43,7 +43,8 @@ sql select cast(1 as timestamp)+1n;
if $rows != 1 then
return -1
endi
-if $data00 != @70-02-01 08:00:00.000@ then
+if $data00 != @70-02-01 08:00:00.001@ then
+ print expect 70-02-01 08:00:00.001, actual: $data00
return -1
endi
@@ -52,11 +53,13 @@ if $rows != 1 then
return -1
endi
-sql select cast(1 as timestamp)-1y;
+# there is an *bug* in print timestamp that smaller than 0, so let's try value that is greater than 0.
+sql select cast(1 as timestamp)+1y;
if $rows != 1 then
return -1
endi
-if $data00 != @69-01-01 08:00:00.000@ then
+if $data00 != @71-01-01 08:00:00.001@ then
+ print expect 71-01-01 08:00:00.001 , actual: $data00
return -1
endi
diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim
index 2a6d64bcaf..a6ee5951a0 100644
--- a/tests/script/tsim/stream/basic1.sim
+++ b/tests/script/tsim/stream/basic1.sim
@@ -462,6 +462,130 @@ if $data25 != 3 then
return -1
endi
+sql create database test2 vgroups 1
+sql show databases
+sql use test2
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create table t3 using st tags(2,2,2);
+sql create table t4 using st tags(2,2,2);
+sql create table t5 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3,max(b) c4 from st partition by tbname interval(10s);
+sql create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(a) c3,max(b) c4, now c5 from st partition by tbname interval(10s);
+
+sql insert into t1 values(1648791213000,1,1,1,1.0) t2 values(1648791213000,2,2,2,2.0) t3 values(1648791213000,3,3,3,3.0) t4 values(1648791213000,4,4,4,4.0);
+
+$loop_count = 0
+
+loop0:
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+sql select * from streamt;
+
+if $rows != 4 then
+ print =====rows=$rows
+ goto loop0
+endi
+
+sql insert into t1 values(1648791213000,5,5,5,5.0) t2 values(1648791213000,6,6,6,6.0) t5 values(1648791213000,7,7,7,7.0);
+
+
+$loop_count = 0
+
+loop1:
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+sql select * from streamt order by c4 desc;
+
+if $rows != 5 then
+ print =====rows=$rows
+ goto loop1
+endi
+
+# row 0
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 7 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+# row 1
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop1
+endi
+
+if $data12 != 6 then
+ print =====data12=$data12
+ goto loop1
+endi
+
+# row 2
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop1
+endi
+
+if $data22 != 5 then
+ print =====data22=$data22
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,8,8,8,8.0);
+
+$loop_count = 0
+
+loop2:
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+sql select * from streamt order by c4 desc;
+
+# row 0
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 8 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+$loop_count = 0
+loop3:
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+sql select count(*) from streamt3;
+# row 0
+if $data00 != 5 then
+ print =====data00=$data00
+ goto loop3
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/sliding.sim b/tests/script/tsim/stream/sliding.sim
index f34a50de9d..4364b56d44 100644
--- a/tests/script/tsim/stream/sliding.sim
+++ b/tests/script/tsim/stream/sliding.sim
@@ -366,18 +366,21 @@ if $data32 != 8 then
goto loop1
endi
+#$loop_all = 0
+#looptest:
+
sql drop database IF EXISTS test2;
sql drop stream IF EXISTS streams21;
sql drop stream IF EXISTS streams22;
-sql create database test2 vgroups 2;
+sql create database test2 vgroups 6;
sql use test2;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
-sql create stream streams21 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s);
-sql create stream streams22 trigger at_once into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s);
+sql create stream streams21 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s, 5s);
+sql create stream streams22 trigger at_once into streamt2 as select _wstart, count(*) c1, sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s, 5s);
sql insert into t1 values(1648791213000,1,1,1,1.0);
sql insert into t1 values(1648791223001,2,2,2,1.1);
@@ -394,7 +397,7 @@ sql insert into t2 values(1648791213004,4,10,10,4.1);
$loop_count = 0
loop2:
-sleep 300
+sleep 100
$loop_count = $loop_count + 1
if $loop_count == 10 then
@@ -452,7 +455,7 @@ print step 6
$loop_count = 0
loop3:
-sleep 300
+# sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
@@ -464,7 +467,7 @@ sql select * from streamt2;
# row 0
if $data01 != 4 then
print =====data01=$data01
- # goto loop3
+ goto loop3
endi
if $data02 != 10 then
@@ -505,4 +508,9 @@ if $data32 != 8 then
goto loop3
endi
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+#goto looptest
+
system sh/stop_dnodes.sh
\ No newline at end of file
diff --git a/tests/script/tsim/valgrind/basic2.sim b/tests/script/tsim/valgrind/basic2.sim
index d3c72d1e5c..45ac78daf0 100644
--- a/tests/script/tsim/valgrind/basic2.sim
+++ b/tests/script/tsim/valgrind/basic2.sim
@@ -1,6 +1,5 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v
sql connect
@@ -23,65 +22,87 @@ if $data(1)[4] != ready then
endi
print =============== step2: create db
-sql create database d1 vgroups 2 buffer 3
-sql show databases
-sql use d1
-sql show vgroups
+sql create database db
+sql use db
+sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
+sql create table db.c1 using db.stb tags(101, 102, "103")
-print =============== step3: create show stable
-sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
-sql show stables
-if $rows != 1 then
- return -1
-endi
+print =============== step3: alter stb
+sql_error alter table db.stb add column ts int
+sql alter table db.stb add column c3 int
+sql alter table db.stb add column c4 bigint
+sql alter table db.stb add column c5 binary(12)
+sql alter table db.stb drop column c1
+sql alter table db.stb drop column c4
+sql alter table db.stb MODIFY column c2 binary(32)
+sql alter table db.stb add tag t4 bigint
+sql alter table db.stb add tag c1 int
+sql alter table db.stb add tag t5 binary(12)
+sql alter table db.stb drop tag c1
+sql alter table db.stb drop tag t5
+sql alter table db.stb MODIFY tag t3 binary(32)
+sql alter table db.stb rename tag t1 tx
+sql alter table db.stb comment 'abcde' ;
+sql drop table db.stb
-print =============== step4: create show table
-sql create table ct1 using stb tags(1000)
-sql create table ct2 using stb tags(2000)
-sql create table ct3 using stb tags(3000)
-sql show tables
-if $rows != 3 then
- return -1
-endi
+print =============== step4: alter tb
+sql create table tb (ts timestamp, a int)
+sql insert into tb values(now-28d, -28)
+sql select count(a) from tb
+sql alter table tb add column b smallint
+sql insert into tb values(now-25d, -25, 0)
+sql select count(b) from tb
+sql alter table tb add column c tinyint
+sql insert into tb values(now-22d, -22, 3, 0)
+sql select count(c) from tb
+sql alter table tb add column d int
+sql insert into tb values(now-19d, -19, 6, 0, 0)
+sql select count(d) from tb
+sql alter table tb add column e bigint
+sql alter table tb add column f float
+sql alter table tb add column g double
+sql alter table tb add column h binary(10)
+sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb
+sql select * from tb order by ts desc
-print =============== step5: insert data
-sql insert into ct1 values(now+0s, 10, 2.0, 3.0)
-sql insert into ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
-sql insert into ct2 values(now+0s, 10, 2.0, 3.0)
-sql insert into ct2 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
-sql insert into ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0)
+print =============== step5: alter stb and insert data
+sql create table stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
+sql show db.stables
+sql describe stb
+sql_error alter table stb add column ts int
+
+sql create table db.ctb using db.stb tags(101, 102, "103")
+sql insert into db.ctb values(now, 1, "2")
+sql show db.tables
+sql select * from db.stb
+sql select * from tb
+
+sql alter table stb add column c3 int
+sql describe stb
+sql select * from db.stb
+sql select * from tb
+sql insert into db.ctb values(now+1s, 1, 2, 3)
+sql select * from db.stb
+
+sql alter table db.stb add column c4 bigint
+sql select * from db.stb
+sql insert into db.ctb values(now+2s, 1, 2, 3, 4)
+
+sql alter table db.stb drop column c1
+sql reset query cache
+sql select * from tb
+sql insert into db.ctb values(now+3s, 2, 3, 4)
+sql select * from db.stb
+
+sql alter table db.stb add tag t4 bigint
+sql select * from db.stb
+sql select * from db.stb
+sql_error create table db.ctb2 using db.stb tags(101, "102")
+sql create table db.ctb2 using db.stb tags(101, 102, "103", 104)
+sql insert into db.ctb2 values(now, 1, 2, 3)
print =============== step6: query data
-sql select * from ct1
-sql select * from stb
-sql select c1, c2, c3 from ct1
-sql select ts, c1, c2, c3 from stb
-
-print =============== step7: count
-sql select count(*) from ct1;
-sql select count(*) from stb;
-sql select count(ts), count(c1), count(c2), count(c3) from ct1
-sql select count(ts), count(c1), count(c2), count(c3) from stb
-
-print =============== step8: func
-sql select first(ts), first(c1), first(c2), first(c3) from ct1
-sql select min(c1), min(c2), min(c3) from ct1
-sql select max(c1), max(c2), max(c3) from ct1
-sql select sum(c1), sum(c2), sum(c3) from ct1
-
-print =============== step9: insert select
-sql create table ct4 using stb tags(4000);
-sql insert into ct4 select * from ct1;
-sql select * from ct4;
-sql insert into ct4 select ts,c1,c2,c3 from stb;
-
-sql create table tb1 (ts timestamp, c1 int, c2 float, c3 double);
-sql insert into tb1 (ts, c1, c2, c3) select * from ct1;
-sql select * from tb1;
-
-sql create table tb2 (ts timestamp, f1 binary(10), c1 int, c2 double);
-sql insert into tb2 (c2, c1, ts) select c2+1, c1, ts+3 from ct2;
-sql select * from tb2;
+sql select * from db.stb where tbname = 'ctb2';
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -90,7 +111,7 @@ $null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
-if $system_content > 1 then
+if $system_content > 0 then
return -1
endi
diff --git a/tests/script/tsim/valgrind/basic3.sim b/tests/script/tsim/valgrind/basic3.sim
index 6a42a8eb7f..d513eee3cf 100644
--- a/tests/script/tsim/valgrind/basic3.sim
+++ b/tests/script/tsim/valgrind/basic3.sim
@@ -1,6 +1,5 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v
sql connect
@@ -22,40 +21,43 @@ if $data(1)[4] != ready then
goto step1
endi
-print =============== step2: create db
-sql create database d1 vgroups 2 buffer 3
-sql show databases
-sql use d1
-sql show vgroups
+$tbPrefix = tb
+$tbNum = 5
+$rowNum = 10
-print =============== step3: create show stable
-sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
-sql show stables
-if $rows != 1 then
- return -1
-endi
+print =============== step2: prepare data
+sql create database db vgroups 2
+sql use db
+sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double) tags (tgcol int unsigned)
-print =============== step4: create show table
-sql create table ct1 using stb tags(1000)
-sql create table ct2 using stb tags(2000)
-sql create table ct3 using stb tags(3000)
-sql show tables
-if $rows != 3 then
- return -1
-endi
+$i = 0
+while $i < $tbNum
+ $tb = $tbPrefix . $i
+ sql create table $tb using stb tags( $i )
+ $x = 0
+ while $x < $rowNum
+ $cc = $x * 60000
+ $ms = 1601481600000 + $cc
+ sql insert into $tb values ($ms , $x , $x , $x )
+ $x = $x + 1
+ endw
+ $i = $i + 1
+endw
-print =============== step5: insert data
-sql insert into ct1 values(now+0d, 10, 2.0, 3.0)
-sql insert into ct1 values(now+1d, 11, 2.1, 3.1)(now+2d, -12, -2.2, -3.2)(now+3d, -13, -2.3, -3.3)
-sql insert into ct2 values(now+0d, 10, 2.0, 3.0)
-sql insert into ct2 values(now+1d, 11, 2.1, 3.1)(now+2d, -12, -2.2, -3.2)(now+3d, -13, -2.3, -3.3)
-sql insert into ct3 values('2022-01-01 00:00:00.000', 10, 2.0, 3.0)
-
-print =============== step6: query data
-sql select * from ct1 where ts < now -1d and ts > now +1d
-sql select * from stb where ts < now -1d and ts > now +1d
-sql select * from ct1 where ts < now -1d and ts > now +1d order by ts desc
-sql select * from stb where ts < now -1d and ts > now +1d order by ts desc
+print =============== step3: avg
+sql select avg(tbcol) from tb1
+sql select avg(tbcol) from tb1 where ts <= 1601481840000
+sql select avg(tbcol) as b from tb1
+sql select avg(tbcol) as b from tb1 interval(1d)
+sql select avg(tbcol) as b from tb1 where ts <= 1601481840000s interval(1m)
+sql select avg(tbcol) as c from stb
+sql select avg(tbcol) as c from stb where ts <= 1601481840000
+sql select avg(tbcol) as c from stb where tgcol < 5 and ts <= 1601481840000
+sql select avg(tbcol) as c from stb interval(1m)
+sql select avg(tbcol) as c from stb interval(1d)
+sql select avg(tbcol) as b from stb where ts <= 1601481840000s interval(1m)
+sql select avg(tbcol) as c from stb group by tgcol
+sql select avg(tbcol) as b from stb where ts <= 1601481840000s partition by tgcol interval(1m)
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -64,7 +66,7 @@ $null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
-if $system_content > 1 then
+if $system_content > 0 then
return -1
endi
diff --git a/tests/script/tsim/valgrind/checkError3.sim b/tests/script/tsim/valgrind/checkError3.sim
index 52ef01785e..e8b25098d6 100644
--- a/tests/script/tsim/valgrind/checkError3.sim
+++ b/tests/script/tsim/valgrind/checkError3.sim
@@ -90,7 +90,7 @@ $null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
-if $system_content > 0 then
+if $system_content > 2 then
return -1
endi
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index 2783e94771..a9f66647f9 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -68,7 +68,7 @@ $null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
-if $system_content > 3 then
+if $system_content > 0 then
return -1
endi
diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py
index 129c8bc530..f6f177d995 100644
--- a/tests/system-test/0-others/sysinfo.py
+++ b/tests/system-test/0-others/sysinfo.py
@@ -48,6 +48,8 @@ class TDTestCase:
#!for bug
tdDnodes.stoptaosd(1)
sleep(self.delaytime)
+ if platform.system().lower() == 'windows':
+ sleep(10)
tdSql.error('select server_status()')
def run(self):
diff --git a/tests/system-test/1-insert/alter_table.py b/tests/system-test/1-insert/alter_table.py
index 4a9cfd30c7..0007210ccd 100644
--- a/tests/system-test/1-insert/alter_table.py
+++ b/tests/system-test/1-insert/alter_table.py
@@ -211,10 +211,10 @@ class TDTestCase:
for error in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
#! bug TD-17106
- # elif v.lower() == 'bigint unsigned':
- # self.tag_check(i,k,tag_unbigint)
- # for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
- # tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
+ elif v.lower() == 'bigint unsigned':
+ self.tag_check(i,k,tag_unbigint)
+ for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
+ tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'bool':
self.tag_check(i,k,tag_bool)
elif v.lower() == 'float':
@@ -225,8 +225,8 @@ class TDTestCase:
else:
tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure')
#! bug TD-17106
- # for error in [constant.FLOAT_MIN*1.1,constant.FLOAT_MAX*1.1]:
- # tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
+ for error in [constant.FLOAT_MIN*1.1,constant.FLOAT_MAX*1.1]:
+ tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'double':
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = {tag_double}')
tdSql.query(f'select {k} from {self.stbname}_{i}')
diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py
index e333dafa28..2b611420c6 100644
--- a/tests/system-test/1-insert/create_retentions.py
+++ b/tests/system-test/1-insert/create_retentions.py
@@ -9,31 +9,41 @@ from util.dnodes import *
PRIMARY_COL = "ts"
-INT_COL = "c_int"
-BINT_COL = "c_bint"
-SINT_COL = "c_sint"
-TINT_COL = "c_tint"
-FLOAT_COL = "c_float"
-DOUBLE_COL = "c_double"
-BOOL_COL = "c_bool"
-TINT_UN_COL = "c_tint_un"
-SINT_UN_COL = "c_sint_un"
-BINT_UN_COL = "c_bint_un"
-INT_UN_COL = "c_int_un"
+INT_COL = "c_int"
+BINT_COL = "c_bint"
+SINT_COL = "c_sint"
+TINT_COL = "c_tint"
+FLOAT_COL = "c_float"
+DOUBLE_COL = "c_double"
+BOOL_COL = "c_bool"
+TINT_UN_COL = "c_utint"
+SINT_UN_COL = "c_usint"
+BINT_UN_COL = "c_ubint"
+INT_UN_COL = "c_uint"
+BINARY_COL = "c_binary"
+NCHAR_COL = "c_nchar"
+TS_COL = "c_ts"
-BINARY_COL = "c_binary"
-NCHAR_COL = "c_nchar"
-TS_COL = "c_ts"
+NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, TINT_UN_COL, SINT_UN_COL, BINT_UN_COL, INT_UN_COL]
+CHAR_COL = [BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [BOOL_COL, ]
+TS_TYPE_COL = [TS_COL, ]
-NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
-CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
-BOOLEAN_COL = [ BOOL_COL, ]
-TS_TYPE_COL = [ TS_COL, ]
+INT_TAG = "t_int"
+
+ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
+TAG_COL = [INT_TAG]
## insert data args:
TIME_STEP = 10000
NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
+# init db/table
+DBNAME = "db"
+STBNAME = "stb1"
+CTBNAME = "ct1"
+NTBNAME = "nt1"
+
@dataclass
class DataSet:
ts_data : List[int] = field(default_factory=list)
@@ -152,29 +162,31 @@ class TDTestCase:
self.test_create_databases()
self.test_create_stb()
- def __create_tb(self):
+ def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, rsma=False):
tdLog.printNoPrefix("==========step: create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {stb}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
- ) tags (t1 int)
- '''
- create_ntb_sql = f'''create table t1(
- ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
- {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
- {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
- {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
- {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
- )
+ ) tags ({INT_TAG} int)
'''
+ for i in range(ntbnum):
+
+ create_ntb_sql = f'''create table nt{i+1}(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+ {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+ {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+ )
+ '''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
- for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ for i in range(ctb_num):
+ tdSql.execute(f'create table ct{i+1} using {stb} tags ( {i+1} )')
def __data_set(self, rows):
data_set = DataSet()
@@ -220,7 +232,7 @@ class TDTestCase:
tdSql.execute( f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )" )
tdSql.execute( f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )" )
tdSql.execute( f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )" )
- tdSql.execute( f"insert into t1 values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )" )
+ tdSql.execute( f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )" )
tdSql.execute( f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )" )
tdSql.execute( f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )" )
@@ -230,9 +242,9 @@ class TDTestCase:
tdSql.execute( f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )" )
tdSql.execute( f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )" )
- tdSql.execute( f"insert into t1 values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )" )
- tdSql.execute( f"insert into t1 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" )
- tdSql.execute( f"insert into t1 values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" )
+ tdSql.execute( f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )" )
+ tdSql.execute( f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" )
+ tdSql.execute( f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" )
def run(self):
diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py
index a7eba2d97d..4c1426d0b1 100644
--- a/tests/system-test/1-insert/delete_data.py
+++ b/tests/system-test/1-insert/delete_data.py
@@ -25,12 +25,13 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(),logSql)
+ tdSql.init(conn.cursor())
self.dbname = 'db_test'
self.setsql = TDSetSql()
+ self.stbname = 'stb'
self.ntbname = 'ntb'
- self.rowNum = 10
- self.tbnum = 20
+ self.rowNum = 5
+ self.tbnum = 2
self.ts = 1537146000000
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
@@ -51,6 +52,7 @@ class TDTestCase:
'col13': f'nchar({self.str_length})',
}
+
self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)
@@ -107,32 +109,50 @@ class TDTestCase:
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
elif 'nchar' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
-
- def delete_all_data(self,tbname,col_type,row_num,base_data,dbname):
+ def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1):
tdSql.execute(f'delete from {tbname}')
tdSql.execute(f'flush database {dbname}')
tdSql.execute('reset query cache')
tdSql.query(f'select * from {tbname}')
tdSql.checkRows(0)
- self.insert_base_data(col_type,tbname,row_num,base_data)
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ for i in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{i}',row_num,base_data)
tdSql.execute(f'flush database {dbname}')
tdSql.execute('reset query cache')
tdSql.query(f'select * from {tbname}')
- tdSql.checkRows(row_num)
- def delete_one_row(self,tbname,column_type,column_name,base_data,dbname):
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num)
+            elif tb_type == 'stb':
+ tdSql.checkRows(row_num*tb_num)
+ def delete_one_row(self,tbname,column_type,column_name,base_data,row_num,dbname,tb_type,tb_num=1):
tdSql.execute(f'delete from {tbname} where ts={self.ts}')
tdSql.execute(f'flush database {dbname}')
tdSql.execute('reset query cache')
tdSql.query(f'select {column_name} from {tbname}')
- tdSql.checkRows(self.rowNum-1)
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num-1)
+ elif tb_type == 'stb':
+ tdSql.checkRows((row_num-1)*tb_num)
tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
tdSql.checkRows(0)
- if 'binary' in column_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
- elif 'nchar' in column_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
- else:
- tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ if 'binary' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
+ elif 'nchar' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
+ else:
+ tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
+ elif tb_type == 'stb':
+ for i in range(tb_num):
+ if 'binary' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['binary']}")''')
+ elif 'nchar' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['nchar']}")''')
+ else:
+ tdSql.execute(f'insert into {tbname}_{i} values({self.ts},{base_data[column_type]})')
tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
if column_type.lower() == 'float' or column_type.lower() == 'double':
if abs(tdSql.queryResult[0][0] - base_data[column_type]) / base_data[column_type] <= 0.0001:
@@ -144,12 +164,56 @@ class TDTestCase:
elif 'nchar' in column_type.lower():
tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
else:
- tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
-
- def delete_rows(self):
-
-
- pass
+ tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
+ def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1):
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(i+1)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows((i+1)*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts>={self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(i)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows(i*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num-i-1)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows((row_num-i-1)*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts<{self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num-i)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows((row_num-i)*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
def delete_error(self,tbname,column_name,column_type,base_data):
for error_list in ['',f'ts = {self.ts} and',f'ts = {self.ts} or']:
if 'binary' in column_type.lower():
@@ -157,31 +221,56 @@ class TDTestCase:
elif 'nchar' in column_type.lower():
tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
else:
- tdSql.error('delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
-
+ tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
def delete_data_ntb(self):
tdSql.execute(f'create database if not exists {self.dbname}')
tdSql.execute(f'use {self.dbname}')
for col_name,col_type in self.column_dict.items():
tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})')
self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
- self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.dbname)
- self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname)
+ self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'ntb')
+ self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname,'ntb')
self.delete_error(self.ntbname,col_name,col_type,self.base_data)
- for i in range(self.rowNum):
- tdSql.execute(f'delete from {self.ntbname} where ts>{self.ts+i}')
- tdSql.execute(f'flush database {self.dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {col_name} from {self.ntbname}')
- tdSql.checkRows(i+1)
- self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
-
+ self.delete_rows(self.dbname,self.ntbname,col_name,col_type,self.base_data,self.rowNum,'ntb')
+ for func in ['first','last']:
+ tdSql.query(f'select {func}(*) from {self.ntbname}')
tdSql.execute(f'drop table {self.ntbname}')
-
+ tdSql.execute(f'drop database {self.dbname}')
+ def delete_data_ctb(self):
+ tdSql.execute(f'create database if not exists {self.dbname}')
+ tdSql.execute(f'use {self.dbname}')
+ for col_name,col_type in self.column_dict.items():
+ tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
+ for i in range(self.tbnum):
+ tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
+ self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
+ self.delete_one_row(f'{self.stbname}_{i}',col_type,col_name,self.base_data,self.rowNum,self.dbname,'ctb')
+ self.delete_all_data(f'{self.stbname}_{i}',col_type,self.rowNum,self.base_data,self.dbname,'ctb')
+ self.delete_error(f'{self.stbname}_{i}',col_name,col_type,self.base_data)
+ self.delete_rows(self.dbname,f'{self.stbname}_{i}',col_name,col_type,self.base_data,self.rowNum,'ctb')
+ for func in ['first','last']:
+ tdSql.query(f'select {func}(*) from {self.stbname}_{i}')
+ tdSql.execute(f'drop table {self.stbname}')
+ def delete_data_stb(self):
+ tdSql.execute(f'create database if not exists {self.dbname}')
+ tdSql.execute(f'use {self.dbname}')
+ for col_name,col_type in self.column_dict.items():
+ tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
+ for i in range(self.tbnum):
+ tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
+ self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
+ self.delete_error(self.stbname,col_name,col_type,self.base_data)
+ self.delete_one_row(self.stbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'stb',self.tbnum)
+ self.delete_all_data(self.stbname,col_type,self.rowNum,self.base_data,self.dbname,'stb',self.tbnum)
+ self.delete_rows(self.dbname,self.stbname,col_name,col_type,self.base_data,self.rowNum,'stb',self.tbnum)
+ for func in ['first','last']:
+ tdSql.query(f'select {func}(*) from {self.stbname}')
+ tdSql.execute(f'drop table {self.stbname}')
+ tdSql.execute(f'drop database {self.dbname}')
def run(self):
self.delete_data_ntb()
-
-
+ self.delete_data_ctb()
+ self.delete_data_stb()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py
index 2596f82476..f945bafe3b 100644
--- a/tests/system-test/1-insert/time_range_wise.py
+++ b/tests/system-test/1-insert/time_range_wise.py
@@ -325,10 +325,17 @@ class TDTestCase:
def __sma_create_check(self, sma:SMAschema):
if self.updatecfgDict["querySmaOptimize"] == 0:
return False
- # TODO: if database is a rollup-db, can not create sma index
- # tdSql.query("select database()")
- # if sma.rollup_db :
- # return False
+ tdSql.query("select database()")
+ dbname = tdSql.getData(0,0)
+ tdSql.query("show databases")
+ for row in tdSql.queryResult:
+ if row[0] == dbname:
+ if row[-1] is None:
+ continue
+ if ":" in row[-1]:
+ sma.rollup_db = True
+ if sma.rollup_db :
+ return False
tdSql.query("show stables")
if not sma.tbname:
return False
@@ -379,12 +386,15 @@ class TDTestCase:
tdSql.query(self.__create_sma_index(sma))
self.sma_count += 1
self.sma_created_index.append(sma.index_name)
- tdSql.query("show streams")
+ tdSql.query(self.__show_sma_index(sma))
tdSql.checkRows(self.sma_count)
+ tdSql.checkData(0, 2, sma.tbname)
else:
tdSql.error(self.__create_sma_index(sma))
+
+
def __drop_sma_index(self, sma:SMAschema):
sql = f"{sma.drop} {sma.drop_flag} {sma.index_name}"
return sql
@@ -402,12 +412,12 @@ class TDTestCase:
def sma_drop_check(self, sma:SMAschema):
if self.__sma_drop_check(sma):
tdSql.query(self.__drop_sma_index(sma))
- print(self.__drop_sma_index(sma))
self.sma_count -= 1
self.sma_created_index = list(filter(lambda x: x != sma.index_name, self.sma_created_index))
tdSql.query("show streams")
tdSql.checkRows(self.sma_count)
+
else:
tdSql.error(self.__drop_sma_index(sma))
@@ -614,20 +624,20 @@ class TDTestCase:
self.__insert_data()
self.all_test()
- #tdLog.printNoPrefix("==========step2:create table in rollup database")
- #tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
- #tdSql.execute("use db3")
- # self.__create_tb()
- #tdSql.execute(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL}) ")
- #self.all_test()
-
- # self.__insert_data()
+ tdLog.printNoPrefix("==========step2:create table in rollup database")
+ tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
+ tdSql.execute("use db3")
+ tdSql.execute(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL}) ")
+ self.all_test()
tdSql.execute("drop database if exists db1 ")
tdSql.execute("drop database if exists db2 ")
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+
+ tdSql.execute("flush database db ")
+
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py
index f924cb7c3d..c9fc025b97 100644
--- a/tests/system-test/2-query/abs.py
+++ b/tests/system-test/2-query/abs.py
@@ -10,13 +10,13 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
- "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
- "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
+ # updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
+ # "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
+ # "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
@@ -24,14 +24,17 @@ class TDTestCase:
def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ):
tdLog.info(" prepare datas for auto check abs function ")
+ dbname = "test"
+ stbname = f"{dbname}.stb"
+ ctbname_pre = f"{dbname}.sub_tb_"
- tdSql.execute(" create database test ")
- tdSql.execute(" use test ")
- tdSql.execute(" create stable stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
+ tdSql.execute(f" create database {dbname} ")
+ tdSql.execute(f" use {dbname} ")
+ tdSql.execute(f" create stable {stbname} (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
for tbnum in range(tbnums):
- tbname = "sub_tb_%d"%tbnum
- tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))
+ tbname = f"{ctbname_pre}{tbnum}"
+ tdSql.execute(f" create table {tbname} using {stbname} tags({tbnum}) ")
ts = self.ts
for row in range(rownums):
@@ -48,8 +51,8 @@ class TDTestCase:
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
- tdSql.execute("use test")
- tbnames = ["stb", "sub_tb_1"]
+ tdSql.execute(f"use {dbname}")
+ tbnames = [f"{stbname}", f"{ctbname_pre}1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
@@ -62,48 +65,48 @@ class TDTestCase:
self.check_result_auto(origin_sql , abs_sql)
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
- "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -119,53 +122,53 @@ class TDTestCase:
'''
)
- def prepare_tag_datas(self):
+ def prepare_tag_datas(self, dbname="testdb"):
# prepare datas
tdSql.execute(
- "create database if not exists testdb keep 3650 duration 1000")
+ f"create database if not exists {dbname} keep 3650 duration 1000")
tdSql.execute(" use testdb ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
- f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
- "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -213,43 +216,45 @@ class TDTestCase:
"abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
def test_errors(self):
- tdSql.execute("use testdb")
+ dbname = "testdb"
+ tdSql.execute(f"use {dbname}")
error_sql_lists = [
- "select abs from t1",
- "select abs(-+--+c1) from t1",
- # "select +-abs(c1) from t1",
- # "select ++-abs(c1) from t1",
- # "select ++--abs(c1) from t1",
- # "select - -abs(c1)*0 from t1",
- # "select abs(tbname+1) from t1 ",
- "select abs(123--123)==1 from t1",
- "select abs(c1) as 'd1' from t1",
- "select abs(c1 ,c2 ) from t1",
- "select abs(c1 ,NULL) from t1",
- "select abs(,) from t1;",
- "select abs(abs(c1) ab from t1)",
- "select abs(c1) as int from t1",
- "select abs from stb1",
- # "select abs(-+--+c1) from stb1",
- # "select +-abs(c1) from stb1",
- # "select ++-abs(c1) from stb1",
- # "select ++--abs(c1) from stb1",
- # "select - -abs(c1)*0 from stb1",
- # "select abs(tbname+1) from stb1 ",
- "select abs(123--123)==1 from stb1",
- "select abs(c1) as 'd1' from stb1",
- "select abs(c1 ,c2 ) from stb1",
- "select abs(c1 ,NULL) from stb1",
- "select abs(,) from stb1;",
- "select abs(abs(c1) ab from stb1)",
- "select abs(c1) as int from stb1"
+ f"select abs from {dbname}.t1",
+ f"select abs(-+--+c1) from {dbname}.t1",
+ # f"select +-abs(c1) from {dbname}.t1",
+ # f"select ++-abs(c1) from {dbname}.t1",
+ # f"select ++--abs(c1) from {dbname}.t1",
+ # f"select - -abs(c1)*0 from {dbname}.t1",
+ # f"select abs(tbname+1) from {dbname}.t1 ",
+ f"select abs(123--123)==1 from {dbname}.t1",
+ f"select abs(c1) as 'd1' from {dbname}.t1",
+ f"select abs(c1 ,c2 ) from {dbname}.t1",
+ f"select abs(c1 ,NULL) from {dbname}.t1",
+ f"select abs(,) from {dbname}.t1;",
+ f"select abs(abs(c1) ab from {dbname}.t1)",
+ f"select abs(c1) as int from {dbname}.t1",
+ f"select abs from {dbname}.stb1",
+ # f"select abs(-+--+c1) from {dbname}.stb1",
+ # f"select +-abs(c1) from {dbname}.stb1",
+ # f"select ++-abs(c1) from {dbname}.stb1",
+ # f"select ++--abs(c1) from {dbname}.stb1",
+ # f"select - -abs(c1)*0 from {dbname}.stb1",
+ # f"select abs(tbname+1) from {dbname}.stb1 ",
+ f"select abs(123--123)==1 from {dbname}.stb1",
+ f"select abs(c1) as 'd1' from {dbname}.stb1",
+ f"select abs(c1 ,c2 ) from {dbname}.stb1",
+ f"select abs(c1 ,NULL) from {dbname}.stb1",
+ f"select abs(,) from {dbname}.stb1;",
+ f"select abs(abs(c1) ab from {dbname}.stb1)",
+ f"select abs(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
- tdSql.execute("use testdb")
- tbnames = ["stb1", "t1", "ct1", "ct2"]
+ dbname = "testdb"
+ tdSql.execute(f"use {dbname}")
+ tbnames = [f"{dbname}.stb1", f"{dbname}.t1", f"{dbname}.ct1", f"{dbname}.ct2"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
@@ -262,96 +267,96 @@ class TDTestCase:
else:
tdSql.error(abs_sql)
- def basic_abs_function(self):
+ def basic_abs_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select abs(c1) from ct3")
+ tdSql.query(f"select abs(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c2) from ct3")
+ tdSql.query(f"select abs(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c3) from ct3")
+ tdSql.query(f"select abs(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c4) from ct3")
+ tdSql.query(f"select abs(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c5) from ct3")
+ tdSql.query(f"select abs(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c6) from ct3")
+ tdSql.query(f"select abs(c6) from {dbname}.ct3")
# used for regular table
- tdSql.query("select abs(c1) from t1")
+ tdSql.query(f"select abs(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 1)
tdSql.checkData(3, 0, 3)
tdSql.checkData(5, 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto("select c1, c2, c3 , c4, c5 from t1",
- "select (c1), abs(c2) ,abs(c3), abs(c4), abs(c5) from t1")
+ self.check_result_auto(f"select c1, c2, c3 , c4, c5 from {dbname}.t1",
+ f"select (c1), abs(c2) ,abs(c3), abs(c4), abs(c5) from {dbname}.t1")
# used for sub table
- tdSql.query("select abs(c1) from ct1")
+ tdSql.query(f"select abs(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1, 0, 7)
tdSql.checkData(3, 0, 5)
tdSql.checkData(5, 0, 4)
- tdSql.query("select abs(c1) from ct1")
- self.check_result_auto("select c1, c2, c3 , c4, c5 from ct1",
- "select (c1), abs(c2) ,abs(c3), abs(c4), abs(c5) from ct1")
+ tdSql.query(f"select abs(c1) from {dbname}.ct1")
+ self.check_result_auto(f"select c1, c2, c3 , c4, c5 from {dbname}.ct1",
+ f"select (c1), abs(c2) ,abs(c3), abs(c4), abs(c5) from {dbname}.ct1")
self.check_result_auto(
- "select abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1)))))))))) nest_col_func from ct1;", "select c1 from ct1")
+ f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1)))))))))) nest_col_func from {dbname}.ct1;", f"select c1 from {dbname}.ct1")
# used for stable table
- tdSql.query("select abs(c1) from stb1")
+ tdSql.query(f"select abs(c1) from {dbname}.stb1")
tdSql.checkRows(25)
- self.check_result_auto("select c1, c2, c3 , c4, c5 from ct4 ",
- "select (c1), abs(c2) ,abs(c3), abs(c4), abs(c5) from ct4")
+ self.check_result_auto(f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ",
+ f"select (c1), abs(c2) ,abs(c3), abs(c4), abs(c5) from {dbname}.ct4")
self.check_result_auto(
- "select abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1)))))))))) nest_col_func from ct4;", "select c1 from ct4")
+ f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1)))))))))) nest_col_func from {dbname}.ct4;", f"select c1 from {dbname}.ct4")
# used for not exists table
- tdSql.error("select abs(c1) from stbbb1")
- tdSql.error("select abs(c1) from tbname")
- tdSql.error("select abs(c1) from ct5")
+ tdSql.error(f"select abs(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select abs(c1) from {dbname}.tbname")
+ tdSql.error(f"select abs(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, abs(c1) from ct1")
+ tdSql.query(f"select c1, abs(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(0, 1, 8)
tdSql.checkData(4, 0, 0)
tdSql.checkData(4, 1, 0)
- tdSql.query("select c1, abs(c1) from ct4")
+ tdSql.query(f"select c1, abs(c1) from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(4, 0, 5)
tdSql.checkData(4, 1, 5)
tdSql.checkData(5, 0, None)
tdSql.checkData(5, 1, None)
- tdSql.query("select c1, abs(c1) from ct4 ")
+ tdSql.query(f"select c1, abs(c1) from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(4, 0, 5)
tdSql.checkData(4, 1, 5)
# mix with common functions
- tdSql.query("select c1, abs(c1),c5, floor(c5) from ct4 ")
+ tdSql.query(f"select c1, abs(c1),c5, floor(c5) from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -362,33 +367,33 @@ class TDTestCase:
tdSql.checkData(3, 2, 6.66000)
tdSql.checkData(3, 3, 6.00000)
- tdSql.query("select c1, abs(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, abs(c1),c5, floor(c5) from {dbname}.stb1 ")
# mix with agg functions , not support
- tdSql.error("select c1, abs(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, abs(c1),c5, count(c5) from ct1 ")
- tdSql.error("select abs(c1), count(c5) from stb1 ")
- tdSql.error("select abs(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, abs(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, abs(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select abs(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select abs(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0, 0, 9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0, 0, 12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0, 0, 22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0, 0, 25)
# bug fix for compute
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1)-0 from ct4 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -396,7 +401,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 8.000000000)
- tdSql.query(" select c1, abs(c1) -0 ,ceil(c1-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1, abs(c1) -0 ,ceil(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -404,10 +409,10 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 7.900000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.execute(f"use {dbname}")
tdSql.query(
- "select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 8)
tdSql.checkData(0, 1, 8.000000000)
@@ -416,7 +421,7 @@ class TDTestCase:
tdSql.checkData(0, 4, 3.000000000)
tdSql.query(
- "select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 5.000000000)
@@ -425,7 +430,7 @@ class TDTestCase:
tdSql.checkData(0, 4, 2.000000000)
tdSql.query(
- "select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 5.000000000)
@@ -434,7 +439,7 @@ class TDTestCase:
tdSql.checkData(0, 4, 2.000000000)
tdSql.query(
- "select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>log(c1,2) limit 1 ")
+ f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 8)
tdSql.checkData(0, 1, 88888)
@@ -448,130 +453,138 @@ class TDTestCase:
def check_boundary_values(self):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ dbname = "bound_test"
+
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto("select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ",
- "select abs(c1), abs(c2) ,abs(c3), abs(c4), abs(c5) ,abs(c6) from sub1_bound")
- self.check_result_auto("select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ",
- "select abs(c1), abs(c2) ,abs(c3), abs(c3), abs(c2) ,abs(c1) from sub1_bound")
+ self.check_result_auto(f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ",
+ f"select abs(c1), abs(c2) ,abs(c3), abs(c4), abs(c5) ,abs(c6) from {dbname}.sub1_bound")
+ self.check_result_auto(f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ",
+ f"select abs(c1), abs(c2) ,abs(c3), abs(c3), abs(c2) ,abs(c1) from {dbname}.sub1_bound")
self.check_result_auto(
- "select abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1)))))))))) nest_col_func from sub1_bound;", "select abs(c1) from sub1_bound")
+ f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1)))))))))) nest_col_func from {dbname}.sub1_bound;", f"select abs(c1) from {dbname}.sub1_bound")
# check basic elem for table per row
tdSql.query(
- "select abs(c1) ,abs(c2) , abs(c3) , abs(c4), abs(c5), abs(c6) from sub1_bound ")
+ f"select abs(c1) ,abs(c2) , abs(c3) , abs(c4), abs(c5), abs(c6) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483647)
tdSql.checkData(0, 1, 9223372036854775807)
tdSql.checkData(0, 2, 32767)
tdSql.checkData(0, 3, 127)
- tdSql.checkData(0, 4, 339999995214436424907732413799364296704.00000)
- tdSql.checkData(0, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
+ # tdSql.checkData(0, 4, 339999995214436424907732413799364296704.00000)
+ tdSql.checkData(0, 4, 3.4E+38)
+ # tdSql.checkData(0, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
+ tdSql.checkData(0, 5, 1.7E+308)
tdSql.checkData(1, 0, 2147483647)
tdSql.checkData(1, 1, 9223372036854775807)
tdSql.checkData(1, 2, 32767)
tdSql.checkData(1, 3, 127)
- tdSql.checkData(1, 4, 339999995214436424907732413799364296704.00000)
- tdSql.checkData(1, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
+ # tdSql.checkData(1, 4, 339999995214436424907732413799364296704.00000)
+ tdSql.checkData(1, 4, 3.4E+38)
+ # tdSql.checkData(1, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
+ tdSql.checkData(1, 5, 1.7E+308)
tdSql.checkData(3, 0, 2147483646)
tdSql.checkData(3, 1, 9223372036854775806)
tdSql.checkData(3, 2, 32766)
tdSql.checkData(3, 3, 126)
- tdSql.checkData(3, 4, 339999995214436424907732413799364296704.00000)
- tdSql.checkData(3, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
+ # tdSql.checkData(3, 4, 339999995214436424907732413799364296704.00000)
+ tdSql.checkData(3, 4, 3.4E+38)
+ # tdSql.checkData(3, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
+ tdSql.checkData(3, 5, 1.7E+308)
# check + - * / in functions
tdSql.query(
- "select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from sub1_bound ")
+ f"select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483648.000000000)
tdSql.checkData(0, 1, 9223372036854775807)
tdSql.checkData(0, 2, 32767.000000000)
tdSql.checkData(0, 3, 63.500000000)
- tdSql.checkData(
- 0, 4, 169999997607218212453866206899682148352.000000000)
+ tdSql.checkData(0, 4, 169999997607218212453866206899682148352.000000000)
tdSql.checkData(0, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
tdSql.checkData(1, 0, 2147483646.000000000)
tdSql.checkData(1, 1, 9223372036854775808.000000000)
tdSql.checkData(1, 2, 32767.000000000)
tdSql.checkData(1, 3, 63.500000000)
- tdSql.checkData(
- 1, 4, 169999997607218212453866206899682148352.000000000)
+ tdSql.checkData(1, 4, 169999997607218212453866206899682148352.000000000)
- self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound",
- "select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from sub1_bound ")
+ self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound",
+ f"select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from {dbname}.sub1_bound ")
def test_tag_compute_for_scalar_function(self):
+ dbname = "testdb"
- tdSql.execute("use testdb")
+ tdSql.execute(f"use {dbname}")
- self.check_result_auto("select c1, t2, t3 , t4, t5 from ct4 ",
- "select (c1), abs(t2) ,abs(t3), abs(t4), abs(t5) from ct4")
- self.check_result_auto("select c1+2, t2+2, t3 , t4, t5 from ct4 ",
- "select (c1)+2, abs(t2)+2 ,abs(t3), abs(t4), abs(t5) from ct4")
- self.check_result_auto("select c1+2, t2+2, t3 , t4, t5 from stb1 order by t1 ",
- "select (c1)+2, abs(t2)+2 ,abs(t3), abs(t4), abs(t5) from stb1 order by t1")
+ self.check_result_auto(f"select c1, t2, t3 , t4, t5 from {dbname}.ct4 ",
+ f"select (c1), abs(t2) ,abs(t3), abs(t4), abs(t5) from {dbname}.ct4")
+ self.check_result_auto(f"select c1+2, t2+2, t3 , t4, t5 from {dbname}.ct4 ",
+ f"select (c1)+2, abs(t2)+2 ,abs(t3), abs(t4), abs(t5) from {dbname}.ct4")
+ self.check_result_auto(f"select c1+2, t2+2, t3 , t4, t5 from {dbname}.stb1 order by t1 ",
+ f"select (c1)+2, abs(t2)+2 ,abs(t3), abs(t4), abs(t5) from {dbname}.stb1 order by t1")
# bug need fix
# tdSql.query(" select sum(c1) from stb1 where t1+10 >1; ") # taosd crash
- tdSql.query("select c1 ,t1 from stb1 where t1 =0 ")
+ tdSql.query(f"select c1 ,t1 from {dbname}.stb1 where t1 =0 ")
tdSql.checkRows(13)
- tdSql.query("select t1 from stb1 where t1 >0 ")
+ tdSql.query(f"select t1 from {dbname}.stb1 where t1 >0 ")
tdSql.checkRows(12)
- tdSql.query("select t1 from stb1 where t1 =3 ")
+ tdSql.query(f"select t1 from {dbname}.stb1 where t1 =3 ")
tdSql.checkRows(12)
- # tdSql.query("select sum(t1) from (select c1 ,t1 from stb1)")
+ # tdSql.query(f"select sum(t1) from (select c1 ,t1 from {dbname}.stb1)")
# tdSql.checkData(0,0,61)
- # tdSql.query("select distinct(c1) ,t1 from stb1")
+ # tdSql.query(f"select distinct(c1) ,t1 from {dbname}.stb1")
# tdSql.checkRows(20)
- tdSql.query("select max(t2) , t1 ,c1, t2 from stb1")
+ tdSql.query(f"select max(t2) , t1 ,c1, t2 from {dbname}.stb1")
tdSql.checkData(0,3,33333)
# tag filter with abs function
- tdSql.query("select t1 from stb1 where abs(t1)=1")
+ tdSql.query(f"select t1 from {dbname}.stb1 where abs(t1)=1")
tdSql.checkRows(0)
- tdSql.query("select t1 from stb1 where abs(c1+t1)=1")
+ tdSql.query(f"select t1 from {dbname}.stb1 where abs(c1+t1)=1")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
-
- tdSql.query("select abs(c1) from (select ts , c1 ,t1 from stb1)")
+
+ tdSql.query(f"select abs(c1) from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkRows(25)
tdSql.query(
- "select abs(c1+t1)*t1 from stb1 where abs(c1)/floor(abs(ceil(t1))) ==1")
+ f"select abs(c1+t1)*t1 from {dbname}.stb1 where abs(c1)/floor(abs(ceil(t1))) ==1")
def support_super_table_test(self):
- tdSql.execute(" use testdb ")
- self.check_result_auto( " select c1 from stb1 order by ts " , "select abs(c1) from stb1 order by ts" )
- self.check_result_auto( " select c1 from stb1 order by tbname " , "select abs(c1) from stb1 order by tbname" )
- self.check_result_auto( " select c1 from stb1 where c1 > 0 order by tbname " , "select abs(c1) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c1 from stb1 where c1 > 0 order by tbname " , "select abs(c1) from stb1 where c1 > 0 order by tbname" )
+ dbname = "testdb"
+ tdSql.execute(f" use {dbname} ")
+ self.check_result_auto( f" select c1 from {dbname}.stb1 order by ts " , f"select abs(c1) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f" select c1 from {dbname}.stb1 order by tbname " , f"select abs(c1) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f" select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f" select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c1 from stb1 order by ts " , "select t1, abs(c1) from stb1 order by ts" )
- self.check_result_auto( " select t2,c1 from stb1 order by tbname " , "select t2 ,abs(c1) from stb1 order by tbname" )
- self.check_result_auto( " select t3,c1 from stb1 where c1 > 0 order by tbname " , "select t3 ,abs(c1) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t4,c1 from stb1 where c1 > 0 order by tbname " , "select t4 , abs(c1) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f" select t1,c1 from {dbname}.stb1 order by ts " , f"select t1, abs(c1) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f" select t2,c1 from {dbname}.stb1 order by tbname " , f"select t2 ,abs(c1) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f" select t3,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t3 ,abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f" select t4,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t4 , abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
diff --git a/tests/system-test/2-query/and_or_for_byte.py b/tests/system-test/2-query/and_or_for_byte.py
index 62951e571f..7d156da379 100644
--- a/tests/system-test/2-query/and_or_for_byte.py
+++ b/tests/system-test/2-query/and_or_for_byte.py
@@ -10,28 +10,31 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
- "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
- "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
+ # updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
+ # "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
+ # "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ):
+ dbname = "test"
+ stb = f"{dbname}.stb"
+ ctb_pre = f"{dbname}.sub_tb_"
tdLog.info(" prepare datas for auto check abs function ")
- tdSql.execute(" create database test ")
- tdSql.execute(" use test ")
- tdSql.execute(" create stable stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
+ tdSql.execute(f" create database {dbname} ")
+ tdSql.execute(f" use {dbname} ")
+ tdSql.execute(f" create stable {stb} (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
for tbnum in range(tbnums):
- tbname = "sub_tb_%d"%tbnum
- tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))
+ tbname = f"{ctb_pre}{tbnum}"
+ tdSql.execute(f" create table {tbname} using {stb} tags({tbnum}) ")
ts = self.ts
for row in range(rownums):
@@ -49,7 +52,7 @@ class TDTestCase:
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
tdSql.execute("use test")
- tbnames = ["stb", "sub_tb_1"]
+ tbnames = [stb, f"{ctb_pre}1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
@@ -64,48 +67,48 @@ class TDTestCase:
self.check_function("|",False,tbname,cols[0],cols[1],cols[2])
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
- "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -121,53 +124,53 @@ class TDTestCase:
'''
)
- def prepare_tag_datas(self):
+ def prepare_tag_datas(self, dbname="testdb"):
# prepare datas
tdSql.execute(
- "create database if not exists testdb keep 3650 duration 1000")
- tdSql.execute(" use testdb ")
+ f"create database if not exists {dbname} keep 3650 duration 1000")
+ tdSql.execute(f" use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
- f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- "insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
- "insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -270,88 +273,88 @@ class TDTestCase:
for ind , result in enumerate(compute_result):
tdSql.checkData(ind,0,result)
- def test_errors(self):
- tdSql.execute("use testdb")
+ def test_errors(self, dbname="testdb"):
+ tdSql.execute(f"use {dbname}")
error_sql_lists = [
- "select c1&&c2 from t1",
- "select c1&|c2 from t1",
- "select c1&(c1=c2) from t1",
- "select c1&* from t1",
- "select 123&, from t1",
- "select 123&\" from t1",
- "select c1&- from t1;",
- "select c1&&= from t1)",
- "select c1&! from t1",
- "select c1&@ from stb1",
- "select c1 from stb1",
- "select c1&$ from stb1",
- "select c1&% from stb1",
- "select c1&() from stb1",
+ f"select c1&&c2 from {dbname}.t1",
+ f"select c1&|c2 from {dbname}.t1",
+ f"select c1&(c1=c2) from {dbname}.t1",
+ f"select c1&* from {dbname}.t1",
+ f"select 123&, from {dbname}.t1",
+ f"select 123&\" from {dbname}.t1",
+ f"select c1&- from {dbname}.t1;",
+ f"select c1&&= from {dbname}.t1)",
+ f"select c1&! from {dbname}.t1",
+ f"select c1&@ from {dbname}.stb1",
+ f"select c1 from {dbname}.stb1",
+ f"select c1&$ from {dbname}.stb1",
+ f"select c1&% from {dbname}.stb1",
+ f"select c1&() from {dbname}.stb1",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def basic_query(self):
+ def basic_query(self, dbname="testdb"):
# basic query
- tdSql.query("select c1&c2|c3 from ct1")
+ tdSql.query(f"select c1&c2|c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1 ,c2&c3, c1&c2&c3 from t1")
+ tdSql.query(f"select c1 ,c2&c3, c1&c2&c3 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 ,c1&c1&c1|c1 from stb1")
+ tdSql.query(f"select c1 ,c1&c1&c1|c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select abs(c1)&c2&c3 from ct3")
+ tdSql.query(f"select abs(c1)&c2&c3 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c2&c1&c3) from ct3")
+ tdSql.query(f"select abs(c2&c1&c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c3)+c1&c3+c2 from ct3")
+ tdSql.query(f"select abs(c3)+c1&c3+c2 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select abs(c1)&c2&c3 from ct4")
+ tdSql.query(f"select abs(c1)&c2&c3 from {dbname}.ct4")
tdSql.checkRows(12)
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,8)
tdSql.checkData(10,0,0)
- tdSql.query("select abs(c2&c1&c3) from ct4")
+ tdSql.query(f"select abs(c2&c1&c3) from {dbname}.ct4")
tdSql.checkRows(12)
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,8)
tdSql.checkData(10,0,0)
- tdSql.query("select (abs(c3)+c1)&(c3+c2) from ct4")
+ tdSql.query(f"select (abs(c3)+c1)&(c3+c2) from {dbname}.ct4")
tdSql.checkRows(12)
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,640)
tdSql.checkData(10,0,0)
# used for regular table
- tdSql.query("select abs(c1)&c3&c3 from t1")
+ tdSql.query(f"select abs(c1)&c3&c3 from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 1)
tdSql.checkData(3, 0, 1)
tdSql.checkData(5, 0, None)
- tdSql.query("select abs(c1)&c2|ceil(c3)&c4|floor(c5) from t1")
+ tdSql.query(f"select abs(c1)&c2|ceil(c3)&c4|floor(c5) from {dbname}.t1")
tdSql.checkData(1, 0, 11)
tdSql.checkData(3, 0, 3)
tdSql.checkData(5, 0, None)
- tdSql.query("select ts,c1, c2, c3&c4|c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3&c4|c5 from {dbname}.t1")
tdSql.checkData(1, 3, 11)
tdSql.checkData(3, 3, 3)
tdSql.checkData(5, 3, None)
- self.check_function("&",False,"stb1","c1","ceil(c2)","abs(c3)","c4+1")
- self.check_function("|",False,"stb1","c1","ceil(c2)","abs(c3)","c4+1")
- self.check_function("&",False,"stb1","c1+c2","ceil(c2)","abs(c3+c2)","c4+1")
- self.check_function("&",False,"ct4","123","ceil(c2)","abs(c3+c2)","c4+1")
- self.check_function("&",False,"ct4","123","ceil(t1)","abs(c3+c2)","c4+1")
- self.check_function("&",False,"ct4","t1+c1","-ceil(t1)","abs(c3+c2)","c4+1")
- self.check_function("&",False,"stb1","c1","floor(t1)","abs(c1+c2)","t1+1")
- self.check_function("&",True,"stb1","max(c1)","min(floor(t1))","sum(abs(c1+c2))","last(t1)+1")
- self.check_function("&",False,"stb1","abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))))","floor(t1)","abs(c1+c2)","t1+1")
+ self.check_function("&",False,f"{dbname}.stb1","c1","ceil(c2)","abs(c3)","c4+1")
+ self.check_function("|",False,f"{dbname}.stb1","c1","ceil(c2)","abs(c3)","c4+1")
+ self.check_function("&",False,f"{dbname}.stb1","c1+c2","ceil(c2)","abs(c3+c2)","c4+1")
+ self.check_function("&",False,f"{dbname}.ct4","123","ceil(c2)","abs(c3+c2)","c4+1")
+ self.check_function("&",False,f"{dbname}.ct4","123","ceil(t1)","abs(c3+c2)","c4+1")
+ self.check_function("&",False,f"{dbname}.ct4","t1+c1","-ceil(t1)","abs(c3+c2)","c4+1")
+ self.check_function("&",False,f"{dbname}.stb1","c1","floor(t1)","abs(c1+c2)","t1+1")
+ self.check_function("&",True,f"{dbname}.stb1","max(c1)","min(floor(t1))","sum(abs(c1+c2))","last(t1)+1")
+ self.check_function("&",False,f"{dbname}.stb1","abs(abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))))","floor(t1)","abs(c1+c2)","t1+1")
# mix with common col
- tdSql.query("select c1&abs(c1)&c2&c3 ,c1,c2, t1 from ct1")
+ tdSql.query(f"select c1&abs(c1)&c2&c3 ,c1,c2, t1 from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1, 0, 1)
tdSql.checkData(4, 0, 0)
@@ -360,7 +363,7 @@ class TDTestCase:
# mix with common functions
- tdSql.query(" select c1&abs(c1)&c2&c3, abs(c1), c5, floor(c5) from ct4 ")
+ tdSql.query(f" select c1&abs(c1)&c2&c3, abs(c1), c5, floor(c5) from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -371,28 +374,28 @@ class TDTestCase:
tdSql.checkData(3, 2, 6.66000)
tdSql.checkData(3, 3, 6.00000)
- tdSql.query("select c1&abs(c1)&c2&c3, abs(c1),c5, floor(c5) from stb1 order by ts ")
+ tdSql.query(f"select c1&abs(c1)&c2&c3, abs(c1),c5, floor(c5) from {dbname}.stb1 order by ts ")
tdSql.checkData(3, 0, 2)
tdSql.checkData(3, 1, 6)
tdSql.checkData(3, 2, 6.66000)
tdSql.checkData(3, 3, 6.00000)
# mix with agg functions , not support
- tdSql.error("select c1&abs(c1)&c2&c3, abs(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1&abs(c1)&c2&c3, abs(c1),c5, count(c5) from ct1 ")
- tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from stb1 ")
- tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from ct1 ")
- tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from ct1 ")
- tdSql.error("select c1&abs(c1)&c2&c3, count(c5) from stb1 ")
+ tdSql.error(f"select c1&abs(c1)&c2&c3, abs(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1&abs(c1)&c2&c3, abs(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1&abs(c1)&c2&c3, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1&abs(c1)&c2&c3, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1&abs(c1)&c2&c3, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1&abs(c1)&c2&c3, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select sum(c1&abs(c1)&c2&c3) ,max(c5), count(c5) from stb1")
+ tdSql.query(f"select sum(c1&abs(c1)&c2&c3) ,max(c5), count(c5) from {dbname}.stb1")
- tdSql.query("select max(c1)&max(c2)|first(ts), count(c5) from ct1")
+ tdSql.query(f"select max(c1)&max(c2)|first(ts), count(c5) from {dbname}.ct1")
# bug fix for compute
- tdSql.query("select c1&abs(c1)&c2&c3, abs(c1&abs(c1)&c2&c3) -0 ,ceil(c1&abs(c1)&c2&c3)-0 from ct4 ")
+ tdSql.query(f"select c1&abs(c1)&c2&c3, abs(c1&abs(c1)&c2&c3) -0 ,ceil(c1&abs(c1)&c2&c3)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -400,7 +403,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 8.000000000)
- tdSql.query(" select c1&c2|c3, abs(c1&c2|c3) -0 ,ceil(c1&c2|c3-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1&c2|c3, abs(c1&c2|c3) -0 ,ceil(c1&c2|c3-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -411,38 +414,38 @@ class TDTestCase:
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_function("&", False , "sub1_bound" ,"c1","c2","c3","c4","c5","c6" )
- self.check_function("&", False ,"sub1_bound","abs(c1)","abs(c2)","abs(c3)","abs(c4)","abs(c5)","abs(c6)" )
- self.check_function("&", False ,"stb_bound","123","abs(c2)","t1","abs(c4)","abs(c5)","abs(c6)" )
+ self.check_function("&", False , f"{dbname}.sub1_bound" ,"c1","c2","c3","c4","c5","c6" )
+ self.check_function("&", False , f"{dbname}.sub1_bound","abs(c1)","abs(c2)","abs(c3)","abs(c4)","abs(c5)","abs(c6)" )
+ self.check_function("&", False , f"{dbname}.stb_bound","123","abs(c2)","t1","abs(c4)","abs(c5)","abs(c6)" )
# check basic elem for table per row
tdSql.query(
- "select abs(c1) ,abs(c2) , abs(c3) , abs(c4), abs(c5), abs(c6) from sub1_bound ")
+ f"select abs(c1) ,abs(c2) , abs(c3) , abs(c4), abs(c5), abs(c6) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483647)
tdSql.checkData(0, 1, 9223372036854775807)
tdSql.checkData(0, 2, 32767)
@@ -463,10 +466,10 @@ class TDTestCase:
tdSql.checkData(3, 5, 169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000)
# check + - * / in functions
- self.check_function("&", False ,"stb_bound","abs(c1+1)","abs(c2)","t1","abs(c3*1)","abs(c5)/2","abs(c6)" )
+ self.check_function("&", False , f"{dbname}.stb_bound","abs(c1+1)","abs(c2)","t1","abs(c3*1)","abs(c5)/2","abs(c6)" )
tdSql.query(
- "select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from sub1_bound ")
+ f"select abs(c1+1) ,abs(c2) , abs(c3*1) , abs(c4/2), abs(c5)/2, abs(c6) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483648.000000000)
tdSql.checkData(0, 1, 9223372036854775807)
tdSql.checkData(0, 2, 32767.000000000)
@@ -483,44 +486,44 @@ class TDTestCase:
1, 4, 169999997607218212453866206899682148352.000000000)
- def test_tag_compute_for_scalar_function(self):
+ def test_tag_compute_for_scalar_function(self, dbname="testdb"):
- tdSql.execute("use testdb")
+ tdSql.execute(f"use {dbname}")
- self.check_function("&", False ,"ct4","123","abs(c1)","t1","abs(t2)","abs(t3)","abs(t4)","t5")
- self.check_function("&", False ,"ct4","c1+2","abs(t2+2)","t3","abs(t4)","abs(t5)","abs(c1)","t5")
+ self.check_function("&", False , f"{dbname}.ct4","123","abs(c1)","t1","abs(t2)","abs(t3)","abs(t4)","t5")
+ self.check_function("&", False , f"{dbname}.ct4","c1+2","abs(t2+2)","t3","abs(t4)","abs(t5)","abs(c1)","t5")
- tdSql.query(" select sum(c1) from stb1 where t1+10 >1; ")
- tdSql.query("select c1 ,t1 from stb1 where t1 =0 ")
+ tdSql.query(f" select sum(c1) from {dbname}.stb1 where t1+10 >1; ")
+ tdSql.query(f"select c1 ,t1 from {dbname}.stb1 where t1 =0 ")
tdSql.checkRows(13)
- self.check_function("&", False ,"t1","c1+2","abs(c2)")
- tdSql.query("select t1 from stb1 where t1 >0 ")
+ self.check_function("&", False , f"{dbname}.t1","c1+2","abs(c2)")
+ tdSql.query(f"select t1 from {dbname}.stb1 where t1 >0 ")
tdSql.checkRows(12)
- tdSql.query("select t1 from stb1 where t1 =3 ")
+ tdSql.query(f"select t1 from {dbname}.stb1 where t1 =3 ")
tdSql.checkRows(12)
# tdSql.query("select sum(t1) from (select c1 ,t1 from stb1)")
# tdSql.checkData(0,0,61)
# tdSql.query("select distinct(c1) ,t1 from stb1")
# tdSql.checkRows(20)
- tdSql.query("select max(c1) , t1&c2&t2 from stb1;")
+ tdSql.query(f"select max(c1) , t1&c2&t2 from {dbname}.stb1;")
tdSql.checkData(0,1,0)
# tag filter with abs function
- tdSql.query("select t1 from stb1 where abs(t1)=1")
+ tdSql.query(f"select t1 from {dbname}.stb1 where abs(t1)=1")
tdSql.checkRows(0)
- tdSql.query("select t1 from stb1 where abs(c1+t1)=1")
+ tdSql.query(f"select t1 from {dbname}.stb1 where abs(c1+t1)=1")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.query(
- "select abs(c1+t1)*t1 from stb1 where abs(c1)/floor(abs(ceil(t1))) ==1")
+ f"select abs(c1+t1)*t1 from {dbname}.stb1 where abs(c1)/floor(abs(ceil(t1))) ==1")
- def support_super_table_test(self):
- tdSql.execute(" use testdb ")
- self.check_function("|", False , "stb1" , "c1","c2","c3","c4" )
- self.check_function("|", False , "stb1" , "c1","c2","abs(c3)","c4","ceil(t1)" )
- self.check_function("&", False , "stb1" , "c1","c2","abs(c3)","floor(c4)","ceil(t1)" )
- self.check_function("&", True , "stb1" , "max(c1)","max(c2)","sum(abs(c3))","max(floor(c4))","min(ceil(t1))" )
+ def support_super_table_test(self, dbname="testdb"):
+ tdSql.execute(f" use {dbname} ")
+ self.check_function("|", False , f"{dbname}.stb1" , "c1","c2","c3","c4" )
+ self.check_function("|", False , f"{dbname}.stb1" , "c1","c2","abs(c3)","c4","ceil(t1)" )
+ self.check_function("&", False , f"{dbname}.stb1" , "c1","c2","abs(c3)","floor(c4)","ceil(t1)" )
+ self.check_function("&", True , f"{dbname}.stb1" , "max(c1)","max(c2)","sum(abs(c3))","max(floor(c4))","min(ceil(t1))" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py
index 6e4b4eeb8a..128a03937a 100644
--- a/tests/system-test/2-query/apercentile.py
+++ b/tests/system-test/2-query/apercentile.py
@@ -20,12 +20,13 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(),logSql)
+ tdSql.init(conn.cursor(),False)
self.rowNum = 10
self.ts = 1537146000000
self.setsql = TDSetSql()
- self.ntbname = 'ntb'
- self.stbname = 'stb'
+ self.dbname = "db"
+ self.ntbname = f"{self.dbname}.ntb"
+ self.stbname = f'{self.dbname}.stb'
self.binary_length = 20 # the length of binary for column_dict
self.nchar_length = 20 # the length of nchar for column_dict
self.column_dict = {
diff --git a/tests/system-test/2-query/arccos.py b/tests/system-test/2-query/arccos.py
index d5656d9104..1787521517 100644
--- a/tests/system-test/2-query/arccos.py
+++ b/tests/system-test/2-query/arccos.py
@@ -9,49 +9,48 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- self.PI =3.1415926
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -102,68 +101,68 @@ class TDTestCase:
else:
tdLog.info("acos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select acos from t1",
- # "select acos(-+--+c1 ) from t1",
- # "select +-acos(c1) from t1",
- # "select ++-acos(c1) from t1",
- # "select ++--acos(c1) from t1",
- # "select - -acos(c1)*0 from t1",
- # "select acos(tbname+1) from t1 ",
- "select acos(123--123)==1 from t1",
- "select acos(c1) as 'd1' from t1",
- "select acos(c1 ,c2) from t1",
- "select acos(c1 ,NULL ) from t1",
- "select acos(,) from t1;",
- "select acos(acos(c1) ab from t1)",
- "select acos(c1 ) as int from t1",
- "select acos from stb1",
- # "select acos(-+--+c1) from stb1",
- # "select +-acos(c1) from stb1",
- # "select ++-acos(c1) from stb1",
- # "select ++--acos(c1) from stb1",
- # "select - -acos(c1)*0 from stb1",
- # "select acos(tbname+1) from stb1 ",
- "select acos(123--123)==1 from stb1",
- "select acos(c1) as 'd1' from stb1",
- "select acos(c1 ,c2 ) from stb1",
- "select acos(c1 ,NULL) from stb1",
- "select acos(,) from stb1;",
- "select acos(acos(c1) ab from stb1)",
- "select acos(c1) as int from stb1"
+ f"select acos from {dbname}.t1",
+ # f"select acos(-+--+c1 ) from {dbname}.t1",
+ # f"select +-acos(c1) from {dbname}.t1",
+ # f"select ++-acos(c1) from {dbname}.t1",
+ # f"select ++--acos(c1) from {dbname}.t1",
+ # f"select - -acos(c1)*0 from {dbname}.t1",
+ # f"select acos(tbname+1) from {dbname}.t1 ",
+ f"select acos(123--123)==1 from {dbname}.t1",
+ f"select acos(c1) as 'd1' from {dbname}.t1",
+ f"select acos(c1 ,c2) from {dbname}.t1",
+ f"select acos(c1 ,NULL ) from {dbname}.t1",
+ f"select acos(,) from {dbname}.t1;",
+ f"select acos(acos(c1) ab from {dbname}.t1)",
+ f"select acos(c1 ) as int from {dbname}.t1",
+ f"select acos from {dbname}.stb1",
+ # f"select acos(-+--+c1) from {dbname}.stb1",
+ # f"select +-acos(c1) from {dbname}.stb1",
+ # f"select ++-acos(c1) from {dbname}.stb1",
+ # f"select ++--acos(c1) from {dbname}.stb1",
+ # f"select - -acos(c1)*0 from {dbname}.stb1",
+ # f"select acos(tbname+1) from {dbname}.stb1 ",
+ f"select acos(123--123)==1 from {dbname}.stb1",
+ f"select acos(c1) as 'd1' from {dbname}.stb1",
+ f"select acos(c1 ,c2 ) from {dbname}.stb1",
+ f"select acos(c1 ,NULL) from {dbname}.stb1",
+ f"select acos(,) from {dbname}.stb1;",
+ f"select acos(acos(c1) ab from {dbname}.stb1)",
+ f"select acos(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select acos(ts) from t1" ,
- "select acos(c7) from t1",
- "select acos(c8) from t1",
- "select acos(c9) from t1",
- "select acos(ts) from ct1" ,
- "select acos(c7) from ct1",
- "select acos(c8) from ct1",
- "select acos(c9) from ct1",
- "select acos(ts) from ct3" ,
- "select acos(c7) from ct3",
- "select acos(c8) from ct3",
- "select acos(c9) from ct3",
- "select acos(ts) from ct4" ,
- "select acos(c7) from ct4",
- "select acos(c8) from ct4",
- "select acos(c9) from ct4",
- "select acos(ts) from stb1" ,
- "select acos(c7) from stb1",
- "select acos(c8) from stb1",
- "select acos(c9) from stb1" ,
+ f"select acos(ts) from {dbname}.t1" ,
+ f"select acos(c7) from {dbname}.t1",
+ f"select acos(c8) from {dbname}.t1",
+ f"select acos(c9) from {dbname}.t1",
+ f"select acos(ts) from {dbname}.ct1" ,
+ f"select acos(c7) from {dbname}.ct1",
+ f"select acos(c8) from {dbname}.ct1",
+ f"select acos(c9) from {dbname}.ct1",
+ f"select acos(ts) from {dbname}.ct3" ,
+ f"select acos(c7) from {dbname}.ct3",
+ f"select acos(c8) from {dbname}.ct3",
+ f"select acos(c9) from {dbname}.ct3",
+ f"select acos(ts) from {dbname}.ct4" ,
+ f"select acos(c7) from {dbname}.ct4",
+ f"select acos(c8) from {dbname}.ct4",
+ f"select acos(c9) from {dbname}.ct4",
+ f"select acos(ts) from {dbname}.stb1" ,
+ f"select acos(c7) from {dbname}.stb1",
+ f"select acos(c8) from {dbname}.stb1",
+ f"select acos(c9) from {dbname}.stb1" ,
- "select acos(ts) from stbbb1" ,
- "select acos(c7) from stbbb1",
+ f"select acos(ts) from {dbname}.stbbb1" ,
+ f"select acos(c7) from {dbname}.stbbb1",
- "select acos(ts) from tbname",
- "select acos(c9) from tbname"
+ f"select acos(ts) from {dbname}.tbname",
+ f"select acos(c9) from {dbname}.tbname"
]
@@ -172,103 +171,103 @@ class TDTestCase:
type_sql_lists = [
- "select acos(c1) from t1",
- "select acos(c2) from t1",
- "select acos(c3) from t1",
- "select acos(c4) from t1",
- "select acos(c5) from t1",
- "select acos(c6) from t1",
+ f"select acos(c1) from {dbname}.t1",
+ f"select acos(c2) from {dbname}.t1",
+ f"select acos(c3) from {dbname}.t1",
+ f"select acos(c4) from {dbname}.t1",
+ f"select acos(c5) from {dbname}.t1",
+ f"select acos(c6) from {dbname}.t1",
- "select acos(c1) from ct1",
- "select acos(c2) from ct1",
- "select acos(c3) from ct1",
- "select acos(c4) from ct1",
- "select acos(c5) from ct1",
- "select acos(c6) from ct1",
+ f"select acos(c1) from {dbname}.ct1",
+ f"select acos(c2) from {dbname}.ct1",
+ f"select acos(c3) from {dbname}.ct1",
+ f"select acos(c4) from {dbname}.ct1",
+ f"select acos(c5) from {dbname}.ct1",
+ f"select acos(c6) from {dbname}.ct1",
- "select acos(c1) from ct3",
- "select acos(c2) from ct3",
- "select acos(c3) from ct3",
- "select acos(c4) from ct3",
- "select acos(c5) from ct3",
- "select acos(c6) from ct3",
+ f"select acos(c1) from {dbname}.ct3",
+ f"select acos(c2) from {dbname}.ct3",
+ f"select acos(c3) from {dbname}.ct3",
+ f"select acos(c4) from {dbname}.ct3",
+ f"select acos(c5) from {dbname}.ct3",
+ f"select acos(c6) from {dbname}.ct3",
- "select acos(c1) from stb1",
- "select acos(c2) from stb1",
- "select acos(c3) from stb1",
- "select acos(c4) from stb1",
- "select acos(c5) from stb1",
- "select acos(c6) from stb1",
+ f"select acos(c1) from {dbname}.stb1",
+ f"select acos(c2) from {dbname}.stb1",
+ f"select acos(c3) from {dbname}.stb1",
+ f"select acos(c4) from {dbname}.stb1",
+ f"select acos(c5) from {dbname}.stb1",
+ f"select acos(c6) from {dbname}.stb1",
- "select acos(c6) as alisb from stb1",
- "select acos(c6) alisb from stb1",
+ f"select acos(c6) as alisb from {dbname}.stb1",
+ f"select acos(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_acos_function(self):
+ def basic_acos_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select acos(c1) from ct3")
+ tdSql.query(f"select acos(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select acos(c2) from ct3")
+ tdSql.query(f"select acos(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select acos(c3) from ct3")
+ tdSql.query(f"select acos(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select acos(c4) from ct3")
+ tdSql.query(f"select acos(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select acos(c5) from ct3")
+ tdSql.query(f"select acos(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select acos(c6) from ct3")
+ tdSql.query(f"select acos(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select acos(c1) from t1")
+ tdSql.query(f"select acos(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.000000000)
tdSql.checkData(3 , 0, None)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_acos( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select acos(abs(c1)), acos(abs(c2)) ,acos(abs(c3)), acos(abs(c4)), acos(abs(c5)) from t1")
+ self.check_result_auto_acos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select acos(abs(c1)), acos(abs(c2)) ,acos(abs(c3)), acos(abs(c4)), acos(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,acos(c2) from ct1")
+ tdSql.query(f"select c2 ,acos(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, None)
tdSql.checkData(1 , 1, None)
tdSql.checkData(3 , 1, None)
tdSql.checkData(4 , 1, 1.570796327)
- tdSql.query("select c1, c5 ,acos(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,acos(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, None)
tdSql.checkData(2 , 2, None)
tdSql.checkData(3 , 2, None)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_acos( "select c1, c2, c3 , c4, c5 from ct1", "select acos(c1), acos(c2) ,acos(c3), acos(c4), acos(c5) from ct1")
+ self.check_result_auto_acos( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select acos(c1), acos(c2) ,acos(c3), acos(c4), acos(c5) from {dbname}.ct1")
# nest query for acos functions
- tdSql.query("select c4 , acos(c4) ,acos(acos(c4)) , acos(acos(acos(c4))) from ct1;")
+ tdSql.query(f"select c4 , acos(c4) ,acos(acos(c4)) , acos(acos(acos(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , None)
tdSql.checkData(0 , 2 , None)
@@ -286,22 +285,22 @@ class TDTestCase:
# used for stable table
- tdSql.query("select acos(c1) from stb1")
+ tdSql.query(f"select acos(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select acos(c1) from stbbb1")
- tdSql.error("select acos(c1) from tbname")
- tdSql.error("select acos(c1) from ct5")
+ tdSql.error(f"select acos(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select acos(c1) from {dbname}.tbname")
+ tdSql.error(f"select acos(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, acos(c1) from ct1")
- tdSql.query("select c2, acos(c2) from ct4")
+ tdSql.query(f"select c1, acos(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, acos(c2) from {dbname}.ct4")
# mix with common functions
- tdSql.query("select c1, acos(c1),acos(c1), acos(acos(c1)) from ct4 ")
+ tdSql.query(f"select c1, acos(c1),acos(c1), acos(acos(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -312,24 +311,24 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,None)
tdSql.checkData(3 , 3 ,None)
- tdSql.query("select c1, acos(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, acos(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, acos(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, acos(c1),c5, count(c5) from ct1 ")
- tdSql.error("select acos(c1), count(c5) from stb1 ")
- tdSql.error("select acos(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, acos(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, acos(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select acos(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select acos(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# # bug fix for compute
- tdSql.query("select c1, acos(c1) -0 ,acos(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, acos(c1) -0 ,acos(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -337,7 +336,7 @@ class TDTestCase:
tdSql.checkData(1, 1, None)
tdSql.checkData(1, 2, None)
- tdSql.query(" select c1, acos(c1) -0 ,acos(c1-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1, acos(c1) -0 ,acos(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -345,35 +344,35 @@ class TDTestCase:
tdSql.checkData(1, 1, None)
tdSql.checkData(1, 2, None)
- tdSql.query("select c1, acos(c1), c2, acos(c2), c3, acos(c3) from ct1")
+ tdSql.query(f"select c1, acos(c1), c2, acos(c2), c3, acos(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, acos(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, acos(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, acos(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, acos(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, acos(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, acos(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, acos(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, acos(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, None)
- tdSql.query("select c1, acos(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, acos(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, acos(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, acos(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, acos(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, acos(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, acos(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, acos(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, acos(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, acos(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(acos(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.execute(f"use {dbname}")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(acos(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -381,7 +380,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,None)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(acos(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(acos(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -389,7 +388,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,None)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(acos(c1)-0.5) from ct4 where c1 0 order by tbname " , "select acos(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_acos( " select c5 from stb1 where c1 > 0 order by tbname " , "select acos(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ tdSql.execute(f" use {dbname} ")
+ self.check_result_auto_acos( f" select c5 from {dbname}.stb1 order by ts " , f"select acos(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_acos( f" select c5 from {dbname}.stb1 order by tbname " , f"select acos(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_acos( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select acos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_acos( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select acos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_acos( " select t1,c5 from stb1 order by ts " , "select acos(t1), acos(c5) from stb1 order by ts" )
- self.check_result_auto_acos( " select t1,c5 from stb1 order by tbname " , "select acos(t1) ,acos(c5) from stb1 order by tbname" )
- self.check_result_auto_acos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select acos(t1) ,acos(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_acos( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select acos(t1) , acos(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_acos( f" select t1,c5 from {dbname}.stb1 order by ts " , f"select acos(t1), acos(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_acos( f" select t1,c5 from {dbname}.stb1 order by tbname " , f"select acos(t1) ,acos(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_acos( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select acos(t1) ,acos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_acos( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select acos(t1) , acos(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
@@ -526,9 +525,9 @@ class TDTestCase:
self.abs_func_filter()
- tdLog.printNoPrefix("==========step7: acos filter query ============")
+ # tdLog.printNoPrefix("==========step7: acos filter query ============")
- self.abs_func_filter()
+ # self.abs_func_filter()
tdLog.printNoPrefix("==========step8: check acos result of stable query ============")
diff --git a/tests/system-test/2-query/arcsin.py b/tests/system-test/2-query/arcsin.py
index 31185ffcaa..127419029b 100644
--- a/tests/system-test/2-query/arcsin.py
+++ b/tests/system-test/2-query/arcsin.py
@@ -9,49 +9,48 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- self.PI =3.1415926
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -102,68 +101,68 @@ class TDTestCase:
else:
tdLog.info("asin value check pass , it work as expected ,sql is \"%s\" "%pow_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select asin from t1",
- # "select asin(-+--+c1 ) from t1",
- # "select +-asin(c1) from t1",
- # "select ++-asin(c1) from t1",
- # "select ++--asin(c1) from t1",
- # "select - -asin(c1)*0 from t1",
- # "select asin(tbname+1) from t1 ",
- "select asin(123--123)==1 from t1",
- "select asin(c1) as 'd1' from t1",
- "select asin(c1 ,c2) from t1",
- "select asin(c1 ,NULL ) from t1",
- "select asin(,) from t1;",
- "select asin(asin(c1) ab from t1)",
- "select asin(c1 ) as int from t1",
- "select asin from stb1",
- # "select asin(-+--+c1) from stb1",
- # "select +-asin(c1) from stb1",
- # "select ++-asin(c1) from stb1",
- # "select ++--asin(c1) from stb1",
- # "select - -asin(c1)*0 from stb1",
- # "select asin(tbname+1) from stb1 ",
- "select asin(123--123)==1 from stb1",
- "select asin(c1) as 'd1' from stb1",
- "select asin(c1 ,c2 ) from stb1",
- "select asin(c1 ,NULL) from stb1",
- "select asin(,) from stb1;",
- "select asin(asin(c1) ab from stb1)",
- "select asin(c1) as int from stb1"
+ f"select asin from {dbname}.t1",
+ # f"select asin(-+--+c1 ) from {dbname}.t1",
+ # f"select +-asin(c1) from {dbname}.t1",
+ # f"select ++-asin(c1) from {dbname}.t1",
+ # f"select ++--asin(c1) from {dbname}.t1",
+ # f"select - -asin(c1)*0 from {dbname}.t1",
+ # f"select asin(tbname+1) from {dbname}.t1 ",
+ f"select asin(123--123)==1 from {dbname}.t1",
+ f"select asin(c1) as 'd1' from {dbname}.t1",
+ f"select asin(c1 ,c2) from {dbname}.t1",
+ f"select asin(c1 ,NULL ) from {dbname}.t1",
+ f"select asin(,) from {dbname}.t1;",
+ f"select asin(asin(c1) ab from {dbname}.t1)",
+ f"select asin(c1 ) as int from {dbname}.t1",
+ f"select asin from {dbname}.stb1",
+ # f"select asin(-+--+c1) from {dbname}.stb1",
+ # f"select +-asin(c1) from {dbname}.stb1",
+ # f"select ++-asin(c1) from {dbname}.stb1",
+ # f"select ++--asin(c1) from {dbname}.stb1",
+ # f"select - -asin(c1)*0 from {dbname}.stb1",
+ # f"select asin(tbname+1) from {dbname}.stb1 ",
+ f"select asin(123--123)==1 from {dbname}.stb1",
+ f"select asin(c1) as 'd1' from {dbname}.stb1",
+ f"select asin(c1 ,c2 ) from {dbname}.stb1",
+ f"select asin(c1 ,NULL) from {dbname}.stb1",
+ f"select asin(,) from {dbname}.stb1;",
+ f"select asin(asin(c1) ab from {dbname}.stb1)",
+ f"select asin(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select asin(ts) from t1" ,
- "select asin(c7) from t1",
- "select asin(c8) from t1",
- "select asin(c9) from t1",
- "select asin(ts) from ct1" ,
- "select asin(c7) from ct1",
- "select asin(c8) from ct1",
- "select asin(c9) from ct1",
- "select asin(ts) from ct3" ,
- "select asin(c7) from ct3",
- "select asin(c8) from ct3",
- "select asin(c9) from ct3",
- "select asin(ts) from ct4" ,
- "select asin(c7) from ct4",
- "select asin(c8) from ct4",
- "select asin(c9) from ct4",
- "select asin(ts) from stb1" ,
- "select asin(c7) from stb1",
- "select asin(c8) from stb1",
- "select asin(c9) from stb1" ,
+ f"select asin(ts) from {dbname}.t1" ,
+ f"select asin(c7) from {dbname}.t1",
+ f"select asin(c8) from {dbname}.t1",
+ f"select asin(c9) from {dbname}.t1",
+ f"select asin(ts) from {dbname}.ct1" ,
+ f"select asin(c7) from {dbname}.ct1",
+ f"select asin(c8) from {dbname}.ct1",
+ f"select asin(c9) from {dbname}.ct1",
+ f"select asin(ts) from {dbname}.ct3" ,
+ f"select asin(c7) from {dbname}.ct3",
+ f"select asin(c8) from {dbname}.ct3",
+ f"select asin(c9) from {dbname}.ct3",
+ f"select asin(ts) from {dbname}.ct4" ,
+ f"select asin(c7) from {dbname}.ct4",
+ f"select asin(c8) from {dbname}.ct4",
+ f"select asin(c9) from {dbname}.ct4",
+ f"select asin(ts) from {dbname}.stb1" ,
+ f"select asin(c7) from {dbname}.stb1",
+ f"select asin(c8) from {dbname}.stb1",
+ f"select asin(c9) from {dbname}.stb1" ,
- "select asin(ts) from stbbb1" ,
- "select asin(c7) from stbbb1",
+ f"select asin(ts) from {dbname}.stbbb1" ,
+ f"select asin(c7) from {dbname}.stbbb1",
- "select asin(ts) from tbname",
- "select asin(c9) from tbname"
+ f"select asin(ts) from {dbname}.tbname",
+ f"select asin(c9) from {dbname}.tbname"
]
@@ -172,103 +171,103 @@ class TDTestCase:
type_sql_lists = [
- "select asin(c1) from t1",
- "select asin(c2) from t1",
- "select asin(c3) from t1",
- "select asin(c4) from t1",
- "select asin(c5) from t1",
- "select asin(c6) from t1",
+ f"select asin(c1) from {dbname}.t1",
+ f"select asin(c2) from {dbname}.t1",
+ f"select asin(c3) from {dbname}.t1",
+ f"select asin(c4) from {dbname}.t1",
+ f"select asin(c5) from {dbname}.t1",
+ f"select asin(c6) from {dbname}.t1",
- "select asin(c1) from ct1",
- "select asin(c2) from ct1",
- "select asin(c3) from ct1",
- "select asin(c4) from ct1",
- "select asin(c5) from ct1",
- "select asin(c6) from ct1",
+ f"select asin(c1) from {dbname}.ct1",
+ f"select asin(c2) from {dbname}.ct1",
+ f"select asin(c3) from {dbname}.ct1",
+ f"select asin(c4) from {dbname}.ct1",
+ f"select asin(c5) from {dbname}.ct1",
+ f"select asin(c6) from {dbname}.ct1",
- "select asin(c1) from ct3",
- "select asin(c2) from ct3",
- "select asin(c3) from ct3",
- "select asin(c4) from ct3",
- "select asin(c5) from ct3",
- "select asin(c6) from ct3",
+ f"select asin(c1) from {dbname}.ct3",
+ f"select asin(c2) from {dbname}.ct3",
+ f"select asin(c3) from {dbname}.ct3",
+ f"select asin(c4) from {dbname}.ct3",
+ f"select asin(c5) from {dbname}.ct3",
+ f"select asin(c6) from {dbname}.ct3",
- "select asin(c1) from stb1",
- "select asin(c2) from stb1",
- "select asin(c3) from stb1",
- "select asin(c4) from stb1",
- "select asin(c5) from stb1",
- "select asin(c6) from stb1",
+ f"select asin(c1) from {dbname}.stb1",
+ f"select asin(c2) from {dbname}.stb1",
+ f"select asin(c3) from {dbname}.stb1",
+ f"select asin(c4) from {dbname}.stb1",
+ f"select asin(c5) from {dbname}.stb1",
+ f"select asin(c6) from {dbname}.stb1",
- "select asin(c6) as alisb from stb1",
- "select asin(c6) alisb from stb1",
+ f"select asin(c6) as alisb from {dbname}.stb1",
+ f"select asin(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_asin_function(self):
+ def basic_asin_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select asin(c1) from ct3")
+ tdSql.query(f"select asin(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select asin(c2) from ct3")
+ tdSql.query(f"select asin(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select asin(c3) from ct3")
+ tdSql.query(f"select asin(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select asin(c4) from ct3")
+ tdSql.query(f"select asin(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select asin(c5) from ct3")
+ tdSql.query(f"select asin(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select asin(c6) from ct3")
+ tdSql.query(f"select asin(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select asin(c1) from t1")
+ tdSql.query(f"select asin(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.570796327)
tdSql.checkData(3 , 0, None)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_asin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select asin(abs(c1)), asin(abs(c2)) ,asin(abs(c3)), asin(abs(c4)), asin(abs(c5)) from t1")
+ self.check_result_auto_asin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select asin(abs(c1)), asin(abs(c2)) ,asin(abs(c3)), asin(abs(c4)), asin(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,asin(c2) from ct1")
+ tdSql.query(f"select c2 ,asin(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, None)
tdSql.checkData(1 , 1, None)
tdSql.checkData(3 , 1, None)
- tdSql.checkData(4 , 1, 0.000000000)
+ tdSql.checkData(4 , 1, 0)
- tdSql.query("select c1, c5 ,asin(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,asin(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, None)
tdSql.checkData(2 , 2, None)
tdSql.checkData(3 , 2, None)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_asin( "select c1, c2, c3 , c4, c5 from ct1", "select asin(c1), asin(c2) ,asin(c3), asin(c4), asin(c5) from ct1")
+ self.check_result_auto_asin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select asin(c1), asin(c2) ,asin(c3), asin(c4), asin(c5) from {dbname}.ct1")
# nest query for asin functions
- tdSql.query("select c4 , asin(c4) ,asin(asin(c4)) , asin(asin(asin(c4))) from ct1;")
+ tdSql.query(f"select c4 , asin(c4) ,asin(asin(c4)) , asin(asin(asin(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , None)
tdSql.checkData(0 , 2 , None)
@@ -286,22 +285,22 @@ class TDTestCase:
# used for stable table
- tdSql.query("select asin(c1) from stb1")
+ tdSql.query(f"select asin(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select asin(c1) from stbbb1")
- tdSql.error("select asin(c1) from tbname")
- tdSql.error("select asin(c1) from ct5")
+ tdSql.error(f"select asin(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select asin(c1) from {dbname}.tbname")
+ tdSql.error(f"select asin(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, asin(c1) from ct1")
- tdSql.query("select c2, asin(c2) from ct4")
+ tdSql.query(f"select c1, asin(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, asin(c2) from {dbname}.ct4")
# mix with common functions
- tdSql.query("select c1, asin(c1),asin(c1), asin(asin(c1)) from ct4 ")
+ tdSql.query(f"select c1, asin(c1),asin(c1), asin(asin(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -312,24 +311,24 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,None)
tdSql.checkData(3 , 3 ,None)
- tdSql.query("select c1, asin(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, asin(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, asin(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, asin(c1),c5, count(c5) from ct1 ")
- tdSql.error("select asin(c1), count(c5) from stb1 ")
- tdSql.error("select asin(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, asin(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, asin(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select asin(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select asin(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# # bug fix for compute
- tdSql.query("select c1, asin(c1) -0 ,asin(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, asin(c1) -0 ,asin(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -337,7 +336,7 @@ class TDTestCase:
tdSql.checkData(1, 1, None)
tdSql.checkData(1, 2, None)
- tdSql.query(" select c1, asin(c1) -0 ,asin(c1-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1, asin(c1) -0 ,asin(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -345,35 +344,35 @@ class TDTestCase:
tdSql.checkData(1, 1, None)
tdSql.checkData(1, 2, None)
- tdSql.query("select c1, asin(c1), c2, asin(c2), c3, asin(c3) from ct1")
+ tdSql.query(f"select c1, asin(c1), c2, asin(c2), c3, asin(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, asin(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, asin(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, asin(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, asin(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, asin(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, asin(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, asin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, asin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, None)
- tdSql.query("select c1, asin(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, asin(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, asin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, asin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, asin(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, asin(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, asin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, asin(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, asin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, asin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(asin(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.execute(f"use {dbname}")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(asin(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -381,7 +380,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,None)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(asin(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(asin(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -389,7 +388,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,None)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(asin(c1)-0.5) from ct4 where c1<5 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(asin(c1)-0.5) from {dbname}.ct4 where c1<5 ")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_asin( " select c5 from stb1 order by ts " , "select asin(c5) from stb1 order by ts" )
- self.check_result_auto_asin( " select c5 from stb1 order by tbname " , "select asin(c5) from stb1 order by tbname" )
- self.check_result_auto_asin( " select c5 from stb1 where c1 > 0 order by tbname " , "select asin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_asin( " select c5 from stb1 where c1 > 0 order by tbname " , "select asin(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ tdSql.execute(f" use {dbname} ")
+ self.check_result_auto_asin( f" select c5 from {dbname}.stb1 order by ts " , f"select asin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_asin( f" select c5 from {dbname}.stb1 order by tbname " , f"select asin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_asin( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select asin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_asin( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select asin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_asin( " select t1,c5 from stb1 order by ts " , "select asin(t1), asin(c5) from stb1 order by ts" )
- self.check_result_auto_asin( " select t1,c5 from stb1 order by tbname " , "select asin(t1) ,asin(c5) from stb1 order by tbname" )
- self.check_result_auto_asin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select asin(t1) ,asin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_asin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select asin(t1) , asin(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_asin( f" select t1,c5 from {dbname}.stb1 order by ts " , f"select asin(t1), asin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_asin( f" select t1,c5 from {dbname}.stb1 order by tbname " , f"select asin(t1) ,asin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_asin( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select asin(t1) ,asin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_asin( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select asin(t1) , asin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
diff --git a/tests/system-test/2-query/arctan.py b/tests/system-test/2-query/arctan.py
index 4c729bd521..e6ae16b8d9 100644
--- a/tests/system-test/2-query/arctan.py
+++ b/tests/system-test/2-query/arctan.py
@@ -9,48 +9,48 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -87,7 +87,7 @@ class TDTestCase:
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
+ if auto_result[row_index][col_index] == None and elem:
check_status = False
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
check_status = False
@@ -99,68 +99,68 @@ class TDTestCase:
else:
tdLog.info("atan value check pass , it work as expected ,sql is \"%s\" "%pow_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select atan from t1",
- # "select atan(-+--+c1 ) from t1",
- # "select +-atan(c1) from t1",
- # "select ++-atan(c1) from t1",
- # "select ++--atan(c1) from t1",
- # "select - -atan(c1)*0 from t1",
- # "select atan(tbname+1) from t1 ",
- "select atan(123--123)==1 from t1",
- "select atan(c1) as 'd1' from t1",
- "select atan(c1 ,c2) from t1",
- "select atan(c1 ,NULL ) from t1",
- "select atan(,) from t1;",
- "select atan(atan(c1) ab from t1)",
- "select atan(c1 ) as int from t1",
- "select atan from stb1",
- # "select atan(-+--+c1) from stb1",
- # "select +-atan(c1) from stb1",
- # "select ++-atan(c1) from stb1",
- # "select ++--atan(c1) from stb1",
- # "select - -atan(c1)*0 from stb1",
- # "select atan(tbname+1) from stb1 ",
- "select atan(123--123)==1 from stb1",
- "select atan(c1) as 'd1' from stb1",
- "select atan(c1 ,c2 ) from stb1",
- "select atan(c1 ,NULL) from stb1",
- "select atan(,) from stb1;",
- "select atan(atan(c1) ab from stb1)",
- "select atan(c1) as int from stb1"
+ f"select atan from {dbname}.t1",
+ # f"select atan(-+--+c1 ) from {dbname}.t1",
+ # f"select +-atan(c1) from {dbname}.t1",
+ # f"select ++-atan(c1) from {dbname}.t1",
+ # f"select ++--atan(c1) from {dbname}.t1",
+ # f"select - -atan(c1)*0 from {dbname}.t1",
+ # f"select atan(tbname+1) from {dbname}.t1 ",
+ f"select atan(123--123)==1 from {dbname}.t1",
+ f"select atan(c1) as 'd1' from {dbname}.t1",
+ f"select atan(c1 ,c2) from {dbname}.t1",
+ f"select atan(c1 ,NULL ) from {dbname}.t1",
+ f"select atan(,) from {dbname}.t1;",
+ f"select atan(atan(c1) ab from {dbname}.t1)",
+ f"select atan(c1 ) as int from {dbname}.t1",
+ f"select atan from {dbname}.stb1",
+ # f"select atan(-+--+c1) from {dbname}.stb1",
+ # f"select +-atan(c1) from {dbname}.stb1",
+ # f"select ++-atan(c1) from {dbname}.stb1",
+ # f"select ++--atan(c1) from {dbname}.stb1",
+ # f"select - -atan(c1)*0 from {dbname}.stb1",
+ # f"select atan(tbname+1) from {dbname}.stb1 ",
+ f"select atan(123--123)==1 from {dbname}.stb1",
+ f"select atan(c1) as 'd1' from {dbname}.stb1",
+ f"select atan(c1 ,c2 ) from {dbname}.stb1",
+ f"select atan(c1 ,NULL) from {dbname}.stb1",
+ f"select atan(,) from {dbname}.stb1;",
+ f"select atan(atan(c1) ab from {dbname}.stb1)",
+ f"select atan(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select atan(ts) from t1" ,
- "select atan(c7) from t1",
- "select atan(c8) from t1",
- "select atan(c9) from t1",
- "select atan(ts) from ct1" ,
- "select atan(c7) from ct1",
- "select atan(c8) from ct1",
- "select atan(c9) from ct1",
- "select atan(ts) from ct3" ,
- "select atan(c7) from ct3",
- "select atan(c8) from ct3",
- "select atan(c9) from ct3",
- "select atan(ts) from ct4" ,
- "select atan(c7) from ct4",
- "select atan(c8) from ct4",
- "select atan(c9) from ct4",
- "select atan(ts) from stb1" ,
- "select atan(c7) from stb1",
- "select atan(c8) from stb1",
- "select atan(c9) from stb1" ,
+ f"select atan(ts) from {dbname}.t1" ,
+ f"select atan(c7) from {dbname}.t1",
+ f"select atan(c8) from {dbname}.t1",
+ f"select atan(c9) from {dbname}.t1",
+ f"select atan(ts) from {dbname}.ct1" ,
+ f"select atan(c7) from {dbname}.ct1",
+ f"select atan(c8) from {dbname}.ct1",
+ f"select atan(c9) from {dbname}.ct1",
+ f"select atan(ts) from {dbname}.ct3" ,
+ f"select atan(c7) from {dbname}.ct3",
+ f"select atan(c8) from {dbname}.ct3",
+ f"select atan(c9) from {dbname}.ct3",
+ f"select atan(ts) from {dbname}.ct4" ,
+ f"select atan(c7) from {dbname}.ct4",
+ f"select atan(c8) from {dbname}.ct4",
+ f"select atan(c9) from {dbname}.ct4",
+ f"select atan(ts) from {dbname}.stb1" ,
+ f"select atan(c7) from {dbname}.stb1",
+ f"select atan(c8) from {dbname}.stb1",
+ f"select atan(c9) from {dbname}.stb1" ,
- "select atan(ts) from stbbb1" ,
- "select atan(c7) from stbbb1",
+ f"select atan(ts) from {dbname}.stbbb1" ,
+ f"select atan(c7) from {dbname}.stbbb1",
- "select atan(ts) from tbname",
- "select atan(c9) from tbname"
+ f"select atan(ts) from {dbname}.tbname",
+ f"select atan(c9) from {dbname}.tbname"
]
@@ -169,103 +169,103 @@ class TDTestCase:
type_sql_lists = [
- "select atan(c1) from t1",
- "select atan(c2) from t1",
- "select atan(c3) from t1",
- "select atan(c4) from t1",
- "select atan(c5) from t1",
- "select atan(c6) from t1",
+ f"select atan(c1) from {dbname}.t1",
+ f"select atan(c2) from {dbname}.t1",
+ f"select atan(c3) from {dbname}.t1",
+ f"select atan(c4) from {dbname}.t1",
+ f"select atan(c5) from {dbname}.t1",
+ f"select atan(c6) from {dbname}.t1",
- "select atan(c1) from ct1",
- "select atan(c2) from ct1",
- "select atan(c3) from ct1",
- "select atan(c4) from ct1",
- "select atan(c5) from ct1",
- "select atan(c6) from ct1",
+ f"select atan(c1) from {dbname}.ct1",
+ f"select atan(c2) from {dbname}.ct1",
+ f"select atan(c3) from {dbname}.ct1",
+ f"select atan(c4) from {dbname}.ct1",
+ f"select atan(c5) from {dbname}.ct1",
+ f"select atan(c6) from {dbname}.ct1",
- "select atan(c1) from ct3",
- "select atan(c2) from ct3",
- "select atan(c3) from ct3",
- "select atan(c4) from ct3",
- "select atan(c5) from ct3",
- "select atan(c6) from ct3",
+ f"select atan(c1) from {dbname}.ct3",
+ f"select atan(c2) from {dbname}.ct3",
+ f"select atan(c3) from {dbname}.ct3",
+ f"select atan(c4) from {dbname}.ct3",
+ f"select atan(c5) from {dbname}.ct3",
+ f"select atan(c6) from {dbname}.ct3",
- "select atan(c1) from stb1",
- "select atan(c2) from stb1",
- "select atan(c3) from stb1",
- "select atan(c4) from stb1",
- "select atan(c5) from stb1",
- "select atan(c6) from stb1",
+ f"select atan(c1) from {dbname}.stb1",
+ f"select atan(c2) from {dbname}.stb1",
+ f"select atan(c3) from {dbname}.stb1",
+ f"select atan(c4) from {dbname}.stb1",
+ f"select atan(c5) from {dbname}.stb1",
+ f"select atan(c6) from {dbname}.stb1",
- "select atan(c6) as alisb from stb1",
- "select atan(c6) alisb from stb1",
+ f"select atan(c6) as alisb from {dbname}.stb1",
+ f"select atan(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_atan_function(self):
+ def basic_atan_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select atan(c1) from ct3")
+ tdSql.query(f"select atan(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select atan(c2) from ct3")
+ tdSql.query(f"select atan(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select atan(c3) from ct3")
+ tdSql.query(f"select atan(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select atan(c4) from ct3")
+ tdSql.query(f"select atan(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select atan(c5) from ct3")
+ tdSql.query(f"select atan(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select atan(c6) from ct3")
+ tdSql.query(f"select atan(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select atan(c1) from t1")
+ tdSql.query(f"select atan(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.785398163)
tdSql.checkData(3 , 0, 1.249045772)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_atan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from t1")
+ self.check_result_auto_atan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,atan(c2) from ct1")
+ tdSql.query(f"select c2 ,atan(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.570785077)
tdSql.checkData(1 , 1, 1.570783470)
tdSql.checkData(3 , 1, 1.570778327)
- tdSql.checkData(4 , 1, 0.000000000)
+ tdSql.checkData(4 , 1, 0)
- tdSql.query("select c1, c5 ,atan(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,atan(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 1.458656162)
tdSql.checkData(2 , 2, 1.442799803)
tdSql.checkData(3 , 2, 1.421759533)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_atan( "select c1, c2, c3 , c4, c5 from ct1", "select atan(c1), atan(c2) ,atan(c3), atan(c4), atan(c5) from ct1")
+ self.check_result_auto_atan( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select atan(c1), atan(c2) ,atan(c3), atan(c4), atan(c5) from {dbname}.ct1")
# nest query for atan functions
- tdSql.query("select c4 , atan(c4) ,atan(atan(c4)) , atan(atan(atan(c4))) from ct1;")
+ tdSql.query(f"select c4 , atan(c4) ,atan(atan(c4)) , atan(atan(atan(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 1.559433180)
tdSql.checkData(0 , 2 , 1.000590740)
@@ -283,22 +283,22 @@ class TDTestCase:
# used for stable table
- tdSql.query("select atan(c1) from stb1")
+ tdSql.query(f"select atan(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select atan(c1) from stbbb1")
- tdSql.error("select atan(c1) from tbname")
- tdSql.error("select atan(c1) from ct5")
+ tdSql.error(f"select atan(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select atan(c1) from {dbname}.tbname")
+ tdSql.error(f"select atan(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, atan(c1) from ct1")
- tdSql.query("select c2, atan(c2) from ct4")
+ tdSql.query(f"select c1, atan(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, atan(c2) from {dbname}.ct4")
# mix with common functions
- tdSql.query("select c1, atan(c1),atan(c1), atan(atan(c1)) from ct4 ")
+ tdSql.query(f"select c1, atan(c1),atan(c1), atan(atan(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -309,24 +309,24 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,1.405647649)
tdSql.checkData(3 , 3 ,0.952449745)
- tdSql.query("select c1, atan(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, atan(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, atan(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, atan(c1),c5, count(c5) from ct1 ")
- tdSql.error("select atan(c1), count(c5) from stb1 ")
- tdSql.error("select atan(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, atan(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, atan(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select atan(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select atan(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# # bug fix for compute
- tdSql.query("select c1, atan(c1) -0 ,atan(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, atan(c1) -0 ,atan(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +334,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 1.446441332)
tdSql.checkData(1, 2, 1.325817664)
- tdSql.query(" select c1, atan(c1) -0 ,atan(c1-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1, atan(c1) -0 ,atan(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +342,35 @@ class TDTestCase:
tdSql.checkData(1, 1, 1.446441332)
tdSql.checkData(1, 2, 1.344883701)
- tdSql.query("select c1, atan(c1), c2, atan(c2), c3, atan(c3) from ct1")
+ tdSql.query(f"select c1, atan(c1), c2, atan(c2), c3, atan(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, atan(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, atan(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.atan(100000000))
- tdSql.query("select c1, atan(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, atan(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.atan(10000000000000))
- tdSql.query("select c1, atan(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, atan(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, atan(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, atan(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.atan(10000000000000000000000000.0))
- tdSql.query("select c1, atan(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, atan(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, atan(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, atan(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.atan(10000000000000000000000000000000000.0))
- tdSql.query("select c1, atan(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, atan(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, atan(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, atan(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.atan(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, atan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, atan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(atan(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.execute(f"use {dbname}")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(atan(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +378,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,1.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(atan(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(atan(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +386,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,1.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(atan(c1)-0.5) from ct4 where c1=atan(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(atan(c1)-0.5) from {dbname}.ct4 where c1=atan(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0)
@@ -398,41 +398,41 @@ class TDTestCase:
def pow_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_atan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from sub1_bound")
+ self.check_result_auto_atan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from {dbname}.sub1_bound")
- self.check_result_auto_atan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select atan(c1), atan(c2) ,atan(c3), atan(c3), atan(c2) ,atan(c1) from sub1_bound")
+ self.check_result_auto_atan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select atan(c1), atan(c2) ,atan(c3), atan(c3), atan(c2) ,atan(c1) from {dbname}.sub1_bound")
- self.check_result_auto_atan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select atan(abs(c1)) from sub1_bound" )
+ self.check_result_auto_atan(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select atan(abs(c1)) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select atan(abs(c1)) ,atan(abs(c2)) , atan(abs(c3)) , atan(abs(c4)), atan(abs(c5)), atan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select atan(abs(c1)) ,atan(abs(c2)) , atan(abs(c3)) , atan(abs(c4)), atan(abs(c5)), atan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.atan(2147483647))
tdSql.checkData(0,1,math.atan(9223372036854775807))
tdSql.checkData(0,2,math.atan(32767))
@@ -450,47 +450,47 @@ class TDTestCase:
tdSql.checkData(3,4,math.atan(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select atan(abs(c1+1)) ,atan(abs(c2)) , atan(abs(c3*1)) , atan(abs(c4/2)), atan(abs(c5))/2, atan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select atan(abs(c1+1)) ,atan(abs(c2)) , atan(abs(c3*1)) , atan(abs(c4/2)), atan(abs(c5))/2, atan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.atan(2147483648.000000000))
tdSql.checkData(0,1,math.atan(9223372036854775807))
tdSql.checkData(0,2,math.atan(32767.000000000))
tdSql.checkData(0,3,math.atan(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
+ tdSql.execute(f'insert into {dbname}.tb3 values (now()+{i}s, {PI*(5+i)/2}, {PI*(5+i)/2})')
- self.check_result_auto_atan("select num1,num2 from tb3;" , "select atan(num1),atan(num2) from tb3")
+ self.check_result_auto_atan(f"select num1,num2 from {dbname}.tb3;" , f"select atan(num1),atan(num2) from {dbname}.tb3")
+ def support_super_table_test(self, dbname="db"):
+ tdSql.execute(f" use {dbname} ")
+ self.check_result_auto_atan( f" select c5 from {dbname}.stb1 order by ts " , f"select atan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_atan( f" select c5 from {dbname}.stb1 order by tbname " , f"select atan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_atan( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select atan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_atan( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select atan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_atan( " select c5 from stb1 order by ts " , "select atan(c5) from stb1 order by ts" )
- self.check_result_auto_atan( " select c5 from stb1 order by tbname " , "select atan(c5) from stb1 order by tbname" )
- self.check_result_auto_atan( " select c5 from stb1 where c1 > 0 order by tbname " , "select atan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_atan( " select c5 from stb1 where c1 > 0 order by tbname " , "select atan(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_atan( " select t1,c5 from stb1 order by ts " , "select atan(t1), atan(c5) from stb1 order by ts" )
- self.check_result_auto_atan( " select t1,c5 from stb1 order by tbname " , "select atan(t1) ,atan(c5) from stb1 order by tbname" )
- self.check_result_auto_atan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select atan(t1) ,atan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_atan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select atan(t1) , atan(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_atan( f" select t1,c5 from {dbname}.stb1 order by ts " , f"select atan(t1), atan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_atan( f" select t1,c5 from {dbname}.stb1 order by tbname " , f"select atan(t1) ,atan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_atan( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select atan(t1) ,atan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_atan( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select atan(t1) , atan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
@@ -523,12 +523,11 @@ class TDTestCase:
self.abs_func_filter()
- tdLog.printNoPrefix("==========step8: check arctan result of stable query ============")
+ tdLog.printNoPrefix("==========step8: check atan result of stable query ============")
self.support_super_table_test()
-
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py
index 607968936d..ea7c3329ea 100644
--- a/tests/system-test/2-query/avg.py
+++ b/tests/system-test/2-query/avg.py
@@ -8,48 +8,48 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -80,68 +80,68 @@ class TDTestCase:
else:
tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select avg from t1",
- # "select avg(-+--+c1) from t1",
- # "select +-avg(c1) from t1",
- # "select ++-avg(c1) from t1",
- # "select ++--avg(c1) from t1",
- # "select - -avg(c1)*0 from t1",
- # "select avg(tbname+1) from t1 ",
- "select avg(123--123)==1 from t1",
- "select avg(c1) as 'd1' from t1",
- "select avg(c1 ,c2 ) from t1",
- "select avg(c1 ,NULL) from t1",
- "select avg(,) from t1;",
- "select avg(avg(c1) ab from t1)",
- "select avg(c1) as int from t1",
- "select avg from stb1",
- # "select avg(-+--+c1) from stb1",
- # "select +-avg(c1) from stb1",
- # "select ++-avg(c1) from stb1",
- # "select ++--avg(c1) from stb1",
- # "select - -avg(c1)*0 from stb1",
- # "select avg(tbname+1) from stb1 ",
- "select avg(123--123)==1 from stb1",
- "select avg(c1) as 'd1' from stb1",
- "select avg(c1 ,c2 ) from stb1",
- "select avg(c1 ,NULL) from stb1",
- "select avg(,) from stb1;",
- "select avg(avg(c1) ab from stb1)",
- "select avg(c1) as int from stb1"
+ f"select avg from {dbname}.t1",
+ # f"select avg(-+--+c1) from {dbname}.t1",
+ # f"select +-avg(c1) from {dbname}.t1",
+ # f"select ++-avg(c1) from {dbname}.t1",
+ # f"select ++--avg(c1) from {dbname}.t1",
+ # f"select - -avg(c1)*0 from {dbname}.t1",
+ # f"select avg(tbname+1) from {dbname}.t1 ",
+ f"select avg(123--123)==1 from {dbname}.t1",
+ f"select avg(c1) as 'd1' from {dbname}.t1",
+ f"select avg(c1 ,c2 ) from {dbname}.t1",
+ f"select avg(c1 ,NULL) from {dbname}.t1",
+ f"select avg(,) from {dbname}.t1;",
+ f"select avg(avg(c1) ab from {dbname}.t1)",
+ f"select avg(c1) as int from {dbname}.t1",
+ f"select avg from {dbname}.stb1",
+ # f"select avg(-+--+c1) from {dbname}.stb1",
+ # f"select +-avg(c1) from {dbname}.stb1",
+ # f"select ++-avg(c1) from {dbname}.stb1",
+ # f"select ++--avg(c1) from {dbname}.stb1",
+ # f"select - -avg(c1)*0 from {dbname}.stb1",
+ # f"select avg(tbname+1) from {dbname}.stb1 ",
+ f"select avg(123--123)==1 from {dbname}.stb1",
+ f"select avg(c1) as 'd1' from {dbname}.stb1",
+ f"select avg(c1 ,c2 ) from {dbname}.stb1",
+ f"select avg(c1 ,NULL) from {dbname}.stb1",
+ f"select avg(,) from {dbname}.stb1;",
+ f"select avg(avg(c1) ab from {dbname}.stb1)",
+ f"select avg(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select avg(ts) from t1" ,
- "select avg(c7) from t1",
- "select avg(c8) from t1",
- "select avg(c9) from t1",
- "select avg(ts) from ct1" ,
- "select avg(c7) from ct1",
- "select avg(c8) from ct1",
- "select avg(c9) from ct1",
- "select avg(ts) from ct3" ,
- "select avg(c7) from ct3",
- "select avg(c8) from ct3",
- "select avg(c9) from ct3",
- "select avg(ts) from ct4" ,
- "select avg(c7) from ct4",
- "select avg(c8) from ct4",
- "select avg(c9) from ct4",
- "select avg(ts) from stb1" ,
- "select avg(c7) from stb1",
- "select avg(c8) from stb1",
- "select avg(c9) from stb1" ,
+ f"select avg(ts) from {dbname}.t1" ,
+ f"select avg(c7) from {dbname}.t1",
+ f"select avg(c8) from {dbname}.t1",
+ f"select avg(c9) from {dbname}.t1",
+ f"select avg(ts) from {dbname}.ct1" ,
+ f"select avg(c7) from {dbname}.ct1",
+ f"select avg(c8) from {dbname}.ct1",
+ f"select avg(c9) from {dbname}.ct1",
+ f"select avg(ts) from {dbname}.ct3" ,
+ f"select avg(c7) from {dbname}.ct3",
+ f"select avg(c8) from {dbname}.ct3",
+ f"select avg(c9) from {dbname}.ct3",
+ f"select avg(ts) from {dbname}.ct4" ,
+ f"select avg(c7) from {dbname}.ct4",
+ f"select avg(c8) from {dbname}.ct4",
+ f"select avg(c9) from {dbname}.ct4",
+ f"select avg(ts) from {dbname}.stb1" ,
+ f"select avg(c7) from {dbname}.stb1",
+ f"select avg(c8) from {dbname}.stb1",
+ f"select avg(c9) from {dbname}.stb1" ,
- "select avg(ts) from stbbb1" ,
- "select avg(c7) from stbbb1",
+ f"select avg(ts) from {dbname}.stbbb1" ,
+ f"select avg(c7) from {dbname}.stbbb1",
- "select avg(ts) from tbname",
- "select avg(c9) from tbname"
+ f"select avg(ts) from {dbname}.tbname",
+ f"select avg(c9) from {dbname}.tbname"
]
@@ -150,157 +150,157 @@ class TDTestCase:
type_sql_lists = [
- "select avg(c1) from t1",
- "select avg(c2) from t1",
- "select avg(c3) from t1",
- "select avg(c4) from t1",
- "select avg(c5) from t1",
- "select avg(c6) from t1",
+ f"select avg(c1) from {dbname}.t1",
+ f"select avg(c2) from {dbname}.t1",
+ f"select avg(c3) from {dbname}.t1",
+ f"select avg(c4) from {dbname}.t1",
+ f"select avg(c5) from {dbname}.t1",
+ f"select avg(c6) from {dbname}.t1",
- "select avg(c1) from ct1",
- "select avg(c2) from ct1",
- "select avg(c3) from ct1",
- "select avg(c4) from ct1",
- "select avg(c5) from ct1",
- "select avg(c6) from ct1",
+ f"select avg(c1) from {dbname}.ct1",
+ f"select avg(c2) from {dbname}.ct1",
+ f"select avg(c3) from {dbname}.ct1",
+ f"select avg(c4) from {dbname}.ct1",
+ f"select avg(c5) from {dbname}.ct1",
+ f"select avg(c6) from {dbname}.ct1",
- "select avg(c1) from ct3",
- "select avg(c2) from ct3",
- "select avg(c3) from ct3",
- "select avg(c4) from ct3",
- "select avg(c5) from ct3",
- "select avg(c6) from ct3",
+ f"select avg(c1) from {dbname}.ct3",
+ f"select avg(c2) from {dbname}.ct3",
+ f"select avg(c3) from {dbname}.ct3",
+ f"select avg(c4) from {dbname}.ct3",
+ f"select avg(c5) from {dbname}.ct3",
+ f"select avg(c6) from {dbname}.ct3",
- "select avg(c1) from stb1",
- "select avg(c2) from stb1",
- "select avg(c3) from stb1",
- "select avg(c4) from stb1",
- "select avg(c5) from stb1",
- "select avg(c6) from stb1",
+ f"select avg(c1) from {dbname}.stb1",
+ f"select avg(c2) from {dbname}.stb1",
+ f"select avg(c3) from {dbname}.stb1",
+ f"select avg(c4) from {dbname}.stb1",
+ f"select avg(c5) from {dbname}.stb1",
+ f"select avg(c6) from {dbname}.stb1",
- "select avg(c6) as alisb from stb1",
- "select avg(c6) alisb from stb1",
+ f"select avg(c6) as alisb from {dbname}.stb1",
+ f"select avg(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_avg_function(self):
+ def basic_avg_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select avg(c1) from ct3")
+ tdSql.query(f"select avg(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select avg(c2) from ct3")
+ tdSql.query(f"select avg(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select avg(c3) from ct3")
+ tdSql.query(f"select avg(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select avg(c4) from ct3")
+ tdSql.query(f"select avg(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select avg(c5) from ct3")
+ tdSql.query(f"select avg(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select avg(c6) from ct3")
+ tdSql.query(f"select avg(c6) from {dbname}.ct3")
# used for regular table
- tdSql.query("select avg(c1) from t1")
+ tdSql.query(f"select avg(c1) from {dbname}.t1")
tdSql.checkData(0, 0, 5.000000000)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ")
+ self.check_avg(f" select avg(c1) , avg(c2) , avg(c3) from {dbname}.t1 " , f" select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from {dbname}.t1 ")
# used for sub table
- tdSql.query("select avg(c1) from ct1")
+ tdSql.query(f"select avg(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 4.846153846)
- tdSql.query("select avg(c1) from ct3")
+ tdSql.query(f"select avg(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ")
- self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ")
+ self.check_avg(f" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from {dbname}.t1 " , f" select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from {dbname}.t1 ")
+ self.check_avg(f" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from {dbname}.stb1 " , f" select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from {dbname}.stb1 ")
# used for stable table
- tdSql.query("select avg(c1) from stb1")
+ tdSql.query(f"select avg(c1) from {dbname}.stb1")
tdSql.checkRows(1)
- self.check_avg(" select avg(abs(ceil(c1))) , avg(abs(ceil(c2))) , avg(abs(ceil(c3))) from stb1 " , " select sum(abs(ceil(c1)))/count(c1) , sum(abs(ceil(c2)))/count(c2) , sum(abs(ceil(c3)))/count(c3) from stb1 ")
+ self.check_avg(f" select avg(abs(ceil(c1))) , avg(abs(ceil(c2))) , avg(abs(ceil(c3))) from {dbname}.stb1 " , f" select sum(abs(ceil(c1)))/count(c1) , sum(abs(ceil(c2)))/count(c2) , sum(abs(ceil(c3)))/count(c3) from {dbname}.stb1 ")
# used for not exists table
- tdSql.error("select avg(c1) from stbbb1")
- tdSql.error("select avg(c1) from tbname")
- tdSql.error("select avg(c1) from ct5")
+ tdSql.error(f"select avg(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select avg(c1) from {dbname}.tbname")
+ tdSql.error(f"select avg(c1) from {dbname}.ct5")
# mix with common col
- tdSql.error("select c1, avg(c1) from ct1")
- tdSql.error("select c1, avg(c1) from ct4")
+ tdSql.error(f"select c1, avg(c1) from {dbname}.ct1")
+ tdSql.error(f"select c1, avg(c1) from {dbname}.ct4")
# mix with common functions
- tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ")
- tdSql.error("select c1, avg(c1),c5, floor(c5) from stb1 ")
+ tdSql.error(f"select c1, avg(c1),c5, floor(c5) from {dbname}.ct4 ")
+ tdSql.error(f"select c1, avg(c1),c5, floor(c5) from {dbname}.stb1 ")
# mix with agg functions , not support
- tdSql.error("select c1, avg(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, avg(c1),c5, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, avg(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, avg(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query(" select max(c5), count(c5) , avg(c5) from stb1 ")
+ tdSql.query(f" select max(c5), count(c5) , avg(c5) from {dbname}.stb1 ")
tdSql.checkData(0, 0, 8.88000 )
tdSql.checkData(0, 1, 22 )
tdSql.checkData(0, 2, 2.270454591 )
- tdSql.query(" select max(c5), count(c5) , avg(c5) ,elapsed(ts) , spread(c1) from ct1; ")
+ tdSql.query(f" select max(c5), count(c5) , avg(c5) ,elapsed(ts) , spread(c1) from {dbname}.ct1; ")
tdSql.checkData(0, 0, 8.88000 )
tdSql.checkData(0, 1, 13 )
tdSql.checkData(0, 2, 0.768461603 )
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
- tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ")
- tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4")
+ tdSql.error(f"select c1, avg(c1) -0 ,ceil(c1)-0 from {dbname}.ct4 ")
+ tdSql.error(f" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from {dbname}.ct4")
# mix with nest query
- self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1")
- self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1")
+ self.check_avg(f"select avg(col) from (select abs(c1) col from {dbname}.stb1)" , f"select avg(abs(c1)) from {dbname}.stb1")
+ self.check_avg(f"select avg(col) from (select ceil(abs(c1)) col from {dbname}.stb1)" , f"select avg(abs(c1)) from {dbname}.stb1")
- tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ")
+ tdSql.query(f" select abs(avg(abs(abs(c1)))) from {dbname}.stb1 ")
tdSql.checkData(0, 0, 4.500000000)
- tdSql.query(" select abs(avg(abs(abs(c1)))) from t1 ")
+ tdSql.query(f" select abs(avg(abs(abs(c1)))) from {dbname}.t1 ")
tdSql.checkData(0, 0, 5.000000000)
- tdSql.query(" select abs(avg(abs(abs(c1)))) from stb1 ")
+ tdSql.query(f" select abs(avg(abs(abs(c1)))) from {dbname}.stb1 ")
tdSql.checkData(0, 0, 4.500000000)
- tdSql.query(" select avg(c1) from stb1 where c1 is null ")
+ tdSql.query(f" select avg(c1) from {dbname}.stb1 where c1 is null ")
tdSql.checkRows(0)
- def avg_func_filter(self):
- tdSql.execute("use db")
- tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ")
+ def avg_func_filter(self, dbname="db"):
+ tdSql.execute(f"use {dbname}")
+ tdSql.query(f" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,7.000000000)
tdSql.checkData(0,1,7.000000000)
@@ -308,7 +308,7 @@ class TDTestCase:
tdSql.checkData(0,3,6.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1=5 ")
+ tdSql.query(f"select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5.000000000)
tdSql.checkData(0,1,5.000000000)
@@ -316,59 +316,56 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select avg(c1) ,avg(c2) , avg(c1) -0 , avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2))-0.5) from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select avg(c1) ,avg(c2) , avg(c1) -0 , avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2))-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 4.500000000)
tdSql.checkData(0, 1, 49999.500000000)
tdSql.checkData(0, 5, 1.625000000)
- def avg_Arithmetic(self):
- pass
+ def check_boundary_values(self, dbname="bound_test"):
- def check_boundary_values(self):
-
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ")
+ self.check_avg(f"select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from {dbname}.sub1_bound " , f" select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from {dbname}.sub1_bound ")
# check basic elem for table per row
- tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ")
+ tdSql.query(f"select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from {dbname}.sub1_bound ")
tdSql.checkRows(1)
tdSql.checkData(0,0,920350133.571428537)
tdSql.checkData(0,1,1.3176245766935393e+18)
@@ -379,7 +376,7 @@ class TDTestCase:
# check + - * / in functions
- tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ")
+ tdSql.query(f" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,920350134.5714285)
tdSql.checkData(0,1,1.3176245766935393e+18)
tdSql.checkData(0,2,14042.142857143)
diff --git a/tests/system-test/2-query/between.py b/tests/system-test/2-query/between.py
index 7e2ac1c8b9..a9dde5617d 100644
--- a/tests/system-test/2-query/between.py
+++ b/tests/system-test/2-query/between.py
@@ -13,190 +13,195 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def run(self): # sourcery skip: extract-duplicate-method
+ def run(self):
+ dbname = "db"
+ stb = f"{dbname}.stb1"
+ rows = 10
+
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
- '''create table if not exists supt
+ f'''create table if not exists {stb}
(ts timestamp, c1 int, c2 float, c3 bigint, c4 double, c5 smallint, c6 tinyint)
tags(location binary(64), type int, isused bool , family nchar(64))'''
)
- tdSql.execute("create table t1 using supt tags('beijing', 1, 1, 'nchar1')")
- tdSql.execute("create table t2 using supt tags('shanghai', 2, 0, 'nchar2')")
+ tdSql.execute(f"create table {dbname}.t1 using {stb} tags('beijing', 1, 1, 'nchar1')")
+ tdSql.execute(f"create table {dbname}.t2 using {stb} tags('shanghai', 2, 0, 'nchar2')")
tdLog.printNoPrefix("==========step2:insert data")
- for i in range(10):
+ for i in range(rows):
tdSql.execute(
- f"insert into t1 values (now()+{i}m, {32767+i}, {20.0+i/10}, {2**31+i}, {3.4*10**38+i/10}, {127+i}, {i})"
+ f"insert into {dbname}.t1 values (now()+{i}m, {32767+i}, {20.0+i/10}, {2**31+i}, {3.4*10**38+i/10}, {127+i}, {i})"
)
tdSql.execute(
- f"insert into t2 values (now()-{i}m, {-32767-i}, {20.0-i/10}, {-i-2**31}, {-i/10-3.4*10**38}, {-127-i}, {-i})"
+ f"insert into {dbname}.t2 values (now()-{i}m, {-32767-i}, {20.0-i/10}, {-i-2**31}, {-i/10-3.4*10**38}, {-127-i}, {-i})"
)
tdSql.execute(
- f"insert into t1 values (now()+11m, {2**31-1}, {pow(10,37)*34}, {pow(2,63)-1}, {1.7*10**308}, 32767, 127)"
+ f"insert into {dbname}.t1 values (now()+11m, {2**31-1}, {pow(10,37)*34}, {pow(2,63)-1}, {1.7*10**308}, 32767, 127)"
)
tdSql.execute(
- f"insert into t2 values (now()-11m, {1-2**31}, {-3.4*10**38}, {1-2**63}, {-1.7*10**308}, -32767, -127)"
+ f"insert into {dbname}.t2 values (now()-11m, {1-2**31}, {-3.4*10**38}, {1-2**63}, {-1.7*10**308}, -32767, -127)"
)
tdSql.execute(
- f"insert into t2 values (now()-12m, null , {-3.4*10**38}, null , {-1.7*10**308}, null , null)"
+ f"insert into {dbname}.t2 values (now()-12m, null , {-3.4*10**38}, null , {-1.7*10**308}, null , null)"
)
tdLog.printNoPrefix("==========step3:query timestamp type")
- tdSql.query("select * from t1 where ts between now()-1m and now()+10m")
- tdSql.checkRows(10)
- tdSql.query("select * from t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
+ tdSql.query(f"select * from {dbname}.t1 where ts between now()-1m and now()+10m")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
# tdSql.checkRows(11)
- tdSql.query("select * from t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
+ tdSql.query(f"select * from {dbname}.t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
# tdSql.checkRows(0)
- tdSql.query("select * from t1 where ts between -2793600 and 31507199")
+ tdSql.query(f"select * from {dbname}.t1 where ts between -2793600 and 31507199")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where ts between 1609430400000 and 4765104000000")
- tdSql.checkRows(11)
+ tdSql.query(f"select * from {dbname}.t1 where ts between 1609430400000 and 4765104000000")
+ tdSql.checkRows(rows+1)
tdLog.printNoPrefix("==========step4:query int type")
- tdSql.query("select * from t1 where c1 between 32767 and 32776")
- tdSql.checkRows(10)
- tdSql.query("select * from t1 where c1 between 32766.9 and 32776.1")
- tdSql.checkRows(10)
- tdSql.query("select * from t1 where c1 between 32776 and 32767")
+ tdSql.query(f"select * from {dbname}.t1 where c1 between 32767 and 32776")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where c1 between 32766.9 and 32776.1")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where c1 between 32776 and 32767")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c1 between 'a' and 'e'")
+ tdSql.query(f"select * from {dbname}.t1 where c1 between 'a' and 'e'")
tdSql.checkRows(0)
- # tdSql.query("select * from t1 where c1 between 0x64 and 0x69")
+        # tdSql.query(f"select * from {dbname}.t1 where c1 between 0x64 and 0x69")
# tdSql.checkRows(6)
- tdSql.query("select * from t1 where c1 not between 100 and 106")
- tdSql.checkRows(11)
- tdSql.query(f"select * from t1 where c1 between {2**31-2} and {2**31+1}")
+ tdSql.query(f"select * from {dbname}.t1 where c1 not between 100 and 106")
+ tdSql.checkRows(rows+1)
+ tdSql.query(f"select * from {dbname}.t1 where c1 between {2**31-2} and {2**31+1}")
tdSql.checkRows(1)
- tdSql.query(f"select * from t2 where c1 between null and {1-2**31}")
+ tdSql.query(f"select * from {dbname}.t2 where c1 between null and {1-2**31}")
# tdSql.checkRows(3)
- tdSql.query(f"select * from t2 where c1 between {-2**31} and {1-2**31}")
+ tdSql.query(f"select * from {dbname}.t2 where c1 between {-2**31} and {1-2**31}")
tdSql.checkRows(1)
tdLog.printNoPrefix("==========step5:query float type")
- tdSql.query("select * from t1 where c2 between 20.0 and 21.0")
+ tdSql.query(f"select * from {dbname}.t1 where c2 between 20.0 and 21.0")
tdSql.checkRows(10)
- tdSql.query(f"select * from t1 where c2 between {-3.4*10**38-1} and {3.4*10**38+1}")
- tdSql.checkRows(11)
- tdSql.query("select * from t1 where c2 between 21.0 and 20.0")
+ tdSql.query(f"select * from {dbname}.t1 where c2 between {-3.4*10**38-1} and {3.4*10**38+1}")
+ tdSql.checkRows(rows+1)
+ tdSql.query(f"select * from {dbname}.t1 where c2 between 21.0 and 20.0")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c2 between 'DC3' and 'SYN'")
+ tdSql.query(f"select * from {dbname}.t1 where c2 between 'DC3' and 'SYN'")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c2 not between 0.1 and 0.2")
- tdSql.checkRows(11)
- tdSql.query(f"select * from t1 where c2 between {pow(10,38)*3.4} and {pow(10,38)*3.4+1}")
+ tdSql.query(f"select * from {dbname}.t1 where c2 not between 0.1 and 0.2")
+ tdSql.checkRows(rows+1)
+ tdSql.query(f"select * from {dbname}.t1 where c2 between {pow(10,38)*3.4} and {pow(10,38)*3.4+1}")
# tdSql.checkRows(1)
- tdSql.query(f"select * from t2 where c2 between {-3.4*10**38-1} and {-3.4*10**38}")
+ tdSql.query(f"select * from {dbname}.t2 where c2 between {-3.4*10**38-1} and {-3.4*10**38}")
# tdSql.checkRows(2)
- tdSql.query(f"select * from t2 where c2 between null and {-3.4*10**38}")
+ tdSql.query(f"select * from {dbname}.t2 where c2 between null and {-3.4*10**38}")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step6:query bigint type")
- tdSql.query(f"select * from t1 where c3 between {2**31} and {2**31+10}")
- tdSql.checkRows(10)
- tdSql.query(f"select * from t1 where c3 between {-2**63} and {2**63}")
- tdSql.checkRows(11)
- tdSql.query(f"select * from t1 where c3 between {2**31+10} and {2**31}")
+ tdSql.query(f"select * from {dbname}.t1 where c3 between {2**31} and {2**31+10}")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where c3 between {-2**63} and {2**63}")
+ tdSql.checkRows(rows+1)
+ tdSql.query(f"select * from {dbname}.t1 where c3 between {2**31+10} and {2**31}")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c3 between 'a' and 'z'")
+ tdSql.query(f"select * from {dbname}.t1 where c3 between 'a' and 'z'")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c3 not between 1 and 2")
+ tdSql.query(f"select * from {dbname}.t1 where c3 not between 1 and 2")
# tdSql.checkRows(0)
- tdSql.query(f"select * from t1 where c3 between {2**63-2} and {2**63-1}")
+ tdSql.query(f"select * from {dbname}.t1 where c3 between {2**63-2} and {2**63-1}")
tdSql.checkRows(1)
- tdSql.query(f"select * from t2 where c3 between {-2**63} and {1-2**63}")
+ tdSql.query(f"select * from {dbname}.t2 where c3 between {-2**63} and {1-2**63}")
# tdSql.checkRows(3)
- tdSql.query(f"select * from t2 where c3 between null and {1-2**63}")
+ tdSql.query(f"select * from {dbname}.t2 where c3 between null and {1-2**63}")
# tdSql.checkRows(2)
tdLog.printNoPrefix("==========step7:query double type")
- tdSql.query(f"select * from t1 where c4 between {3.4*10**38} and {3.4*10**38+10}")
- tdSql.checkRows(10)
- tdSql.query(f"select * from t1 where c4 between {1.7*10**308+1} and {1.7*10**308+2}")
+ tdSql.query(f"select * from {dbname}.t1 where c4 between {3.4*10**38} and {3.4*10**38+10}")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where c4 between {1.7*10**308+1} and {1.7*10**308+2}")
# 因为精度原因,在超出bigint边界后,数值不能进行准确的判断
# tdSql.checkRows(0)
- tdSql.query(f"select * from t1 where c4 between {3.4*10**38+10} and {3.4*10**38}")
+ tdSql.query(f"select * from {dbname}.t1 where c4 between {3.4*10**38+10} and {3.4*10**38}")
# tdSql.checkRows(0)
- tdSql.query("select * from t1 where c4 between 'a' and 'z'")
+ tdSql.query(f"select * from {dbname}.t1 where c4 between 'a' and 'z'")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c4 not between 1 and 2")
+ tdSql.query(f"select * from {dbname}.t1 where c4 not between 1 and 2")
# tdSql.checkRows(0)
- tdSql.query(f"select * from t1 where c4 between {1.7*10**308} and {1.7*10**308+1}")
+ tdSql.query(f"select * from {dbname}.t1 where c4 between {1.7*10**308} and {1.7*10**308+1}")
tdSql.checkRows(1)
- tdSql.query(f"select * from t2 where c4 between {-1.7*10**308-1} and {-1.7*10**308}")
+ tdSql.query(f"select * from {dbname}.t2 where c4 between {-1.7*10**308-1} and {-1.7*10**308}")
# tdSql.checkRows(3)
- tdSql.query(f"select * from t2 where c4 between null and {-1.7*10**308}")
+ tdSql.query(f"select * from {dbname}.t2 where c4 between null and {-1.7*10**308}")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step8:query smallint type")
- tdSql.query("select * from t1 where c5 between 127 and 136")
- tdSql.checkRows(10)
- tdSql.query("select * from t1 where c5 between 126.9 and 135.9")
- tdSql.checkRows(9)
- tdSql.query("select * from t1 where c5 between 136 and 127")
+ tdSql.query(f"select * from {dbname}.t1 where c5 between 127 and 136")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where c5 between 126.9 and 135.9")
+ tdSql.checkRows(rows-1)
+ tdSql.query(f"select * from {dbname}.t1 where c5 between 136 and 127")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c5 between '~' and '^'")
+ tdSql.query(f"select * from {dbname}.t1 where c5 between '~' and '^'")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c5 not between 1 and 2")
+ tdSql.query(f"select * from {dbname}.t1 where c5 not between 1 and 2")
# tdSql.checkRows(0)
- tdSql.query("select * from t1 where c5 between 32767 and 32768")
+ tdSql.query(f"select * from {dbname}.t1 where c5 between 32767 and 32768")
tdSql.checkRows(1)
- tdSql.query("select * from t2 where c5 between -32768 and -32767")
+ tdSql.query(f"select * from {dbname}.t2 where c5 between -32768 and -32767")
tdSql.checkRows(1)
- tdSql.query("select * from t2 where c5 between null and -32767")
+ tdSql.query(f"select * from {dbname}.t2 where c5 between null and -32767")
# tdSql.checkRows(1)
tdLog.printNoPrefix("==========step9:query tinyint type")
- tdSql.query("select * from t1 where c6 between 0 and 9")
- tdSql.checkRows(10)
- tdSql.query("select * from t1 where c6 between -1.1 and 8.9")
- tdSql.checkRows(9)
- tdSql.query("select * from t1 where c6 between 9 and 0")
+ tdSql.query(f"select * from {dbname}.t1 where c6 between 0 and 9")
+ tdSql.checkRows(rows)
+ tdSql.query(f"select * from {dbname}.t1 where c6 between -1.1 and 8.9")
+ tdSql.checkRows(rows-1)
+ tdSql.query(f"select * from {dbname}.t1 where c6 between 9 and 0")
tdSql.checkRows(0)
- tdSql.query("select * from t1 where c6 between 'NUL' and 'HT'")
+ tdSql.query(f"select * from {dbname}.t1 where c6 between 'NUL' and 'HT'")
tdSql.checkRows(1)
- tdSql.query("select * from t1 where c6 not between 1 and 2")
+ tdSql.query(f"select * from {dbname}.t1 where c6 not between 1 and 2")
# tdSql.checkRows(1)
- tdSql.query("select * from t1 where c6 between 127 and 128")
+ tdSql.query(f"select * from {dbname}.t1 where c6 between 127 and 128")
tdSql.checkRows(1)
- tdSql.query("select * from t2 where c6 between -128 and -127")
+ tdSql.query(f"select * from {dbname}.t2 where c6 between -128 and -127")
tdSql.checkRows(1)
- tdSql.query("select * from t2 where c6 between null and -127")
+ tdSql.query(f"select * from {dbname}.t2 where c6 between null and -127")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step10:invalid query type")
# TODO tag is not finished
- # tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
- # tdSql.checkRows(23)
- # # 非0值均解析为1,因此"between 负值 and o"解析为"between 1 and 0"
- # tdSql.query("select * from supt where isused between 0 and 1")
- # tdSql.checkRows(23)
- # tdSql.query("select * from supt where isused between -1 and 0")
- # tdSql.checkRows(0)
- # tdSql.error("select * from supt where isused between false and true")
- # tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
- # tdSql.checkRows(23)
+ tdSql.query(f"select * from {stb} where location between 'beijing' and 'shanghai'")
+ tdSql.checkRows(rows * 2 + 3)
+        # 非0值均解析为1,因此"between 负值 and 0"解析为"between 1 and 0"
+ tdSql.query(f"select * from {stb} where isused between 0 and 1")
+ tdSql.checkRows(rows * 2 + 3)
+ tdSql.query(f"select * from {stb} where isused between -1 and 0")
+ tdSql.checkRows(rows + 2)
+ tdSql.query(f"select * from {stb} where isused between false and true")
+ tdSql.checkRows(rows * 2 + 3)
+ tdSql.query(f"select * from {stb} where family between '拖拉机' and '自行车'")
+ tdSql.checkRows(0)
tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")
- tdSql.error("select * from t1 where c6 between 0x7f and 0x80") # check filter HEX
- tdSql.error("select * from t1 where c6 between 0b1 and 0b11111") # check filter BIN
- tdSql.error("select * from t1 where c6 between 0b1 and 0x80")
- tdSql.error("select * from t1 where c6=0b1")
- tdSql.error("select * from t1 where c6=0x1")
+ tdSql.error(f"select * from {dbname}.t1 where c6 between 0x7f and 0x80") # check filter HEX
+ tdSql.error(f"select * from {dbname}.t1 where c6 between 0b1 and 0b11111") # check filter BIN
+ tdSql.error(f"select * from {dbname}.t1 where c6 between 0b1 and 0x80")
+ tdSql.error(f"select * from {dbname}.t1 where c6=0b1")
+ tdSql.error(f"select * from {dbname}.t1 where c6=0x1")
# 八进制数据会按照十进制数据进行判定
- tdSql.query("select * from t1 where c6 between 01 and 0200") # check filter OCT
- tdSql.checkRows(10)
+ tdSql.query(f"select * from {dbname}.t1 where c6 between 01 and 0200") # check filter OCT
+ tdSql.checkRows(rows)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py
index 1b7c967348..923575695f 100644
--- a/tests/system-test/2-query/bottom.py
+++ b/tests/system-test/2-query/bottom.py
@@ -26,7 +26,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.dbname = 'db_test'
self.setsql = TDSetSql()
- self.ntbname = 'ntb'
+ self.ntbname = f'{self.dbname}.ntb'
self.rowNum = 10
self.tbnum = 20
self.ts = 1537146000000
@@ -96,7 +96,7 @@ class TDTestCase:
self.bottom_check_data(self.ntbname,'normal_table')
tdSql.execute(f'drop database {self.dbname}')
def bottom_check_stb(self):
- stbname = tdCom.getLongName(5, "letters")
+ stbname = f'{self.dbname}.{tdCom.getLongName(5, "letters")}'
tag_dict = {
't0':'int'
}
@@ -109,7 +109,7 @@ class TDTestCase:
for i in range(self.tbnum):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)
- tdSql.query('show tables')
+ tdSql.query(f'show {self.dbname}.tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
diff --git a/tests/system-test/2-query/cast.py b/tests/system-test/2-query/cast.py
index 934bbbd7b4..bdac2b6175 100644
--- a/tests/system-test/2-query/cast.py
+++ b/tests/system-test/2-query/cast.py
@@ -15,6 +15,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
+ self.dbname = "db"
def __cast_to_bigint(self, col_name, tbname):
__sql = f"select cast({col_name} as bigint), {col_name} from {tbname}"
@@ -35,7 +36,7 @@ class TDTestCase:
for i in range(tdSql.queryRows):
if data_tb_col[i] is None:
tdSql.checkData( i, 0 , None )
- if col_name not in ["c2", "double"] or tbname != "t1" or i != 10:
+ if col_name not in ["c2", "double"] or tbname != f"{self.dbname}.t1" or i != 10:
utc_zone = datetime.timezone.utc
utc_8 = datetime.timezone(datetime.timedelta(hours=8))
date_init_stamp = datetime.datetime.utcfromtimestamp(data_tb_col[i]/1000)
@@ -48,52 +49,52 @@ class TDTestCase:
self.__cast_to_timestamp(col_name=col, tbname=table)
def __test_bigint(self):
- __table_list = ["ct1", "ct4", "t1"]
+ __table_list = [f"{self.dbname}.ct1", f"{self.dbname}.ct4", f"{self.dbname}.t1"]
__col_list = ["c1","c2","c3","c4","c5","c6","c7","c10","c1+c2"]
self.__range_to_bigint(cols=__col_list, tables=__table_list)
def __test_timestamp(self):
- __table_list = ["ct1", "ct4", "t1"]
+ __table_list = [f"{self.dbname}.ct1", f"{self.dbname}.ct4", f"{self.dbname}.t1"]
__col_list = ["c1","c2","c3","c4","c5","c6","c7","c1+c2"]
self.__range_to_timestamp(cols=__col_list, tables=__table_list)
def all_test(self):
- tdSql.query("select c1 from ct4")
+ tdSql.query(f"select c1 from {self.dbname}.ct4")
data_ct4_c1 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {self.dbname}.t1")
data_t1_c1 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
tdLog.printNoPrefix("==========step2: cast int to bigint, expect no changes")
- tdSql.query("select cast(c1 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c1 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c1)):
tdSql.checkData( i, 0, data_ct4_c1[i])
- tdSql.query("select cast(c1 as bigint) as b from t1")
+ tdSql.query(f"select cast(c1 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c1)):
tdSql.checkData( i, 0, data_t1_c1[i])
tdLog.printNoPrefix("==========step5: cast int to binary, expect changes to str(int) ")
- #tdSql.query("select cast(c1 as binary(32)) as b from ct4")
+ #tdSql.query(f"select cast(c1 as binary(32)) as b from {self.dbname}.ct4")
#for i in range(len(data_ct4_c1)):
# tdSql.checkData( i, 0, str(data_ct4_c1[i]) )
- tdSql.query("select cast(c1 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c1 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c1)):
tdSql.checkData( i, 0, str(data_t1_c1[i]) )
tdLog.printNoPrefix("==========step6: cast int to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c1 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c1 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c1)):
tdSql.checkData( i, 0, str(data_ct4_c1[i]) )
- tdSql.query("select cast(c1 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c1 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c1)):
tdSql.checkData( i, 0, str(data_t1_c1[i]) )
tdLog.printNoPrefix("==========step7: cast int to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c1 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c1 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c1)):
if data_ct4_c1[i] is None:
tdSql.checkData( i, 0 , None )
@@ -104,7 +105,7 @@ class TDTestCase:
date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c1 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c1 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c1)):
if data_ct4_c1[i] is None:
tdSql.checkData( i, 0 , None )
@@ -117,40 +118,40 @@ class TDTestCase:
tdLog.printNoPrefix("==========step8: cast bigint to bigint, expect no changes")
- tdSql.query("select c2 from ct4")
+ tdSql.query(f"select c2 from {self.dbname}.ct4")
data_ct4_c2 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c2 from t1")
+ tdSql.query(f"select c2 from {self.dbname}.t1")
data_t1_c2 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select cast(c2 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c2 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c2)):
tdSql.checkData( i, 0, data_ct4_c2[i])
- tdSql.query("select cast(c2 as bigint) as b from t1")
+ tdSql.query(f"select cast(c2 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c2)):
tdSql.checkData( i, 0, data_t1_c2[i])
tdLog.printNoPrefix("==========step9: cast bigint to binary, expect changes to str(int) ")
- tdSql.query("select cast(c2 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c2 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c2)):
tdSql.checkData( i, 0, str(data_ct4_c2[i]) )
- tdSql.query("select cast(c2 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c2 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c2)):
tdSql.checkData( i, 0, str(data_t1_c2[i]) )
tdLog.printNoPrefix("==========step10: cast bigint to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c2 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c2 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c2)):
tdSql.checkData( i, 0, str(data_ct4_c2[i]) )
- tdSql.query("select cast(c2 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c2 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c2)):
tdSql.checkData( i, 0, str(data_t1_c2[i]) )
tdLog.printNoPrefix("==========step11: cast bigint to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c2 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c2 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c2)):
if data_ct4_c2[i] is None:
tdSql.checkData( i, 0 , None )
@@ -162,7 +163,7 @@ class TDTestCase:
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c2 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c2 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c2)):
if data_t1_c2[i] is None:
tdSql.checkData( i, 0 , None )
@@ -177,40 +178,40 @@ class TDTestCase:
tdLog.printNoPrefix("==========step12: cast smallint to bigint, expect no changes")
- tdSql.query("select c3 from ct4")
+ tdSql.query(f"select c3 from {self.dbname}.ct4")
data_ct4_c3 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c3 from t1")
+ tdSql.query(f"select c3 from {self.dbname}.t1")
data_t1_c3 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select cast(c3 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c3 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c3)):
tdSql.checkData( i, 0, data_ct4_c3[i])
- tdSql.query("select cast(c3 as bigint) as b from t1")
+ tdSql.query(f"select cast(c3 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c3)):
tdSql.checkData( i, 0, data_t1_c3[i])
tdLog.printNoPrefix("==========step13: cast smallint to binary, expect changes to str(int) ")
- tdSql.query("select cast(c3 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c3 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c3)):
tdSql.checkData( i, 0, str(data_ct4_c3[i]) )
- tdSql.query("select cast(c3 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c3 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c3)):
tdSql.checkData( i, 0, str(data_t1_c3[i]) )
tdLog.printNoPrefix("==========step14: cast smallint to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c3 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c3 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c3)):
tdSql.checkData( i, 0, str(data_ct4_c3[i]) )
- tdSql.query("select cast(c3 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c3 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c3)):
tdSql.checkData( i, 0, str(data_t1_c3[i]) )
tdLog.printNoPrefix("==========step15: cast smallint to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c3 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c3 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c3)):
if data_ct4_c3[i] is None:
tdSql.checkData( i, 0 , None )
@@ -221,7 +222,7 @@ class TDTestCase:
date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c3 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c3 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c3)):
if data_ct4_c3[i] is None:
tdSql.checkData( i, 0 , None )
@@ -234,40 +235,40 @@ class TDTestCase:
tdLog.printNoPrefix("==========step16: cast tinyint to bigint, expect no changes")
- tdSql.query("select c4 from ct4")
+ tdSql.query(f"select c4 from {self.dbname}.ct4")
data_ct4_c4 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c4 from t1")
+ tdSql.query(f"select c4 from {self.dbname}.t1")
data_t1_c4 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select cast(c4 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c4 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c4)):
tdSql.checkData( i, 0, data_ct4_c4[i])
- tdSql.query("select cast(c4 as bigint) as b from t1")
+ tdSql.query(f"select cast(c4 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c4)):
tdSql.checkData( i, 0, data_t1_c4[i])
tdLog.printNoPrefix("==========step17: cast tinyint to binary, expect changes to str(int) ")
- tdSql.query("select cast(c4 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c4 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c4)):
tdSql.checkData( i, 0, str(data_ct4_c4[i]) )
- tdSql.query("select cast(c4 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c4 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c4)):
tdSql.checkData( i, 0, str(data_t1_c4[i]) )
tdLog.printNoPrefix("==========step18: cast tinyint to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c4 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c4 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c4)):
tdSql.checkData( i, 0, str(data_ct4_c4[i]) )
- tdSql.query("select cast(c4 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c4 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c4)):
tdSql.checkData( i, 0, str(data_t1_c4[i]) )
tdLog.printNoPrefix("==========step19: cast tinyint to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c4 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c4 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c4)):
if data_ct4_c4[i] is None:
tdSql.checkData( i, 0 , None )
@@ -278,7 +279,7 @@ class TDTestCase:
date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c4 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c4 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c4)):
if data_ct4_c4[i] is None:
tdSql.checkData( i, 0 , None )
@@ -291,36 +292,36 @@ class TDTestCase:
tdLog.printNoPrefix("==========step20: cast float to bigint, expect no changes")
- tdSql.query("select c5 from ct4")
+ tdSql.query(f"select c5 from {self.dbname}.ct4")
data_ct4_c5 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c5 from t1")
+ tdSql.query(f"select c5 from {self.dbname}.t1")
data_t1_c5 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select cast(c5 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c5 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c5)):
tdSql.checkData( i, 0, None ) if data_ct4_c5[i] is None else tdSql.checkData( i, 0, int(data_ct4_c5[i]) )
- tdSql.query("select cast(c5 as bigint) as b from t1")
+ tdSql.query(f"select cast(c5 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c5)):
tdSql.checkData( i, 0, None ) if data_t1_c5[i] is None else tdSql.checkData( i, 0, int(data_t1_c5[i]) )
tdLog.printNoPrefix("==========step21: cast float to binary, expect changes to str(int) ")
- tdSql.query("select cast(c5 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c5 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c5)):
tdSql.checkData( i, 0, str(data_ct4_c5[i]) ) if data_ct4_c5[i] is None else tdSql.checkData( i, 0, f'{data_ct4_c5[i]:.6f}' )
- tdSql.query("select cast(c5 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c5 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c5)):
tdSql.checkData( i, 0, str(data_t1_c5[i]) ) if data_t1_c5[i] is None else tdSql.checkData( i, 0, f'{data_t1_c5[i]:.6f}' )
tdLog.printNoPrefix("==========step22: cast float to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c5 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c5 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c5)):
tdSql.checkData( i, 0, None ) if data_ct4_c5[i] is None else tdSql.checkData( i, 0, f'{data_ct4_c5[i]:.6f}' )
- tdSql.query("select cast(c5 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c5 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c5)):
tdSql.checkData( i, 0, None ) if data_t1_c5[i] is None else tdSql.checkData( i, 0, f'{data_t1_c5[i]:.6f}' )
tdLog.printNoPrefix("==========step23: cast float to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c5 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c5 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c5)):
if data_ct4_c5[i] is None:
tdSql.checkData( i, 0 , None )
@@ -330,7 +331,7 @@ class TDTestCase:
date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c5[i]/1000))
date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c5 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c5 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c5)):
if data_t1_c5[i] is None:
tdSql.checkData( i, 0 , None )
@@ -342,15 +343,15 @@ class TDTestCase:
tdSql.checkData( i, 0, date_data)
tdLog.printNoPrefix("==========step24: cast double to bigint, expect no changes")
- tdSql.query("select c6 from ct4")
+ tdSql.query(f"select c6 from {self.dbname}.ct4")
data_ct4_c6 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c6 from t1")
+ tdSql.query(f"select c6 from {self.dbname}.t1")
data_t1_c6 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select cast(c6 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c6 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c6)):
tdSql.checkData( i, 0, None ) if data_ct4_c6[i] is None else tdSql.checkData( i, 0, int(data_ct4_c6[i]) )
- tdSql.query("select cast(c6 as bigint) as b from t1")
+ tdSql.query(f"select cast(c6 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c6)):
if data_t1_c6[i] is None:
tdSql.checkData( i, 0, None )
@@ -360,23 +361,23 @@ class TDTestCase:
tdSql.checkData( i, 0, int(data_t1_c6[i]) )
tdLog.printNoPrefix("==========step25: cast double to binary, expect changes to str(int) ")
- tdSql.query("select cast(c6 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c6 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c6)):
tdSql.checkData( i, 0, None ) if data_ct4_c6[i] is None else tdSql.checkData( i, 0, f'{data_ct4_c6[i]:.6f}' )
- tdSql.query("select cast(c6 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c6 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c6)):
tdSql.checkData( i, 0, None ) if data_t1_c6[i] is None else tdSql.checkData( i, 0, f'{data_t1_c6[i]:.6f}' )
tdLog.printNoPrefix("==========step26: cast double to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c6 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c6 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c6)):
tdSql.checkData( i, 0, None ) if data_ct4_c6[i] is None else tdSql.checkData( i, 0, f'{data_ct4_c6[i]:.6f}' )
- tdSql.query("select cast(c6 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c6 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c6)):
tdSql.checkData( i, 0, None ) if data_t1_c6[i] is None else tdSql.checkData( i, 0, f'{data_t1_c6[i]:.6f}' )
tdLog.printNoPrefix("==========step27: cast double to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c6 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c6 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c6)):
if data_ct4_c6[i] is None:
tdSql.checkData( i, 0 , None )
@@ -387,7 +388,7 @@ class TDTestCase:
date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c6 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c6 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c6)):
if data_t1_c6[i] is None:
tdSql.checkData( i, 0 , None )
@@ -401,36 +402,36 @@ class TDTestCase:
tdSql.checkData( i, 0, date_data)
tdLog.printNoPrefix("==========step28: cast bool to bigint, expect no changes")
- tdSql.query("select c7 from ct4")
+ tdSql.query(f"select c7 from {self.dbname}.ct4")
data_ct4_c7 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c7 from t1")
+ tdSql.query(f"select c7 from {self.dbname}.t1")
data_t1_c7 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select cast(c7 as bigint) as b from ct4")
+ tdSql.query(f"select cast(c7 as bigint) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c7)):
tdSql.checkData( i, 0, data_ct4_c7[i])
- tdSql.query("select cast(c7 as bigint) as b from t1")
+ tdSql.query(f"select cast(c7 as bigint) as b from {self.dbname}.t1")
for i in range(len(data_t1_c7)):
tdSql.checkData( i, 0, data_t1_c7[i])
tdLog.printNoPrefix("==========step29: cast bool to binary, expect changes to str(int) ")
- tdSql.query("select cast(c7 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c7 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c7)):
tdSql.checkData( i, 0, None ) if data_ct4_c7[i] is None else tdSql.checkData( i, 0, str(data_ct4_c7[i]).lower() )
- tdSql.query("select cast(c7 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c7 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c7)):
tdSql.checkData( i, 0, None ) if data_t1_c7[i] is None else tdSql.checkData( i, 0, str(data_t1_c7[i]).lower() )
tdLog.printNoPrefix("==========step30: cast bool to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c7 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c7 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c7)):
tdSql.checkData( i, 0, None ) if data_ct4_c7[i] is None else tdSql.checkData( i, 0, str(data_ct4_c7[i]).lower() )
- tdSql.query("select cast(c7 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c7 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c7)):
tdSql.checkData( i, 0, None ) if data_t1_c7[i] is None else tdSql.checkData( i, 0, str(data_t1_c7[i]).lower() )
tdLog.printNoPrefix("==========step31: cast bool to timestamp, expect changes to timestamp ")
- tdSql.query("select cast(c7 as timestamp) as b from ct4")
+ tdSql.query(f"select cast(c7 as timestamp) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c7)):
if data_ct4_c7[i] is None:
tdSql.checkData( i, 0 , None )
@@ -440,7 +441,7 @@ class TDTestCase:
date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c7[i]/1000))
date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
tdSql.checkData( i, 0, date_data)
- tdSql.query("select cast(c7 as timestamp) as b from t1")
+ tdSql.query(f"select cast(c7 as timestamp) as b from {self.dbname}.t1")
for i in range(len(data_t1_c7)):
if data_t1_c7[i] is None:
tdSql.checkData( i, 0 , None )
@@ -452,22 +453,22 @@ class TDTestCase:
tdSql.checkData( i, 0, date_data)
- tdSql.query("select c8 from ct4")
+ tdSql.query(f"select c8 from {self.dbname}.ct4")
data_ct4_c8 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c8 from t1")
+ tdSql.query(f"select c8 from {self.dbname}.t1")
data_t1_c8 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
tdLog.printNoPrefix("==========step32: cast binary to binary, expect no changes ")
- tdSql.query("select cast(c8 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c8 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c8)):
tdSql.checkData( i, 0, None ) if data_ct4_c8[i] is None else tdSql.checkData(i,0,data_ct4_c8[i])
- tdSql.query("select cast(c8 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c8 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c8)):
tdSql.checkData( i, 0, None ) if data_t1_c8[i] is None else tdSql.checkData(i,0,data_t1_c8[i])
tdLog.printNoPrefix("==========step33: cast binary to binary, expect truncate ")
- tdSql.query("select cast(c8 as binary(2)) as b from ct4")
+ tdSql.query(f"select cast(c8 as binary(2)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c8)):
if data_ct4_c8[i] is None:
tdSql.checkData( i, 0, None)
@@ -476,7 +477,7 @@ class TDTestCase:
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_ct4_c8[i][:2]}")
- tdSql.query("select cast(c8 as binary(2)) as b from t1")
+ tdSql.query(f"select cast(c8 as binary(2)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c8)):
if data_t1_c8[i] is None:
tdSql.checkData( i, 0, None)
@@ -487,7 +488,7 @@ class TDTestCase:
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_t1_c8[i][:2]}")
tdLog.printNoPrefix("==========step34: cast binary to nchar, expect changes to str(int) ")
- tdSql.query("select cast(c8 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c8 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c8)):
if data_ct4_c8[i] is None:
tdSql.checkData( i, 0, None)
@@ -496,7 +497,7 @@ class TDTestCase:
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_ct4_c8[i]}")
- tdSql.query("select cast(c8 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c8 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c8)):
if data_t1_c8[i] is None:
tdSql.checkData( i, 0, None)
@@ -507,14 +508,14 @@ class TDTestCase:
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_t1_c8[i]}")
- tdSql.query("select c9 from ct4")
+ tdSql.query(f"select c9 from {self.dbname}.ct4")
data_ct4_c9 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c9 from t1")
+ tdSql.query(f"select c9 from {self.dbname}.t1")
data_t1_c9 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
"c10 timestamp"
tdLog.printNoPrefix("==========step35: cast nchar to nchar, expect no changes ")
- tdSql.query("select cast(c9 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c9 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c9)):
if data_ct4_c9[i] is None:
tdSql.checkData( i, 0, None)
@@ -523,7 +524,7 @@ class TDTestCase:
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_ct4_c9[i]}")
- tdSql.query("select cast(c9 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c9 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c9)):
tdSql.checkData( i, 0, data_t1_c9[i] )
if data_t1_c9[i] is None:
@@ -535,7 +536,7 @@ class TDTestCase:
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_t1_c9[i]}")
tdLog.printNoPrefix("==========step36: cast nchar to nchar, expect truncate ")
- tdSql.query("select cast(c9 as nchar(2)) as b from ct4")
+ tdSql.query(f"select cast(c9 as nchar(2)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c9)):
if data_ct4_c9[i] is None:
tdSql.checkData( i, 0, None)
@@ -544,7 +545,7 @@ class TDTestCase:
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_ct4_c9[i][:2]}")
- tdSql.query("select cast(c9 as nchar(2)) as b from t1")
+ tdSql.query(f"select cast(c9 as nchar(2)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c9)):
if data_t1_c9[i] is None:
tdSql.checkData( i, 0, None)
@@ -554,141 +555,144 @@ class TDTestCase:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{tdSql.sql} row:{i} col:0 data:{tdSql.queryResult[i][0]} != expect:{data_t1_c9[i][:2]}")
- tdSql.query("select c10 from ct4")
+ tdSql.query(f"select c10 from {self.dbname}.ct4")
data_ct4_c10 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
- tdSql.query("select c10 from t1")
+ tdSql.query(f"select c10 from {self.dbname}.t1")
data_t1_c10 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
tdLog.printNoPrefix("==========step37: cast timestamp to nchar, expect no changes ")
- tdSql.query("select cast(c10 as nchar(32)) as b from ct4")
+ tdSql.query(f"select cast(c10 as nchar(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c10)):
if data_ct4_c10[i] is None:
tdSql.checkData( i, 0, None )
else:
- time2str = str(int((data_ct4_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ # time2str = str(int((data_ct4_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ time2str = str(int((datetime.datetime.timestamp(data_ct4_c10[i])-datetime.datetime.timestamp(datetime.datetime.fromtimestamp(0)))*1000))
tdSql.checkData( i, 0, time2str )
- tdSql.query("select cast(c10 as nchar(32)) as b from t1")
+ tdSql.query(f"select cast(c10 as nchar(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c10)):
if data_t1_c10[i] is None:
tdSql.checkData( i, 0, None )
elif i == 10:
continue
else:
- time2str = str(int((data_t1_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ # time2str = str(int((data_t1_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ time2str = str(int((datetime.datetime.timestamp(data_t1_c10[i])-datetime.datetime.timestamp(datetime.datetime.fromtimestamp(0)))*1000))
tdSql.checkData( i, 0, time2str )
tdLog.printNoPrefix("==========step38: cast timestamp to binary, expect no changes ")
- tdSql.query("select cast(c10 as binary(32)) as b from ct4")
+ tdSql.query(f"select cast(c10 as binary(32)) as b from {self.dbname}.ct4")
for i in range(len(data_ct4_c10)):
if data_ct4_c10[i] is None:
tdSql.checkData( i, 0, None )
else:
- time2str = str(int((data_ct4_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ # time2str = str(int((data_ct4_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ time2str = str(int((datetime.datetime.timestamp(data_ct4_c10[i])-datetime.datetime.timestamp(datetime.datetime.fromtimestamp(0)))*1000))
tdSql.checkData( i, 0, time2str )
- tdSql.query("select cast(c10 as binary(32)) as b from t1")
+ tdSql.query(f"select cast(c10 as binary(32)) as b from {self.dbname}.t1")
for i in range(len(data_t1_c10)):
if data_t1_c10[i] is None:
tdSql.checkData( i, 0, None )
elif i == 10:
continue
else:
- time2str = str(int((data_t1_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ # time2str = str(int((data_t1_c10[i]-datetime.datetime.fromtimestamp(0)).total_seconds()*1000))
+ time2str = str(int((datetime.datetime.timestamp(data_t1_c10[i])-datetime.datetime.timestamp(datetime.datetime.fromtimestamp(0)))*1000))
tdSql.checkData( i, 0, time2str )
tdLog.printNoPrefix("==========step39: cast constant operation to bigint, expect change to int ")
- tdSql.query("select cast(12121.23323131 as bigint) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 as bigint) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 as binary(16)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 as binary(16)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 as binary(2)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 as binary(2)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 as nchar(16)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 as nchar(16)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 as nchar(2)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 as nchar(2)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 321.876897998 as bigint) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 321.876897998 as bigint) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, 12443) for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(16)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 321.876897998 as binary(16)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(3)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 321.876897998 as binary(3)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(16)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 321.876897998 as nchar(16)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(3)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 321.876897998 as nchar(3)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as bigint) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 'test~!@`#$%^&*(){'}'}{'{'}][;><.,' as bigint) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(16)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 'test~!@`#$%^&*(){'}'}{'{'}][;><.,' as binary(16)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(2)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 'test~!@`#$%^&*(){'}'}{'{'}][;><.,' as binary(2)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(16)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 'test~!@`#$%^&*(){'}'}{'{'}][;><.,' as nchar(16)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) )
- tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(2)) as b from ct4")
+ tdSql.query(f"select cast(12121.23323131 + 'test~!@`#$%^&*(){'}'}{'{'}][;><.,' as nchar(2)) as b from {self.dbname}.ct4")
( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) )
- tdLog.printNoPrefix("==========step40: error cast condition, should return error ")
- #tdSql.error("select cast(c1 as int) as b from ct4")
- #tdSql.error("select cast(c1 as bool) as b from ct4")
- #tdSql.error("select cast(c1 as tinyint) as b from ct4")
- #tdSql.error("select cast(c1 as smallint) as b from ct4")
- #tdSql.error("select cast(c1 as float) as b from ct4")
- #tdSql.error("select cast(c1 as double) as b from ct4")
- #tdSql.error("select cast(c1 as tinyint unsigned) as b from ct4")
- #tdSql.error("select cast(c1 as smallint unsigned) as b from ct4")
- #tdSql.error("select cast(c1 as int unsigned) as b from ct4")
+ tdLog.printNoPrefix("==========step40: current cast condition, should return ok ")
+ tdSql.query(f"select cast(c1 as int) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as bool) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as tinyint) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as smallint) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as float) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as double) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as tinyint unsigned) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as smallint unsigned) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c1 as int unsigned) as b from {self.dbname}.ct4")
- #tdSql.error("select cast(c2 as int) as b from ct4")
- #tdSql.error("select cast(c3 as bool) as b from ct4")
- #tdSql.error("select cast(c4 as tinyint) as b from ct4")
- #tdSql.error("select cast(c5 as smallint) as b from ct4")
- #tdSql.error("select cast(c6 as float) as b from ct4")
- #tdSql.error("select cast(c7 as double) as b from ct4")
- #tdSql.error("select cast(c8 as tinyint unsigned) as b from ct4")
+ tdSql.query(f"select cast(c2 as int) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c3 as bool) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c4 as tinyint) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c5 as smallint) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c6 as float) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c7 as double) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c8 as tinyint unsigned) as b from {self.dbname}.ct4")
- #tdSql.error("select cast(c8 as timestamp ) as b from ct4")
- #tdSql.error("select cast(c9 as timestamp ) as b from ct4")
- #tdSql.error("select cast(c9 as binary(64) ) as b from ct4")
- pass
+ tdSql.query(f"select cast(c8 as timestamp ) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c9 as timestamp ) as b from {self.dbname}.ct4")
+ tdSql.query(f"select cast(c9 as binary(64) ) as b from {self.dbname}.ct4")
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
- '''create table stb1
+ f'''create table {self.dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {self.dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {self.dbname}.ct{i+1} using {self.dbname}.stb1 tags ( {i+1} )')
tdLog.printNoPrefix("==========step2:insert data")
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {self.dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {self.dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {self.dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {self.dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {self.dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {self.dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {self.dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {self.dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -706,10 +710,10 @@ class TDTestCase:
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
- tdSql.execute("use db")
+ tdSql.execute(f"flush database {self.dbname}")
self.all_test()
diff --git a/tests/system-test/2-query/ceil.py b/tests/system-test/2-query/ceil.py
index f1379e6661..6777b449f9 100644
--- a/tests/system-test/2-query/ceil.py
+++ b/tests/system-test/2-query/ceil.py
@@ -9,49 +9,49 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -95,68 +95,56 @@ class TDTestCase:
else:
tdLog.info("ceil value check pass , it work as expected ,sql is \"%s\" "%ceil_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select ceil from t1",
- # "select ceil(-+--+c1) from t1",
- # "select +-ceil(c1) from t1",
- # "select ++-ceil(c1) from t1",
- # "select ++--ceil(c1) from t1",
- # "select - -ceil(c1)*0 from t1",
- # "select ceil(tbname+1) from t1 ",
- "select ceil(123--123)==1 from t1",
- "select ceil(c1) as 'd1' from t1",
- "select ceil(c1 ,c2 ) from t1",
- "select ceil(c1 ,NULL) from t1",
- "select ceil(,) from t1;",
- "select ceil(ceil(c1) ab from t1)",
- "select ceil(c1) as int from t1",
- "select ceil from stb1",
- # "select ceil(-+--+c1) from stb1",
- # "select +-ceil(c1) from stb1",
- # "select ++-ceil(c1) from stb1",
- # "select ++--ceil(c1) from stb1",
- # "select - -ceil(c1)*0 from stb1",
- # "select ceil(tbname+1) from stb1 ",
- "select ceil(123--123)==1 from stb1",
- "select ceil(c1) as 'd1' from stb1",
- "select ceil(c1 ,c2 ) from stb1",
- "select ceil(c1 ,NULL) from stb1",
- "select ceil(,) from stb1;",
- "select ceil(ceil(c1) ab from stb1)",
- "select ceil(c1) as int from stb1"
+ f"select ceil from {dbname}.t1",
+ f"select ceil(123--123)==1 from {dbname}.t1",
+ f"select ceil(c1) as 'd1' from {dbname}.t1",
+ f"select ceil(c1 ,c2 ) from {dbname}.t1",
+ f"select ceil(c1 ,NULL) from {dbname}.t1",
+ f"select ceil(,) from {dbname}.t1;",
+ f"select ceil(ceil(c1) ab from {dbname}.t1)",
+ f"select ceil(c1) as int from {dbname}.t1",
+ f"select ceil from {dbname}.stb1",
+ f"select ceil(123--123)==1 from {dbname}.stb1",
+ f"select ceil(c1) as 'd1' from {dbname}.stb1",
+ f"select ceil(c1 ,c2 ) from {dbname}.stb1",
+ f"select ceil(c1 ,NULL) from {dbname}.stb1",
+ f"select ceil(,) from {dbname}.stb1;",
+ f"select ceil(ceil(c1) ab from {dbname}.stb1)",
+ f"select ceil(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select ceil(ts) from t1" ,
- "select ceil(c7) from t1",
- "select ceil(c8) from t1",
- "select ceil(c9) from t1",
- "select ceil(ts) from ct1" ,
- "select ceil(c7) from ct1",
- "select ceil(c8) from ct1",
- "select ceil(c9) from ct1",
- "select ceil(ts) from ct3" ,
- "select ceil(c7) from ct3",
- "select ceil(c8) from ct3",
- "select ceil(c9) from ct3",
- "select ceil(ts) from ct4" ,
- "select ceil(c7) from ct4",
- "select ceil(c8) from ct4",
- "select ceil(c9) from ct4",
- "select ceil(ts) from stb1" ,
- "select ceil(c7) from stb1",
- "select ceil(c8) from stb1",
- "select ceil(c9) from stb1" ,
+ f"select ceil(ts) from {dbname}.t1" ,
+ f"select ceil(c7) from {dbname}.t1",
+ f"select ceil(c8) from {dbname}.t1",
+ f"select ceil(c9) from {dbname}.t1",
+ f"select ceil(ts) from {dbname}.ct1" ,
+ f"select ceil(c7) from {dbname}.ct1",
+ f"select ceil(c8) from {dbname}.ct1",
+ f"select ceil(c9) from {dbname}.ct1",
+ f"select ceil(ts) from {dbname}.ct3" ,
+ f"select ceil(c7) from {dbname}.ct3",
+ f"select ceil(c8) from {dbname}.ct3",
+ f"select ceil(c9) from {dbname}.ct3",
+ f"select ceil(ts) from {dbname}.ct4" ,
+ f"select ceil(c7) from {dbname}.ct4",
+ f"select ceil(c8) from {dbname}.ct4",
+ f"select ceil(c9) from {dbname}.ct4",
+ f"select ceil(ts) from {dbname}.stb1" ,
+ f"select ceil(c7) from {dbname}.stb1",
+ f"select ceil(c8) from {dbname}.stb1",
+ f"select ceil(c9) from {dbname}.stb1" ,
- "select ceil(ts) from stbbb1" ,
- "select ceil(c7) from stbbb1",
+ f"select ceil(ts) from {dbname}.stbbb1" ,
+ f"select ceil(c7) from {dbname}.stbbb1",
- "select ceil(ts) from tbname",
- "select ceil(c9) from tbname"
+ f"select ceil(ts) from {dbname}.tbname",
+ f"select ceil(c9) from {dbname}.tbname"
]
@@ -165,127 +153,127 @@ class TDTestCase:
type_sql_lists = [
- "select ceil(c1) from t1",
- "select ceil(c2) from t1",
- "select ceil(c3) from t1",
- "select ceil(c4) from t1",
- "select ceil(c5) from t1",
- "select ceil(c6) from t1",
+ f"select ceil(c1) from {dbname}.t1",
+ f"select ceil(c2) from {dbname}.t1",
+ f"select ceil(c3) from {dbname}.t1",
+ f"select ceil(c4) from {dbname}.t1",
+ f"select ceil(c5) from {dbname}.t1",
+ f"select ceil(c6) from {dbname}.t1",
- "select ceil(c1) from ct1",
- "select ceil(c2) from ct1",
- "select ceil(c3) from ct1",
- "select ceil(c4) from ct1",
- "select ceil(c5) from ct1",
- "select ceil(c6) from ct1",
+ f"select ceil(c1) from {dbname}.ct1",
+ f"select ceil(c2) from {dbname}.ct1",
+ f"select ceil(c3) from {dbname}.ct1",
+ f"select ceil(c4) from {dbname}.ct1",
+ f"select ceil(c5) from {dbname}.ct1",
+ f"select ceil(c6) from {dbname}.ct1",
- "select ceil(c1) from ct3",
- "select ceil(c2) from ct3",
- "select ceil(c3) from ct3",
- "select ceil(c4) from ct3",
- "select ceil(c5) from ct3",
- "select ceil(c6) from ct3",
+ f"select ceil(c1) from {dbname}.ct3",
+ f"select ceil(c2) from {dbname}.ct3",
+ f"select ceil(c3) from {dbname}.ct3",
+ f"select ceil(c4) from {dbname}.ct3",
+ f"select ceil(c5) from {dbname}.ct3",
+ f"select ceil(c6) from {dbname}.ct3",
- "select ceil(c1) from stb1",
- "select ceil(c2) from stb1",
- "select ceil(c3) from stb1",
- "select ceil(c4) from stb1",
- "select ceil(c5) from stb1",
- "select ceil(c6) from stb1",
+ f"select ceil(c1) from {dbname}.stb1",
+ f"select ceil(c2) from {dbname}.stb1",
+ f"select ceil(c3) from {dbname}.stb1",
+ f"select ceil(c4) from {dbname}.stb1",
+ f"select ceil(c5) from {dbname}.stb1",
+ f"select ceil(c6) from {dbname}.stb1",
- "select ceil(c6) as alisb from stb1",
- "select ceil(c6) alisb from stb1",
+ f"select ceil(c6) as alisb from {dbname}.stb1",
+ f"select ceil(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_ceil_function(self):
+ def basic_ceil_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
- # used for empty table , ct3 is empty
- tdSql.query("select ceil(c1) from ct3")
+ # used for empty table , ct3 is empty
+ tdSql.query(f"select ceil(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select ceil(c2) from ct3")
+ tdSql.query(f"select ceil(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select ceil(c3) from ct3")
+ tdSql.query(f"select ceil(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select ceil(c4) from ct3")
+ tdSql.query(f"select ceil(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select ceil(c5) from ct3")
+ tdSql.query(f"select ceil(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select ceil(c6) from ct3")
+ tdSql.query(f"select ceil(c6) from {dbname}.ct3")
# used for regular table
- tdSql.query("select ceil(c1) from t1")
+ tdSql.query(f"select ceil(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1)
tdSql.checkData(3 , 0, 3)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from t1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from {dbname}.t1")
# used for sub table
- tdSql.query("select ceil(c1) from ct1")
+ tdSql.query(f"select ceil(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1 , 0, 7)
tdSql.checkData(3 , 0, 5)
tdSql.checkData(5 , 0, 4)
- tdSql.query("select ceil(c1) from ct1")
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from ct1")
- self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
+ tdSql.query(f"select ceil(c1) from {dbname}.ct1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from {dbname}.ct1")
+ self.check_result_auto(f"select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from {dbname}.ct1;", f"select c1 from {dbname}.ct1" )
# used for stable table
- tdSql.query("select ceil(c1) from stb1")
+ tdSql.query(f"select ceil(c1) from {dbname}.stb1")
tdSql.checkRows(25)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from ct4")
- self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ", f"select (c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) from {dbname}.ct4")
+ self.check_result_auto(f"select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from {dbname}.ct4;" , f"select c1 from {dbname}.ct4" )
# used for not exists table
- tdSql.error("select ceil(c1) from stbbb1")
- tdSql.error("select ceil(c1) from tbname")
- tdSql.error("select ceil(c1) from ct5")
+ tdSql.error(f"select ceil(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select ceil(c1) from {dbname}.tbname")
+ tdSql.error(f"select ceil(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, ceil(c1) from ct1")
+ tdSql.query(f"select c1, ceil(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0)
- tdSql.query("select c1, ceil(c1) from ct4")
+ tdSql.query(f"select c1, ceil(c1) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
- tdSql.query("select c1, ceil(c1) from ct4 ")
+ tdSql.query(f"select c1, ceil(c1) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
# mix with common functions
- tdSql.query("select c1, ceil(c1),c5, ceil(c5) from ct4 ")
+ tdSql.query(f"select c1, ceil(c1),c5, ceil(c5) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -296,34 +284,34 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,6.66000)
tdSql.checkData(3 , 3 ,7.00000)
- tdSql.query("select c1, ceil(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, ceil(c1),c5, floor(c5) from {dbname}.stb1 ")
# mix with agg functions , not support
- tdSql.error("select c1, ceil(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, ceil(c1),c5, count(c5) from ct1 ")
- tdSql.error("select ceil(c1), count(c5) from stb1 ")
- tdSql.error("select ceil(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, ceil(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, ceil(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select ceil(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select ceil(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1)-0 from ct4 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -331,7 +319,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 8.000000000)
- tdSql.query(" select c1, abs(c1) -0 ,ceil(c1-0.1)-0.1 from ct4")
+ tdSql.query(f" select c1, abs(c1) -0 ,ceil(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -339,9 +327,9 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 7.900000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self,dbname="db"):
+ tdSql.execute(f"use {dbname}")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -349,7 +337,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -357,7 +345,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -365,7 +353,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -377,44 +365,44 @@ class TDTestCase:
def ceil_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select ceil(c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) ,ceil(c6) from sub1_bound")
- self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select ceil(c1), ceil(c2) ,ceil(c3), ceil(c3), ceil(c2) ,ceil(c1) from sub1_bound")
- self.check_result_auto("select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from sub1_bound;" , "select ceil(c1) from sub1_bound" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select ceil(c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) ,ceil(c6) from {dbname}.sub1_bound")
+ self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select ceil(c1), ceil(c2) ,ceil(c3), ceil(c3), ceil(c2) ,ceil(c1) from {dbname}.sub1_bound")
+ self.check_result_auto(f"select ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(ceil(c1)))))))))) nest_col_func from {dbname}.sub1_bound;" , f"select ceil(c1) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select ceil(c1+0.2) ,ceil(c2) , ceil(c3+0.3) , ceil(c4-0.3), ceil(c5/2), ceil(c6/2) from sub1_bound ")
+ tdSql.query(f"select ceil(c1+0.2) ,ceil(c2) , ceil(c3+0.3) , ceil(c4-0.3), ceil(c5/2), ceil(c6/2) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483648.000000000)
tdSql.checkData(0, 2, 32768.000000000)
tdSql.checkData(0, 3, 127.000000000)
@@ -425,19 +413,19 @@ class TDTestCase:
tdSql.checkData(4, 3, -123.000000000)
tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000)
- self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select ceil(c1+1) ,ceil(c2) , ceil(c3*1) , ceil(c4/2), ceil(c5)/2, ceil(c6) from sub1_bound ")
+ self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound" , f"select ceil(c1+1) ,ceil(c2) , ceil(c3*1) , ceil(c4/2), ceil(c5)/2, ceil(c6) from {dbname}.sub1_bound ")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto( " select c5 from stb1 order by ts " , "select ceil(c5) from stb1 order by ts" )
- self.check_result_auto( " select c5 from stb1 order by tbname " , "select ceil(c5) from stb1 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select ceil(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select ceil(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ tdSql.execute(f" use {dbname} ")
+ self.check_result_auto( f" select c5 from {dbname}.stb1 order by ts " , f"select ceil(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f" select c5 from {dbname}.stb1 order by tbname " , f"select ceil(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select ceil(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f" select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select ceil(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 order by ts " , "select ceil(t1), ceil(c5) from stb1 order by ts" )
- self.check_result_auto( " select t1,c5 from stb1 order by tbname " , "select ceil(t1) ,ceil(c5) from stb1 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select ceil(t1) ,ceil(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select ceil(t1) , ceil(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f" select t1,c5 from {dbname}.stb1 order by ts " , f"select ceil(t1), ceil(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f" select t1,c5 from {dbname}.stb1 order by tbname " , f"select ceil(t1) ,ceil(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select ceil(t1) ,ceil(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f" select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select ceil(t1) , ceil(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
diff --git a/tests/system-test/2-query/char_length.py b/tests/system-test/2-query/char_length.py
index 97d5a5f59a..c0883e665e 100644
--- a/tests/system-test/2-query/char_length.py
+++ b/tests/system-test/2-query/char_length.py
@@ -1,3 +1,7 @@
+import imp
+
+
+import datetime
from util.log import *
from util.sql import *
from util.cases import *
@@ -101,16 +105,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__char_length_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__char_length_err_check(tb):
@@ -123,17 +127,16 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (t_int int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -143,29 +146,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -181,7 +184,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -197,13 +200,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -232,8 +235,10 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/check_tsdb.py b/tests/system-test/2-query/check_tsdb.py
index 0ae1648d99..746906776d 100644
--- a/tests/system-test/2-query/check_tsdb.py
+++ b/tests/system-test/2-query/check_tsdb.py
@@ -9,73 +9,73 @@ from util.cases import *
from util.dnodes import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+ # updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ # "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ # "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
-
- def prepare_datas(self):
+ tdSql.init(conn.cursor(), False)
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
- tdSql.execute(
- '''
- create table t1
- (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
- '''
- )
+
+ # tdSql.execute(
+ # f'''
+ # create table t1
+ # (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+ # '''
+ # )
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute(
- f'''insert into t1 values
- ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
- ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
- ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
- ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
- ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
- ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
- ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
- ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
- ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- '''
- )
-
+ # tdSql.execute(
+ # f'''insert into t1 values
+ # ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ # ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+ # ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+ # ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+ # ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+ # ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ # ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+ # ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+ # ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+ # ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+ # ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ # ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ # '''
+ # )
- def restart_taosd_query_sum(self):
+
+ def restart_taosd_query_sum(self, dbname="db"):
for i in range(5):
tdLog.info(" this is %d_th restart taosd " %i)
- os.system("taos -s ' use db ;select c6 from stb1 ; '")
- tdSql.execute("use db ")
- tdSql.query("select count(*) from stb1")
+ os.system(f"taos -s ' use db ;select c6 from {dbname}.stb1 ; '")
+ tdSql.execute(f"use {dbname} ")
+ tdSql.query(f"select count(*) from {dbname}.stb1")
tdSql.checkRows(1)
- tdSql.query("select sum(c1),sum(c2),sum(c3),sum(c4),sum(c5),sum(c6) from stb1;")
+ tdSql.query(f"select sum(c1),sum(c2),sum(c3),sum(c4),sum(c5),sum(c6) from {dbname}.stb1;")
tdSql.checkData(0,0,99)
tdSql.checkData(0,1,499995)
tdSql.checkData(0,2,4995)
@@ -85,17 +85,18 @@ class TDTestCase:
tdDnodes.stop(1)
tdDnodes.start(1)
time.sleep(2)
-
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
+ dbname = "db"
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- os.system("taos -s ' select c6 from stb1 ; '")
+ os.system(f"taos -s ' select c6 from {dbname}.stb1 ; '")
self.restart_taosd_query_sum()
def stop(self):
diff --git a/tests/system-test/2-query/histogram.py b/tests/system-test/2-query/histogram.py
index 4b322c61cf..dc6e39ece9 100644
--- a/tests/system-test/2-query/histogram.py
+++ b/tests/system-test/2-query/histogram.py
@@ -5,7 +5,6 @@ import json
from dataclasses import dataclass, field
from typing import List, Any, Tuple
-from certifi import where
from util.log import tdLog
from util.sql import tdSql
from util.cases import tdCases
diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py
index df6390f59c..2348873a34 100644
--- a/tests/system-test/2-query/join.py
+++ b/tests/system-test/2-query/join.py
@@ -1,5 +1,7 @@
import datetime
+from dataclasses import dataclass, field
+from typing import List, Any, Tuple
from util.log import *
from util.sql import *
from util.cases import *
@@ -7,22 +9,57 @@ from util.dnodes import *
PRIMARY_COL = "ts"
-INT_COL = "c1"
-BINT_COL = "c2"
-SINT_COL = "c3"
-TINT_COL = "c4"
-FLOAT_COL = "c5"
-DOUBLE_COL = "c6"
-BOOL_COL = "c7"
+INT_COL = "c_int"
+BINT_COL = "c_bint"
+SINT_COL = "c_sint"
+TINT_COL = "c_tint"
+FLOAT_COL = "c_float"
+DOUBLE_COL = "c_double"
+BOOL_COL = "c_bool"
+TINT_UN_COL = "c_utint"
+SINT_UN_COL = "c_usint"
+BINT_UN_COL = "c_ubint"
+INT_UN_COL = "c_uint"
+BINARY_COL = "c_binary"
+NCHAR_COL = "c_nchar"
+TS_COL = "c_ts"
-BINARY_COL = "c8"
-NCHAR_COL = "c9"
-TS_COL = "c10"
+NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
+CHAR_COL = [BINARY_COL, NCHAR_COL, ]
+BOOLEAN_COL = [BOOL_COL, ]
+TS_TYPE_COL = [TS_COL, ]
+
+INT_TAG = "t_int"
+
+ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
+TAG_COL = [INT_TAG]
+# insert data args:
+TIME_STEP = 10000
+NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
+
+# init db/table
+DBNAME = "db"
+STBNAME = f"{DBNAME}.stb1"
+CTBNAME = f"{DBNAME}.ct1"
+NTBNAME = f"{DBNAME}.nt1"
+
+@dataclass
+class DataSet:
+ ts_data : List[int] = field(default_factory=list)
+ int_data : List[int] = field(default_factory=list)
+ bint_data : List[int] = field(default_factory=list)
+ sint_data : List[int] = field(default_factory=list)
+ tint_data : List[int] = field(default_factory=list)
+ int_un_data : List[int] = field(default_factory=list)
+ bint_un_data: List[int] = field(default_factory=list)
+ sint_un_data: List[int] = field(default_factory=list)
+ tint_un_data: List[int] = field(default_factory=list)
+ float_data : List[float] = field(default_factory=list)
+ double_data : List[float] = field(default_factory=list)
+ bool_data : List[int] = field(default_factory=list)
+ binary_data : List[str] = field(default_factory=list)
+ nchar_data : List[str] = field(default_factory=list)
-NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
-CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
-BOOLEAN_COL = [ BOOL_COL, ]
-TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
@@ -52,12 +89,12 @@ class TDTestCase:
return query_condition
- def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
+ def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}"
return join_condition
@@ -103,19 +140,19 @@ class TDTestCase:
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
- def __join_tblist(self):
+ def __join_tblist(self, dbname=DBNAME):
return [
# ["ct1", "ct2"],
- ["ct1", "ct4"],
- ["ct1", "t1"],
+ [f"{dbname}.ct1", f"{dbname}.ct4"],
+ [f"{dbname}.ct1", f"{dbname}.nt1"],
# ["ct2", "ct4"],
- # ["ct2", "t1"],
- # ["ct4", "t1"],
+ # ["ct2", "nt1"],
+ # ["ct4", "nt1"],
# ["ct1", "ct2", "ct4"],
- # ["ct1", "ct2", "t1"],
- # ["ct1", "ct4", "t1"],
- # ["ct2", "ct4", "t1"],
- # ["ct1", "ct2", "ct4", "t1"],
+ # ["ct1", "ct2", "nt1"],
+ # ["ct1", "ct4", "nt1"],
+ # ["ct2", "ct4", "nt1"],
+ # ["ct1", "ct2", "ct4", "nt1"],
]
@property
@@ -123,28 +160,29 @@ class TDTestCase:
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
- for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition( col=select_claus)
- where_claus = self.__where_condition( query_conditon=select_claus )
- having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
- sqls.extend(
- (
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
- )
+ alias_tb = "tb1"
+ # for join_tb in join_tblist:
+ select_claus_list = self.__query_condition(alias_tb)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition( col=select_claus)
+ where_claus = self.__where_condition( query_conditon=select_claus )
+ having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
+ sqls.extend(
+ (
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), group_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), where_claus, having_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), where_claus, ),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), having_claus ),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), group_claus ),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb) ),
)
+ )
return list(filter(None, sqls))
def __join_check(self,):
@@ -172,7 +210,7 @@ class TDTestCase:
tdSql.error(sql=sql)
break
if len(tblist) == 2:
- if "ct1" in tblist or "t1" in tblist:
+ if "ct1" in tblist or "nt1" in tblist:
self.__join_current(sql, checkrows)
elif where_condition or "not null" in group_condition:
self.__join_current(sql, checkrows + 2 )
@@ -187,14 +225,14 @@ class TDTestCase:
tdSql.query(sql=sql)
# tdSql.checkRows(checkrows)
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
# sourcery skip: extract-duplicate-method, move-assign-in-block
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- err_list_1 = ["ct1","ct2", "ct4"]
- err_list_2 = ["ct1","ct2", "t1"]
- err_list_3 = ["ct1","ct4", "t1"]
- err_list_4 = ["ct2","ct4", "t1"]
- err_list_5 = ["ct1", "ct2","ct4", "t1"]
+ err_list_1 = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4"]
+ err_list_2 = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.nt1"]
+ err_list_3 = [f"{dbname}.ct1", f"{dbname}.ct4", f"{dbname}.nt1"]
+ err_list_4 = [f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1"]
+ err_list_5 = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1"]
self.__join_check_old(err_list_1, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
self.__join_check_old(err_list_2, -1)
@@ -208,16 +246,16 @@ class TDTestCase:
self.__join_check_old(["ct2", "ct4"], -1, join_flag=False)
tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")
- tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
- tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{INT_COL}=ct4.{INT_COL}" )
- tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{TS_COL}=ct4.{TS_COL}" )
- tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" )
- tdSql.error( f"select ct2.c1, ct1.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
- tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " )
- tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " )
+ tdSql.error( f"select c1, c2 from {dbname}.ct2, {dbname}.ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
+ tdSql.error( f"select ct2.c1, ct2.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{INT_COL}=ct4.{INT_COL}" )
+ tdSql.error( f"select ct2.c1, ct2.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{TS_COL}=ct4.{TS_COL}" )
+ tdSql.error( f"select ct2.c1, ct2.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" )
+ tdSql.error( f"select ct2.c1, ct1.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
+ tdSql.error( f"select ct2.c1, ct4.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " )
+ tdSql.error( f"select ct2.c1, ct4.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " )
- tbname = ["ct1", "ct2", "ct4", "t1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1"]
# for tb in tbname:
# for errsql in self.__join_err_check(tb):
@@ -230,124 +268,147 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, stb="stb1", ctb_num=20, ntbnum=1, dbname=DBNAME):
+ create_stb_sql = f'''create table {dbname}.{stb}(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+ {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+ {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+ ) tags ({INT_TAG} int)
+ '''
+ for i in range(ntbnum):
- tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
- ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
- {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
- {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (tag1 int)
- '''
- create_ntb_sql = f'''create table t1(
- ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
- {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
- {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- )
- '''
+ create_ntb_sql = f'''create table {dbname}.nt{i+1}(
+ ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
+ {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
+ {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
+ {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
+ {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
+ )
+ '''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
- for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
+ for i in range(ctb_num):
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
- def __insert_data(self, rows):
- now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
- for i in range(rows):
- tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
- )
- tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
- )
- tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
- )
- tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
- '''
- )
-
- tdSql.execute(
- f'''insert into ct4 values
- ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- (
- { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
- )
- (
- { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
- )
- '''
- )
-
- tdSql.execute(
- f'''insert into ct2 values
- ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- (
- { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
- )
- (
- { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
- )
- '''
- )
+ def __data_set(self, rows):
+ data_set = DataSet()
for i in range(rows):
- insert_data = f'''insert into t1 values
- ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
- '''
- tdSql.execute(insert_data)
- tdSql.execute(
- f'''insert into t1 values
- ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
- )
- (
- { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
- )
+ data_set.ts_data.append(NOW + 1 * (rows - i))
+ data_set.int_data.append(rows - i)
+ data_set.bint_data.append(11111 * (rows - i))
+ data_set.sint_data.append(111 * (rows - i) % 32767)
+ data_set.tint_data.append(11 * (rows - i) % 127)
+ data_set.int_un_data.append(rows - i)
+ data_set.bint_un_data.append(11111 * (rows - i))
+ data_set.sint_un_data.append(111 * (rows - i) % 32767)
+ data_set.tint_un_data.append(11 * (rows - i) % 127)
+ data_set.float_data.append(1.11 * (rows - i))
+ data_set.double_data.append(1100.0011 * (rows - i))
+ data_set.bool_data.append((rows - i) % 2)
+ data_set.binary_data.append(f'binary{(rows - i)}')
+ data_set.nchar_data.append(f'nchar_测试_{(rows - i)}')
+
+ return data_set
+
+ def __insert_data(self, dbname=DBNAME):
+ tdLog.printNoPrefix("==========step: start inser data into tables now.....")
+ data = self.__data_set(rows=self.rows)
+
+ # now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
+ null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null'''
+ zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0"
+
+ for i in range(self.rows):
+ row_data = f'''
+ {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
+ {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]},
+ {data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]}
'''
- )
+ neg_row_data = f'''
+ {-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]},
+ {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]},
+ {1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]}
+ '''
+
+ tdSql.execute( f"insert into {dbname}.ct1 values ( {NOW - i * TIME_STEP}, {row_data} )" )
+ tdSql.execute( f"insert into {dbname}.ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )" )
+ tdSql.execute( f"insert into {dbname}.ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )" )
+ tdSql.execute( f"insert into {dbname}.nt1 values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )" )
+
+ tdSql.execute( f"insert into {dbname}.ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )" )
+ tdSql.execute( f"insert into {dbname}.ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )" )
+ tdSql.execute( f"insert into {dbname}.ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29) }, {null_data} )" )
+
+ tdSql.execute( f"insert into {dbname}.ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )" )
+ tdSql.execute( f"insert into {dbname}.ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )" )
+ tdSql.execute( f"insert into {dbname}.ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )" )
+
+ tdSql.execute( f"insert into {dbname}.nt1 values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )" )
+ tdSql.execute( f"insert into {dbname}.nt1 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" )
+ tdSql.execute( f"insert into {dbname}.nt1 values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" )
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
- self.__create_tb()
+ self.__create_tb(dbname=DBNAME)
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
- self.__insert_data(self.rows)
+ self.__insert_data(dbname=DBNAME)
tdLog.printNoPrefix("==========step3:all check")
+ tdSql.query(f"select count(*) from {DBNAME}.ct1")
+ tdSql.checkData(0, 0, self.rows)
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdLog.printNoPrefix("==========step4:cross db check")
+ dbname1 = "db1"
+ tdSql.execute(f"create database {dbname1} duration 432000m")
+ tdSql.execute(f"use {dbname1}")
+ self.__create_tb(dbname=dbname1)
+ self.__insert_data(dbname=dbname1)
+
+ tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
+ tdSql.checkRows(self.rows)
+ tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
+ tdSql.checkRows(self.rows)
+ tdSql.query("select ct1.c_int from db.nt1 as ct1 join db1.nt1 as cy1 on ct1.ts=cy1.ts")
+ tdSql.checkRows(self.rows + 3)
+ tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.stb1 as cy1 on ct1.ts=cy1.ts")
+ tdSql.checkRows(self.rows * 3 + 6)
+
+ tdSql.query("select count(*) from db.ct1")
+ tdSql.checkData(0, 0, self.rows)
+ tdSql.query("select count(*) from db1.ct1")
+ tdSql.checkData(0, 0, self.rows)
+
+ self.all_test()
+ tdSql.query("select count(*) from db.ct1")
+ tdSql.checkData(0, 0, self.rows)
+ tdSql.query("select count(*) from db1.ct1")
+ tdSql.checkData(0, 0, self.rows)
+
+ tdSql.execute(f"flush database {DBNAME}")
+ tdSql.execute(f"flush database {dbname1}")
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
tdSql.execute("use db")
+ tdSql.query("select count(*) from db.ct1")
+ tdSql.checkData(0, 0, self.rows)
+ tdSql.query("select count(*) from db1.ct1")
+ tdSql.checkData(0, 0, self.rows)
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
+ tdSql.query("select count(*) from db.ct1")
+ tdSql.checkData(0, 0, self.rows)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index cdb26f7589..105dc883c7 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -290,6 +290,52 @@ class TDTestCase:
tdSql.checkData(0, 0, None)
tdSql.query("select last_row(c1) from testdb.stb1")
tdSql.checkData(0, 0, None)
+
+        # verify constant and NULL arguments to last, first and last_row in regular queries
+ tdSql.error("select last_row(c1,NULL) from testdb.t1")
+ tdSql.error("select last_row(NULL) from testdb.t1")
+ tdSql.error("select last(NULL) from testdb.t1")
+ tdSql.error("select first(NULL) from testdb.t1")
+
+ tdSql.query("select last_row(c1,123) from testdb.t1")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,123)
+
+ tdSql.query("select last_row(123) from testdb.t1")
+ tdSql.checkData(0,0,123)
+
+ tdSql.error("select last(c1,NULL) from testdb.t1")
+
+ tdSql.query("select last(c1,123) from testdb.t1")
+ tdSql.checkData(0,0,9)
+ tdSql.checkData(0,1,123)
+
+ tdSql.error("select first(c1,NULL) from testdb.t1")
+
+ tdSql.query("select first(c1,123) from testdb.t1")
+ tdSql.checkData(0,0,1)
+ tdSql.checkData(0,1,123)
+
+ tdSql.error("select last_row(c1,c2,c3,NULL,c4) from testdb.t1")
+
+ tdSql.query("select last_row(c1,c2,c3,123,c4) from testdb.t1")
+ tdSql.checkData(0,0,None)
+ tdSql.checkData(0,1,None)
+ tdSql.checkData(0,2,None)
+ tdSql.checkData(0,3,123)
+ tdSql.checkData(0,4,None)
+
+
+ tdSql.error("select last_row(c1,c2,c3,NULL,c4,t1,t2) from testdb.ct1")
+
+ tdSql.query("select last_row(c1,c2,c3,123,c4,t1,t2) from testdb.ct1")
+ tdSql.checkData(0,0,9)
+ tdSql.checkData(0,1,-99999)
+ tdSql.checkData(0,2,-999)
+ tdSql.checkData(0,3,123)
+ tdSql.checkData(0,4,None)
+ tdSql.checkData(0,5,0)
+ tdSql.checkData(0,5,0)
# # bug need fix
tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.t1")
diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index 0a7214ec75..109c9075f5 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -11,13 +11,13 @@ class TDTestCase:
self.row_nums = 10
self.tb_nums = 10
self.ts = 1537146000000
-
+
def prepare_datas(self, stb_name , tb_nums , row_nums ):
tdSql.execute(" use db ")
tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
-
+
for i in range(tb_nums):
tbname = f"sub_{stb_name}_{i}"
ts = self.ts + i*10000
@@ -30,7 +30,7 @@ class TDTestCase:
for null in range(5):
ts = self.ts + row_nums*1000 + null*1000
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
-
+
def basic_query(self):
tdSql.query("select count(*) from stb")
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
@@ -44,7 +44,7 @@ class TDTestCase:
tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
- # bug need fix
+ # bug need fix
tdSql.query(" select max(t2) from stb group by t2 order by t2 ")
tdSql.checkRows(self.tb_nums)
tdSql.query(" select max(c1) from stb group by c1 order by c1 ")
@@ -62,8 +62,8 @@ class TDTestCase:
# bug need fix
# tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
- # tdSql.checkRows(1)
- # tdSql.checkData(0,0,"sub_stb_1")
+ # tdSql.checkRows(1)
+ # tdSql.checkData(0,0,"sub_stb_1")
tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
@@ -80,7 +80,7 @@ class TDTestCase:
tdSql.checkRows(2)
tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
-
+
tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
@@ -89,7 +89,7 @@ class TDTestCase:
tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")
- # # bug need fix
+ # # bug need fix
tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
tdSql.checkRows(self.tb_nums)
@@ -97,7 +97,7 @@ class TDTestCase:
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
-
+
tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")
tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")
@@ -125,10 +125,10 @@ class TDTestCase:
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,self.row_nums)
- # bug need fix
+ # bug need fix
tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
-
+
tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
tdSql.checkRows(self.row_nums+1)
@@ -148,15 +148,15 @@ class TDTestCase:
tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums*2)
-
- # interval
+
+ # interval
tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")
# bug need fix
tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
-
+
tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
@@ -169,30 +169,30 @@ class TDTestCase:
tdSql.checkData(0,1,self.row_nums)
tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1")
- tdSql.checkRows(72)
+ tdSql.checkRows(90)
tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1")
- tdSql.checkRows(72)
+ tdSql.checkRows(90)
tdSql.query("select c1 , csum(c1) from stb partition by c1")
- tdSql.checkRows(80)
+ tdSql.checkRows(100)
tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
tdSql.checkRows(21)
- # bug need fix
+ # bug need fix
# tdSql.checkData(0,1,None)
tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1")
tdSql.checkRows(11)
- tdSql.checkData(0,1,0.000000000)
+ tdSql.checkData(0,1,None)
tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
- tdSql.checkRows(72)
- # bug need fix
+ tdSql.checkRows(90)
+ # bug need fix
# tdSql.checkData(0,1,None)
@@ -201,15 +201,15 @@ class TDTestCase:
- # bug need fix
- # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
+ # bug need fix
+ # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
# tdSql.checkRows(5)
-
+
# tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
- # tdSql.checkRows(5)
-
- tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
-
+ # tdSql.checkRows(5)
+
+ tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
+
tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
@@ -219,18 +219,18 @@ class TDTestCase:
self.prepare_datas("stb",self.tb_nums,self.row_nums)
self.basic_query()
- # # coverage case for taosd crash about bug fix
+ # # coverage case for taosd crash about bug fix
tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
-
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py
index f6ff4989e7..4f5ed34419 100644
--- a/tests/system-test/2-query/sum.py
+++ b/tests/system-test/2-query/sum.py
@@ -20,6 +20,8 @@ NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
UN_NUM_COL = [BOOL_COL, BINARY_COL, NCHAR_COL, ]
TS_TYPE_COL = [TS_COL]
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -54,14 +56,14 @@ class TDTestCase:
where_condition = self.__where_condition(condition)
group_condition = self.__group_condition(condition, having=f"{condition} is not null " )
- tdSql.query(f"select {condition} from {tbname} {where_condition} ")
+ tdSql.query(f"select {condition} from {DBNAME}.{tbname} {where_condition} ")
datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
sum_data = sum(filter(None, datas))
- tdSql.query(f"select sum( {condition} ) from {tbname} {where_condition} ")
+ tdSql.query(f"select sum( {condition} ) from {DBNAME}.{tbname} {where_condition} ")
tdSql.checkData(0, 0, sum_data)
- tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ")
- tdSql.query(f"select sum( {condition} ) from {tbname} {where_condition} {group_condition} ")
+ tdSql.query(f"select {condition} from {DBNAME}.{tbname} {where_condition} {group_condition} ")
+ tdSql.query(f"select sum( {condition} ) from {DBNAME}.{tbname} {where_condition} {group_condition} ")
def __sum_err_check(self,tbanme):
sqls = []
@@ -69,19 +71,19 @@ class TDTestCase:
for un_num_col in UN_NUM_COL:
sqls.extend(
(
- f"select sum( {un_num_col} ) from {tbanme} ",
- f"select sum(ceil( {un_num_col} )) from {tbanme} ",
+ f"select sum( {un_num_col} ) from {DBNAME}.{tbanme} ",
+                    f"select sum(ceil( {un_num_col} )) from {DBNAME}.{tbanme} ",
)
)
# sqls.extend( f"select sum( {un_num_col} + {un_num_col_2} ) from {tbanme} " for un_num_col_2 in UN_NUM_COL )
- sqls.extend( f"select sum( {num_col} + {ts_col} ) from {tbanme} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
+ sqls.extend( f"select sum( {num_col} + {ts_col} ) from {DBNAME}.{tbanme} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend(
(
- f"select sum() from {tbanme} ",
- f"select sum(*) from {tbanme} ",
- f"select sum(ccccccc) from {tbanme} ",
- f"select sum('test') from {tbanme} ",
+ f"select sum() from {DBNAME}.{tbanme} ",
+ f"select sum(*) from {DBNAME}.{tbanme} ",
+            f"select sum(ccccccc) from {DBNAME}.{tbanme} ",
+ f"select sum('test') from {DBNAME}.{tbanme} ",
)
)
@@ -110,16 +112,15 @@ class TDTestCase:
def __create_tb(self):
- tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {DBNAME}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {DBNAME}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -129,29 +130,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {DBNAME}.ct{i+1} using {DBNAME}.stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {DBNAME}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {DBNAME}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {DBNAME}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {DBNAME}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {DBNAME}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -167,7 +168,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {DBNAME}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -183,13 +184,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {DBNAME}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {DBNAME}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -218,8 +219,11 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+
+ tdSql.execute("flush database db")
+
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py
index 1863f67c8e..a82c7bfe1a 100644
--- a/tests/system-test/2-query/tsbsQuery.py
+++ b/tests/system-test/2-query/tsbsQuery.py
@@ -14,6 +14,7 @@ class TDTestCase:
clientCfgDict["debugFlag"] = 131
updatecfgDict = {'clientCfg': {}}
updatecfgDict = {'debugFlag': 131}
+ updatecfgDict = {'keepColumnName': 1}
updatecfgDict["clientCfg"] = clientCfgDict
def init(self, conn, logSql):
@@ -42,7 +43,7 @@ class TDTestCase:
tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
else:
tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
- for j in range(10):
+ for j in range(10):
for i in range(100):
tdSql.execute(
f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -92,23 +93,23 @@ class TDTestCase:
# test partition interval limit (PRcore-TD-17410)
- # tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
- # tdSql.checkRows(10)
+ tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
+ tdSql.checkRows(10)
# test partition interval Pseudo time-column
tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
# 1 high-load:
- # tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
+ tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
- # tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
+ tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
# 2 stationary-trucks
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
# 3 long-driving-sessions
- # tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
+ tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
#4 long-daily-sessions
@@ -130,15 +131,18 @@ class TDTestCase:
# 8. daily-activity
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+
+ tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+
tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
# 9. breakdown-frequency
# NULL ---count(NULL)=0 expect count(NULL)= 100
- tdSql.query("select tbname,count(model),model from readings partition by tbname,model;")
- # model=NULL count(other) is 0
- tdSql.query("select tbname,count(name),model from readings where model=NULL partition by tbname,model;")
+ tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
+
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
+
#it's already supported:
# last-loc
tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py
index dde903af00..108f955977 100644
--- a/tests/system-test/2-query/twa.py
+++ b/tests/system-test/2-query/twa.py
@@ -7,7 +7,7 @@ import platform
import math
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
@@ -22,7 +22,7 @@ class TDTestCase:
self.time_step = 1000
def prepare_datas_of_distribute(self):
-
+
# prepate datas for 20 tables distributed at different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
@@ -32,16 +32,16 @@ class TDTestCase:
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
-
+
for i in range(self.tb_nums):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
ts = self.ts
for j in range(self.row_nums):
- ts+=j*self.time_step
+ ts+=j*self.time_step
tdSql.execute(
f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
-
+
tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
@@ -64,7 +64,7 @@ class TDTestCase:
vgroups = tdSql.queryResult
vnode_tables={}
-
+
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
@@ -73,7 +73,7 @@ class TDTestCase:
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
- vnode_tables[table_name[6]].append(table_name[0])
+ vnode_tables[table_name[6]].append(table_name[0])
self.vnode_disbutes = vnode_tables
count = 0
@@ -103,12 +103,12 @@ class TDTestCase:
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
- # union all
+ # union all
tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.checkData(0,0,1.000000000)
- # join
+ # join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
@@ -116,7 +116,7 @@ class TDTestCase:
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
-
+
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
@@ -127,7 +127,7 @@ class TDTestCase:
tdSql.checkData(0,0,4.500000000)
tdSql.checkData(0,1,4.500000000)
- # group by
+ # group by
tdSql.execute(" use testdb ")
# mixup with other functions
@@ -141,7 +141,7 @@ class TDTestCase:
self.check_distribute_datas()
self.twa_support_types()
self.distribute_twa_query()
-
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/7-tmq/tmqAutoCreateTbl.py b/tests/system-test/7-tmq/tmqAutoCreateTbl.py
index 1622ad7621..ea100ae0d3 100644
--- a/tests/system-test/7-tmq/tmqAutoCreateTbl.py
+++ b/tests/system-test/7-tmq/tmqAutoCreateTbl.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def __init__(self):
+ self.snapshot = 0
self.vgroups = 4
self.ctbNum = 1000
self.rowsPerTbl = 1000
@@ -44,7 +45,7 @@ class TDTestCase:
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
- 'snapshot': 1}
+ 'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
@@ -84,13 +85,14 @@ class TDTestCase:
'ctbStartIdx': 0,
'ctbNum': 1000,
'rowsPerTbl': 1000,
- 'batchNum': 400,
+ 'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
- 'snapshot': 1}
+ 'snapshot': 0}
+ paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
@@ -131,10 +133,10 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdSql.query(queryString)
- totalRowsInserted = tdSql.getRows()
+ totalRowsFromQuery = tdSql.getRows()
- if totalConsumeRows != totalRowsInserted:
- tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
+ if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
@@ -163,6 +165,7 @@ class TDTestCase:
'showRow': 1,
'snapshot': 0}
+ paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
@@ -180,12 +183,13 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
- queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+ # queryString = "select ts, c1, c2 from %s.%s "%(paraDict['dbName'], paraDict['stbName'])
+ queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' and t5 == 'shanghai' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- consumerId = 0
+ consumerId = 1
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicFromStb1
ifcheckdata = 0
@@ -210,10 +214,10 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdSql.query(queryString)
- totalRowsInserted = tdSql.getRows()
+ totalRowsFromQuery = tdSql.getRows()
- if totalConsumeRows != totalRowsInserted:
- tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
+ if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
@@ -222,10 +226,18 @@ class TDTestCase:
def run(self):
- tdSql.prepare()
self.prepareTestEnv()
+ tdLog.printNoPrefix("=============================================")
+ tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
- # self.tmqCase2() TD-17267
+ self.tmqCase2()
+
+ self.prepareTestEnv()
+ tdLog.printNoPrefix("====================================================================")
+        tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsdb, and then from wal")
+ self.snapshot = 1
+ self.tmqCase1()
+ self.tmqCase2()
def stop(self):
diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py
index c5f7510a07..a2a429771c 100644
--- a/tests/system-test/7-tmq/tmqDelete-1ctb.py
+++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py
@@ -79,6 +79,17 @@ class TDTestCase:
tdLog.debug("del data ............ [OK]")
return
+ def threadFunctionForDeletaData(self, **paraDict):
+ # create new connector for new tdSql instance in my thread
+ newTdSql = tdCom.newTdSql()
+ self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"])
+ return
+
+ def asyncDeleteData(self, paraDict):
+ pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict)
+ pThread.start()
+ return pThread
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@@ -117,11 +128,17 @@ class TDTestCase:
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
+ tdSql.execute(sqlString)
+
+ if self.snapshot == 0:
+ consumerId = 0
+ elif self.snapshot == 1:
+ consumerId = 1
+ rowsOfDelete = 0
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
- consumerId = 0
+
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicFromStb1
ifcheckdata = 1
@@ -135,7 +152,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("insert process end, and start to check consume result")
+ tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
@@ -143,13 +160,18 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdSql.query(queryString)
- totalRowsInserted = tdSql.getRows()
-
- tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
- if totalConsumeRows != expectrowcnt:
- tdLog.exit("tmq consume rows error!")
+ totalRowsFromQuery = tdSql.getRows()
+
+ tdLog.info("act consume rows: %d, expect consume rows: %d, act query rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsFromQuery))
+
+ if self.snapshot == 0:
+ if totalConsumeRows != expectrowcnt:
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+ elif self.snapshot == 1:
+ if totalConsumeRows != totalRowsFromQuery:
+ tdLog.exit("tmq consume rows error with snapshot = 1!")
- tmqCom.checkFileContent(consumerId, queryString, rowsOfDelete)
+ tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -213,9 +235,11 @@ class TDTestCase:
consumerId = 1
if self.snapshot == 0:
+ consumerId = 2
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
elif self.snapshot == 1:
- expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4 - 1/4))
+ consumerId = 3
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
topicList = topicFromStb1
ifcheckdata = 1
@@ -237,33 +261,151 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdSql.query(queryString)
- totalRowsInserted = tdSql.getRows()
+ totalRowsFromQuery = tdSql.getRows()
- tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-
- if totalConsumeRows != expectrowcnt:
- tdLog.exit("tmq consume rows error!")
-
+ tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
+
+ if self.snapshot == 0:
+ if totalConsumeRows != expectrowcnt:
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+ elif self.snapshot == 1:
+ if totalConsumeRows != totalRowsFromQuery:
+ tdLog.exit("tmq consume rows error with snapshot = 1!")
+
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
+ def tmqCase3(self):
+ tdLog.printNoPrefix("======== test case 3: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 1,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 5000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 5,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
+ tdSql.query("flush database %s"%(paraDict['dbName']))
+
+ tmqCom.initConsumerTable()
+ tdLog.info("create topics from stb1")
+ topicFromStb1 = 'topic_stb1'
+ queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+ sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+
+ # paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+ consumerId = 1
+
+ if self.snapshot == 0:
+ consumerId = 4
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
+ elif self.snapshot == 1:
+ consumerId = 5
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
+
+ topicList = topicFromStb1
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:1000,\
+ auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ # del some data
+ rowsOfDelete = int(self.rowsPerTbl / 4 )
+ paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
+ pDeleteThread = self.asyncDeleteData(paraDict)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+ # update to 1/4 rows and insert 3/4 new rows
+ paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
+ # paraDict['rowsPerTbl'] = self.rowsPerTbl
+ # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+ pInsertThread.join()
+
+ tdLog.info("start to check consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ tdSql.query(queryString)
+ totalRowsFromQuery = tdSql.getRows()
+
+ tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
+
+ if self.snapshot == 0:
+ if totalConsumeRows != expectrowcnt:
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+ elif self.snapshot == 1:
+ if not ((totalConsumeRows >= totalRowsFromQuery) and (totalConsumeRows <= expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 1!")
+
+ # tmqCom.checkFileContent(consumerId, queryString)
+
+ tdSql.query("drop topic %s"%topicFromStb1)
+
+ tdLog.printNoPrefix("======== test case 3 end ...... ")
+
+
def run(self):
- tdSql.prepare()
- self.prepareTestEnv()
+ # tdSql.prepare()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
+ self.snapshot = 0
+ self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
+ self.tmqCase2()
- self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
+ self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
+ self.tmqCase2()
+
+ tdLog.printNoPrefix("=============================================")
+ tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
+ self.snapshot = 0
+ self.prepareTestEnv()
+ self.tmqCase3()
+ tdLog.printNoPrefix("====================================================================")
+ tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
+ self.snapshot = 1
+ self.prepareTestEnv()
+ self.tmqCase3()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqDelete-multiCtb.py b/tests/system-test/7-tmq/tmqDelete-multiCtb.py
new file mode 100644
index 0000000000..fa32efbd0b
--- /dev/null
+++ b/tests/system-test/7-tmq/tmqDelete-multiCtb.py
@@ -0,0 +1,416 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ def __init__(self):
+ self.snapshot = 0
+ self.vgroups = 4
+ self.ctbNum = 100
+ self.rowsPerTbl = 1000
+
+ def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), False)
+
+ def prepareTestEnv(self):
+ tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 3,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ tmqCom.initConsumerTable()
+ tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+ tdLog.info("create stb")
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ tdLog.info("create ctb")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("insert data")
+ tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
+ # tdSql.query("flush database %s"%(paraDict['dbName']))
+ return
+
+ def delData(self,tsql,dbName,ctbPrefix,ctbNum,startTs=0,endTs=0,ctbStartIdx=0):
+ tdLog.debug("start to del data ............")
+ for i in range(ctbNum):
+ sql = "delete from %s.%s%d where _c0 >= %d and _c0 <= %d "%(dbName,ctbPrefix,i+ctbStartIdx,startTs,endTs)
+ tsql.execute(sql)
+
+ tdLog.debug("del data ............ [OK]")
+ return
+
+ def threadFunctionForDeletaData(self, **paraDict):
+ # create new connector for new tdSql instance in my thread
+ newTdSql = tdCom.newTdSql()
+ self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"])
+ return
+
+ def asyncDeleteData(self, paraDict):
+ pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict)
+ pThread.start()
+ return pThread
+
+ def tmqCase1(self):
+ tdLog.printNoPrefix("======== test case 1: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'endTs': 0,
+ 'pollDelay': 5,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ # del some data
+ rowsOfDelete = int(paraDict["rowsPerTbl"] / 4)
+ paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
+ self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
+ startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ tdLog.info("create topics from stb1")
+ topicFromStb1 = 'topic_stb1'
+ queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+ sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+
+ if self.snapshot == 0:
+ consumerId = 0
+ elif self.snapshot == 1:
+ consumerId = 1
+ rowsOfDelete = 0
+
+ # paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
+ topicList = topicFromStb1
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:1000,\
+ auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+ tdLog.info("start to check consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ tdSql.query(queryString)
+ totalRowsFromQuery = tdSql.getRows()
+
+ tdLog.info("act consume rows: %d, expect consume rows: %d, act query rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsFromQuery))
+
+ if self.snapshot == 0:
+ if totalConsumeRows != expectrowcnt:
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+ elif self.snapshot == 1:
+ if totalConsumeRows != totalRowsFromQuery:
+ tdLog.exit("tmq consume rows error with snapshot = 1!")
+
+ # tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete)
+
+ tdSql.query("drop topic %s"%topicFromStb1)
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def tmqCase2(self):
+ tdLog.printNoPrefix("======== test case 2: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 5,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
+ tdSql.query("flush database %s"%(paraDict['dbName']))
+
+ # update to 1/4 rows and insert 3/4 new rows
+ paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
+ # paraDict['rowsPerTbl'] = self.rowsPerTbl
+ tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
+ ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ # del some data
+ rowsOfDelete = int(self.rowsPerTbl / 4 )
+ paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
+ self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
+ startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ tmqCom.initConsumerTable()
+ tdLog.info("create topics from stb1")
+ topicFromStb1 = 'topic_stb1'
+ queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+ sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+
+ # paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+ consumerId = 1
+
+ if self.snapshot == 0:
+ consumerId = 2
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
+ elif self.snapshot == 1:
+ consumerId = 3
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
+
+ topicList = topicFromStb1
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:1000,\
+ auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+ tdLog.info("start to check consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ tdSql.query(queryString)
+ totalRowsFromQuery = tdSql.getRows()
+
+ tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
+
+ if self.snapshot == 0:
+ if totalConsumeRows != expectrowcnt:
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+ elif self.snapshot == 1:
+ if totalConsumeRows != totalRowsFromQuery:
+ tdLog.exit("tmq consume rows error with snapshot = 1!")
+
+ # tmqCom.checkFileContent(consumerId, queryString)
+
+ tdSql.query("drop topic %s"%topicFromStb1)
+
+ tdLog.printNoPrefix("======== test case 2 end ...... ")
+
+ def tmqCase3(self):
+ tdLog.printNoPrefix("======== test case 3: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 5,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
+ tdSql.query("flush database %s"%(paraDict['dbName']))
+
+ tmqCom.initConsumerTable()
+ tdLog.info("create topics from stb1")
+ topicFromStb1 = 'topic_stb1'
+ queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+ sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+
+ # paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+ consumerId = 1
+
+ if self.snapshot == 0:
+ consumerId = 4
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
+ elif self.snapshot == 1:
+ consumerId = 5
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
+
+ topicList = topicFromStb1
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:1000,\
+ auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ # del some data
+ rowsOfDelete = int(self.rowsPerTbl / 4 )
+ paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
+ pDeleteThread = self.asyncDeleteData(paraDict)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+ # update to 1/4 rows and insert 3/4 new rows
+ paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
+ # paraDict['rowsPerTbl'] = self.rowsPerTbl
+ # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+ pInsertThread.join()
+
+ tdLog.info("start to check consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ tdSql.query(queryString)
+ totalRowsFromQuery = tdSql.getRows()
+
+ tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
+
+ if self.snapshot == 0:
+ if totalConsumeRows < expectrowcnt:
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+ elif self.snapshot == 1:
+ if not ((totalConsumeRows >= totalRowsFromQuery) and (totalConsumeRows <= expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 1!")
+
+ # tmqCom.checkFileContent(consumerId, queryString)
+
+ tdSql.query("drop topic %s"%topicFromStb1)
+
+ tdLog.printNoPrefix("======== test case 3 end ...... ")
+
+
+ def run(self):
+ tdLog.printNoPrefix("=============================================")
+ tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
+ self.snapshot = 0
+ self.prepareTestEnv()
+ self.tmqCase1()
+ self.tmqCase2()
+
+ tdLog.printNoPrefix("====================================================================")
+ tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
+ self.snapshot = 1
+ self.prepareTestEnv()
+ self.tmqCase1()
+ self.tmqCase2()
+
+ tdLog.printNoPrefix("=============================================")
+ tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
+ self.snapshot = 0
+ self.prepareTestEnv()
+ self.tmqCase3()
+ tdLog.printNoPrefix("====================================================================")
+ tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
+ self.snapshot = 1
+ self.prepareTestEnv()
+ self.tmqCase3()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py
index 8354991578..9699c4b32c 100644
--- a/tests/system-test/7-tmq/tmqDnodeRestart.py
+++ b/tests/system-test/7-tmq/tmqDnodeRestart.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def __init__(self):
+ self.snapshot = 0
self.vgroups = 2
self.ctbNum = 100
self.rowsPerTbl = 10000
@@ -37,15 +38,16 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
- 'ctbNum': 500,
- 'rowsPerTbl': 1000,
- 'batchNum': 500,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
+ paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
@@ -81,30 +83,31 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
- 'ctbNum': 1000,
- 'rowsPerTbl': 1000,
- 'batchNum': 400,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
- 'snapshot': 1}
+ 'snapshot': 0}
- # paraDict['vgroups'] = self.vgroups
- # paraDict['ctbNum'] = self.ctbNum
- # paraDict['rowsPerTbl'] = self.rowsPerTbl
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
- tmqCom.initConsumerTable()
- tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
- tdLog.info("create stb")
- tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
- tdLog.info("create ctb")
- tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
- ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("insert data")
- tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
- ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tmqCom.initConsumerTable()
+ # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+ # tdLog.info("create stb")
+ # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ # tdLog.info("create ctb")
+ # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ # ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tdLog.info("insert data")
+ # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
@@ -132,7 +135,7 @@ class TDTestCase:
tdLog.info("================= restart dnode ===========================")
tdDnodes.stop(1)
tdDnodes.start(1)
- time.sleep(5)
+ time.sleep(3)
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
@@ -142,10 +145,10 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdSql.query(queryString)
- totalRowsInserted = tdSql.getRows()
+ totalRowsFromQury = tdSql.getRows()
- if totalConsumeRows != totalRowsInserted:
- tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQury))
+ if totalConsumeRows != totalRowsFromQury:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
@@ -165,30 +168,31 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
- 'ctbNum': 1000,
- 'rowsPerTbl': 1000,
- 'batchNum': 1000,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 3000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
- 'snapshot': 1}
+ 'snapshot': 0}
- # paraDict['vgroups'] = self.vgroups
- # paraDict['ctbNum'] = self.ctbNum
- # paraDict['rowsPerTbl'] = self.rowsPerTbl
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
- tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
- tdLog.info("create stb")
- tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
- tdLog.info("create ctb")
- tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
- ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("insert data")
- tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
- ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+ # tdLog.info("create stb")
+ # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ # tdLog.info("create ctb")
+ # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ # ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tdLog.info("insert data")
+ # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -196,29 +200,29 @@ class TDTestCase:
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- consumerId = 0
- expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
+ consumerId = 1
+ expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + 100000
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
- auto.commit.interval.ms:1000,\
+ auto.commit.interval.ms:3000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("create some new child table and insert data ")
- tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
-
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("================= restart dnode ===========================")
tdDnodes.stop(1)
tdDnodes.start(1)
- time.sleep(5)
+ time.sleep(3)
+ tdLog.info("create some new child table and insert data ")
+ tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -227,10 +231,10 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdSql.query(queryString)
- totalRowsInserted = tdSql.getRows()
+ totalRowsFromQuery = tdSql.getRows()
- if totalConsumeRows != totalRowsInserted:
- tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
+ if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
@@ -239,8 +243,8 @@ class TDTestCase:
def run(self):
tdSql.prepare()
-
- self.tmqCase1()
+ self.prepareTestEnv()
+ # self.tmqCase1()
self.tmqCase2()
def stop(self):
diff --git a/tests/system-test/7-tmq/tmqDropStb.py b/tests/system-test/7-tmq/tmqDropStb.py
new file mode 100644
index 0000000000..2889bdc6a6
--- /dev/null
+++ b/tests/system-test/7-tmq/tmqDropStb.py
@@ -0,0 +1,129 @@
+import sys
+import time
+import socket
+import os
+import threading
+
+import taos
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ paraDict = {'dbName': 'db1',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 2,
+ 'stbName': 'stb0',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':16, 'count':1}, {'type': 'timestamp','count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 2000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 20,
+ 'showMsg': 1,
+ 'showRow': 1}
+
+ cdbName = 'cdb'
+ # some parameter to consumer processor
+ consumerId = 0
+ expectrowcnt = 0
+ topicList = ''
+ ifcheckdata = 0
+ ifManualCommit = 1
+ groupId = 'group.id:cgrp1'
+ autoCommit = 'enable.auto.commit:false'
+ autoCommitInterval = 'auto.commit.interval.ms:1000'
+ autoOffset = 'auto.offset.reset:earliest'
+
+ pollDelay = 20
+ showMsg = 1
+ showRow = 1
+
+ hostname = socket.gethostname()
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ logSql = False
+ tdSql.init(conn.cursor(), logSql)
+
+ def tmqCase1(self):
+ tdLog.printNoPrefix("======== test case 1: ")
+ tdLog.info("step 1: create database, stb, ctb and insert data")
+
+ tmqCom.initConsumerTable(self.cdbName)
+
+ tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"])
+
+ self.paraDict["stbName"] = 'stb1'
+ tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"])
+ tdCom.create_ctable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],tag_elm_list=self.paraDict['tagSchema'],count=self.paraDict["ctbNum"],default_ctbname_prefix=self.paraDict["ctbPrefix"])
+ tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"])
+ # pThread1 = tmqCom.asyncInsertData(paraDict=self.paraDict)
+
+ self.paraDict["stbName"] = 'stb2'
+ self.paraDict["ctbPrefix"] = 'newctb'
+ self.paraDict["batchNum"] = 10000
+ tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"])
+ tdCom.create_ctable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],tag_elm_list=self.paraDict['tagSchema'],count=self.paraDict["ctbNum"],default_ctbname_prefix=self.paraDict["ctbPrefix"])
+ # tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"])
+ pThread2 = tmqCom.asyncInsertData(paraDict=self.paraDict)
+
+ tdLog.info("create topics from db")
+ topicName1 = 'UpperCasetopic_%s'%(self.paraDict['dbName'])
+ tdSql.execute("create topic %s as database %s" %(topicName1, self.paraDict['dbName']))
+
+ topicList = topicName1 + ',' +topicName1
+ keyList = '%s,%s,%s,%s'%(self.groupId,self.autoCommit,self.autoCommitInterval,self.autoOffset)
+ self.expectrowcnt = self.paraDict["rowsPerTbl"] * self.paraDict["ctbNum"] * 2
+ tmqCom.insertConsumerInfo(self.consumerId, self.expectrowcnt,topicList,keyList,self.ifcheckdata,self.ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(self.pollDelay,self.paraDict["dbName"],self.showMsg, self.showRow,self.cdbName)
+
+ tmqCom.getStartConsumeNotifyFromTmqsim()
+ tdLog.info("drop one stable")
+ self.paraDict["stbName"] = 'stb1'
+ tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName']))
+ # tmqCom.drop_ctable(tdSql, dbname=self.paraDict['dbName'], count=self.paraDict["ctbNum"], default_ctbname_prefix=self.paraDict["ctbPrefix"])
+
+ pThread2.join()
+
+ tdLog.info("wait result from consumer, then check it")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ if not (totalConsumeRows >= self.expectrowcnt/2 and totalConsumeRows <= self.expectrowcnt):
+ tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, self.expectrowcnt/2, self.expectrowcnt))
+ tdLog.exit("tmq consume rows error!")
+
+ time.sleep(10)
+ tdSql.query("drop topic %s"%topicName1)
+
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def run(self):
+ tdSql.prepare()
+ self.tmqCase1()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py
new file mode 100644
index 0000000000..d9e675ddc6
--- /dev/null
+++ b/tests/system-test/7-tmq/tmqDropStbCtb.py
@@ -0,0 +1,289 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ def __init__(self):
+ self.snapshot = 0
+ self.vgroups = 4
+ self.ctbNum = 100
+ self.rowsPerTbl = 1000
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), False)
+
+ def waitSubscriptionExit(self, max_wait_count=20):
+ wait_cnt = 0
+ while (wait_cnt < max_wait_count):
+ tdSql.query("show subscriptions")
+ if tdSql.getRows() == 0:
+ break
+ else:
+ time.sleep(2)
+ wait_cnt += 1
+
+ tdLog.info("wait subscriptions exit for %d s"%wait_cnt)
+
+ def prepareTestEnv(self):
+ tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 3,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ tmqCom.initConsumerTable()
+ tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+ tdLog.info("create stb")
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ tdLog.info("create ctb")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("insert data")
+ tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
+ # tdSql.query("flush database %s"%(paraDict['dbName']))
+ return
+
+ # drop some ctbs
+ def tmqCase1(self):
+ tdLog.printNoPrefix("======== test case 1: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'endTs': 0,
+ 'pollDelay': 5,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ # again create one new stb1
+ paraDict["stbName"] = 'stb1'
+ paraDict['ctbPrefix'] = 'ctb1n_'
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ tdLog.info("create ctb")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("async insert data")
+ # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+ tdLog.info("create topics from database")
+ topicFromDb = 'topic_dbt'
+ tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
+
+ if self.snapshot == 0:
+ consumerId = 0
+ elif self.snapshot == 1:
+ consumerId = 1
+
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2)
+ topicList = topicFromDb
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:1000,\
+ auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+ tmqCom.getStartConsumeNotifyFromTmqsim()
+ tdLog.info("drop some ctables")
+ paraDict["stbName"] = 'stb'
+ paraDict['ctbPrefix'] = 'ctb'
+ paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 3 / 4) # drop 1/4 ctbls
+ paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
+ # tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName']))
+ tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
+
+ pInsertThread.join()
+
+ tdLog.info("start to check consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+
+ if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+
+ tdLog.info("wait subscriptions exit ....")
+ self.waitSubscriptionExit()
+
+ tdSql.query("drop topic %s"%topicFromDb)
+ tdLog.info("success dorp topic: %s"%topicFromDb)
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ # drop one stb
+ def tmqCase2(self):
+ tdLog.printNoPrefix("======== test case 2: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 100,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 1000,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'endTs': 0,
+ 'pollDelay': 5,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+ paraDict['snapshot'] = self.snapshot
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ # again create one new stb1
+ paraDict["stbName"] = 'stb2'
+ paraDict['ctbPrefix'] = 'ctb2n_'
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ tdLog.info("create ctb")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("async insert data")
+ # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+ tdLog.info("create topics from database")
+ topicFromDb = 'topic_dbt'
+ tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
+
+ if self.snapshot == 0:
+ consumerId = 0
+ elif self.snapshot == 1:
+ consumerId = 1
+
+ expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2)
+ topicList = topicFromDb
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:1000,\
+ auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+
+ tmqCom.getStartConsumeNotifyFromTmqsim()
+ tdLog.info("drop one stable")
+ paraDict["stbName"] = 'stb1'
+ tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName']))
+
+ pInsertThread.join()
+
+ tdLog.info("start to check consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+
+ if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
+
+ tdLog.info("wait subscriptions exit ....")
+ self.waitSubscriptionExit()
+
+ tdSql.query("drop topic %s"%topicFromDb)
+ tdLog.info("success dorp topic: %s"%topicFromDb)
+ tdLog.printNoPrefix("======== test case 2 end ...... ")
+
+ def run(self):
+ tdLog.printNoPrefix("=============================================")
+ tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
+ self.snapshot = 0
+ self.prepareTestEnv()
+ self.tmqCase1()
+ self.tmqCase2()
+
+ tdLog.printNoPrefix("====================================================================")
+ tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
+ self.snapshot = 1
+ self.prepareTestEnv()
+ self.tmqCase1()
+ self.tmqCase2()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb.py b/tests/system-test/7-tmq/tmqUdf-multCtb.py
deleted file mode 100644
index 392a7d452e..0000000000
--- a/tests/system-test/7-tmq/tmqUdf-multCtb.py
+++ /dev/null
@@ -1,372 +0,0 @@
-from distutils.log import error
-import taos
-import sys
-import time
-import socket
-import os
-import threading
-import subprocess
-import platform
-
-from util.log import *
-from util.sql import *
-from util.cases import *
-from util.dnodes import *
-from util.common import *
-sys.path.append("./7-tmq")
-from tmqCommon import *
-
-class TDTestCase:
- def __init__(self):
- self.snapshot = 0
- self.vgroups = 4
- self.ctbNum = 100
- self.rowsPerTbl = 1000
-
- def init(self, conn, logSql):
- tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor())
- #tdSql.init(conn.cursor(), logSql) # output sql.txt file
-
- def prepare_udf_so(self):
- selfPath = os.path.dirname(os.path.realpath(__file__))
-
- if ("community" in selfPath):
- projPath = selfPath[:selfPath.find("community")]
- else:
- projPath = selfPath[:selfPath.find("tests")]
- print(projPath)
-
- if platform.system().lower() == 'windows':
- self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
- if (not tdDnodes.dnodes[0].remoteIP == ""):
- tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
- self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
- else:
- self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
- self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
- return
-
- def create_udf_function(self):
- # create scalar functions
- tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
-
- functions = tdSql.getResult("show functions")
- function_nums = len(functions)
- if function_nums == 1:
- tdLog.info("create one udf functions success ")
- else:
- tdLog.exit("create udf functions fail")
- return
-
- def checkFileContent(self, consumerId, queryString):
- buildPath = tdCom.getBuildPath()
- cfgPath = tdCom.getClientCfgPath()
- dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
- cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
- tdLog.info(cmdStr)
- os.system(cmdStr)
-
- consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
- tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
-
- consumeFile = open(consumeRowsFile, mode='r')
- queryFile = open(dstFile, mode='r')
-
- # skip first line for it is schema
- queryFile.readline()
-
- while True:
- dst = queryFile.readline()
- src = consumeFile.readline()
-
- if dst:
- if dst != src:
- tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
- else:
- break
- return
-
- def prepareTestEnv(self):
- tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
- paraDict = {'dbName': 'dbt',
- 'dropFlag': 1,
- 'event': '',
- 'vgroups': 4,
- 'stbName': 'stb',
- 'colPrefix': 'c',
- 'tagPrefix': 't',
- 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
- 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
- 'ctbPrefix': 'ctb',
- 'ctbStartIdx': 0,
- 'ctbNum': 100,
- 'rowsPerTbl': 1000,
- 'batchNum': 100,
- 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
- 'pollDelay': 3,
- 'showMsg': 1,
- 'showRow': 1,
- 'snapshot': 0}
-
- paraDict['vgroups'] = self.vgroups
- paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tmqCom.initConsumerTable()
- tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
- tdLog.info("create stb")
- tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
- tdLog.info("create ctb")
- tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
- ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("insert data")
- tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
- ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
- # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
- # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
- # tdSql.query("flush database %s"%(paraDict['dbName']))
- return
-
- def tmqCase1(self):
- tdLog.printNoPrefix("======== test case 1: multi sub table")
- paraDict = {'dbName': 'dbt',
- 'dropFlag': 1,
- 'event': '',
- 'vgroups': 4,
- 'stbName': 'stb',
- 'colPrefix': 'c',
- 'tagPrefix': 't',
- 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
- 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
- 'ctbPrefix': 'ctb',
- 'ctbStartIdx': 0,
- 'ctbNum': 100,
- 'rowsPerTbl': 1000,
- 'batchNum': 100,
- 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
- 'pollDelay': 3,
- 'showMsg': 1,
- 'showRow': 1,
- 'snapshot': 0}
- paraDict['snapshot'] = self.snapshot
- paraDict['vgroups'] = self.vgroups
- paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- topicNameList = ['topic1', 'topic2']
- expectRowsList = []
- tmqCom.initConsumerTable()
- # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
- # tdLog.info("create stb")
- # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
- # tdLog.info("create ctb")
- # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
- # tdLog.info("insert data")
- # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
- tdLog.info("create topics from stb with filter")
- queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
- sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
- tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
- tdSql.query(queryString)
- expectRowsList.append(tdSql.getRows())
-
- # init consume info, and start tmq_sim, then check consume result
- tdLog.info("insert consume info to consume processor")
- consumerId = 0
- expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
- topicList = topicNameList[0]
- ifcheckdata = 1
- ifManualCommit = 1
- keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
- tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
- tdLog.info("start consume processor")
- tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
-
- tdLog.info("wait the consume result")
- expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
-
- if expectRowsList[0] != resultList[0]:
- tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
- tdLog.exit("0 tmq consume rows error!")
-
- # self.checkFileContent(consumerId, queryString)
- # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
-
- # reinit consume info, and start tmq_sim, then check consume result
- tmqCom.initConsumerTable()
-
- queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
- sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
- tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
- tdSql.query(queryString)
- expectRowsList.append(tdSql.getRows())
-
- consumerId = 1
- topicList = topicNameList[1]
- tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
- tdLog.info("start consume processor")
- tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
-
- tdLog.info("wait the consume result")
- expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
- if expectRowsList[1] != resultList[0]:
- tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
- tdLog.exit("1 tmq consume rows error!")
-
- # self.checkFileContent(consumerId, queryString)
- # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
-
- time.sleep(10)
- for i in range(len(topicNameList)):
- tdSql.query("drop topic %s"%topicNameList[i])
-
- tdLog.printNoPrefix("======== test case 1 end ...... ")
-
- def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data")
- paraDict = {'dbName': 'dbt',
- 'dropFlag': 1,
- 'event': '',
- 'vgroups': 4,
- 'stbName': 'stb',
- 'colPrefix': 'c',
- 'tagPrefix': 't',
- 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
- 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
- 'ctbPrefix': 'ctb',
- 'ctbStartIdx': 0,
- 'ctbNum': 100,
- 'rowsPerTbl': 1000,
- 'batchNum': 100,
- 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
- 'pollDelay': 3,
- 'showMsg': 1,
- 'showRow': 1,
- 'snapshot': 0}
- paraDict['snapshot'] = self.snapshot
- paraDict['vgroups'] = self.vgroups
- paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- topicNameList = ['topic1', 'topic2']
- expectRowsList = []
- tmqCom.initConsumerTable()
- # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
- # tdLog.info("create stb")
- # tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
- # tdLog.info("create ctb")
- # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
- # tdLog.info("insert data")
- # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
- tdLog.info("create topics from stb with filter")
- queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
- sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
- tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
- # tdSql.query(queryString)
- # expectRowsList.append(tdSql.getRows())
-
- # init consume info, and start tmq_sim, then check consume result
- tdLog.info("insert consume info to consume processor")
- consumerId = 2
- expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
- topicList = topicNameList[0]
- ifcheckdata = 1
- ifManualCommit = 1
- keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
- tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
- tdLog.info("start consume processor")
- tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
-
- paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
- tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
- ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- tdLog.info("wait the consume result")
- expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
-
- tdSql.query(queryString)
- expectRowsList.append(tdSql.getRows())
-
- if expectRowsList[0] != resultList[0]:
- tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
- tdLog.exit("2 tmq consume rows error!")
-
- # self.checkFileContent(consumerId, queryString)
- # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
-
- # reinit consume info, and start tmq_sim, then check consume result
- tmqCom.initConsumerTable()
-
- queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
- sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
- tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
- tdSql.query(queryString)
- expectRowsList.append(tdSql.getRows())
-
- consumerId = 3
- topicList = topicNameList[1]
- tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
- tdLog.info("start consume processor")
- tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
-
- tdLog.info("wait the consume result")
- expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
- if expectRowsList[1] != resultList[0]:
- tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
- tdLog.exit("3 tmq consume rows error!")
-
- # self.checkFileContent(consumerId, queryString)
- # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
-
- time.sleep(10)
- for i in range(len(topicNameList)):
- tdSql.query("drop topic %s"%topicNameList[i])
-
- tdLog.printNoPrefix("======== test case 2 end ...... ")
-
- def run(self):
- # tdSql.prepare()
- self.prepare_udf_so()
- self.create_udf_function()
-
- tdLog.printNoPrefix("=============================================")
- tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
- self.prepareTestEnv()
- self.tmqCase1()
- self.tmqCase2()
-
- tdLog.printNoPrefix("====================================================================")
- tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
- self.prepareTestEnv()
- self.snapshot = 1
- self.tmqCase1()
- self.tmqCase2()
-
- def stop(self):
- tdSql.close()
- tdLog.success(f"{__file__} successfully executed")
-
-event = threading.Event()
-
-tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 63011a7836..d3dd93f9ca 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -32,22 +32,48 @@ python3 ./test.py -f 1-insert/block_wise.py
python3 ./test.py -f 1-insert/create_retentions.py
python3 ./test.py -f 1-insert/table_param_ttl.py
-#python3 ./test.py -f 1-insert/update_data.py
+python3 ./test.py -f 2-query/abs.py
+python3 ./test.py -f 2-query/abs.py -R
+python3 ./test.py -f 2-query/and_or_for_byte.py
+python3 ./test.py -f 2-query/and_or_for_byte.py -R
+python3 ./test.py -f 2-query/apercentile.py
+python3 ./test.py -f 2-query/apercentile.py -R
+python3 ./test.py -f 2-query/arccos.py
+python3 ./test.py -f 2-query/arccos.py -R
+python3 ./test.py -f 2-query/arcsin.py
+python3 ./test.py -f 2-query/arcsin.py -R
+python3 ./test.py -f 2-query/arctan.py
+python3 ./test.py -f 2-query/arctan.py -R
+python3 ./test.py -f 2-query/avg.py
+python3 ./test.py -f 2-query/avg.py -R
+python3 ./test.py -f 2-query/between.py
+python3 ./test.py -f 2-query/between.py -R
+python3 ./test.py -f 2-query/bottom.py
+python3 ./test.py -f 2-query/bottom.py -R
+python3 ./test.py -f 2-query/cast.py
+python3 ./test.py -f 2-query/cast.py -R
+python3 ./test.py -f 2-query/ceil.py
+python3 ./test.py -f 2-query/ceil.py -R
+python3 ./test.py -f 2-query/char_length.py
+python3 ./test.py -f 2-query/char_length.py -R
+python3 ./test.py -f 2-query/check_tsdb.py
+python3 ./test.py -f 2-query/check_tsdb.py -R
+# jira python3 ./test.py -f 1-insert/update_data.py
+python3 ./test.py -f 1-insert/delete_data.py
python3 ./test.py -f 2-query/db.py
-python3 ./test.py -f 2-query/between.py
+
+python3 ./test.py -f 2-query/db.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
python3 ./test.py -f 2-query/rtrim.py
python3 ./test.py -f 2-query/length.py
-python3 ./test.py -f 2-query/char_length.py
python3 ./test.py -f 2-query/upper.py
python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join.py
python3 ./test.py -f 2-query/join2.py
-python3 ./test.py -f 2-query/cast.py
python3 ./test.py -f 2-query/substr.py
python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
@@ -55,7 +81,6 @@ python3 ./test.py -f 2-query/concat.py
python3 ./test.py -f 2-query/concat2.py
python3 ./test.py -f 2-query/concat_ws.py
python3 ./test.py -f 2-query/concat_ws2.py
-python3 ./test.py -f 2-query/check_tsdb.py
python3 ./test.py -f 2-query/spread.py
python3 ./test.py -f 2-query/hyperloglog.py
python3 ./test.py -f 2-query/explain.py
@@ -79,11 +104,7 @@ python3 ./test.py -f 2-query/Timediff.py
python3 ./test.py -f 2-query/json_tag.py
python3 ./test.py -f 2-query/top.py
-python3 ./test.py -f 2-query/bottom.py
python3 ./test.py -f 2-query/percentile.py
-python3 ./test.py -f 2-query/apercentile.py
-python3 ./test.py -f 2-query/abs.py
-python3 ./test.py -f 2-query/ceil.py
python3 ./test.py -f 2-query/floor.py
python3 ./test.py -f 2-query/round.py
python3 ./test.py -f 2-query/log.py
@@ -92,16 +113,12 @@ python3 ./test.py -f 2-query/sqrt.py
python3 ./test.py -f 2-query/sin.py
python3 ./test.py -f 2-query/cos.py
python3 ./test.py -f 2-query/tan.py
-python3 ./test.py -f 2-query/arcsin.py
-python3 ./test.py -f 2-query/arccos.py
-python3 ./test.py -f 2-query/arctan.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
# TD-15983 subquery output duplicate name column.
# Please Xiangyang Guo modify the following script
# python3 ./test.py -f 2-query/nestedQuery_str.py
-python3 ./test.py -f 2-query/avg.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
python3 ./test.py -f 2-query/mavg.py
@@ -124,7 +141,6 @@ python3 ./test.py -f 2-query/distribute_agg_avg.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
-python3 ./test.py -f 2-query/and_or_for_byte.py
python3 ./test.py -f 2-query/count_partition.py
python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py
@@ -144,7 +160,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
-# BUG Redict python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
+# BUG Redirect python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
@@ -184,22 +200,24 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
-python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
+#python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py
-#python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
+python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
+python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py
+python3 ./test.py -f 7-tmq/tmqDropStb.py
python3 ./test.py -f 7-tmq/tmqUdf.py
-python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
-python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
+# python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
+# python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
-#python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
+# python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
#------------querPolicy 2-----------
-python3 ./test.py -f 2-query/between.py -Q 2
+python3 ./test.py -f 2-query/between.py -Q 2
python3 ./test.py -f 2-query/distinct.py -Q 2
python3 ./test.py -f 2-query/varchar.py -Q 2
python3 ./test.py -f 2-query/ltrim.py -Q 2
@@ -256,7 +274,7 @@ python3 ./test.py -f 2-query/arccos.py -Q 2
python3 ./test.py -f 2-query/arctan.py -Q 2
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
-# python3 ./test.py -f 2-query/nestedQuery.py -Q 2
+# python3 ./test.py -f 2-query/nestedQuery.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index b893f7af64..eccd12aca6 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -22,13 +22,17 @@ import json
import platform
import socket
import threading
+
+import toml
sys.path.append("../pytest")
from util.log import *
from util.dnodes import *
from util.cases import *
from util.cluster import *
+from util.taosadapter import *
import taos
+import taosrest
def checkRunTimeError():
import win32gui
@@ -50,7 +54,7 @@ def checkRunTimeError():
os.system("TASKKILL /F /IM taosd.exe")
if __name__ == "__main__":
-
+
fileName = "all"
deployPath = ""
masterIp = ""
@@ -63,11 +67,13 @@ if __name__ == "__main__":
dnodeNums = 1
mnodeNums = 0
updateCfgDict = {}
+ adapter_cfg_dict = {}
execCmd = ""
queryPolicy = 1
createDnodeNums = 1
- opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:', [
- 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums'])
+ restful = False
+ opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:', [
+ 'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@@ -87,11 +93,13 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-M create mnode numbers in clusters')
tdLog.printNoPrefix('-Q set queryPolicy in one dnode')
tdLog.printNoPrefix('-C create Dnode Numbers in one cluster')
-
+ tdLog.printNoPrefix('-R restful realization form')
+ tdLog.printNoPrefix('-D taosadapter update cfg dict ')
+
sys.exit(0)
- if key in ['-r', '--restart']:
+ if key in ['-r', '--restart']:
restart = True
if key in ['-f', '--file']:
@@ -135,7 +143,7 @@ if __name__ == "__main__":
try:
execCmd = base64.b64decode(value.encode()).decode()
except:
- print('updateCfgDict convert fail.')
+ print('execCmd run fail.')
sys.exit(0)
if key in ['-N', '--dnodeNums']:
@@ -150,8 +158,21 @@ if __name__ == "__main__":
if key in ['-C', '--createDnodeNums']:
createDnodeNums = value
+ if key in ['-R', '--restful']:
+ restful = True
+
+ if key in ['-D', '--adaptercfgupdate']:
+ try:
+ adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
+ except:
+ print('adapter cfg update convert fail.')
+ sys.exit(0)
+
if not execCmd == "":
- tdDnodes.init(deployPath)
+ if restful:
+ tAdapter.init(deployPath)
+ else:
+ tdDnodes.init(deployPath)
print(execCmd)
exec(execCmd)
quit()
@@ -184,8 +205,33 @@ if __name__ == "__main__":
if valgrind:
time.sleep(2)
+ if restful:
+ toBeKilled = "taosadapter"
+
+ killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
+
+ psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
+ processID = subprocess.check_output(psCmd, shell=True)
+
+ while(processID):
+ os.system(killCmd)
+ time.sleep(1)
+ processID = subprocess.check_output(psCmd, shell=True)
+
+ for port in range(6030, 6041):
+ usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
+ processID = subprocess.check_output(usePortPID, shell=True)
+
+ if processID:
+ killCmd = "kill -TERM %s" % processID
+ os.system(killCmd)
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+
+ tdLog.info('stop taosadapter')
+
tdLog.info('stop All dnodes')
-
+
if masterIp == "":
host = socket.gethostname()
else:
@@ -213,6 +259,7 @@ if __name__ == "__main__":
except Exception as r:
print(r)
updateCfgDictStr = ''
+ # adapter_cfg_dict_str = ''
if is_test_framework:
moduleName = fileName.replace(".py", "").replace(os.sep, ".")
uModule = importlib.import_module(moduleName)
@@ -221,30 +268,44 @@ if __name__ == "__main__":
if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
updateCfgDict = ucase.updatecfgDict
updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
+ if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')):
+ adapter_cfg_dict = ucase.taosadapter_cfg_dict
+ # adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}"
except Exception as r:
print(r)
else:
pass
+ if restful:
+ tAdapter.init(deployPath, masterIp)
+ tAdapter.stop(force_kill=True)
+
if dnodeNums == 1 :
tdDnodes.deploy(1,updateCfgDict)
tdDnodes.start(1)
tdCases.logSql(logSql)
+ if restful:
+ tAdapter.deploy(adapter_cfg_dict)
+ tAdapter.start()
+
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
- conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
- tdSql.init(conn.cursor())
- tdSql.execute("create qnode on dnode 1")
- tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
- tdSql.query("show local variables;")
- for i in range(tdSql.queryRows):
- if tdSql.queryResult[i][0] == "queryPolicy" :
- if int(tdSql.queryResult[i][1]) == int(queryPolicy):
- tdLog.success('alter queryPolicy to %d successfully'%queryPolicy)
- else :
- tdLog.debug(tdSql.queryResult)
- tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
+ if restful:
+ conn = taosrest.connect(url=f"http://{host}:6041")
+ else:
+ conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+
+ cursor = conn.cursor()
+ cursor.execute("create qnode on dnode 1")
+ cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
+ cursor.execute("show local variables")
+ res = cursor.fetchall()
+ for i in range(cursor.rowcount):
+ if res[i][0] == "queryPolicy" :
+ if int(res[i][1]) == int(queryPolicy):
+ tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
+ else:
+ tdLog.debug(res)
+ tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
else :
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
@@ -258,10 +319,16 @@ if __name__ == "__main__":
for dnode in tdDnodes.dnodes:
tdDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
- conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
- print(tdDnodes.getSimCfgPath(),host)
+
+ if restful:
+ tAdapter.deploy(adapter_cfg_dict)
+ tAdapter.start()
+
+ if not restful:
+ conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+ else:
+ conn = taosrest.connect(url=f"http://{host}:6041")
+ tdLog.info(tdDnodes.getSimCfgPath(),host)
if createDnodeNums == 1:
createDnodeNums=dnodeNums
else:
@@ -275,9 +342,10 @@ if __name__ == "__main__":
if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
conn = None
else:
- conn = taos.connect(
- host="%s"%(host),
- config=tdDnodes.sim.getCfgDir())
+ if not restful:
+ conn = taos.connect(host="%s"%(host), config=tdDnodes.sim.getCfgDir())
+ else:
+ conn = taosrest.connect(url=f"http://{host}:6041")
if is_test_framework:
tdCases.runOneWindows(conn, fileName)
else:
@@ -302,28 +370,55 @@ if __name__ == "__main__":
ucase = uModule.TDTestCase()
if (json.dumps(updateCfgDict) == '{}'):
updateCfgDict = ucase.updatecfgDict
+ if (json.dumps(adapter_cfg_dict) == '{}'):
+ adapter_cfg_dict = ucase.taosadapter_cfg_dict
except:
pass
+
+ if restful:
+ tAdapter.init(deployPath, masterIp)
+ tAdapter.stop(force_kill=True)
+
if dnodeNums == 1 :
tdDnodes.deploy(1,updateCfgDict)
tdDnodes.start(1)
tdCases.logSql(logSql)
+
+ if restful:
+ tAdapter.deploy(adapter_cfg_dict)
+ tAdapter.start()
+
if queryPolicy != 1:
queryPolicy=int(queryPolicy)
- conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
- tdSql.init(conn.cursor())
- tdSql.execute("create qnode on dnode 1")
- tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
- tdSql.query("show local variables;")
- for i in range(tdSql.queryRows):
- if tdSql.queryResult[i][0] == "queryPolicy" :
- if int(tdSql.queryResult[i][1]) == int(queryPolicy):
- tdLog.success('alter queryPolicy to %d successfully'%queryPolicy)
- else :
- tdLog.debug(tdSql.queryResult)
- tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
+ if not restful:
+ conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+ else:
+ conn = taosrest.connect(url=f"http://{host}:6041")
+ # tdSql.init(conn.cursor())
+ # tdSql.execute("create qnode on dnode 1")
+ # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
+ # tdSql.query("show local variables;")
+ # for i in range(tdSql.queryRows):
+ # if tdSql.queryResult[i][0] == "queryPolicy" :
+ # if int(tdSql.queryResult[i][1]) == int(queryPolicy):
+ # tdLog.success('alter queryPolicy to %d successfully'%queryPolicy)
+ # else :
+ # tdLog.debug(tdSql.queryResult)
+ # tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
+
+ cursor = conn.cursor()
+ cursor.execute("create qnode on dnode 1")
+ cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
+ cursor.execute("show local variables")
+ res = cursor.fetchall()
+ for i in range(cursor.rowcount):
+ if res[i][0] == "queryPolicy" :
+ if int(res[i][1]) == int(queryPolicy):
+ tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
+ else:
+ tdLog.debug(res)
+ tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
+
else :
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
@@ -337,9 +432,15 @@ if __name__ == "__main__":
for dnode in tdDnodes.dnodes:
tdDnodes.starttaosd(dnode.index)
tdCases.logSql(logSql)
- conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
+
+ if restful:
+ tAdapter.deploy(adapter_cfg_dict)
+ tAdapter.start()
+
+ if not restful:
+ conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+ else:
+ conn = taosrest.connect(url=f"http://{host}:6041")
print(tdDnodes.getSimCfgPath(),host)
if createDnodeNums == 1:
createDnodeNums=dnodeNums
@@ -351,8 +452,8 @@ if __name__ == "__main__":
print("check dnode ready")
except Exception as r:
print(r)
-
-
+
+
if testCluster:
tdLog.info("Procedures for testing cluster")
if fileName == "all":
@@ -361,30 +462,35 @@ if __name__ == "__main__":
tdCases.runOneCluster(fileName)
else:
tdLog.info("Procedures for testing self-deployment")
- conn = taos.connect(
- host,
- config=tdDnodes.getSimCfgPath())
-
+ if not restful:
+ conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
+ else:
+ conn = taosrest.connect(url=f"http://{host}:6041")
+
if fileName == "all":
tdCases.runAllLinux(conn)
else:
tdCases.runOneLinux(conn, fileName)
-
+
if restart:
if fileName == "all":
tdLog.info("not need to query ")
- else:
+ else:
sp = fileName.rsplit(".", 1)
if len(sp) == 2 and sp[1] == "py":
tdDnodes.stopAll()
tdDnodes.start(1)
- time.sleep(1)
- conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
+ time.sleep(1)
+ if not restful:
+ conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
+ else:
+ conn = taosrest.connect(url=f"http://{host}:6041")
tdLog.info("Procedures for tdengine deployed in %s" % (host))
tdLog.info("query test after taosd restart")
tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
else:
tdLog.info("not need to query")
+
if conn is not None:
conn.close()
sys.exit(0)
diff --git a/tests/tsim/src/simSystem.c b/tests/tsim/src/simSystem.c
index 1c751f290a..f2fefb903d 100644
--- a/tests/tsim/src/simSystem.c
+++ b/tests/tsim/src/simSystem.c
@@ -99,6 +99,7 @@ SScript *simProcessCallOver(SScript *script) {
}
if (simScriptPos == -1) return NULL;
+ if (!simExecSuccess) return NULL;
return simScriptList[simScriptPos];
} else {
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index a496cc2864..a0adb7c7bc 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -197,7 +197,7 @@ void shellRunSingleCommandImp(char *command) {
et = taosGetTimestampUs();
if (error_no == 0) {
- printf("Query OK, %d rows affected (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
+ printf("Query OK, %d rows in database (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
} else {
printf("Query interrupted (%s), %d rows affected (%.6fs)\r\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6);
}
diff --git a/tools/taos-tools b/tools/taos-tools
index 2b75339b8b..f84cb6e515 160000
--- a/tools/taos-tools
+++ b/tools/taos-tools
@@ -1 +1 @@
-Subproject commit 2b75339b8b5c239619d1f09970d03075c58140dd
+Subproject commit f84cb6e51556d8030585128c2b252aa2a6453328
diff --git a/tools/taosws-rs b/tools/taosws-rs
index d8cf0e7e06..267a96fb09 160000
--- a/tools/taosws-rs
+++ b/tools/taosws-rs
@@ -1 +1 @@
-Subproject commit d8cf0e7e067d193cfaf3e920b6ec6cbb9b9f4165
+Subproject commit 267a96fb09fc2ba14acfa47f7d3678def64c29c5