Merge branch '3.0' into fix/TD-18076
commit a3d4dce3e5
@@ -31,6 +31,6 @@ func main() {
 log.Fatalln("scan error:\n", err)
 return
 }
-log.Fatalln(r.ts, r.current)
+log.Println(r.ts, r.current)
 }
 }
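The only functional change in this hunk is `log.Fatalln` → `log.Println`: `Fatalln` writes the record and then calls `os.Exit(1)`, so in a scan loop only the first row would ever be reported. A minimal sketch of the same log-and-exit versus log-and-continue distinction, written in Python and not part of this commit (the row values are invented for illustration):

```python
import logging
import sys

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")

rows = [("2018-10-03 14:38:05.000", 10.3), ("2018-10-03 14:38:15.000", 12.6)]

def print_rows_fatal(rows):
    for ts, current in rows:
        # log-and-exit: only the first row is ever reported
        logging.critical("%s %s", ts, current)
        sys.exit(1)

def print_rows(rows):
    for ts, current in rows:
        # log-and-continue: every scanned row is reported
        logging.info("%s %s", ts, current)

if __name__ == "__main__":
    print_rows(rows)  # prints both rows; print_rows_fatal would stop after one
```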
@@ -1,13 +1,13 @@
 import taos
 
-lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
-         "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
-         "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
-         "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3",
-         "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
-         "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
-         "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
-         "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"]
+lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2",
+         "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3",
+         "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2",
+         "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3",
+         "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3",
+         "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2",
+         "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2",
+         "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2"]
 
 
 def get_connection() -> taos.TaosConnection:
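In the replacement block the location tag (e.g. California.SanFrancisco) is now wrapped in single quotes, which is what is needed if each comma-separated field is later spliced into a SQL statement as a string literal. A small sketch of that usage, an assumption about how the sample rows are consumed rather than code from this repository (the `line_to_insert` helper and the `meters` super-table name are hypothetical):

```python
def line_to_insert(line: str, stable: str = "meters") -> str:
    """Split one sample row and build an INSERT statement for it.

    Expected field order: table, ts, current, voltage, phase, location, groupid.
    The location field is assumed to already carry its surrounding quotes.
    """
    table, ts, current, voltage, phase, location, groupid = line.split(",")
    return (
        f"INSERT INTO {table} USING {stable} TAGS({location}, {groupid}) "
        f"VALUES('{ts}', {current}, {voltage}, {phase})"
    )

line = "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2"
print(line_to_insert(line))
```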
@ -46,114 +46,122 @@ apt-get 方式只适用于 Debian 或 Ubuntu 系统
|
|||
</TabItem>
|
||||
<TabItem label="Deb 安装" value="debinst">
|
||||
|
||||
1、从官网下载获得 deb 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.deb;
|
||||
2、进入到 TDengine-server-2.4.0.7-Linux-x64.deb 安装包所在目录,执行如下的安装命令:
|
||||
1、从官网下载获得 deb 安装包,例如 TDengine-server-3.0.0.10002-Linux-x64.deb;
|
||||
2、进入到 TDengine-server-3.0.0.10002-Linux-x64.deb 安装包所在目录,执行如下的安装命令:
|
||||
|
||||
```
|
||||
$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
|
||||
(Reading database ... 137504 files and directories currently installed.)
|
||||
Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ...
|
||||
TDengine is removed successfully!
|
||||
Unpacking tdengine (2.4.0.7) over (2.4.0.7) ...
|
||||
Setting up tdengine (2.4.0.7) ...
|
||||
$ sudo dpkg -i TDengine-server-3.0.0.10002-Linux-x64.deb
|
||||
Selecting previously unselected package tdengine.
|
||||
(Reading database ... 119653 files and directories currently installed.)
|
||||
Preparing to unpack TDengine-server-3.0.0.10002-Linux-x64.deb ...
|
||||
Unpacking tdengine (3.0.0.10002) ...
|
||||
Setting up tdengine (3.0.0.10002) ...
|
||||
Start to install TDengine...
|
||||
|
||||
System hostname is: ubuntu-1804
|
||||
System hostname is: v3cluster-0002
|
||||
|
||||
Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
|
||||
OR leave it blank to build one:
|
||||
|
||||
Enter your email address for priority support or enter empty to skip:
|
||||
Enter your email address for priority support or enter empty to skip:
|
||||
Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
|
||||
|
||||
To configure TDengine : edit /etc/taos/taos.cfg
|
||||
To start TDengine : sudo systemctl start taosd
|
||||
To access TDengine : taos -h ubuntu-1804 to login into TDengine server
|
||||
To access TDengine : taos -h v3cluster-0002 to login into TDengine server
|
||||
|
||||
|
||||
TDengine is installed successfully!
|
||||
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="RPM 安装" value="rpminst">
|
||||
|
||||
1、从官网下载获得 rpm 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.rpm;
|
||||
2、进入到 TDengine-server-2.4.0.7-Linux-x64.rpm 安装包所在目录,执行如下的安装命令:
|
||||
1、从官网下载获得 rpm 安装包,例如 TDengine-server-3.0.0.10002-Linux-x64.rpm;
|
||||
2、进入到 TDengine-server-3.0.0.10002-Linux-x64.rpm 安装包所在目录,执行如下的安装命令:
|
||||
|
||||
```
|
||||
$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
|
||||
$ sudo rpm -ivh TDengine-server-3.0.0.10002-Linux-x64.rpm
|
||||
Preparing... ################################# [100%]
|
||||
Stop taosd service success!
|
||||
Updating / installing...
|
||||
1:tdengine-2.4.0.7-3 ################################# [100%]
|
||||
1:tdengine-3.0.0.10002-3 ################################# [100%]
|
||||
Start to install TDengine...
|
||||
|
||||
System hostname is: centos7
|
||||
System hostname is: chenhaoran01
|
||||
|
||||
Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
|
||||
OR leave it blank to build one:
|
||||
|
||||
Enter your email address for priority support or enter empty to skip:
|
||||
|
||||
Enter your email address for priority support or enter empty to skip:
|
||||
Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service.
|
||||
|
||||
To configure TDengine : edit /etc/taos/taos.cfg
|
||||
To start TDengine : sudo systemctl start taosd
|
||||
To access TDengine : taos -h centos7 to login into TDengine server
|
||||
To access TDengine : taos -h chenhaoran01 to login into TDengine server
|
||||
|
||||
|
||||
TDengine is installed successfully!
|
||||
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="tar.gz 安装" value="tarinst">
|
||||
|
||||
1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.tar.gz;
|
||||
2、进入到 TDengine-server-2.4.0.7-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
|
||||
1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-3.0.0.10002-Linux-x64.tar.gz;
|
||||
2、进入到 TDengine-server-3.0.0.10002-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
|
||||
|
||||
```
|
||||
$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
|
||||
TDengine-enterprise-server-2.4.0.7/
|
||||
TDengine-enterprise-server-2.4.0.7/driver/
|
||||
TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt
|
||||
TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7
|
||||
TDengine-enterprise-server-2.4.0.7/install.sh
|
||||
TDengine-enterprise-server-2.4.0.7/examples/
|
||||
$ tar -zxvf TDengine-server-3.0.0.10002-Linux-x64.tar.gz
|
||||
TDengine-server-3.0.0.10002/
|
||||
TDengine-server-3.0.0.10002/driver/
|
||||
TDengine-server-3.0.0.10002/driver/libtaos.so.3.0.0.10002
|
||||
TDengine-server-3.0.0.10002/driver/vercomp.txt
|
||||
TDengine-server-3.0.0.10002/release_note
|
||||
TDengine-server-3.0.0.10002/taos.tar.gz
|
||||
TDengine-server-3.0.0.10002/install.sh
|
||||
...
|
||||
|
||||
$ ll
|
||||
total 43816
|
||||
drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./
|
||||
drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../
|
||||
drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/
|
||||
-rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
|
||||
total 56832
|
||||
drwxr-xr-x 3 root root 4096 Aug 8 10:29 ./
|
||||
drwxrwxrwx 6 root root 4096 Aug 5 16:45 ../
|
||||
drwxr-xr-x 4 root root 4096 Aug 4 18:03 TDengine-server-3.0.0.10002/
|
||||
-rwxr-xr-x 1 root root 58183066 Aug 8 10:28 TDengine-server-3.0.0.10002-Linux-x64.tar.gz*
|
||||
|
||||
$ cd TDengine-enterprise-server-2.4.0.7/
|
||||
$ cd TDengine-server-3.0.0.10002/
|
||||
|
||||
$ ll
|
||||
total 40784
|
||||
drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./
|
||||
drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../
|
||||
drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/
|
||||
drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/
|
||||
-rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh*
|
||||
-rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz
|
||||
total 51612
|
||||
drwxr-xr-x 4 root root 4096 Aug 4 18:03 ./
|
||||
drwxr-xr-x 3 root root 4096 Aug 8 10:29 ../
|
||||
drwxr-xr-x 2 root root 4096 Aug 4 18:03 driver/
|
||||
drwxr-xr-x 11 root root 4096 Aug 4 18:03 examples/
|
||||
-rwxr-xr-x 1 root root 30980 Aug 4 18:03 install.sh*
|
||||
-rw-r--r-- 1 root root 6724 Aug 4 18:03 release_note
|
||||
-rw-r--r-- 1 root root 52793079 Aug 4 18:03 taos.tar.gz
|
||||
|
||||
$ sudo ./install.sh
|
||||
|
||||
Start to update TDengine...
|
||||
Start to install TDengine...
|
||||
Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
|
||||
Nginx for TDengine is updated successfully!
|
||||
|
||||
System hostname is: v3cluster-0002
|
||||
|
||||
Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
|
||||
OR leave it blank to build one:
|
||||
|
||||
Enter your email address for priority support or enter empty to skip:
|
||||
|
||||
To configure TDengine : edit /etc/taos/taos.cfg
|
||||
To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml
|
||||
To configure taosadapter (if has) : edit /etc/taos/taosadapter.toml
|
||||
To start TDengine : sudo systemctl start taosd
|
||||
To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060
|
||||
To access TDengine : taos -h v3cluster-0002 to login into TDengine server
|
||||
|
||||
TDengine is updated successfully!
|
||||
Install taoskeeper as a standalone service
|
||||
taoskeeper is installed, enable it by `systemctl enable taoskeeper`
|
||||
TDengine is installed successfully!
|
||||
```
|
||||
|
||||
:::info
|
||||
|
|
|
@@ -56,8 +56,8 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
 
 ```
 $ sudo dpkg -r tdengine
-(Reading database ... 137504 files and directories currently installed.)
-Removing tdengine (2.4.0.7) ...
+(Reading database ... 120119 files and directories currently installed.)
+Removing tdengine (3.0.0.10002) ...
 TDengine is removed successfully!
 
 ```
@@ -81,10 +81,7 @@ TDengine is removed successfully!
 
 ```
 $ rmtaos
-Nginx for TDengine is running, stopping it...
 TDengine is removed successfully!
-
-taosKeeper is removed successfully!
 ```
 
 </TabItem>
@ -1,241 +1,200 @@
|
|||
---
|
||||
title: 常见问题及反馈
|
||||
---
|
||||
|
||||
## 问题反馈
|
||||
|
||||
如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包:
|
||||
|
||||
1. /var/log/taos (如果没有修改过默认路径)
|
||||
2. /etc/taos
|
||||
|
||||
附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。
|
||||
|
||||
为了保证有足够的 debug 信息,如果问题能够重复,请修改/etc/taos/taos.cfg 文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启 taosd, 重复问题,然后再递交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。
|
||||
|
||||
```
|
||||
alter dnode <dnode_id> debugFlag 135;
|
||||
```
|
||||
|
||||
但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。
|
||||
|
||||
## 常见问题列表
|
||||
|
||||
### 1. TDengine2.0 之前的版本升级到 2.0 及以上的版本应该注意什么?☆☆☆
|
||||
|
||||
2.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作:
|
||||
|
||||
1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg`
|
||||
2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/`
|
||||
3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/`
|
||||
4. 安装最新稳定版本的 TDengine
|
||||
5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决
|
||||
|
||||
### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办?
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
|
||||
|
||||
### 3. 创建数据表时提示 more dnodes are needed
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。
|
||||
|
||||
### 4. 如何让 TDengine crash 时生成 core 文件?
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
|
||||
|
||||
### 5. 遇到错误“Unable to establish connection” 怎么办?
|
||||
|
||||
客户端遇到连接故障,请按照下面的步骤进行检查:
|
||||
|
||||
1. 检查网络环境
|
||||
|
||||
- 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030-6042 的访问权限
|
||||
- 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname
|
||||
- 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端
|
||||
|
||||
2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
|
||||
|
||||
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
|
||||
4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
|
||||
5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。
|
||||
|
||||
6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。
|
||||
|
||||
7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
|
||||
8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_)
|
||||
|
||||
9. 如果仍不能排除连接故障
|
||||
|
||||
- Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅
|
||||
检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} `
|
||||
检查服务器侧 TCP 端口连接是否工作:`nc -l {port}`
|
||||
检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}`
|
||||
|
||||
- Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问
|
||||
|
||||
10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括 TCP 和 UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。
|
||||
|
||||
### 6. 遇到错误 “Unexpected generic error in RPC”或者“Unable to resolve FQDN” 怎么办?
|
||||
|
||||
产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查:
|
||||
|
||||
1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
|
||||
2. 如果网络配置有 DNS server,请检查是否正常工作
|
||||
3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址
|
||||
4. 如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的
|
||||
5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnodeEps.json 是否符合当前配置的 EP,路径默认为/var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。
|
||||
6. 检查/etc/hosts 和/etc/hostname 是否是预配置的 FQDN
|
||||
|
||||
### 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误?
|
||||
|
||||
如果你确认语法正确,2.0 之前版本,请检查 SQL 语句长度是否超过 64K。如果超过,也会返回这个错误。
|
||||
|
||||
### 8. 是否支持 validation queries?
|
||||
|
||||
TDengine 还没有一组专用的 validation queries。然而建议你使用系统监测的数据库”log"来做。
|
||||
|
||||
<a class="anchor" id="update"></a>
|
||||
|
||||
### 9. 我可以删除或更新一条记录吗?
|
||||
|
||||
TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。
|
||||
|
||||
从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。
|
||||
|
||||
另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows (所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。这种覆盖关系如何选择,取决于对数据的后续使用和统计中,希望以先还是后生成的数据为准。
|
||||
|
||||
此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。
|
||||
|
||||
### 10. 我怎么创建超过 1024 列的表?
|
||||
|
||||
使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。)
|
||||
|
||||
### 11. 最有效的写入数据的方法是什么?
|
||||
|
||||
批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
|
||||
|
||||
### 12. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决?
|
||||
|
||||
Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的`taos`客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse, IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下:
|
||||
|
||||
```JAVA
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
|
||||
Connection = DriverManager.getConnection(url, properties);
|
||||
```
|
||||
|
||||
### 13. Windows 系统下客户端无法正常显示中文字符?
|
||||
|
||||
Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。
|
||||
|
||||
【 v2.2.1.5以后版本 】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置:
|
||||
|
||||
```
|
||||
locale C
|
||||
charset UTF-8
|
||||
```
|
||||
|
||||
### 14. JDBC 报错: the executed SQL is not a DML or a DDL?
|
||||
|
||||
请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java)
|
||||
|
||||
### 15. taos connect failed, reason: invalid timestamp
|
||||
|
||||
常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
|
||||
|
||||
### 16. 表名显示不全
|
||||
|
||||
由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
|
||||
|
||||
### 17. 如何进行数据迁移?
|
||||
|
||||
TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动机器 B 时,注意如下两件事:
|
||||
|
||||
- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器 B 的 hostname 为机器 A 的 hostname。
|
||||
- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 的 dnodeId 对应的 FQDN,重启。确保机器内所有机器的此文件是完全相同的。
|
||||
- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
|
||||
|
||||
### 18. 如何在命令行程序 taos 中临时调整日志级别
|
||||
|
||||
为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令:
|
||||
|
||||
```sql
|
||||
ALTER LOCAL flag_name flag_value;
|
||||
```
|
||||
|
||||
其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置):
|
||||
|
||||
- flag_name 的取值可以是:debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag
|
||||
- flag_value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)
|
||||
|
||||
```sql
|
||||
ALTER LOCAL RESETLOG;
|
||||
```
|
||||
|
||||
其含义是,清空本机所有由客户端生成的日志文件。
|
||||
|
||||
<a class="anchor" id="timezone"></a>
|
||||
|
||||
### 19. go 语言编写组件编译失败怎样解决?
|
||||
|
||||
TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
|
||||
使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
|
||||
|
||||
目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
|
||||
|
||||
```sh
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.cn,direct
|
||||
```
|
||||
|
||||
如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用
|
||||
`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。
|
||||
|
||||
### 20. 如何查询数据占用的存储空间大小?
|
||||
|
||||
默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。
|
||||
|
||||
若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。
|
||||
|
||||
若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。
|
||||
|
||||
若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)
|
||||
|
||||
### 21. 客户端连接串如何保证高可用?
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)
|
||||
|
||||
### 22. 时间戳的时区信息是怎样处理的?
|
||||
|
||||
TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
|
||||
|
||||
客户端在处理时间戳字符串时,会采取如下逻辑:
|
||||
|
||||
1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
|
||||
2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
|
||||
3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
|
||||
4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
|
||||
|
||||
### 23. TDengine 2.0 都会用到哪些网络端口?
|
||||
|
||||
使用到的网络端口请看文档:[serverport](/reference/config/#serverport)
|
||||
|
||||
需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。
|
||||
|
||||
### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功??
|
||||
|
||||
taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。
|
||||
|
||||
需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。
|
||||
|
||||
有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
|
||||
|
||||
### 25. 发生了 OOM 怎么办?
|
||||
|
||||
OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
|
||||
|
||||
TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
|
||||
---
|
||||
title: 常见问题及反馈
|
||||
---
|
||||
|
||||
## 问题反馈
|
||||
|
||||
如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包:
|
||||
|
||||
1. /var/log/taos (如果没有修改过默认路径)
|
||||
2. /etc/taos(如果没有指定其他配置文件路径)
|
||||
|
||||
附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。
|
||||
|
||||
为了保证有足够的 debug 信息,如果问题能够重复,请修改/etc/taos/taos.cfg 文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启 taosd, 重复问题,然后再递交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。
|
||||
|
||||
```
|
||||
alter dnode <dnode_id> 'debugFlag' '135';
|
||||
```
|
||||
|
||||
其中 dnode_id 请从 show dnodes; 命令输出中获取。
|
||||
|
||||
但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。
|
||||
|
||||
## 常见问题列表
|
||||
|
||||
### 1. TDengine3.0 之前的版本升级到 3.0 及以上的版本应该注意什么?
|
||||
|
||||
3.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作:
|
||||
|
||||
1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg`
|
||||
2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/`
|
||||
3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/`
|
||||
4. 安装最新3.0稳定版本的 TDengine
|
||||
5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决
|
||||
|
||||
### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办?
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。
|
||||
|
||||
### 3. 如何让 TDengine crash 时生成 core 文件?
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。
|
||||
|
||||
### 4. 遇到错误“Unable to establish connection” 怎么办?
|
||||
|
||||
客户端遇到连接故障,请按照下面的步骤进行检查:
|
||||
|
||||
1. 检查网络环境
|
||||
|
||||
- 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030/6041 的访问权限
|
||||
- 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname
|
||||
- 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端
|
||||
|
||||
2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用
|
||||
|
||||
3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd*
|
||||
|
||||
4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。
|
||||
|
||||
5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。
|
||||
|
||||
6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030/6041 上的 TCP/UDP 协议能够互通。
|
||||
|
||||
7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里
|
||||
|
||||
8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_)
|
||||
|
||||
9. 如果仍不能排除连接故障
|
||||
|
||||
- Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅
|
||||
检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} `
|
||||
检查服务器侧 TCP 端口连接是否工作:`nc -l {port}`
|
||||
检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}`
|
||||
|
||||
- Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问
|
||||
|
||||
10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](https://docs.taosdata.com/3.0-preview/operation/diagnose/)。
|
||||
|
||||
### 5. 遇到错误 Unable to resolve FQDN” 怎么办?
|
||||
|
||||
产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查:
|
||||
|
||||
1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
|
||||
2. 如果网络配置有 DNS server,请检查是否正常工作
|
||||
3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址
|
||||
4. 如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的
|
||||
5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnode.json 是否符合当前配置的 EP,路径默认为/var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。
|
||||
6. 检查/etc/hosts 和/etc/hostname 是否是预配置的 FQDN
|
||||
|
||||
### 6. 最有效的写入数据的方法是什么?
|
||||
|
||||
批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
|
||||
|
||||
### 7. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决?
|
||||
|
||||
Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的`taos`客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse, IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下:
|
||||
|
||||
```JAVA
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
|
||||
Connection = DriverManager.getConnection(url, properties);
|
||||
```
|
||||
|
||||
### 8. Windows 系统下客户端无法正常显示中文字符?
|
||||
|
||||
Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。
|
||||
|
||||
在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置:
|
||||
|
||||
```
|
||||
locale C
|
||||
charset UTF-8
|
||||
```
|
||||
|
||||
### 9. 表名显示不全
|
||||
|
||||
由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
|
||||
|
||||
### 10. 如何进行数据迁移?
|
||||
|
||||
TDengine 是根据 hostname 唯一标志一台机器的,对于3.0版本,将数据文件从机器 A 移动机器 B 时,需要重新配置机器 B 的 hostname 为机器 A 的 hostname。
|
||||
|
||||
注:3.x 和 之前的1.x、2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。
|
||||
|
||||
### 11. 如何在命令行程序 taos 中临时调整日志级别
|
||||
|
||||
为了调试方便,命令行程序 taos 新增了与日志记录相关的指令:
|
||||
|
||||
```sql
|
||||
ALTER LOCAL local_option
|
||||
|
||||
local_option: {
|
||||
'resetLog'
|
||||
| 'rpcDebugFlag' value
|
||||
| 'tmrDebugFlag' value
|
||||
| 'cDebugFlag' value
|
||||
| 'uDebugFlag' value
|
||||
| 'debugFlag' value
|
||||
}
|
||||
```
|
||||
|
||||
其含义是,在当前的命令行程序下,清空本机所有客户端生成的日志文件(resetLog),或修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置):
|
||||
|
||||
- value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。
|
||||
|
||||
### 12. go 语言编写组件编译失败怎样解决?
|
||||
|
||||
TDengine 3.0版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,提供restful接入功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。
|
||||
使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。
|
||||
|
||||
go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
|
||||
|
||||
```sh
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.cn,direct
|
||||
```
|
||||
|
||||
### 13. 如何查询数据占用的存储空间大小?
|
||||
|
||||
默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。
|
||||
|
||||
若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。
|
||||
|
||||
若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。
|
||||
|
||||
### 14. 客户端连接串如何保证高可用?
|
||||
|
||||
请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html)
|
||||
|
||||
### 15. 时间戳的时区信息是怎样处理的?
|
||||
|
||||
TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
|
||||
|
||||
客户端在处理时间戳字符串时,会采取如下逻辑:
|
||||
|
||||
1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。
|
||||
2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。
|
||||
3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
|
||||
4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
|
||||
|
||||
### 16. TDengine 3.0 都会用到哪些网络端口?
|
||||
|
||||
使用到的网络端口请看文档:[serverport](../../reference/config/#serverport)
|
||||
|
||||
需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。
|
||||
|
||||
### 17. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?
|
||||
|
||||
taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。
|
||||
|
||||
需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。
|
||||
|
||||
有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](../../reference/taosadapter/)
|
||||
|
||||
### 18. 发生了 OOM 怎么办?
|
||||
|
||||
OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
|
||||
|
||||
TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 建库时的vgroups参数影响,每个 VNode 占用的内存大小受 buffer参数 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
|
||||
|
|
|
@@ -0,0 +1 @@
+Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12
@@ -246,7 +246,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
 // for debug
 char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
 
-int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
+int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
                                     tb_uid_t suid);
 
 char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
@ -2658,7 +2658,6 @@ typedef struct {
|
|||
} SVgEpSet;
|
||||
|
||||
typedef struct {
|
||||
int64_t refId;
|
||||
int64_t suid;
|
||||
int8_t level;
|
||||
} SRSmaFetchMsg;
|
||||
|
@ -2666,7 +2665,6 @@ typedef struct {
|
|||
static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) {
|
||||
if (tStartEncode(pCoder) < 0) return -1;
|
||||
|
||||
if (tEncodeI64(pCoder, pReq->refId) < 0) return -1;
|
||||
if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
|
||||
if (tEncodeI8(pCoder, pReq->level) < 0) return -1;
|
||||
|
||||
|
@ -2677,7 +2675,6 @@ static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFe
|
|||
static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) {
|
||||
if (tStartDecode(pCoder) < 0) return -1;
|
||||
|
||||
if (tDecodeI64(pCoder, &pReq->refId) < 0) return -1;
|
||||
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
|
||||
if (tDecodeI8(pCoder, &pReq->level) < 0) return -1;
|
||||
|
||||
|
|
|
@@ -200,6 +200,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
+  TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
@ -354,8 +354,6 @@ void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
|||
void *getDataMin(int32_t type);
|
||||
void *getDataMax(int32_t type);
|
||||
|
||||
#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
|
||||
#define SET_BIGINT_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_BIGINT_NULL)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -67,7 +67,7 @@ typedef struct SResultRowEntryInfo {
   bool initialized:1;  // output buffer has been initialized
   bool complete:1;     // query has completed
   uint8_t isNullRes:6; // the result is null
-  uint16_t numOfRes;   // num of output result in current buffer
+  uint16_t numOfRes;   // num of output result in current buffer. NOT NULL RESULT
 } SResultRowEntryInfo;
 
 // determine the real data need to calculated the result
@@ -121,6 +121,7 @@ typedef struct SProjectLogicNode {
   SLogicNode node;
   SNodeList* pProjections;
   char stmtName[TSDB_TABLE_NAME_LEN];
+  bool ignoreGroupId;
 } SProjectLogicNode;
 
 typedef struct SIndefRowsFuncLogicNode {
@@ -344,6 +345,7 @@ typedef struct SProjectPhysiNode {
   SPhysiNode node;
   SNodeList* pProjections;
   bool mergeDataBlock;
+  bool ignoreGroupId;
 } SProjectPhysiNode;
 
 typedef struct SIndefRowsFuncPhysiNode {
@ -226,11 +226,36 @@ typedef struct {
|
|||
int32_t nodeId;
|
||||
int32_t childId;
|
||||
int32_t taskId;
|
||||
int64_t checkpointVer;
|
||||
int64_t processedVer;
|
||||
SEpSet epSet;
|
||||
// int64_t checkpointVer;
|
||||
// int64_t processedVer;
|
||||
SEpSet epSet;
|
||||
} SStreamChildEpInfo;
|
||||
|
||||
typedef struct {
|
||||
int32_t nodeId;
|
||||
int32_t childId;
|
||||
int64_t stateSaveVer;
|
||||
int64_t stateProcessedVer;
|
||||
} SStreamCheckpointInfo;
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int64_t checkTs;
|
||||
int32_t checkpointId; // incremental
|
||||
int32_t taskId;
|
||||
SArray* checkpointVer; // SArray<SStreamCheckpointInfo>
|
||||
} SStreamMultiVgCheckpointInfo;
|
||||
|
||||
typedef struct {
|
||||
int32_t taskId;
|
||||
int32_t checkpointId; // incremental
|
||||
} SStreamCheckpointKey;
|
||||
|
||||
typedef struct {
|
||||
int32_t taskId;
|
||||
SArray* checkpointVer;
|
||||
} SStreamRecoveringState;
|
||||
|
||||
typedef struct SStreamTask {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
|
@@ -256,6 +281,8 @@ typedef struct SStreamTask {
 
   // children info
   SArray* childEpInfo; // SArray<SStreamChildEpInfo*>
+  int32_t nextCheckId;
+  SArray* checkpointInfo; // SArray<SStreamCheckpointInfo>
 
   // exec
   STaskExec exec;
@@ -445,6 +472,7 @@ typedef struct {
 
 int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
 int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
+void tFreeStreamDispatchReq(SStreamDispatchReq* pReq);
 
 int32_t streamSetupTrigger(SStreamTask* pTask);
 
@@ -468,6 +496,7 @@ typedef struct SStreamMeta {
   TTB* pTaskDb;
   TTB* pStateDb;
   SHashObj* pTasks;
+  SHashObj* pRecoveringState;
   void* ahandle;
   TXN txn;
   FTaskExpand* expandFunc;
@@ -610,6 +610,8 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152)
 #define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153)
 #define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
+#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
+#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
 
 //index
 #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
@@ -126,7 +126,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
   rpcInit.numOfThreads = numOfThread;
   rpcInit.cfp = processMsgFromServer;
   rpcInit.rfp = clientRpcRfp;
-  rpcInit.tfp = clientRpcTfp;
+  // rpcInit.tfp = clientRpcTfp;
   rpcInit.sessions = 1024;
   rpcInit.connType = TAOS_CONN_CLIENT;
   rpcInit.user = (char *)user;
@@ -658,12 +658,17 @@ typedef struct SqlParseWrapper {
   SQuery *pQuery;
 } SqlParseWrapper;
 
+static void destoryTablesReq(void *p) {
+  STablesReq *pRes = (STablesReq *)p;
+  taosArrayDestroy(pRes->pTables);
+}
+
 static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
   taosArrayDestroy(pWrapper->catalogReq.pDbVgroup);
   taosArrayDestroy(pWrapper->catalogReq.pDbCfg);
   taosArrayDestroy(pWrapper->catalogReq.pDbInfo);
-  taosArrayDestroy(pWrapper->catalogReq.pTableMeta);
-  taosArrayDestroy(pWrapper->catalogReq.pTableHash);
+  taosArrayDestroyEx(pWrapper->catalogReq.pTableMeta, destoryTablesReq);
+  taosArrayDestroyEx(pWrapper->catalogReq.pTableHash, destoryTablesReq);
   taosArrayDestroy(pWrapper->catalogReq.pUdf);
   taosArrayDestroy(pWrapper->catalogReq.pIndex);
   taosArrayDestroy(pWrapper->catalogReq.pUser);
@ -1874,21 +1874,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
|||
* @brief TODO: Assume that the final generated result it less than 3M
|
||||
*
|
||||
* @param pReq
|
||||
* @param pDataBlocks
|
||||
* @param pDataBlock
|
||||
* @param vgId
|
||||
* @param suid // TODO: check with Liao whether suid response is reasonable
|
||||
* @param suid
|
||||
*
|
||||
* TODO: colId should be set
|
||||
*/
|
||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
|
||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
|
||||
tb_uid_t suid) {
|
||||
int32_t sz = taosArrayGetSize(pDataBlocks);
|
||||
int32_t bufSize = sizeof(SSubmitReq);
|
||||
int32_t sz = 1;
|
||||
for (int32_t i = 0; i < sz; ++i) {
|
||||
SDataBlockInfo* pBlkInfo = &((SSDataBlock*)taosArrayGet(pDataBlocks, i))->info;
|
||||
const SDataBlockInfo* pBlkInfo = &pDataBlock->info;
|
||||
|
||||
int32_t numOfCols = taosArrayGetSize(pDataBlocks);
|
||||
bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(numOfCols));
|
||||
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(colNum));
|
||||
bufSize += sizeof(SSubmitBlk);
|
||||
}
|
||||
|
||||
|
@ -1905,7 +1904,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
|
|||
tdSRowInit(&rb, pTSchema->version);
|
||||
|
||||
for (int32_t i = 0; i < sz; ++i) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pDataBlocks, i);
|
||||
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
// int32_t rowSize = pDataBlock->info.rowSize;
|
||||
|
|
|
@@ -347,6 +347,7 @@ SArray *vmGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -255,7 +255,8 @@ static inline void dmReleaseHandle(SRpcHandleInfo *pHandle, int8_t type) {
 static bool rpcRfp(int32_t code, tmsg_t msgType) {
   if (code == TSDB_CODE_RPC_REDIRECT || code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_NODE_NOT_DEPLOYED ||
       code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_APP_NOT_READY || code == TSDB_CODE_RPC_BROKEN_LINK) {
-    if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH || msgType == TDMT_SCH_MERGE_FETCH) {
+    if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
+        msgType == TDMT_SCH_MERGE_FETCH) {
       return false;
     }
     return true;
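The hunk above is only a line re-wrap of the condition in `rpcRfp`, which, as far as this hunk shows, treats redirect/network-style errors as eligible for re-sending except when the message is query/fetch traffic. A sketch of that shape of predicate in Python; the sets and the exact semantics are illustrative, not taken from the full source:

```python
RETRIABLE_CODES = {
    "TSDB_CODE_RPC_REDIRECT",
    "TSDB_CODE_RPC_NETWORK_UNAVAIL",
    "TSDB_CODE_NODE_NOT_DEPLOYED",
    "TSDB_CODE_SYN_NOT_LEADER",
    "TSDB_CODE_APP_NOT_READY",
    "TSDB_CODE_RPC_BROKEN_LINK",
}

# Query/fetch traffic is not blindly re-sent, mirroring the msgType check above.
NON_RETRIABLE_MSGS = {
    "TDMT_SCH_QUERY",
    "TDMT_SCH_MERGE_QUERY",
    "TDMT_SCH_FETCH",
    "TDMT_SCH_MERGE_FETCH",
}

def should_retry(code: str, msg_type: str) -> bool:
    """Return True when a failed RPC of this kind may be redirected/retried."""
    if code not in RETRIABLE_CODES:
        return False
    return msg_type not in NON_RETRIABLE_MSGS

assert should_retry("TSDB_CODE_RPC_BROKEN_LINK", "TDMT_VND_SUBMIT") is True
assert should_retry("TSDB_CODE_RPC_BROKEN_LINK", "TDMT_SCH_FETCH") is False
```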
@@ -187,6 +187,7 @@ int32_t smaAsyncPreCommit(SSma* pSma);
 int32_t smaAsyncCommit(SSma* pSma);
 int32_t smaAsyncPostCommit(SSma* pSma);
 int32_t smaDoRetention(SSma* pSma, int64_t now);
+int32_t smaProcessFetch(SSma *pSma, void* pMsg);
 
 int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
 int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
@@ -481,7 +481,7 @@ int64_t metaGetTbNum(SMeta *pMeta) {
   /* int64_t num = 0; */
   /* vnodeGetAllCtbNum(pMeta->pVnode, &num); */
 
-  return pMeta->pVnode->config.vndStats.numOfCTables;
+  return pMeta->pVnode->config.vndStats.numOfCTables + pMeta->pVnode->config.vndStats.numOfNTables;
 }
 
 // N.B. Called by statusReq per second
@ -36,19 +36,17 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputT
|
|||
int8_t level);
|
||||
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid);
|
||||
static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
|
||||
|
||||
static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid,
|
||||
SRSmaStat *pStat, int8_t blkType);
|
||||
static void tdRSmaFetchTrigger(void *param, void *tmrId);
|
||||
|
||||
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
|
||||
static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish);
|
||||
static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter);
|
||||
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem);
|
||||
|
||||
static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
|
||||
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer);
|
||||
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma);
|
||||
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
|
||||
int64_t suid, int8_t blkType);
|
||||
static void tdRSmaFetchTrigger(void *param, void *tmrId);
|
||||
static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level);
|
||||
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
|
||||
static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish);
|
||||
static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter);
|
||||
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem);
|
||||
static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
|
||||
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer);
|
||||
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma);
|
||||
|
||||
static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) {
|
||||
// adapt accordingly if definition of SRSmaInfo update
|
||||
|
@ -604,11 +602,8 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid,
|
||||
SRSmaStat *pStat, int8_t blkType) {
|
||||
SArray *pResult = NULL;
|
||||
SSma *pSma = pStat->pSma;
|
||||
|
||||
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
|
||||
int64_t suid, int8_t blkType) {
|
||||
while (1) {
|
||||
SSDataBlock *output = NULL;
|
||||
uint64_t ts;
|
||||
|
@ -619,30 +614,20 @@ static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *p
|
|||
pItem->level, terrstr(code));
|
||||
goto _err;
|
||||
}
|
||||
if (!output) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pResult) {
|
||||
pResult = taosArrayInit(1, sizeof(SSDataBlock));
|
||||
if (!pResult) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
taosArrayPush(pResult, output);
|
||||
|
||||
if (taosArrayGetSize(pResult) > 0) {
|
||||
#if 1
|
||||
if (output) {
|
||||
#if 0
|
||||
char flag[10] = {0};
|
||||
snprintf(flag, 10, "level %" PRIi8, pItem->level);
|
||||
SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock));
|
||||
taosArrayPush(pResult, output);
|
||||
blockDebugShowDataBlocks(pResult, flag);
|
||||
taosArrayDestroy(pResult);
|
||||
#endif
|
||||
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
|
||||
SSubmitReq *pReq = NULL;
|
||||
// TODO: the schema update should be handled later(TD-17965)
|
||||
if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) {
|
||||
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
|
||||
smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
|
||||
SMA_VID(pSma), suid, pItem->level, terrstr());
|
||||
goto _err;
|
||||
|
@ -659,18 +644,17 @@ static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *p
|
|||
SMA_VID(pSma), suid, pItem->level, output->info.version);
|
||||
|
||||
taosMemoryFreeClear(pReq);
|
||||
taosArrayClear(pResult);
|
||||
} else if (terrno == 0) {
|
||||
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
|
||||
break;
|
||||
} else {
|
||||
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
tdDestroySDataBlockArray(pResult);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
tdDestroySDataBlockArray(pResult);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
|
@ -694,11 +678,9 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
|
|||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
|
||||
SRSmaStat *pStat = SMA_RSMA_STAT(pEnv->pStat);
|
||||
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx);
|
||||
|
||||
tdRSmaFetchAndSubmitResult(RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid, pStat,
|
||||
tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid,
|
||||
STREAM_INPUT__DATA_SUBMIT);
|
||||
atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
|
||||
|
||||
|
@ -724,11 +706,13 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
|||
SRSmaInfo *pRSmaInfo = NULL;
|
||||
|
||||
if (!pEnv) {
|
||||
terrno = TSDB_CODE_RSMA_INVALID_ENV;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
|
||||
if (!pStat || !RSMA_INFO_HASH(pStat)) {
|
||||
terrno = TSDB_CODE_RSMA_INVALID_STAT;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -743,12 +727,12 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
|||
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
return pRSmaInfo;
|
||||
}
|
||||
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
|
||||
if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat
|
||||
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
return NULL;
|
||||
}
|
||||
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
|
||||
|
||||
// clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
|
||||
SRSmaInfo *pCowRSmaInfo = NULL;
|
||||
|
@ -779,7 +763,7 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
|||
ASSERT(!pCowRSmaInfo);
|
||||
}
|
||||
|
||||
if(pCowRSmaInfo) {
|
||||
if (pCowRSmaInfo) {
|
||||
tdRefRSmaInfo(pSma, pCowRSmaInfo);
|
||||
}
|
||||
// unlock
|
||||
|
@ -1323,7 +1307,7 @@ _err:
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief trigger to get rsma result
|
||||
* @brief trigger to get rsma result in async mode
|
||||
*
|
||||
* @param param
|
||||
* @param tmrId
|
||||
|
@ -1357,8 +1341,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
" refId:%d",
|
||||
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
|
||||
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
|
||||
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle,
|
||||
&pItem->tmrId);
|
||||
taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -1372,16 +1355,8 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
case TASK_TRIGGER_STAT_ACTIVE: {
|
||||
smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma),
|
||||
pItem->level, pRSmaInfo->suid);
|
||||
|
||||
// sync procedure => async process
|
||||
|
||||
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
|
||||
qTaskInfo_t taskInfo = pRSmaInfo->taskInfo[pItem->level - 1];
|
||||
qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK);
|
||||
tdRSmaFetchAndSubmitResult(taskInfo, pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat,
|
||||
STREAM_INPUT__DATA_BLOCK);
|
||||
tdCleanupStreamInputDataBlock(taskInfo);
|
||||
|
||||
// async process
|
||||
tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level);
|
||||
} break;
|
||||
case TASK_TRIGGER_STAT_PAUSED: {
|
||||
smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused",
|
||||
|
@ -1404,3 +1379,118 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
_end:
|
||||
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief put rsma fetch msg to fetch queue
|
||||
*
|
||||
* @param pSma
|
||||
* @param pInfo
|
||||
* @param level
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
|
||||
SRSmaFetchMsg fetchMsg = { .suid = pInfo->suid, .level = level};
|
||||
int32_t ret = 0;
|
||||
int32_t contLen = 0;
|
||||
SEncoder encoder = {0};
|
||||
tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret);
|
||||
if (ret < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tEncoderClear(&encoder);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead));
|
||||
tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen);
|
||||
if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tEncoderClear(&encoder);
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
|
||||
((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);
|
||||
|
||||
SRpcMsg rpcMsg = {
|
||||
.code = 0,
|
||||
.msgType = TDMT_VND_FETCH_RSMA,
|
||||
.pCont = pBuf,
|
||||
.contLen = contLen,
|
||||
};
|
||||
|
||||
if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, FETCH_QUEUE, &rpcMsg)) != 0) {
|
||||
smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s",
|
||||
SMA_VID(pSma), pInfo->suid, level, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
|
||||
smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma),
|
||||
pInfo->suid, level);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief fetch rsma data of level 2/3 and submit
|
||||
*
|
||||
* @param pSma
|
||||
* @param pMsg
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
|
||||
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
|
||||
SRSmaFetchMsg req = {0};
|
||||
SDecoder decoder = {0};
|
||||
void *pBuf = NULL;
|
||||
SRSmaInfo *pInfo = NULL;
|
||||
SRSmaInfoItem *pItem = NULL;
|
||||
|
||||
if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
|
||||
terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
|
||||
return -1;
|
||||
}
|
||||
|
||||
pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead));
|
||||
|
||||
tDecoderInit(&decoder, pBuf, pRpcMsg->contLen);
|
||||
if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid);
|
||||
if (!pInfo) {
|
||||
if (terrno == TSDB_CODE_SUCCESS) {
|
||||
terrno = TSDB_CODE_RSMA_EMPTY_INFO;
|
||||
}
|
||||
smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma),
|
||||
req.suid, req.level, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
|
||||
pItem = RSMA_INFO_ITEM(pInfo, req.level - 1);
|
||||
|
||||
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
|
||||
qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1);
|
||||
if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tdCleanupStreamInputDataBlock(taskInfo);
|
||||
|
||||
tdReleaseRSmaInfo(pSma, pInfo);
|
||||
tDecoderClear(&decoder);
|
||||
smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid,
|
||||
req.level);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
tdReleaseRSmaInfo(pSma, pInfo);
|
||||
tDecoderClear(&decoder);
|
||||
smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
|
|
@@ -859,8 +859,10 @@ void vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
   tDecoderInit(&decoder, msgBody, msgLen);
   if (tDecodeStreamDispatchReq(&decoder, &req) < 0) {
     code = TSDB_CODE_MSG_DECODE_ERROR;
+    tDecoderClear(&decoder);
     goto FAIL;
   }
+  tDecoderClear(&decoder);
 
   int32_t taskId = req.taskId;
 
@@ -473,7 +473,7 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
     int numOfCols = 0;
     vnodeGetStbColumnNum(pVnode, id, &numOfCols);
 
-    *num += ctbNum * numOfCols;
+    *num += ctbNum * (numOfCols - 1);
   }
 
   metaCloseStbCursor(pCur);
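The fix above changes the per-super-table contribution from `ctbNum * numOfCols` to `ctbNum * (numOfCols - 1)`, i.e. the timestamp column no longer counts as a time series. A quick arithmetic sketch of the counting rule, with invented numbers:

```python
def timeseries_count(stables):
    """stables: iterable of (child_table_count, column_count) per super table.

    Each child table contributes one series per data column; the first
    (timestamp) column is excluded, hence the `- 1`.
    """
    return sum(ctb_num * (num_cols - 1) for ctb_num, num_cols in stables)

# Two super tables: 1000 child tables with 4 columns, 50 with 10 columns.
print(timeseries_count([(1000, 4), (50, 10)]))  # 1000*3 + 50*9 = 3450
```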
@@ -325,6 +325,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
       return vnodeGetTableCfg(pVnode, pMsg, true);
     case TDMT_VND_BATCH_META:
       return vnodeGetBatchMeta(pVnode, pMsg);
+    case TDMT_VND_FETCH_RSMA:
+      return smaProcessFetch(pVnode->pSma, pMsg);
     case TDMT_VND_CONSUME:
       return tqProcessPollReq(pVnode->pTq, pMsg);
     case TDMT_STREAM_TASK_RUN:
@@ -141,6 +141,10 @@ static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
   }
   if (rsp.info.handle != NULL) {
     tmsgSendRsp(&rsp);
+  } else {
+    if (rsp.pCont) {
+      rpcFreeCont(rsp.pCont);
+    }
   }
 }
 
@@ -299,6 +303,10 @@ void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
     vnodePostBlockMsg(pVnode, pMsg);
     if (rsp.info.handle != NULL) {
       tmsgSendRsp(&rsp);
+    } else {
+      if (rsp.pCont) {
+        rpcFreeCont(rsp.pCont);
+      }
     }
 
     vGTrace("vgId:%d, msg:%p is freed, code:0x%x index:%" PRId64, vgId, pMsg, rsp.code, pMsg->info.conn.applyIndex);
@ -733,4 +741,4 @@ bool vnodeIsLeader(SVnode *pVnode) {
|
|||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1082,7 +1082,7 @@ _return:
     ctgReleaseVgInfoToCache(pCtg, dbCache);
   }
 
-  if (pTask->res) {
+  if (pTask->res || code) {
     ctgHandleTaskEnd(pTask, code);
   }
 
@@ -58,6 +58,7 @@ extern "C" {
 #define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f"
 #define EXPLAIN_MERGE_FORMAT "Merge"
 #define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: "
+#define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s"
 
 #define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms"
 #define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms"
@ -612,6 +612,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
      EXPLAIN_ROW_END();
      QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));

      EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
      EXPLAIN_ROW_APPEND(EXPLAIN_IGNORE_GROUPID_FORMAT, pPrjNode->ignoreGroupId ? "true" : "false");
      EXPLAIN_ROW_END();
      QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));

      if (pPrjNode->node.pConditions) {
        EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
        QRY_ERR_RET(nodesNodeToSQL(pPrjNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE,

@ -256,7 +256,7 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t

  SArray* rightRowLocations = taosArrayInit(8, sizeof(SRowLocation));
  SArray* rightCreatedBlocks = taosArrayInit(8, POINTER_BYTES);

  int32_t code = TSDB_CODE_SUCCESS;
  mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 0, pJoinInfo->leftCol.slotId, pJoinInfo->pLeft,
                                           pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks);
  mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight,

@ -264,14 +264,20 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t

  size_t leftNumJoin = taosArrayGetSize(leftRowLocations);
  size_t rightNumJoin = taosArrayGetSize(rightRowLocations);
  for (int32_t i = 0; i < leftNumJoin; ++i) {
    for (int32_t j = 0; j < rightNumJoin; ++j) {
      SRowLocation* leftRow = taosArrayGet(leftRowLocations, i);
      SRowLocation* rightRow = taosArrayGet(rightRowLocations, j);
      mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock,
                             rightRow->pos);
      ++*nRows;
    }
  code = blockDataEnsureCapacity(pRes, *nRows + leftNumJoin * rightNumJoin);
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s can not ensure block capacity for join. left: %zu, right: %zu", GET_TASKID(pOperator->pTaskInfo), leftNumJoin, rightNumJoin);
  }
  if (code == TSDB_CODE_SUCCESS) {
    for (int32_t i = 0; i < leftNumJoin; ++i) {
      for (int32_t j = 0; j < rightNumJoin; ++j) {
        SRowLocation *leftRow = taosArrayGet(leftRowLocations, i);
        SRowLocation *rightRow = taosArrayGet(rightRowLocations, j);
        mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock,
                               rightRow->pos);
        ++*nRows;
      }
    }
  }

  for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) {

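The rewritten join loop calls `blockDataEnsureCapacity` once, before emitting `leftNumJoin * rightNumJoin` combined rows, and only enters the nested loops when that allocation succeeded. A condensed sketch of the reserve-then-fill pattern, with hypothetical types (`ResultBlock`, `reserve`, `appendRow` are placeholders for the SSDataBlock helpers):

```
#include <stdint.h>
#include <stdlib.h>

typedef struct { int32_t *rows; int32_t cap; int32_t n; } ResultBlock;  /* placeholder */

/* Grow the block up front so the inner loops never reallocate. */
static int32_t reserve(ResultBlock *b, int32_t need) {
  if (need <= b->cap) return 0;
  int32_t *p = realloc(b->rows, sizeof(int32_t) * (size_t)need);
  if (p == NULL) return -1;
  b->rows = p;
  b->cap = need;
  return 0;
}

static void appendRow(ResultBlock *b, int32_t v) { b->rows[b->n++] = v; }

int32_t joinEqualTs(ResultBlock *res, int32_t leftNum, int32_t rightNum) {
  /* Reserve space for the full cross product of the two equal-timestamp groups. */
  int32_t code = reserve(res, res->n + leftNum * rightNum);
  if (code == 0) {
    for (int32_t i = 0; i < leftNum; ++i) {
      for (int32_t j = 0; j < rightNum; ++j) {
        appendRow(res, i * rightNum + j);  /* stand-in for mergeJoinJoinLeftRight */
      }
    }
  }
  return code;
}
```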
@ -3845,14 +3845,17 @@ int32_t spreadFunctionMerge(SqlFunctionCtx* pCtx) {
  SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));

  int32_t start = pInput->startRowIndex;

  for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
    char* data = colDataGetData(pCol, i);
    SSpreadInfo* pInputInfo = (SSpreadInfo*)varDataVal(data);
    spreadTransferInfo(pInputInfo, pInfo);
    if (pInputInfo->hasResult) {
      spreadTransferInfo(pInputInfo, pInfo);
    }
  }

  SET_VAL(GET_RES_INFO(pCtx), 1, 1);
  if (pInfo->hasResult) {
    GET_RES_INFO(pCtx)->numOfRes = 1;
  }

  return TSDB_CODE_SUCCESS;
}

@ -3861,6 +3864,8 @@ int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
  if (pInfo->hasResult == true) {
    SET_DOUBLE_VAL(&pInfo->result, pInfo->max - pInfo->min);
  } else {
    GET_RES_INFO(pCtx)->isNullRes = 1;
  }
  return functionFinalize(pCtx, pBlock);
}

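With these two changes, partial spread states are only folded into the merged state when the partial actually produced a result, and finalize reports NULL instead of a spurious 0 when no partial had data. A small sketch of the merge/finalize semantics, using a hypothetical `SpreadState` in place of SSpreadInfo:

```
#include <stdbool.h>

typedef struct {
  bool   hasResult;
  double min;
  double max;
} SpreadState;  /* hypothetical stand-in for SSpreadInfo */

/* Fold one partial state into the accumulator, but only if it saw any rows. */
void spreadMerge(SpreadState *acc, const SpreadState *partial) {
  if (!partial->hasResult) return;          /* empty vnode: contributes nothing */
  if (!acc->hasResult || partial->min < acc->min) acc->min = partial->min;
  if (!acc->hasResult || partial->max > acc->max) acc->max = partial->max;
  acc->hasResult = true;
}

/* Finalize: spread = max - min, or NULL (via *isNull) when nothing was merged. */
double spreadFinal(const SpreadState *acc, bool *isNull) {
  if (!acc->hasResult) {
    *isNull = true;
    return 0.0;
  }
  *isNull = false;
  return acc->max - acc->min;
}
```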
@ -390,6 +390,7 @@ static int32_t logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode
|
|||
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
|
||||
CLONE_NODE_LIST_FIELD(pProjections);
|
||||
COPY_CHAR_ARRAY_FIELD(stmtName);
|
||||
COPY_SCALAR_FIELD(ignoreGroupId);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -655,6 +655,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
|
|||
}
|
||||
|
||||
static const char* jkProjectLogicPlanProjections = "Projections";
|
||||
static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId";
|
||||
|
||||
static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
|
||||
const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj;
|
||||
|
@ -663,6 +664,9 @@ static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodeListToJson(pJson, jkProjectLogicPlanProjections, pNode->pProjections);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -674,6 +678,9 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = jsonToNodeList(pJson, jkProjectLogicPlanProjections, &pNode->pProjections);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -1689,6 +1696,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {

static const char* jkProjectPhysiPlanProjections = "Projections";
static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock";
static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId";

static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
  const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj;

@ -1700,6 +1708,9 @@ static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanMergeDataBlock, pNode->mergeDataBlock);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId);
  }

  return code;
}

@ -1714,6 +1725,9 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanMergeDataBlock, &pNode->mergeDataBlock);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId);
  }

  return code;
}

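Every new field on a plan node follows the same serde recipe: declare a string key constant, write the field in the `*NodeToJson` function, and read it back in the matching `jsonTo*Node` function, chaining on `code` so the first failure short-circuits the rest. A minimal sketch of that add/get round trip, assuming the TDengine tjson header for SJson and the two helpers used above (the node struct and key name here are hypothetical):

```
/* Sketch only; assumes tjson.h and taoserror.h from the TDengine tree. */
typedef struct {
  bool ignoreGroupId;           /* the newly added flag, mirroring SProjectPhysiNode */
} SMyProjectNode;               /* hypothetical node */

static const char* jkMyPlanIgnoreGroupId = "IgnoreGroupId";

static int32_t myProjectNodeToJson(const SMyProjectNode* pNode, SJson* pJson) {
  int32_t code = TSDB_CODE_SUCCESS;  /* in real code this carries earlier fields' result */
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkMyPlanIgnoreGroupId, pNode->ignoreGroupId);
  }
  return code;
}

static int32_t jsonToMyProjectNode(const SJson* pJson, SMyProjectNode* pNode) {
  int32_t code = TSDB_CODE_SUCCESS;
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkMyPlanIgnoreGroupId, &pNode->ignoreGroupId);
  }
  return code;
}
```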
@ -392,6 +392,9 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode*
|
|||
static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); }
|
||||
|
||||
static void destroyTableCfg(STableCfg* pCfg) {
|
||||
if (NULL == pCfg) {
|
||||
return;
|
||||
}
|
||||
taosArrayDestroy(pCfg->pFuncs);
|
||||
taosMemoryFree(pCfg->pComment);
|
||||
taosMemoryFree(pCfg->pSchemas);
|
||||
|
|
|
@ -339,6 +339,11 @@ static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt*
|
|||
pCxt->pMetaCache);
|
||||
}
|
||||
|
||||
static int32_t collectMetaKeyFromShowCluster(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
|
||||
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER,
|
||||
pCxt->pMetaCache);
|
||||
}
|
||||
|
||||
static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
|
||||
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES,
|
||||
pCxt->pMetaCache);
|
||||
|
@ -547,6 +552,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
|
|||
return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
|
||||
case QUERY_NODE_SHOW_BNODES_STMT:
|
||||
return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
|
||||
case QUERY_NODE_SHOW_CLUSTER_STMT:
|
||||
return collectMetaKeyFromShowCluster(pCxt, (SShowStmt*)pStmt);
|
||||
case QUERY_NODE_SHOW_DATABASES_STMT:
|
||||
return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
|
||||
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
|
||||
|
|
|
@ -119,6 +119,12 @@ void generateInformationSchema(MockCatalogService* mcs) {
|
|||
.addColumn("dnode_id", TSDB_DATA_TYPE_INT);
|
||||
builder.done();
|
||||
}
|
||||
{
|
||||
ITableBuilder& builder =
|
||||
mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1)
|
||||
.addColumn("id", TSDB_DATA_TYPE_BIGINT);
|
||||
builder.done();
|
||||
}
|
||||
}
|
||||
|
||||
void generatePerformanceSchema(MockCatalogService* mcs) {
|
||||
|
|
|
@ -25,6 +25,15 @@ class ParserShowToUseTest : public ParserDdlTest {};
|
|||
// todo SHOW apps
|
||||
// todo SHOW connections
|
||||
|
||||
TEST_F(ParserShowToUseTest, showCluster) {
|
||||
useDb("root", "test");
|
||||
|
||||
setCheckDdlFunc(
|
||||
[&](const SQuery* pQuery, ParserStage stage) { ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_SELECT_STMT); });
|
||||
|
||||
run("SHOW CLUSTER");
|
||||
}
|
||||
|
||||
TEST_F(ParserShowToUseTest, showConsumers) {
|
||||
useDb("root", "test");
|
||||
|
||||
|
|
|
@ -865,6 +865,7 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel
|
|||
|
||||
TSWAP(pProject->node.pLimit, pSelect->pLimit);
|
||||
TSWAP(pProject->node.pSlimit, pSelect->pSlimit);
|
||||
pProject->ignoreGroupId = (NULL == pSelect->pPartitionByList);
|
||||
pProject->node.groupAction =
|
||||
(!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
|
||||
pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
|
||||
|
@ -1078,6 +1079,7 @@ static int32_t createSetOpProjectLogicNode(SLogicPlanContext* pCxt, SSetOperator
|
|||
if (NULL == pSetOperator->pOrderByList) {
|
||||
TSWAP(pProject->node.pLimit, pSetOperator->pLimit);
|
||||
}
|
||||
pProject->ignoreGroupId = true;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
|
|
|
@ -998,6 +998,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild
|
|||
}
|
||||
|
||||
pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode);
|
||||
pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (0 == LIST_LENGTH(pChildren)) {
|
||||
|
|
|
@ -136,6 +136,7 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg*
|
|||
pRsp->pCont = buf;
|
||||
pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
|
||||
tmsgSendRsp(pRsp);
|
||||
tFreeStreamDispatchReq(pReq);
|
||||
return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -62,6 +62,11 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void tFreeStreamDispatchReq(SStreamDispatchReq* pReq) {
|
||||
taosArrayDestroyP(pReq->data, taosMemoryFree);
|
||||
taosArrayDestroy(pReq->dataLen);
|
||||
}
|
||||
|
||||
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq) {
|
||||
if (tStartEncode(pEncoder) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
|
||||
|
@ -279,7 +284,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
|
|||
}
|
||||
code = 0;
|
||||
FAIL_FIXED_DISPATCH:
|
||||
taosArrayDestroy(req.data);
|
||||
taosArrayDestroyP(req.data, taosMemoryFree);
|
||||
taosArrayDestroy(req.dataLen);
|
||||
return code;
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
#include "streamInc.h"
|
||||
|
||||
static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) {
|
||||
static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes) {
|
||||
void* exec = pTask->exec.executor;
|
||||
|
||||
// set input
|
||||
|
@ -82,14 +82,16 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
static FORCE_INLINE int32_t streamUpdateVer(SStreamTask* pTask, SStreamDataBlock* pBlock) {
|
||||
ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK);
|
||||
int32_t childId = pBlock->childId;
|
||||
int64_t ver = pBlock->sourceVer;
|
||||
SStreamChildEpInfo* pChildInfo = taosArrayGetP(pTask->childEpInfo, childId);
|
||||
pChildInfo->processedVer = ver;
|
||||
/*pChildInfo-> = ver;*/
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum) {
|
||||
ASSERT(pTask->taskLevel != TASK_LEVEL__SINK);
|
||||
|
@ -198,6 +200,8 @@ int32_t streamExecForAll(SStreamTask* pTask) {
|
|||
streamTaskExecImpl(pTask, data, pRes);
|
||||
qDebug("stream task %d exec end", pTask->taskId);
|
||||
|
||||
streamFreeQitem(data);
|
||||
|
||||
if (taosArrayGetSize(pRes) != 0) {
|
||||
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
|
||||
if (qRes == NULL) {
|
||||
|
|
|
@ -87,63 +87,95 @@ int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp
|
|||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
int32_t childId;
|
||||
int64_t ver;
|
||||
} SStreamVgVerCheckpoint;
|
||||
|
||||
int32_t tEncodeSStreamVgVerCheckpoint(SEncoder* pEncoder, const SStreamVgVerCheckpoint* pCheckpoint) {
|
||||
if (tEncodeI32(pEncoder, pCheckpoint->vgId) < 0) return -1;
|
||||
int32_t tEncodeSStreamCheckpointInfo(SEncoder* pEncoder, const SStreamCheckpointInfo* pCheckpoint) {
|
||||
if (tEncodeI32(pEncoder, pCheckpoint->nodeId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pCheckpoint->childId) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pCheckpoint->ver) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pCheckpoint->stateProcessedVer) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tDecodeSStreamVgVerCheckpoint(SDecoder* pDecoder, SStreamVgVerCheckpoint* pCheckpoint) {
|
||||
if (tDecodeI32(pDecoder, &pCheckpoint->vgId) < 0) return -1;
|
||||
int32_t tDecodeSStreamCheckpointInfo(SDecoder* pDecoder, SStreamCheckpointInfo* pCheckpoint) {
|
||||
if (tDecodeI32(pDecoder, &pCheckpoint->nodeId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pCheckpoint->childId) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pCheckpoint->ver) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pCheckpoint->stateProcessedVer) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int64_t checkTs;
|
||||
int64_t checkpointId;
|
||||
int32_t taskId;
|
||||
SArray* checkpointVer; // SArray<SStreamVgCheckpointVer>
|
||||
} SStreamAggVerCheckpoint;
|
||||
|
||||
int32_t tEncodeSStreamAggVerCheckpoint(SEncoder* pEncoder, const SStreamAggVerCheckpoint* pCheckpoint) {
|
||||
int32_t tEncodeSStreamMultiVgCheckpointInfo(SEncoder* pEncoder, const SStreamMultiVgCheckpointInfo* pCheckpoint) {
|
||||
if (tEncodeI64(pEncoder, pCheckpoint->streamId) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pCheckpoint->checkTs) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pCheckpoint->checkpointId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pCheckpoint->checkpointId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pCheckpoint->taskId) < 0) return -1;
|
||||
int32_t sz = taosArrayGetSize(pCheckpoint->checkpointVer);
|
||||
if (tEncodeI32(pEncoder, sz) < 0) return -1;
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SStreamVgVerCheckpoint* pOneVgCkpoint = taosArrayGet(pCheckpoint->checkpointVer, i);
|
||||
if (tEncodeSStreamVgVerCheckpoint(pEncoder, pOneVgCkpoint) < 0) return -1;
|
||||
SStreamCheckpointInfo* pOneVgCkpoint = taosArrayGet(pCheckpoint->checkpointVer, i);
|
||||
if (tEncodeSStreamCheckpointInfo(pEncoder, pOneVgCkpoint) < 0) return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tDecodeSStreamAggVerCheckpoint(SDecoder* pDecoder, SStreamAggVerCheckpoint* pCheckpoint) {
|
||||
int32_t tDecodeSStreamMultiVgCheckpointInfo(SDecoder* pDecoder, SStreamMultiVgCheckpointInfo* pCheckpoint) {
|
||||
if (tDecodeI64(pDecoder, &pCheckpoint->streamId) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pCheckpoint->checkTs) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pCheckpoint->checkpointId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pCheckpoint->checkpointId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pCheckpoint->taskId) < 0) return -1;
|
||||
int32_t sz;
|
||||
if (tDecodeI32(pDecoder, &sz) < 0) return -1;
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SStreamVgVerCheckpoint oneVgCheckpoint;
|
||||
if (tDecodeSStreamVgVerCheckpoint(pDecoder, &oneVgCheckpoint) < 0) return -1;
|
||||
SStreamCheckpointInfo oneVgCheckpoint;
|
||||
if (tDecodeSStreamCheckpointInfo(pDecoder, &oneVgCheckpoint) < 0) return -1;
|
||||
taosArrayPush(pCheckpoint->checkpointVer, &oneVgCheckpoint);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamCheckSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
void* buf = NULL;
|
||||
|
||||
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
|
||||
int32_t sz = taosArrayGetSize(pTask->checkpointInfo);
|
||||
|
||||
SStreamMultiVgCheckpointInfo checkpoint;
|
||||
checkpoint.checkpointId = 0;
|
||||
checkpoint.checkTs = taosGetTimestampMs();
|
||||
checkpoint.streamId = pTask->streamId;
|
||||
checkpoint.taskId = pTask->taskId;
|
||||
checkpoint.checkpointVer = pTask->checkpointInfo;
|
||||
|
||||
int32_t len;
|
||||
int32_t code;
|
||||
tEncodeSize(tEncodeSStreamMultiVgCheckpointInfo, &checkpoint, len, code);
|
||||
if (code < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
buf = taosMemoryCalloc(1, len);
|
||||
if (buf == NULL) {
|
||||
return -1;
|
||||
}
|
||||
SEncoder encoder;
|
||||
tEncoderInit(&encoder, buf, len);
|
||||
tEncodeSStreamMultiVgCheckpointInfo(&encoder, &checkpoint);
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
SStreamCheckpointKey key = {
|
||||
.taskId = pTask->taskId,
|
||||
.checkpointId = checkpoint.checkpointId,
|
||||
};
|
||||
|
||||
if (tdbTbUpsert(pMeta->pStateDb, &key, sizeof(SStreamCheckpointKey), buf, len, &pMeta->txn) < 0) {
|
||||
ASSERT(0);
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
taosMemoryFree(buf);
|
||||
return 0;
|
||||
FAIL:
|
||||
if (buf) taosMemoryFree(buf);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
|
||||
// load status
|
||||
|
@ -154,9 +186,39 @@ int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
|||
}
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, pVal, vLen);
|
||||
SStreamAggVerCheckpoint aggCheckpoint;
|
||||
tDecodeSStreamAggVerCheckpoint(&decoder, &aggCheckpoint);
|
||||
/*pTask->*/
|
||||
SStreamMultiVgCheckpointInfo aggCheckpoint;
|
||||
tDecodeSStreamMultiVgCheckpointInfo(&decoder, &aggCheckpoint);
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
pTask->nextCheckId = aggCheckpoint.checkpointId + 1;
|
||||
pTask->checkpointInfo = aggCheckpoint.checkpointVer;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamCheckAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
|
||||
// save and copy state
|
||||
// save state info
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamRecoverAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
|
||||
// try recover sink level
|
||||
// after all sink level recovered, choose current state backend to recover
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamCheckSourceLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
|
||||
// try recover agg level
|
||||
//
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamRecoverSourceLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
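streamCheckSinkLevel above persists a sink task's per-upstream checkpoint versions by sizing the encoding first with `tEncodeSize`, encoding into a heap buffer, and upserting the buffer into the meta store keyed by task id and checkpoint id. A minimal sketch of that size-encode-store sequence, assuming the TDengine encoder and TDB helpers used above (the payload struct `MyState` and its encoder `tEncodeMyState` are hypothetical):

```
/* Sketch only; assumes the stream meta / tencode / tdb headers from the TDengine tree. */
int32_t persistState(SStreamMeta* pMeta, int32_t taskId, const MyState* pState) {
  int32_t len = 0;
  int32_t code = 0;
  tEncodeSize(tEncodeMyState, pState, len, code);  /* dry run to size the buffer */
  if (code < 0) return -1;

  void* buf = taosMemoryCalloc(1, len);
  if (buf == NULL) return -1;

  SEncoder encoder;
  tEncoderInit(&encoder, buf, len);
  tEncodeMyState(&encoder, pState);                /* real encode into buf */
  tEncoderClear(&encoder);

  /* Key by task id here; the real code uses SStreamCheckpointKey {taskId, checkpointId}. */
  if (tdbTbUpsert(pMeta->pStateDb, &taskId, sizeof(taskId), buf, len, &pMeta->txn) < 0) {
    taosMemoryFree(buf);
    return -1;
  }
  taosMemoryFree(buf);
  return 0;
}
```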
@ -34,7 +34,7 @@ int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo)
|
|||
if (tEncodeI32(pEncoder, pInfo->taskId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pInfo->nodeId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pInfo->childId) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pInfo->processedVer) < 0) return -1;
|
||||
/*if (tEncodeI64(pEncoder, pInfo->processedVer) < 0) return -1;*/
|
||||
if (tEncodeSEpSet(pEncoder, &pInfo->epSet) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -43,7 +43,7 @@ int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo) {
|
|||
if (tDecodeI32(pDecoder, &pInfo->taskId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pInfo->nodeId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pInfo->childId) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pInfo->processedVer) < 0) return -1;
|
||||
/*if (tDecodeI64(pDecoder, &pInfo->processedVer) < 0) return -1;*/
|
||||
if (tDecodeSEpSet(pDecoder, &pInfo->epSet) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -192,9 +192,11 @@ int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms);
|
|||
int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms);
|
||||
int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode);
|
||||
int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms);
|
||||
|
||||
// utils --------------
|
||||
int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg);
|
||||
|
|
|
@ -1322,10 +1322,10 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
|
|||
return ret;
|
||||
}
|
||||
|
||||
int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) {
|
||||
int32_t ret = 0;
|
||||
if (syncEnvIsStart()) {
|
||||
taosTmrReset(pSyncNode->FpHeartbeatTimerCB, 1, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer);
|
||||
taosTmrReset(pSyncNode->FpHeartbeatTimerCB, ms, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer);
|
||||
atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
|
||||
} else {
|
||||
sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
|
||||
|
@ -1333,13 +1333,18 @@ int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) {
|
|||
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", 1);
|
||||
snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", ms);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
int32_t ret = syncNodeStartHeartbeatTimerMS(pSyncNode, 1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
int32_t ret = 0;
|
||||
atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1);
|
||||
|
@ -1363,6 +1368,12 @@ int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) {
|
||||
syncNodeStopHeartbeatTimer(pSyncNode);
|
||||
syncNodeStartHeartbeatTimerMS(pSyncNode, ms);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// utils --------------
|
||||
int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) {
|
||||
SEpSet epSet;
|
||||
|
|
|
@ -200,9 +200,23 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
|
|||
// send msg
|
||||
syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);
|
||||
syncAppendEntriesBatchDestroy(pMsg);
|
||||
|
||||
// speed up
|
||||
if (pMsg->dataCount > 0 && pMsg->prevLogIndex < pSyncNode->commitIndex) {
|
||||
ret = 1;
|
||||
|
||||
do {
|
||||
char logBuf[128];
|
||||
char host[64];
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
|
||||
snprintf(logBuf, sizeof(logBuf), "speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
|
||||
|
@ -309,7 +323,14 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
|
|||
break;
|
||||
}
|
||||
|
||||
syncNodeRestartHeartbeatTimer(pSyncNode);
|
||||
if (ret > 0) {
|
||||
// speed up replicate
|
||||
int32_t ms = pSyncNode->heartbeatTimerMS < 50 ? pSyncNode->heartbeatTimerMS : 50;
|
||||
syncNodeRestartNowHeartbeatTimerMS(pSyncNode, ms);
|
||||
|
||||
} else {
|
||||
syncNodeRestartHeartbeatTimer(pSyncNode);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
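The replication path now distinguishes "more entries are pending" (ret > 0, reported when the append-entries batch still has data and prevLogIndex trails commitIndex) from the idle case: pending data restarts the heartbeat timer with a short interval capped at 50 ms, otherwise the configured interval is kept. A tiny sketch of that interval decision, with illustrative values:

```
#include <stdint.h>

/* Pick the next heartbeat interval after a replication round.
 * moreToReplicate mirrors ret > 0 in syncNodeReplicate; capMs mirrors the 50 ms cap. */
int32_t nextHeartbeatMs(int32_t normalMs, int32_t moreToReplicate) {
  const int32_t capMs = 50;
  if (moreToReplicate) {
    return normalMs < capMs ? normalMs : capMs;  /* speed up while peers lag behind */
  }
  return normalMs;                               /* idle: keep the configured interval */
}

/* e.g. nextHeartbeatMs(300, 1) == 50, nextHeartbeatMs(30, 1) == 30, nextHeartbeatMs(300, 0) == 300 */
```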
@ -105,13 +105,13 @@ typedef SRpcCtxVal STransCtxVal;
|
|||
typedef SRpcInfo STrans;
|
||||
typedef SRpcConnInfo STransHandleInfo;
|
||||
|
||||
// ref mgt
|
||||
// handle
|
||||
// ref mgt handle
|
||||
typedef struct SExHandle {
|
||||
void* handle;
|
||||
int64_t refId;
|
||||
void* pThrd;
|
||||
} SExHandle;
|
||||
|
||||
/*convet from fqdn to ip */
|
||||
typedef struct SCvtAddr {
|
||||
char ip[TSDB_FQDN_LEN];
|
||||
|
|
|
@ -222,14 +222,13 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
  pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync);

  for (int i = 0; i < pool->nAsync; i++) {
    uv_async_t* async = &(pool->asyncs[i]);
    uv_async_init(loop, async, cb);

    SAsyncItem* item = taosMemoryCalloc(1, sizeof(SAsyncItem));
    item->pThrd = arg;
    QUEUE_INIT(&item->qmsg);
    taosThreadMutexInit(&item->mtx, NULL);

    uv_async_t* async = &(pool->asyncs[i]);
    uv_async_init(loop, async, cb);
    async->data = item;
  }
  return pool;

@ -238,7 +237,7 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
void transAsyncPoolDestroy(SAsyncPool* pool) {
  for (int i = 0; i < pool->nAsync; i++) {
    uv_async_t* async = &(pool->asyncs[i]);
    // uv_close((uv_handle_t*)async, NULL);

    SAsyncItem* item = async->data;
    taosThreadMutexDestroy(&item->mtx);
    taosMemoryFree(item);

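The reordering in transAsyncPoolCreate builds each SAsyncItem completely (thread pointer, queue, mutex) before the libuv handle is initialized and the item is attached through async->data, so the handle never carries a pointer to a half-built item. A minimal standalone libuv sketch of the same build-then-attach ordering (assumes libuv is installed; the `Item` struct is hypothetical):

```
#include <stdlib.h>
#include <uv.h>

typedef struct {
  int ready;  /* hypothetical per-handle state */
} Item;

static void onAsync(uv_async_t* handle) {
  Item* item = handle->data;             /* safe: data points at a finished Item */
  (void)item;
  uv_close((uv_handle_t*)handle, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_async_t async;

  Item* item = calloc(1, sizeof(Item));  /* build the per-handle state first     */
  item->ready = 1;

  uv_async_init(loop, &async, onAsync);  /* then register the handle ...         */
  async.data = item;                     /* ... and attach the finished state    */

  uv_async_send(&async);                 /* the callback now sees a complete Item */
  uv_run(loop, UV_RUN_DEFAULT);
  free(item);
  return 0;
}
```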
@ -614,6 +614,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_STAT, "Invalid rsma state"
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_QTASKINFO_CREATE, "Rsma qtaskinfo creation error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty")
|
||||
|
||||
//index
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
|
||||
|
|
|
@ -4,7 +4,7 @@ set -e
|
|||
|
||||
taosd >>/dev/null 2>&1 &
|
||||
taosadapter >>/dev/null 2>&1 &
|
||||
|
||||
sleep 10
|
||||
cd ../../docs/examples/go
|
||||
|
||||
go mod tidy
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
taosd >>/dev/null 2>&1 &
|
||||
taosadapter >>/dev/null 2>&1 &
|
||||
|
||||
sleep 10
|
||||
|
||||
cd ../../docs/examples/python
|
||||
|
||||
# 1
|
||||
taos -s "create database if not exists log"
|
||||
python3 connect_example.py
|
||||
|
||||
# 2
|
||||
taos -s "drop database if exists power"
|
||||
python3 native_insert_example.py
|
||||
|
||||
# 3
|
||||
taos -s "drop database power"
|
||||
python3 bind_param_example.py
|
||||
|
||||
# 4
|
||||
taos -s "drop database power"
|
||||
python3 multi_bind_example.py
|
||||
|
||||
# 5
|
||||
python3 query_example.py
|
||||
|
||||
# 6
|
||||
python3 async_query_example.py
|
||||
|
||||
# 7
|
||||
taos -s "drop database if exists test"
|
||||
python3 line_protocol_example.py
|
||||
|
||||
# 8
|
||||
taos -s "drop database test"
|
||||
python3 telnet_line_protocol_example.py
|
||||
|
||||
# 9
|
||||
taos -s "drop database test"
|
||||
python3 json_protocol_example.py
|
||||
|
||||
# 10
|
||||
# python3 subscribe_demo.py
|
|
@ -41,7 +41,7 @@ fi
|
|||
cat ../script/jenkins/basic.txt |grep -v "^#"|grep -v "^$"|sed "s/^/,,script,/" >>$case_file
|
||||
grep "^python" ../system-test/fulltest.sh |sed "s/^/,,system-test,/" >>$case_file
|
||||
grep "^python" ../develop-test/fulltest.sh |sed "s/^/,,develop-test,/" >>$case_file
|
||||
|
||||
find ../docs-examples-test/ -name "*.sh" -printf '%f\n' | xargs -I {} echo ",,docs-examples-test,bash {}" >> $case_file
|
||||
# tar source code for run.sh to use
|
||||
# if [ $ent -eq 0 ]; then
|
||||
# cd ../../../
|
||||
|
|
|
@ -50,12 +50,14 @@ if [ $ent -eq 0 ]; then
|
|||
export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib
|
||||
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
|
||||
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
|
||||
ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null
|
||||
CONTAINER_TESTDIR=/home/TDengine
|
||||
else
|
||||
export PATH=$PATH:/home/TDinternal/debug/build/bin
|
||||
export LD_LIBRARY_PATH=/home/TDinternal/debug/build/lib
|
||||
ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
|
||||
ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
|
||||
ln -s /home/TDinternal/community/include/client/taos.h /usr/include/taos.h 2>/dev/null
|
||||
CONTAINER_TESTDIR=/home/TDinternal/community
|
||||
fi
|
||||
mkdir -p /var/lib/taos/subscribe
|
||||
|
|
|
@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10);
|
|||
sql insert into ct1 values(now+1s, 1);
|
||||
sql insert into ct1 values(now+2s, 100);
|
||||
|
||||
print =============== wait maxdelay 15+1 seconds for results
|
||||
sleep 16000
|
||||
print =============== wait maxdelay 15+2 seconds for results
|
||||
sleep 17000
|
||||
|
||||
print =============== select * from retention level 2 from memory
|
||||
sql select * from ct1;
|
||||
|
|
|
@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10, 10.0);
|
|||
sql insert into ct1 values(now+1s, 1, 1.0);
|
||||
sql insert into ct1 values(now+2s, 100, 100.0);
|
||||
|
||||
print =============== wait maxdelay 5+1 seconds for results
|
||||
sleep 6000
|
||||
print =============== wait maxdelay 5+2 seconds for results
|
||||
sleep 7000
|
||||
|
||||
print =============== select * from retention level 2 from memory
|
||||
sql select * from ct1;
|
||||
|
@ -135,8 +135,8 @@ print =============== insert after rsma qtaskinfo recovery
|
|||
sql insert into ct1 values(now, 50, 500.0);
|
||||
sql insert into ct1 values(now+1s, 40, 40.0);
|
||||
|
||||
print =============== wait maxdelay 5+1 seconds for results
|
||||
sleep 6000
|
||||
print =============== wait maxdelay 5+2 seconds for results
|
||||
sleep 7000
|
||||
|
||||
print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery
|
||||
sql select * from ct1;
|
||||
|
|
|
@ -187,7 +187,7 @@ class TDTestCase:
|
|||
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
|
||||
|
||||
def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, rsma=False, rsma_type="sum"):
|
||||
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
|
||||
tdLog.printNoPrefix("==========step: start insert data into tables now.....")
|
||||
# from ...pytest.util.common import DataSet
|
||||
data = DataSet()
|
||||
data.get_order_set(rows)
|
||||
|
|
|
@ -6,13 +6,10 @@ import random
|
|||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
|
||||
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdLog.debug(f"start to execute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
self.vnode_disbutes = None
|
||||
self.ts = 1537146000000
|
||||
|
@ -31,60 +28,61 @@ class TDTestCase:
|
|||
same_result = tdSql.queryResult
|
||||
|
||||
if spread_result !=same_result:
|
||||
tdLog.exit(" max function work not as expected, sql : %s "% spread_sql)
|
||||
tdLog.exit(f" max function work not as expected, sql : {spread_sql} ")
|
||||
else:
|
||||
tdLog.info(" max function work as expected, sql : %s "% spread_sql)
|
||||
tdLog.info(f" max function work as expected, sql : {spread_sql} ")
|
||||
|
||||
|
||||
def prepare_datas_of_distribute(self):
|
||||
def prepare_datas_of_distribute(self, dbname="testdb"):
|
||||
|
||||
# prepate datas for 20 tables distributed at different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(f" use {dbname}")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
f'''create table {dbname}.stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
f'''
|
||||
create table {dbname}.t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(20):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
|
||||
for i in range(1,21):
|
||||
if i ==1 or i == 4:
|
||||
continue
|
||||
else:
|
||||
tbname = "ct"+f'{i}'
|
||||
tbname = f"ct{i}"
|
||||
for j in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
f'''insert into {dbname}.t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
|
@ -100,11 +98,11 @@ class TDTestCase:
|
|||
'''
|
||||
)
|
||||
|
||||
tdLog.info(" prepare data for distributed_aggregate done! ")
|
||||
tdLog.info(f" prepare data for distributed_aggregate done! ")
|
||||
|
||||
def check_distribute_datas(self):
|
||||
def check_distribute_datas(self, dbname="testdb"):
|
||||
# get vgroup_ids of all
|
||||
tdSql.query("show vgroups ")
|
||||
tdSql.query(f"show {dbname}.vgroups ")
|
||||
vgroups = tdSql.queryResult
|
||||
|
||||
vnode_tables={}
|
||||
|
@ -112,9 +110,8 @@ class TDTestCase:
|
|||
for vgroup_id in vgroups:
|
||||
vnode_tables[vgroup_id[0]]=[]
|
||||
|
||||
|
||||
# check sub_table of per vnode ,make sure sub_table has been distributed
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
tdSql.query(f"show {dbname}.tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
|
@ -126,9 +123,9 @@ class TDTestCase:
|
|||
if len(v)>=2:
|
||||
count+=1
|
||||
if count < 2:
|
||||
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
|
||||
tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ")
|
||||
|
||||
def check_spread_distribute_diff_vnode(self,col_name):
|
||||
def check_spread_distribute_diff_vnode(self,col_name, dbname="testdb"):
|
||||
|
||||
vgroup_ids = []
|
||||
for k ,v in self.vnode_disbutes.items():
|
||||
|
@ -142,13 +139,13 @@ class TDTestCase:
|
|||
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
|
||||
tbname_ins = ""
|
||||
for tbname in distribute_tbnames:
|
||||
tbname_ins += "'%s' ,"%tbname
|
||||
tbname_ins += f"'{tbname}' ,"
|
||||
|
||||
tbname_filters = tbname_ins[:-1]
|
||||
|
||||
spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})"
|
||||
spread_sql = f"select spread({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})"
|
||||
|
||||
same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})"
|
||||
same_sql = f"select max({col_name}) - min({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})"
|
||||
|
||||
tdSql.query(spread_sql)
|
||||
spread_result = tdSql.queryResult
|
||||
|
@ -157,20 +154,20 @@ class TDTestCase:
|
|||
same_result = tdSql.queryResult
|
||||
|
||||
if spread_result !=same_result:
|
||||
tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql)
|
||||
tdLog.exit(f" spread function work not as expected, sql : {spread_sql} ")
|
||||
else:
|
||||
tdLog.info(" spread function work as expected, sql : %s "% spread_sql)
|
||||
tdLog.info(f" spread function work as expected, sql : {spread_sql} ")
|
||||
|
||||
def check_spread_status(self):
|
||||
def check_spread_status(self, dbname="testdb"):
|
||||
# check max function work status
|
||||
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
tdSql.query(f"show {dbname}.tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
tablenames.append(table_name[0])
|
||||
tablenames.append(f"{dbname}.{table_name[0]}")
|
||||
|
||||
tdSql.query("desc stb1")
|
||||
tdSql.query(f"desc {dbname}.stb1")
|
||||
col_names = tdSql.queryResult
|
||||
|
||||
colnames = []
|
||||
|
@ -185,80 +182,76 @@ class TDTestCase:
|
|||
# check max function for different vnode
|
||||
|
||||
for colname in colnames:
|
||||
if colname.startswith("c"):
|
||||
if colname.startswith(f"c"):
|
||||
self.check_spread_distribute_diff_vnode(colname)
|
||||
else:
|
||||
# self.check_spread_distribute_diff_vnode(colname) # bug for tag
|
||||
pass
|
||||
|
||||
|
||||
def distribute_agg_query(self):
|
||||
def distribute_agg_query(self, dbname="testdb"):
|
||||
# basic filter
|
||||
tdSql.query("select spread(c1) from stb1 where c1 is null")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 where c1 is null")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
tdSql.query("select spread(c1) from stb1 where t1=1")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1=1")
|
||||
tdSql.checkData(0,0,8.000000000)
|
||||
|
||||
tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ")
|
||||
tdSql.query(f"select spread(c1+c2) from {dbname}.stb1 where c1 =1 ")
|
||||
tdSql.checkData(0,0,0.000000000)
|
||||
|
||||
tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 where tbname=\"ct2\"")
|
||||
tdSql.checkData(0,0,8.000000000)
|
||||
|
||||
tdSql.query("select spread(c1) from stb1 partition by tbname")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname")
|
||||
tdSql.checkRows(20)
|
||||
|
||||
tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
|
||||
tdSql.checkRows(15)
|
||||
|
||||
# union all
|
||||
tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 union all select max(c1)-min(c1) from {dbname}.stb1 ")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0,0,28.000000000)
|
||||
|
||||
# join
|
||||
|
||||
tdSql.execute(" create database if not exists db ")
|
||||
tdSql.execute(" use db ")
|
||||
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
|
||||
tdSql.execute(" create table tb1 using st tags(1) ")
|
||||
tdSql.execute(" create table tb2 using st tags(2) ")
|
||||
tdSql.execute(f" create database if not exists db ")
|
||||
tdSql.execute(f" use db ")
|
||||
tdSql.execute(f" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
|
||||
tdSql.execute(f" create table db.tb1 using db.st tags(1) ")
|
||||
tdSql.execute(f" create table db.tb2 using db.st tags(2) ")
|
||||
|
||||
|
||||
for i in range(10):
|
||||
ts = i*10 + self.ts
|
||||
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
|
||||
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
|
||||
tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
|
||||
tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
|
||||
|
||||
tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
|
||||
tdSql.query(f"select spread(tb1.c1), spread(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,9.000000000)
|
||||
tdSql.checkData(0,0,9.00000)
|
||||
|
||||
# group by
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
|
||||
tdSql.execute(f" use {dbname} ")
|
||||
tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by t1 ")
|
||||
tdSql.checkRows(20)
|
||||
tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
|
||||
tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by c1 ")
|
||||
tdSql.checkRows(30)
|
||||
tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
|
||||
tdSql.query(f" select max(c1),c2 from {dbname}.stb1 group by c2 ")
|
||||
tdSql.checkRows(31)
|
||||
|
||||
# partition by tbname or partition by tag
|
||||
tdSql.query("select spread(c1) from stb1 partition by tbname")
|
||||
tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname")
|
||||
query_data = tdSql.queryResult
|
||||
|
||||
# nest query for support max
|
||||
tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)")
|
||||
tdSql.query(f"select spread(c2+2)+1 from (select max(c1) c2 from {dbname}.stb1)")
|
||||
tdSql.checkData(0,0,1.000000000)
|
||||
tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
|
||||
tdSql.query(f"select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
|
||||
tdSql.checkData(0,0,29.000000000)
|
||||
tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
|
||||
tdSql.query(f"select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
|
||||
tdSql.checkData(0,0,29.000000000)
|
||||
|
||||
# mixup with other functions
|
||||
tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1")
|
||||
tdSql.query(f"select max(c1),count(c1),last(c2,c3),spread(c1) from {dbname}.stb1")
|
||||
tdSql.checkData(0,0,28)
|
||||
tdSql.checkData(0,1,184)
|
||||
tdSql.checkData(0,2,-99999)
|
||||
|
@ -275,7 +268,7 @@ class TDTestCase:
|
|||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
|
|
@ -7,10 +7,7 @@ import platform
|
|||
import math
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
|
||||
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
|
@ -45,55 +42,56 @@ class TDTestCase:
|
|||
else:
|
||||
tdLog.exit(" sql:%s; row:0 col:0 data:%d , expect:%d"%(stddev_sql,tdSql.queryResult[0][0],stddev_result))
|
||||
|
||||
def prepare_datas_of_distribute(self):
|
||||
def prepare_datas_of_distribute(self, dbname="testdb"):
|
||||
|
||||
# prepate datas for 20 tables distributed at different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(f" use {dbname}")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
f'''create table {dbname}.stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
f'''
|
||||
create table {dbname}.t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(20):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
|
||||
for i in range(1,21):
|
||||
if i ==1 or i == 4:
|
||||
continue
|
||||
else:
|
||||
tbname = "ct"+f'{i}'
|
||||
tbname = f"ct{i}"
|
||||
for j in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
f'''insert into {dbname}.t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
|
@ -109,11 +107,11 @@ class TDTestCase:
|
|||
'''
|
||||
)
|
||||
|
||||
tdLog.info(" prepare data for distributed_aggregate done! ")
|
||||
tdLog.info(f" prepare data for distributed_aggregate done! ")
|
||||
|
||||
def check_distribute_datas(self):
def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
tdSql.query("show vgroups ")
tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}

@@ -121,9 +119,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check sub_table of per vnode ,make sure sub_table has been distributed
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:

@@ -135,9 +132,9 @@ class TDTestCase:
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ")
def check_stddev_distribute_diff_vnode(self,col_name):
def check_stddev_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():

@@ -155,9 +152,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
stddev_sql = f"select stddev({col_name}) from stb1 where tbname in ({tbname_filters});"
stddev_sql = f"select stddev({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]

@@ -175,17 +172,16 @@ class TDTestCase:
tdSql.query(stddev_sql)
tdSql.checkData(0,0,stddev_result)
def check_stddev_status(self):
def check_stddev_status(self, dbname="testdb"):
# check max function work status
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tablenames.append(f"{dbname}.{table_name[0]}")
tdSql.query("desc stb1")
tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []

@@ -197,50 +193,42 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
self.check_stddev_functions(tablename,colname)
else:
# self.check_stddev_functions(tablename,colname)
pass
# check max function for different vnode
for colname in colnames:
if colname.startswith("c"):
self.check_stddev_distribute_diff_vnode(colname)
else:
# self.check_stddev_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
def distribute_agg_query(self, dbname="testdb"):
# basic filter
tdSql.query(" select stddev(c1) from stb1 ")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,6.694663959)
tdSql.query(" select stddev(a) from (select stddev(c1) a from stb1 partition by tbname) ")
tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 partition by tbname) ")
tdSql.checkData(0,0,0.156797505)
tdSql.query(" select stddev(c1) from stb1 where t1=1")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,2.581988897)
tdSql.query("select stddev(c1+c2) from stb1 where c1 =1 ")
tdSql.query(f"select stddev(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,0.000000000)
tdSql.query("select stddev(c1) from stb1 where tbname=\"ct2\"")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,2.581988897)
tdSql.query("select stddev(c1) from stb1 partition by tbname")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select stddev(c1) from stb1 where t1> 4 partition by tbname")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select stddev(c1) from stb1 union all select stddev(c1) from stb1 ")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 union all select stddev(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,6.694663959)
tdSql.query("select stddev(a) from (select stddev(c1) a from stb1 union all select stddev(c1) a from stb1)")
tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 union all select stddev(c1) a from {dbname}.stb1)")
tdSql.checkRows(1)
tdSql.checkData(0,0,0.000000000)

@@ -248,38 +236,38 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table db.tb1 using db.st tags(1) ")
tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,2.872281323)
tdSql.checkData(0,1,2.872281323)
# group by
tdSql.execute(" use testdb ")
tdSql.execute(f" use {dbname} ")
# partition by tbname or partition by tag
tdSql.query("select stddev(c1) from stb1 partition by tbname")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
# nest query for support max
tdSql.query("select stddev(c2+2)+1 from (select stddev(c1) c2 from stb1)")
tdSql.query(f"select stddev(c2+2)+1 from (select stddev(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,1.000000000)
tdSql.query("select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.query(f"select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,6.694663959)
tdSql.query("select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.query(f"select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,6.694663959)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from stb1")
tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
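The stddev case above verifies the distributed aggregate by recomputing the value on the client from the raw rows (the `pre_data` / `same_sql` lines) and comparing it with the SQL `stddev()` result. Below is a minimal, self-contained sketch of that verification idea; it is not code from this repository, the fetch helper and tolerance are assumptions, and numpy is used the way the test file already imports it as `np`:

```python
import numpy as np

def client_side_stddev(rows):
    """Recompute the population standard deviation over non-NULL values,
    mirroring the "is not null" filter in the test's same_sql query, so the
    result can be compared against SQL stddev()."""
    values = np.array([r[0] for r in rows if r[0] is not None], dtype=float)
    return float(np.std(values))  # np.std defaults to population stddev (ddof=0)

# Hypothetical usage against a cursor `cur` (names are illustrative only):
# cur.execute("select c1 from testdb.stb1 where tbname in ('ct1','ct2')")
# assert abs(client_side_stddev(cur.fetchall()) - sql_stddev) < 1e-6
```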
@@ -7,10 +7,7 @@ import platform
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)

@@ -34,55 +31,56 @@ class TDTestCase:
tdSql.query(sum_sql)
tdSql.checkData(0,0,pre_sum)
def prepare_datas_of_distribute(self):
def prepare_datas_of_distribute(self, dbname="testdb"):
# prepate datas for 20 tables distributed at different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(f" use {dbname}")
tdSql.execute(
'''create table stb1
f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
f'''
create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
tbname = f"ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )

@@ -98,11 +96,11 @@ class TDTestCase:
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
tdLog.info(f" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
tdSql.query("show vgroups ")
tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}

@@ -110,9 +108,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check sub_table of per vnode ,make sure sub_table has been distributed
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:

@@ -124,9 +121,9 @@ class TDTestCase:
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ")
def check_sum_distribute_diff_vnode(self,col_name):
def check_sum_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():

@@ -144,9 +141,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});"
sum_sql = f"select sum({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]

@@ -157,16 +154,16 @@ class TDTestCase:
tdSql.query(sum_sql)
tdSql.checkData(0,0,pre_sum)
def check_sum_status(self):
def check_sum_status(self, dbname="testdb"):
# check max function work status
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tablenames.append(f"{dbname}.{table_name[0]}")
tdSql.query("desc stb1")
tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []

@@ -183,79 +180,75 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
self.check_sum_distribute_diff_vnode(colname)
else:
# self.check_sum_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
def distribute_agg_query(self, dbname="testdb"):
# basic filter
tdSql.query(" select sum(c1) from stb1 ")
tdSql.query(f"select sum(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,2592)
tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ")
tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 partition by tbname) ")
tdSql.checkData(0,0,2592)
tdSql.query(" select sum(c1) from stb1 where t1=1")
tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,54)
tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ")
tdSql.query(f"select sum(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,22224.000000000)
tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"")
tdSql.query(f"select sum(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,54)
tdSql.query("select sum(c1) from stb1 partition by tbname")
tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname")
tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
tdSql.query(f"select sum(c1) from {dbname}.stb1 union all select sum(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,2592)
tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)")
tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 union all select sum(c1) a from {dbname}.stb1)")
tdSql.checkRows(1)
tdSql.checkData(0,0,5184)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table db.tb1 using db.st tags(1) ")
tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.query("select sum(tb1.c1), sum(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,45)
tdSql.checkData(0,1,45.000000000)
# group by
tdSql.execute(" use testdb ")
tdSql.execute(f"use {dbname} ")
# partition by tbname or partition by tag
tdSql.query("select sum(c1) from stb1 partition by tbname")
tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
# nest query for support max
tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)")
tdSql.query(f"select abs(c2+2)+1 from (select sum(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,2595.000000000)
tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.query(f"select sum(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,2960.000000000)
tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.query(f"select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,2960.000000000)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1")
tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
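Both test files above follow the same refactoring pattern: each helper takes a dbname parameter (defaulting to "testdb") and every table reference is written fully qualified as {dbname}.stb1, so the checks no longer depend on an earlier `use` statement. A rough illustration of that pattern only; the helper name and query list here are invented for the example and are not taken from the repository:

```python
DEFAULT_DB = "testdb"

def build_sum_queries(dbname: str = DEFAULT_DB):
    """Build fully qualified query strings, as the refactored tests do,
    instead of relying on a prior 'use <db>' to set the current database."""
    return [
        f"select sum(c1) from {dbname}.stb1",
        f"select sum(c1) from {dbname}.stb1 partition by tbname",
        f"select sum(a) from (select sum(c1) a from {dbname}.stb1 partition by tbname)",
    ]

if __name__ == "__main__":
    for sql in build_sum_queries():
        print(sql)  # in the tests, each string would be handed to tdSql.query()
```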
@@ -90,6 +90,12 @@ python3 ./test.py -f 2-query/distribute_agg_max.py
python3 ./test.py -f 2-query/distribute_agg_max.py -R
python3 ./test.py -f 2-query/distribute_agg_min.py
python3 ./test.py -f 2-query/distribute_agg_min.py -R
python3 ./test.py -f 2-query/distribute_agg_spread.py
python3 ./test.py -f 2-query/distribute_agg_spread.py -R
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py -R
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_sum.py -R

@@ -156,9 +162,6 @@ python3 ./test.py -f 2-query/function_stateduration.py
python3 ./test.py -f 2-query/statecount.py
python3 ./test.py -f 2-query/tail.py
python3 ./test.py -f 2-query/ttl_comment.py
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_spread.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/function_null.py

@@ -195,7 +198,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3
# vnode case
# vnode case
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1

@@ -214,8 +217,8 @@ python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_query
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1
@@ -0,0 +1 @@
Subproject commit 3c7dafeea3e558968165b73bee0f51024898e3da