Merge branch '3.0' into 3.0test/jcy
commit 998d4966df
@ -1,4 +1,5 @@
---
sidebar_label: Docker
title: Get Started with TDengine in Docker
---
@ -0,0 +1,240 @@
---
sidebar_label: Installation Package
title: Install and Uninstall Using an Installation Package
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

:::info
If you would like to contribute code to TDengine or are interested in its internals, please visit the [TDengine GitHub page](https://github.com/taosdata/TDengine) to download the source code, then build and install from source.

:::

The TDengine open-source edition provides installation packages in deb and rpm formats; choose the one that matches your environment. The deb package supports Debian/Ubuntu and derivatives, and the rpm package supports CentOS/RHEL/SUSE and derivatives. A tar.gz package is also provided for enterprise users.

## Installation

<Tabs>
<TabItem value="apt-get" label="apt-get">
You can install from the official repository with apt-get.

**Add the package repository**

```
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
```

To install a beta release, add the beta repository instead:

```
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
```

**Install with apt-get**

```
sudo apt-get update
apt-cache policy tdengine
sudo apt-get install tdengine
```

:::tip
The apt-get method applies only to Debian and Ubuntu systems.
:::
</TabItem>
<TabItem label="Deb Installation" value="debinst">

1. Download the deb package from the official website, for example TDengine-server-2.4.0.7-Linux-x64.deb;
2. Go to the directory containing TDengine-server-2.4.0.7-Linux-x64.deb and run the following installation command:

```
$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
(Reading database ... 137504 files and directories currently installed.)
Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ...
TDengine is removed successfully!
Unpacking tdengine (2.4.0.7) over (2.4.0.7) ...
Setting up tdengine (2.4.0.7) ...
Start to install TDengine...

System hostname is: ubuntu-1804

Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
OR leave it blank to build one:

Enter your email address for priority support or enter empty to skip:
Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.

To configure TDengine : edit /etc/taos/taos.cfg
To start TDengine : sudo systemctl start taosd
To access TDengine : taos -h ubuntu-1804 to login into TDengine server


TDengine is installed successfully!
```

</TabItem>

<TabItem label="RPM Installation" value="rpminst">

1. Download the rpm package from the official website, for example TDengine-server-2.4.0.7-Linux-x64.rpm;
2. Go to the directory containing TDengine-server-2.4.0.7-Linux-x64.rpm and run the following installation command:

```
$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
Preparing... ################################# [100%]
Updating / installing...
1:tdengine-2.4.0.7-3 ################################# [100%]
Start to install TDengine...

System hostname is: centos7

Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
OR leave it blank to build one:

Enter your email address for priority support or enter empty to skip:

Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service.

To configure TDengine : edit /etc/taos/taos.cfg
To start TDengine : sudo systemctl start taosd
To access TDengine : taos -h centos7 to login into TDengine server


TDengine is installed successfully!
```

</TabItem>

<TabItem label="tar.gz Installation" value="tarinst">

1. Download the tar.gz package from the official website, for example TDengine-server-2.4.0.7-Linux-x64.tar.gz;
2. Go to the directory containing TDengine-server-2.4.0.7-Linux-x64.tar.gz, extract it, enter the subdirectory, and run the install.sh script inside:

```
$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
TDengine-enterprise-server-2.4.0.7/
TDengine-enterprise-server-2.4.0.7/driver/
TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt
TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7
TDengine-enterprise-server-2.4.0.7/install.sh
TDengine-enterprise-server-2.4.0.7/examples/
...

$ ll
total 43816
drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./
drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../
drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/
-rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz

$ cd TDengine-enterprise-server-2.4.0.7/

$ ll
total 40784
drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./
drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../
drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/
drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/
-rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh*
-rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz

$ sudo ./install.sh

Start to update TDengine...
Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
Nginx for TDengine is updated successfully!

To configure TDengine : edit /etc/taos/taos.cfg
To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml
To start TDengine : sudo systemctl start taosd
To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060

TDengine is updated successfully!
Install taoskeeper as a standalone service
taoskeeper is installed, enable it by `systemctl enable taoskeeper`
```

:::info
While it runs, the install.sh script prompts for some configuration through an interactive command-line interface. For a non-interactive installation, run install.sh with the -e no parameter. Run `./install.sh -h` for a detailed description of all parameters.

:::
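For example, a non-interactive run might look like the following minimal sketch, using only the flags mentioned above:

```bash
# Install without interactive prompts:
sudo ./install.sh -e no

# List all supported parameters:
./install.sh -h
```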

</TabItem>
</Tabs>

:::note
When installing the first node, enter nothing at the "Enter FQDN:" prompt. Only when installing the second or a later node do you need to enter the FQDN of any reachable node already in the cluster so the new node can join it. You can also leave it blank and instead configure it in the new node's configuration file before the node starts.

:::
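If you take the configuration-file route, the cluster entry point goes into the new node's taos.cfg; a minimal sketch (the hostname below is a placeholder):

```
# /etc/taos/taos.cfg on the new node: point firstEp at an existing cluster node
firstEp    h1.taosdata.com:6030
```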

## Uninstall

<Tabs>
<TabItem label="apt-get Uninstall" value="aptremove">

Content TBD

</TabItem>
<TabItem label="Deb Uninstall" value="debuninst">

Run the following command to uninstall:

```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (2.4.0.7) ...
TDengine is removed successfully!

```

</TabItem>

<TabItem label="RPM Uninstall" value="rpmuninst">

Run the following command to uninstall:

```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```

</TabItem>

<TabItem label="tar.gz Uninstall" value="taruninst">

Run the following command to uninstall:

```
$ rmtaos
Nginx for TDengine is running, stopping it...
TDengine is removed successfully!

taosKeeper is removed successfully!
```

</TabItem>
</Tabs>

:::info

- TDengine provides several package formats, but do not mix the tar.gz package with the deb or rpm package on one system; they interfere with each other and cause problems.

- For a deb installation, if part of the installation directory has been deleted by hand, uninstalling or reinstalling may fail. In that case, clear the TDengine package information by running:

```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```

Then reinstall.

- For an rpm installation, if part of the installation directory has been deleted by hand, uninstalling or reinstalling may fail. In that case, clear the TDengine package information by running:

```
$ sudo rpm -e --noscripts tdengine
```

Then reinstall.

:::
@ -0,0 +1,135 @@
---
sidebar_label: Get Started
title: Get Started with TDengine
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import PkgInstall from "./\_pkg_install.mdx";
import AptGetInstall from "./\_apt_get_install.mdx";

## Start

After installation, start the TDengine server process with the `systemctl` command:

```bash
systemctl start taosd
```

Check whether the service is working properly:

```bash
systemctl status taosd
```

If the server process is active, the status command shows:

```
Active: active (running)
```

If the background server process is stopped, it shows:

```
Active: inactive (dead)
```

If the TDengine service is working properly, you can access and experience TDengine through its command-line program `taos`.

Summary of `systemctl` commands:

- Start the service: `systemctl start taosd`

- Stop the service: `systemctl stop taosd`

- Restart the service: `systemctl restart taosd`

- Check the service status: `systemctl status taosd`

:::info

- The `systemctl` command requires _root_ privileges; if you are not the _root_ user, prefix the commands with `sudo`.
- `systemctl stop taosd` does not stop the TDengine service immediately; it waits until necessary data has been flushed to disk. With large data volumes, this can take quite a while.
- If the system does not support `systemd`, you can also start the TDengine service by running `/usr/local/taos/bin/taosd` manually.

:::
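On such systems, a minimal way to start the server in the background might look like this sketch (adjust paths and logging to taste):

```bash
# Start taosd manually when systemd is unavailable:
nohup /usr/local/taos/bin/taosd > /dev/null 2>&1 &
```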

## TDengine Command Line (CLI)

To make it easy to check the status of TDengine and run ad-hoc queries against databases, TDengine provides a command-line application, taos (hereafter the TDengine CLI). To enter the TDengine command line, simply run `taos` in a Linux terminal where TDengine is installed.

```bash
taos
```

If the connection succeeds, a welcome message and version information are printed; if it fails, an error message is printed (see the [FAQ](/train-faq/faq) for troubleshooting connection failures). The TDengine CLI prompt looks like this:

```cmd
taos>
```

In the TDengine CLI, you can use SQL commands to create and drop databases and tables, and to insert into and query databases. SQL statements entered in the terminal must end with a semicolon to run. For example:

```sql
create database demo;
use demo;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
ts | speed |
========================================
2019-07-15 00:00:00.000 | 10 |
2019-07-15 01:00:00.000 | 20 |
Query OK, 2 row(s) in set (0.003128s)
```

Besides executing SQL statements, system administrators can use the TDengine CLI to check the system's running status and add or remove user accounts. The TDengine CLI, together with the client driver, can also be installed standalone and run on Linux or Windows machines; see [here](../reference/taos-shell/) for more details.

## Experience Write Speed with taosBenchmark

With the TDengine service started, run `taosBenchmark` (formerly named `taosdemo`) in a Linux terminal:

```bash
taosBenchmark
```

This command automatically creates a supertable meters in the database test, containing 10,000 tables named "d0" through "d9999". Each table has 10,000 rows, and each row has four fields (ts, current, voltage, phase), with timestamps from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId: groupId is set to 1 through 10, and location is set to "California.SanFrancisco" or "California.LosAngeles".

This command finishes inserting 100 million rows quickly. The exact time depends on the hardware; even an ordinary PC server often needs only ten-odd seconds.

taosBenchmark itself has many options for configuring the number of tables, the number of rows, and more; you can experiment with different settings. Run `taosBenchmark --help` for the full list, and see [How to use taosBenchmark to test the performance of TDengine](https://www.taosdata.com/2021/10/09/3111.html) for detailed usage.
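For instance, a smaller run might look like the following. The `-t` and `-n` short options (table count and rows per table) are assumptions here; confirm them against `taosBenchmark --help`:

```bash
# Create 1,000 tables with 10,000 rows each instead of the defaults:
taosBenchmark -t 1000 -n 10000
```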

## Experience Query Speed with the TDengine CLI

After inserting data with taosBenchmark as above, you can enter queries in the TDengine CLI to experience the query speed.

Count the total rows under the supertable:

```sql
taos> select count(*) from test.meters;
```

Compute the average, maximum, and minimum over the 100 million rows:

```sql
taos> select avg(current), max(voltage), min(phase) from test.meters;
```

Count the rows where location = "California.SanFrancisco":

```sql
taos> select count(*) from test.meters where location="California.SanFrancisco";
```

Compute the average, maximum, and minimum over all rows where groupId = 10:

```sql
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
```

Aggregate the average, maximum, and minimum for table d10 in 10-second windows:

```sql
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```
@ -1,173 +1,14 @@
---
title: Get Started
description: 'Quickly install TDengine from Docker, an installation package, or apt-get, and experience TDengine through the command-line program TDengine CLI and the taosdemo tool'
description: 'Quickly set up a TDengine environment and experience its efficient writes and queries'
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import PkgInstall from "./\_pkg_install.mdx";
import AptGetInstall from "./\_apt_get_install.mdx";

## Installation
This chapter describes how to quickly set up a TDengine environment with Docker or an installation package and experience its efficient writes and queries.

The full TDengine package includes the server (taosd), taosAdapter for integrating with third-party systems and providing a RESTful interface, the client driver (taosc), the command-line program (CLI, taos), and several tools. In the 2.X releases, the server taosd and taosAdapter can be installed and run only on Linux; support for Windows, macOS, and other systems will follow. The client driver taosc and the TDengine CLI can be installed and run on Windows or Linux. Besides connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter). In versions before 2.4, however, there was no taosAdapter, and the RESTful interface was served by the HTTP service built into taosd.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
```

TDengine supports the X64/ARM64/MIPS64/Alpha64 hardware platforms; support for ARM32, RISC-V, and other CPU architectures will follow.

<Tabs defaultValue="apt-get">
<TabItem value="docker" label="Docker">
If Docker is already installed, just run the following command:

```shell
docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
```

Confirm that the container is up and running properly:

```shell
docker ps
```

Enter the container and run bash:

```shell
docker exec -it <container name> bash
```

You can then run Linux commands and access TDengine.

For details, see [Get Started with TDengine in Docker](/train-faq/docker).

:::info
Starting with 2.4.0.10, besides taosd the Docker image also contains taos, taosAdapter, taosdump, taosBenchmark, the TDinsight installation script, and sample code. When the Docker container starts, taosAdapter and taosd start together, enabling RESTful access.

:::

</TabItem>
<TabItem value="apt-get" label="apt-get">
<AptGetInstall />
</TabItem>
<TabItem value="pkg" label="Installation Package">
<PkgInstall />
</TabItem>
<TabItem value="src" label="Source Code">

If you would like to contribute code to TDengine or are interested in its internals, please visit the [TDengine GitHub page](https://github.com/taosdata/TDengine) to download the source code, then build and install from source.

To download other components, the latest beta, or installation packages of earlier versions, click [here](https://www.taosdata.com/cn/all-downloads/).

</TabItem>
</Tabs>

## Start

After installation, start the TDengine server process with the `systemctl` command:

```bash
systemctl start taosd
```

Check whether the service is working properly:

```bash
systemctl status taosd
```

If the TDengine service is working properly, you can access and experience TDengine through its command-line program `taos`.

:::info

- The `systemctl` command requires _root_ privileges; if you are not the _root_ user, prefix the commands with `sudo`.
- To gather feedback and improve the product, TDengine collects basic usage information; you can turn this off by setting the `telemetryReporting` parameter in the system configuration file taos.cfg to 0.
- TDengine uses the FQDN (usually just the hostname) as a node's ID. To keep things working, configure the FQDN on the server running taosd, and configure DNS or the hosts file on the machines running the TDengine CLI or applications so the FQDN can be resolved.
- `systemctl stop taosd` does not stop the TDengine service immediately; it waits until necessary data has been flushed to disk. With large data volumes, this can take quite a while.

TDengine can be installed on Linux systems that use [`systemd`](https://en.wikipedia.org/wiki/Systemd) for process management; use `which systemctl` to check whether the `systemd` package is present:

```bash
which systemctl
```

If the system does not support `systemd`, you can also start the TDengine service by running `/usr/local/taos/bin/taosd` manually.

:::

## TDengine Command Line (CLI)

To make it easy to check the status of TDengine and run ad-hoc queries against databases, TDengine provides a command-line application, taos (hereafter the TDengine CLI). To enter the TDengine command line, simply run `taos` in a Linux terminal where TDengine is installed.

```bash
taos
```

If the connection succeeds, a welcome message and version information are printed; if it fails, an error message is printed (see the [FAQ](/train-faq/faq) for troubleshooting connection failures). The TDengine CLI prompt looks like this:

```cmd
taos>
```

In the TDengine CLI, you can use SQL commands to create and drop databases and tables, and to insert into and query databases. SQL statements entered in the terminal must end with a semicolon to run. For example:

```sql
create database demo;
use demo;
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
ts | speed |
========================================
2019-07-15 00:00:00.000 | 10 |
2019-07-15 01:00:00.000 | 20 |
Query OK, 2 row(s) in set (0.003128s)
```

Besides executing SQL statements, system administrators can use the TDengine CLI to check the system's running status and add or remove user accounts. The TDengine CLI, together with the client driver, can also be installed standalone and run on Linux or Windows machines; see [here](../reference/taos-shell/) for more details.

## Experience Write Speed with taosBenchmark

With the TDengine service started, run `taosBenchmark` (formerly named `taosdemo`) in a Linux terminal:

```bash
taosBenchmark
```

This command automatically creates a supertable meters in the database test, containing 10,000 tables named "d0" through "d9999". Each table has 10,000 rows, and each row has four fields (ts, current, voltage, phase), with timestamps from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId: groupId is set to 1 through 10, and location is set to "California.SanFrancisco" or "California.LosAngeles".

This command finishes inserting 100 million rows quickly. The exact time depends on the hardware; even an ordinary PC server often needs only ten-odd seconds.

taosBenchmark itself has many options for configuring the number of tables, the number of rows, and more; you can experiment with different settings. Run `taosBenchmark --help` for the full list, and see [How to use taosBenchmark to test the performance of TDengine](https://www.taosdata.com/2021/10/09/3111.html) for detailed usage.

## Experience Query Speed with the TDengine CLI

After inserting data with taosBenchmark as above, you can enter queries in the TDengine CLI to experience the query speed.

Count the total rows under the supertable:

```sql
taos> select count(*) from test.meters;
```

Compute the average, maximum, and minimum over the 100 million rows:

```sql
taos> select avg(current), max(voltage), min(phase) from test.meters;
```

Count the rows where location = "California.SanFrancisco":

```sql
taos> select count(*) from test.meters where location="California.SanFrancisco";
```

Compute the average, maximum, and minimum over all rows where groupId = 10:

```sql
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
```

Aggregate the average, maximum, and minimum for table d10 in 10-second windows:

```sql
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```

<DocCardList items={useCurrentSidebarCategory().items}/>
@ -1,105 +0,0 @@
---
title: Data Node Management
---

The sections above describe how to build a cluster from scratch. Once the cluster is up, you can check the status of its data nodes at any time, add new data nodes to scale out, remove data nodes, and even manually balance load between data nodes.

:::note

All the commands below require logging in to the TDengine system first; use root privileges when necessary.

:::

## View Data Nodes

Launch the TDengine CLI program taos and run:

```sql
SHOW DNODES;
```

This lists all dnodes in the cluster, with each dnode's ID, end_point (fqdn:port), status (ready, offline, etc.), number of vnodes, number of unused vnodes, and so on. Run it after adding or removing a data node to check the result.

The output looks like the following (for reference only; it depends on the actual cluster configuration):

```
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | trd01:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
Query OK, 1 rows affected (0.006684s)
```

## View Virtual Node Groups

To make full use of multi-core hardware and provide horizontal scalability, data must be sharded. TDengine therefore splits a DB's data into multiple shards stored in multiple vnodes. These vnodes may be distributed across multiple dnodes, which is what achieves horizontal scale-out. A vnode belongs to exactly one DB, but a DB can have multiple vnodes. The mnode assigns vnodes to dnodes automatically based on current system resources, with no manual intervention needed.

Launch the CLI program taos and run:

```sql
USE SOME_DATABASE;
SHOW VGROUPS;
```

The output looks like the following (for reference only; it depends on the actual cluster configuration):

```
taos> use db;
Database changed.

taos> show vgroups;
vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | status | nfiles | file_size | tsma |
================================================================================================================================================================================================
2 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
3 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
4 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
Query OK, 8 row(s) in set (0.001154s)
```

## Add Data Nodes

Launch the CLI program taos and run:

```sql
CREATE DNODE "fqdn:port";
```

This adds the new data node's End Point to the cluster's EP list. "fqdn:port" must be enclosed in double quotes, or the command fails. A data node's externally served fqdn and port can be configured in taos.cfg; by default they are obtained automatically. (Automatically obtaining the FQDN is strongly discouraged, as the generated End Point may not be what you expect.)

Then start the taosd process on the newly added data node and check its status with taos:

```
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | localhost:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
2 | localhost:7030 | 0 | 1024 | ready | 2022-07-15 16:56:13.670 | |
Query OK, 2 rows affected (0.007031s)
```

Here you can see that both dnodes are in the ready state.

## Remove Data Nodes

First stop the taosd process on the data node to be removed, then launch the CLI program taos and run:

```sql
DROP DNODE "fqdn:port";
```

or

```sql
DROP DNODE dnodeId;
```

A node can be identified either by "fqdn:port" or by its dnodeID: fqdn is the FQDN of the node being removed and port is its externally served port; the dnodeID can be obtained with SHOW DNODES.

:::warning

Once a data node is dropped, it cannot rejoin the cluster; it must be redeployed (with its data folder cleared). Before the `drop dnode` operation completes, the cluster migrates the dnode's data away.
Note that `drop dnode` and stopping the taosd process are two different things; do not confuse them. Because data must be migrated before a dnode is removed, the dnode being removed must stay online; only after the removal finishes can its taosd process be stopped.
After a data node is dropped, the other nodes become aware of the removal, and no node in the cluster accepts requests from that dnodeID anymore.
dnodeIDs are assigned automatically by the cluster and cannot be specified manually; they are generated monotonically increasing and never repeat.

:::
@ -1 +0,0 @@
label: Cluster Management
@ -1,5 +1,6 @@
---
title: Deploy a Cluster
sidebar_label: Manual Deployment
title: Deploy and Manage a Cluster
---

## Prerequisites

@ -72,15 +73,16 @@ serverPort 6030
Following the steps in "Get Started", start the first data node, for example h1.taosdata.com; then run taos to start the TDengine shell and execute the command "SHOW DNODES". You will see:

```

Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.

Server is Enterprise trial Edition, ver:3.0.0.0 and will never expire.

taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
Query OK, 1 rows affected (0.007984s)

taos>
```
|

From the output above, you can see that the End Point of the data node just started is h1.taosdata.com:6030, which is the firstEp of this new cluster.

### Start Subsequent Data Nodes
### Add Data Nodes

Adding subsequent data nodes to the existing cluster takes the following steps:

@ -125,3 +127,74 @@ The firstEp parameter takes effect only when a data node joins the cluster for the first time; once joined,
If two data nodes start without the firstEp parameter configured, each runs independently. At that point, neither can be joined to the other to form a cluster, and two independent clusters cannot be merged into a new one.

:::

## View Data Nodes

Launch the TDengine CLI program taos and run:

```sql
SHOW DNODES;
```

This lists all dnodes in the cluster, with each dnode's ID, end_point (fqdn:port), status (ready, offline, etc.), number of vnodes, number of unused vnodes, and so on. Run it after adding or removing a data node to check the result.

The output looks like the following (for reference only; it depends on the actual cluster configuration):

```
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
1 | trd01:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
Query OK, 1 rows affected (0.006684s)
```

## View Virtual Node Groups

To make full use of multi-core hardware and provide horizontal scalability, data must be sharded. TDengine therefore splits a DB's data into multiple shards stored in multiple vnodes. These vnodes may be distributed across multiple dnodes, which is what achieves horizontal scale-out. A vnode belongs to exactly one DB, but a DB can have multiple vnodes. The mnode assigns vnodes to dnodes automatically based on current system resources, with no manual intervention needed.

Launch the CLI program taos and run:

```sql
USE SOME_DATABASE;
SHOW VGROUPS;
```

The output looks like the following (for reference only; it depends on the actual cluster configuration):

```
taos> use db;
Database changed.

taos> show vgroups;
vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | status | nfiles | file_size | tsma |
================================================================================================================================================================================================
2 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
3 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
4 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
Query OK, 8 row(s) in set (0.001154s)
```

## Remove Data Nodes

First stop the taosd process on the data node to be removed, then launch the CLI program taos and run:

```sql
DROP DNODE "fqdn:port";
```

or

```sql
DROP DNODE dnodeId;
```

A node can be identified either by "fqdn:port" or by its dnodeID: fqdn is the FQDN of the node being removed and port is its externally served port; the dnodeID can be obtained with SHOW DNODES.
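For example, to remove the dnode whose ID (taken from the SHOW DNODES output) is 2:

```sql
DROP DNODE 2;
```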

:::warning

Once a data node is dropped, it cannot rejoin the cluster; it must be redeployed (with its data folder cleared). Before the `drop dnode` operation completes, the cluster migrates the dnode's data away.
Note that `drop dnode` and stopping the taosd process are two different things; do not confuse them. Because data must be migrated before a dnode is removed, the dnode being removed must stay online; only after the removal finishes can its taosd process be stopped.
After a data node is dropped, the other nodes become aware of the removal, and no node in the cluster accepts requests from that dnodeID anymore.
dnodeIDs are assigned automatically by the cluster and cannot be specified manually; they are generated monotonically increasing and never repeat.

:::
@ -0,0 +1,452 @@
---
sidebar_label: Kubernetes
title: Deploy a TDengine Cluster on Kubernetes
---

## Configure the ConfigMap

Create `taoscfg.yaml` for TDengine. The settings in this file are passed into the TDengine image as environment variables; updating this configuration restarts all TDengine pods.

```yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: taoscfg
  labels:
    app: tdengine
data:
  CLUSTER: "1"
  TAOS_KEEP: "3650"
  TAOS_DEBUG_FLAG: "135"
```

## Configure the Service

Create a service configuration file `taosd-service.yaml`. The service name `metadata.name` (here "taosd") is used in the next step. Add all the ports TDengine uses:

```yaml
---
apiVersion: v1
kind: Service
metadata:
  name: "taosd"
  labels:
    app: "tdengine"
spec:
  ports:
    - name: tcp6030
      protocol: "TCP"
      port: 6030
    - name: tcp6035
      protocol: "TCP"
      port: 6035
    - name: tcp6041
      protocol: "TCP"
      port: 6041
    - name: udp6030
      protocol: "UDP"
      port: 6030
    - name: udp6031
      protocol: "UDP"
      port: 6031
    - name: udp6032
      protocol: "UDP"
      port: 6032
    - name: udp6033
      protocol: "UDP"
      port: 6033
    - name: udp6034
      protocol: "UDP"
      port: 6034
    - name: udp6035
      protocol: "UDP"
      port: 6035
    - name: udp6036
      protocol: "UDP"
      port: 6036
    - name: udp6037
      protocol: "UDP"
      port: 6037
    - name: udp6038
      protocol: "UDP"
      port: 6038
    - name: udp6039
      protocol: "UDP"
      port: 6039
    - name: udp6040
      protocol: "UDP"
      port: 6040
  selector:
    app: "tdengine"
```

## StatefulSet

Following Kubernetes' guidance on workload types, we use a StatefulSet as the service type for TDengine. Create the file `tdengine.yaml`:

```yaml
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: "tdengine"
  labels:
    app: "tdengine"
spec:
  serviceName: "taosd"
  replicas: 2
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: "tdengine"
  template:
    metadata:
      name: "tdengine"
      labels:
        app: "tdengine"
    spec:
      containers:
        - name: "tdengine"
          image: "zitsen/taosd:develop"
          imagePullPolicy: "Always"
          envFrom:
            - configMapRef:
                name: taoscfg
          ports:
            - name: tcp6030
              protocol: "TCP"
              containerPort: 6030
            - name: tcp6035
              protocol: "TCP"
              containerPort: 6035
            - name: tcp6041
              protocol: "TCP"
              containerPort: 6041
            - name: udp6030
              protocol: "UDP"
              containerPort: 6030
            - name: udp6031
              protocol: "UDP"
              containerPort: 6031
            - name: udp6032
              protocol: "UDP"
              containerPort: 6032
            - name: udp6033
              protocol: "UDP"
              containerPort: 6033
            - name: udp6034
              protocol: "UDP"
              containerPort: 6034
            - name: udp6035
              protocol: "UDP"
              containerPort: 6035
            - name: udp6036
              protocol: "UDP"
              containerPort: 6036
            - name: udp6037
              protocol: "UDP"
              containerPort: 6037
            - name: udp6038
              protocol: "UDP"
              containerPort: 6038
            - name: udp6039
              protocol: "UDP"
              containerPort: 6039
            - name: udp6040
              protocol: "UDP"
              containerPort: 6040
          env:
            # POD_NAME for FQDN config
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # SERVICE_NAME and NAMESPACE for FQDN resolution
            - name: SERVICE_NAME
              value: "taosd"
            - name: STS_NAME
              value: "tdengine"
            - name: STS_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # TZ for timezone settings; we recommend always setting it.
            - name: TZ
              value: "Asia/Shanghai"
            # Variables with the TAOS_ prefix are written into taos.cfg with the prefix stripped and the name camelCased.
            - name: TAOS_SERVER_PORT
              value: "6030"
            # Must be set if you want a cluster.
            - name: TAOS_FIRST_EP
              value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
            # TAOS_FQDN should always be set in a k8s environment.
            - name: TAOS_FQDN
              value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
          volumeMounts:
            - name: taosdata
              mountPath: /var/lib/taos
          readinessProbe:
            exec:
              command:
                - taos
                - -s
                - "show mnodes"
            initialDelaySeconds: 5
            timeoutSeconds: 5000
          livenessProbe:
            tcpSocket:
              port: 6030
            initialDelaySeconds: 15
            periodSeconds: 20
  volumeClaimTemplates:
    - metadata:
        name: taosdata
      spec:
        accessModes:
          - "ReadWriteOnce"
        storageClassName: "csi-rbd-sc"
        resources:
          requests:
            storage: "10Gi"
```

## Start the Cluster

Apply the three files above to the Kubernetes cluster:

```bash
kubectl apply -f taoscfg.yaml
kubectl apply -f taosd-service.yaml
kubectl apply -f tdengine.yaml
```

This configuration creates a two-node TDengine cluster whose dnodes are configured automatically. Use the `show dnodes` command to view the current cluster nodes:

```bash
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
kubectl exec -i -t tdengine-1 -- taos -s "show dnodes"
```

The output is as follows:

```
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 17:13:24.181 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 17:14:09.257 | |
Query OK, 2 row(s) in set (0.000997s)
```

## Scale Out the Cluster

A TDengine cluster supports automatic scale-out:

```bash
kubectl scale statefulsets tdengine --replicas=4
```

The `--replicas=4` argument in the command above scales the TDengine cluster out to four nodes. After running it, first check the pod status:

```bash
kubectl get pods -l app=tdengine
```

The output is as follows:

```
NAME READY STATUS RESTARTS AGE
tdengine-0 1/1 Running 0 161m
tdengine-1 1/1 Running 0 161m
tdengine-2 1/1 Running 0 32m
tdengine-3 1/1 Running 0 32m
```

At this point the pods are still in the Running state; the dnodes in the TDengine cluster become visible only after the pods reach `ready`:

```bash
kubectl exec -i -t tdengine-0 -- taos -s "show dnodes"
```

The dnode list of the scaled-out four-node TDengine cluster:

```
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 14:07:27.078 | |
4 | tdengine-3.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 14:07:48.362 | |
Query OK, 4 row(s) in set (0.001293s)
```

## Scale In the Cluster

Scaling in TDengine is not automated; here we scale a three-node cluster down to two nodes.

First, confirm that the three-node TDengine cluster works properly by checking the dnode status in the TDengine CLI:

```bash
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:28:49.787 | |
Query OK, 3 row(s) in set (0.001101s)
```

To scale in safely, first remove the node from the dnode list, i.e. from the cluster:

```bash
kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 'tdengine-2.taosd.default.svc.cluster.local:6030'"
```

After confirming the removal with the `show dnodes` command, remove the corresponding pod:

```bash
kubectl scale statefulsets tdengine --replicas=2
```

The last pod is deleted. Check the cluster state with `kubectl get pods -l app=tdengine`:

```
NAME READY STATUS RESTARTS AGE
tdengine-0 1/1 Running 0 3h40m
tdengine-1 1/1 Running 0 3h40m
```

After the pod is deleted, the PVC must be deleted manually; otherwise, the next scale-out reuses the old data and the node fails to join the cluster properly.

```bash
kubectl delete pvc taosdata-tdengine-2
```

The cluster is now in a safe state and can be scaled out again when needed:

```bash
kubectl scale statefulsets tdengine --replicas=3
```

The `show dnodes` output is as follows:

```
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | |
4 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:40:49.177 | |
```

## Delete the Cluster

To remove a TDengine cluster completely, clean up the statefulset, svc, configmap, and pvc respectively:

```bash
kubectl delete statefulset -l app=tdengine
kubectl delete svc -l app=tdengine
kubectl delete pvc -l app=tdengine
kubectl delete configmap taoscfg
```

## Common Errors

### Error 1

If you scale out to four nodes and then scale in to two, the deleted pods go into the offline state:

```
Welcome to the TDengine shell from Linux, Client Version:2.1.1.0
Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.

taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | |
2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | |
3 | tdengine-2.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:27.078 | status msg timeout |
4 | tdengine-3.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:48.362 | status msg timeout |
Query OK, 4 row(s) in set (0.001236s)
```

However, `drop dnode` will not behave as expected, and after the next cluster restart all dnodes will be stuck in the dropping state and unable to start.

### Error 2

A TDengine cluster retains the replica parameter; if the number of nodes after scale-in is smaller than this value, the cluster becomes unusable.

Create a database with the replica parameter set to 2 and insert some data:

```bash
kubectl exec -i -t tdengine-0 -- \
  taos -s \
  "create database if not exists test replica 2;
   use test;
   create table if not exists t1(ts timestamp, n int);
   insert into t1 values(now, 1)(now+1s, 2);"
```

Scale in to a single node:

```bash
kubectl scale statefulsets tdengine --replicas=1
```

All database operations in the taos shell will then fail:

```
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 2 | 40 | ready | any | 2021-06-01 15:55:52.562 | |
2 | tdengine-1.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 15:56:07.212 | status msg timeout |
Query OK, 2 row(s) in set (0.000845s)

taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | tdengine-0.taosd.default.sv... | 2 | 40 | ready | any | 2021-06-01 15:55:52.562 | |
2 | tdengine-1.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 15:56:07.212 | status msg timeout |
Query OK, 2 row(s) in set (0.000837s)

taos> use test;
Database changed.

taos> insert into t1 values(now, 3);

DB error: Unable to resolve FQDN (0.013874s)
```
@ -0,0 +1,434 @@
---
sidebar_label: Helm
title: Deploy a TDengine Cluster with Helm
---

Helm is the package manager for Kubernetes. Deploying a TDengine cluster with Kubernetes, as in the previous section, is already simple enough, but Helm offers even more power.

## Install Helm

```bash
curl -fsSL -o get_helm.sh \
  https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod +x get_helm.sh
./get_helm.sh
```

Helm operates Kubernetes through kubectl and the kubeconfig configuration; refer to the Rancher guide to installing Kubernetes to set it up.
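Before running helm, you may want to verify that kubectl can reach the cluster; a minimal sanity check might look like this:

```bash
# Confirm kubectl connectivity and the helm installation:
kubectl cluster-info
helm version
```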

## Install the TDengine Chart

The TDengine chart has not yet been published to a Helm repository; for now, download it directly from GitHub:

```bash
wget https://github.com/taosdata/TDengine-Operator/raw/main/helm/tdengine-0.3.0.tgz
```

Get the current Kubernetes storage classes:

```bash
kubectl get storageclass
```

On minikube the default is standard.

Then install with helm:

```bash
helm install tdengine tdengine-0.3.0.tgz \
  --set storage.className=<your storage class name>
```

In a minikube environment, you can set smaller sizes to avoid exceeding the available disk space:

```bash
helm install tdengine tdengine-0.3.0.tgz \
  --set storage.className=standard \
  --set storage.dataSize=2Gi \
  --set storage.logSize=10Mi
```

After a successful deployment, the TDengine chart prints instructions for operating TDengine:

```bash
export POD_NAME=$(kubectl get pods --namespace default \
  -l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=tdengine" \
  -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
kubectl --namespace default exec -it $POD_NAME -- taos
```

You can create a table to test:

```bash
kubectl --namespace default exec $POD_NAME -- \
  taos -s "create database test;
    use test;
    create table t1 (ts timestamp, n int);
    insert into t1 values(now, 1)(now + 1s, 2);
    select * from t1;"
```

## Configure Values

TDengine supports customization through `values.yaml`.

Use `helm show values` to get the full list of values supported by the TDengine chart:

```bash
helm show values tdengine-0.3.0.tgz
```

You can save the result as values.yaml and edit its parameters, such as the replica count, storage class name, capacity, and TDengine configuration; then use the file to install the TDengine cluster, as shown below.
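For example, saving the defaults for editing might look like this (the output file name is arbitrary):

```bash
# Save the chart's default values for editing:
helm show values tdengine-0.3.0.tgz > values.yaml
```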

```bash
helm install tdengine tdengine-0.3.0.tgz -f values.yaml
```

The full set of parameters is as follows:

```yaml
# Default values for tdengine.
# This is a YAML-formatted file.
# Declare variables to be passed into helm templates.

replicaCount: 1

image:
  prefix: tdengine/tdengine
  #pullPolicy: Always
  # Overrides the image tag whose default is the chart appVersion.
  #tag: "2.4.0.5"

service:
  # ClusterIP is the default service type, use NodeIP only if you know what you are doing.
  type: ClusterIP
  ports:
    # TCP range required
    tcp:
      [
        6030,
        6031,
        6032,
        6033,
        6034,
        6035,
        6036,
        6037,
        6038,
        6039,
        6040,
        6041,
        6042,
        6043,
        6044,
        6045,
        6060,
      ]
    # UDP range 6030-6039
    udp: [6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039]

arbitrator: true

# Set timezone here, not in taoscfg
timezone: "Asia/Shanghai"

resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

storage:
  # Set storageClassName for pvc. K8s uses the default storage class if not set.
  #
  className: ""
  dataSize: "100Gi"
  logSize: "10Gi"

nodeSelectors:
  taosd:
    # node selectors

clusterDomainSuffix: ""
# Config settings in taos.cfg file.
#
# The helm/k8s support will use environment variables for taos.cfg,
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`
# to a camelCase taos config variable `debugFlag`.
#
# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
#
# Note:
# 1. firstEp/secondEp: should not be set here; they are auto-generated at scale-up.
# 2. serverPort: should not be set; the default 6030 is used in many places.
# 3. fqdn: will be auto-generated in Kubernetes; the user should not care about it.
# 4. role: currently not supported - every node is able to be mnode and vnode.
#
# Btw, keep quotes "" around the value like below, even if the value is a number.
taoscfg:
  # number of replications, for cluster only
  TAOS_REPLICA: "1"

  # number of management nodes in the system
  TAOS_NUM_OF_MNODES: "1"

  # number of days per DB file
  # TAOS_DAYS: "10"

  # number of days to keep DB file, default is 10 years.
  #TAOS_KEEP: "3650"

  # cache block size (Mbyte)
  #TAOS_CACHE: "16"

  # number of cache blocks per vnode
  #TAOS_BLOCKS: "6"

  # minimum rows of records in file block
  #TAOS_MIN_ROWS: "100"

  # maximum rows of records in file block
  #TAOS_MAX_ROWS: "4096"

  #
  # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
  #TAOS_NUM_OF_THREADS_PER_CORE: "1.0"

  #
  # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
  #TAOS_NUM_OF_COMMIT_THREADS: "4"

  #
  # TAOS_RATIO_OF_QUERY_CORES:
  # the proportion of total CPU cores available for query processing
  # 2.0: the query threads will be set to double of the CPU cores.
  # 1.0: all CPU cores are available for query processing [default].
  # 0.5: only half of the CPU cores are available for query.
  # 0.0: only one core available.
  #TAOS_RATIO_OF_QUERY_CORES: "1.0"

  #
  # TAOS_KEEP_COLUMN_NAME:
  # the last_row/first/last aggregator will not change the original column name in the result fields
  #TAOS_KEEP_COLUMN_NAME: "0"

  # enable/disable backing up the vnode directory when removing a vnode
  #TAOS_VNODE_BAK: "1"

  # enable/disable installation / usage report
  #TAOS_TELEMETRY_REPORTING: "1"

  # enable/disable load balancing
  #TAOS_BALANCE: "1"

  # max timer control blocks
  #TAOS_MAX_TMR_CTRL: "512"

  # time interval of system monitor, seconds
  #TAOS_MONITOR_INTERVAL: "30"

  # number of seconds allowed for a dnode to be offline, for cluster only
  #TAOS_OFFLINE_THRESHOLD: "8640000"

  # RPC re-try timer, millisecond
  #TAOS_RPC_TIMER: "1000"

  # RPC maximum time for ack, seconds.
  #TAOS_RPC_MAX_TIME: "600"

  # time interval of dnode status reporting to mnode, seconds, for cluster only
  #TAOS_STATUS_INTERVAL: "1"

  # time interval of heart beat from shell to dnode, seconds
  #TAOS_SHELL_ACTIVITY_TIMER: "3"

  # minimum sliding window time, milli-second
  #TAOS_MIN_SLIDING_TIME: "10"

  # minimum time window, milli-second
  #TAOS_MIN_INTERVAL_TIME: "10"

  # maximum delay before launching a stream computation, milli-second
  #TAOS_MAX_STREAM_COMP_DELAY: "20000"

  # maximum delay before launching a stream computation for the first time, milli-second
  #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"

  # retry delay when a stream computation fails, milli-second
  #TAOS_RETRY_STREAM_COMP_DELAY: "10"

  # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
  #TAOS_STREAM_COMP_DELAY_RATIO: "0.1"

  # max number of vgroups per db, 0 means configured automatically
  #TAOS_MAX_VGROUPS_PER_DB: "0"

  # max number of tables per vnode
  #TAOS_MAX_TABLES_PER_VNODE: "1000000"

  # the number of acknowledgments required for successful data writing
  #TAOS_QUORUM: "1"

  # enable/disable compression
  #TAOS_COMP: "2"

  # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fsync; 2: write wal, and call fsync
  #TAOS_WAL_LEVEL: "1"

  # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
  #TAOS_FSYNC: "3000"

  # the compressed rpc message, option:
  #  -1 (no compression)
  #   0 (all message compressed),
  # > 0 (rpc message body which larger than this value will be compressed)
  #TAOS_COMPRESS_MSG_SIZE: "-1"

  # max length of an SQL
  #TAOS_MAX_SQL_LENGTH: "1048576"

  # the maximum number of records allowed for super table time sorting
  #TAOS_MAX_NUM_OF_ORDERED_RES: "100000"

  # max number of connections allowed in dnode
  #TAOS_MAX_SHELL_CONNS: "5000"

  # max number of connections allowed in client
  #TAOS_MAX_CONNECTIONS: "5000"

  # stop writing logs when the disk size of the log folder is less than this value
  #TAOS_MINIMAL_LOG_DIR_G_B: "0.1"

  # stop writing temporary files when the disk size of the tmp folder is less than this value
  #TAOS_MINIMAL_TMP_DIR_G_B: "0.1"

  # if disk free space is less than this value, taosd service exits directly within startup process
  #TAOS_MINIMAL_DATA_DIR_G_B: "0.1"

  # one mnode is equal to the number of vnodes consumed
  #TAOS_MNODE_EQUAL_VNODE_NUM: "4"

  # enable/disable http service
  #TAOS_HTTP: "1"

  # enable/disable system monitor
  #TAOS_MONITOR: "1"

  # enable/disable recording the SQL statements via restful interface
  #TAOS_HTTP_ENABLE_RECORD_SQL: "0"

  # number of threads used to process http requests
  #TAOS_HTTP_MAX_THREADS: "2"

  # maximum number of rows returned by the restful interface
  #TAOS_RESTFUL_ROW_LIMIT: "10240"

  # The following parameter is used to limit the maximum number of lines in log files.
  # max number of lines per log filters
  # numOfLogLines 10000000

  # enable/disable async log
  #TAOS_ASYNC_LOG: "0"

  #
  # time of keeping log files, days
  #TAOS_LOG_KEEP_DAYS: "0"

  # The following parameters are used for debug purpose only.
  # debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
  # 131: output warning and error
  # 135: output debug, warning and error
  # 143: output trace, debug, warning and error to log
  # 199: output debug, warning and error to both screen and file
  # 207: output trace, debug, warning and error to both screen and file
  #
  # debug flag for all log type, takes effect when non-zero
  #TAOS_DEBUG_FLAG: "143"

  # enable/disable recording the SQL in taos client
  #TAOS_ENABLE_RECORD_SQL: "0"

  # generate core file when service crashes
  #TAOS_ENABLE_CORE_FILE: "1"

  # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
  #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"

  # enable/disable stream (continuous query)
  #TAOS_STREAM: "1"

  # in retrieve blocking model, only 50% of query threads will be used in query processing in dnode
  #TAOS_RETRIEVE_BLOCKING_MODEL: "0"

  # the maximum allowed query buffer size in MB during query processing for each data node
  # -1 no limit (default)
  #  0 no query allowed, queries are disabled
  #TAOS_QUERY_BUFFER_SIZE: "-1"
```

## Scale Out

For scale-out, see the previous section's explanation; a few extra values need to be obtained from the helm deployment.

First, get the StatefulSet name from the deployment:

```bash
export STS_NAME=$(kubectl get statefulset \
  -l "app.kubernetes.io/name=tdengine" \
  -o jsonpath="{.items[0].metadata.name}")
```

Scaling out is extremely simple; just increase the replica count. The following command scales TDengine out to three nodes:

```bash
kubectl scale --replicas 3 statefulset/$STS_NAME
```

Use the commands `show dnodes` and `show mnodes` to check whether the scale-out succeeded.
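For example, reusing the $POD_NAME variable exported earlier:

```bash
kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
```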

## Scale In

:::warning
Scale-in has not been fully tested and may put data at risk; use it with caution.

:::

Get the list of dnodes to scale in, and drop them manually:

```bash
kubectl --namespace default exec $POD_NAME -- \
  cat /var/lib/taos/dnode/dnodeEps.json \
  | jq '.dnodeInfos[1:] |map(.dnodeFqdn + ":" + (.dnodePort|tostring)) | .[]' -r
kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes"
kubectl --namespace default exec $POD_NAME -- taos -s 'drop dnode "<your dnode in list>"'
```

## Delete the Cluster

Under Helm, cleanup becomes simple:

```bash
helm uninstall tdengine
```

However, Helm does not remove PVCs automatically; you need to get the PVCs and delete them manually.
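A sketch of that manual cleanup, assuming the chart's standard labels shown earlier:

```bash
# List PVCs left behind by the uninstalled release:
kubectl get pvc -l app.kubernetes.io/name=tdengine
# Delete them once you are sure the data is no longer needed:
kubectl delete pvc -l app.kubernetes.io/name=tdengine
```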
|
@ -0,0 +1 @@
label: Deploy a Cluster
@ -1,10 +1,10 @@
---
title: Cluster Management
title: Deploy a Cluster
---

TDengine supports clustering and provides horizontal scalability: to gain more processing capacity, simply add more nodes. TDengine uses virtual-node technology to virtualize one node into multiple virtual nodes for load balancing, and it can group virtual nodes across nodes into virtual node groups with multiple replicas to ensure high availability. TDengine's clustering capability is fully open source.

This chapter covers cluster deployment and maintenance, and how to achieve high availability and load balancing.
This chapter describes how to deploy a cluster manually on hosts, and how to deploy one with Kubernetes and Helm.

```mdx-code-block
import DocCardList from '@theme/DocCardList';
```
@ -6,199 +6,11 @@ description: Install, uninstall, start, stop, and upgrade
|
|||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。
|
||||
本节将介绍一些关于安装和卸载更深层次的内容,以及升级的注意事项。
|
||||
|
||||
## 安装
|
||||
## 安装和卸载
|
||||
|
||||
<Tabs>
|
||||
- … (removed: the step-by-step Deb, RPM, and tar.gz installation walkthroughs with their console transcripts, the note on non-interactive installs via `install.sh -e no`, the note that the FQDN prompt is left blank on the first node, the `dpkg -r tdengine` / `rpm -e tdengine` / `rmtaos` uninstall commands, and the recovery tips for mixed or partially deleted installs; all of this duplicates the new get-started/package page verbatim) …
-</Tabs>
+For installation and uninstallation, see [Installation and Uninstallation](../get-started/package).

 ## Installation directory layout

@@ -234,34 +46,6 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
 When upgrading over an existing installation, if the default configuration file (/etc/taos/taos.cfg) already exists it continues to be used; the configuration file shipped in the package is renamed to taos.cfg.orig and kept under /usr/local/taos/cfg/ as a reference sample for setting parameters. If no configuration file exists, the one shipped in the package is used.

-## Start and Stop
-
-TDengine uses the Linux systemd/systemctl/service facilities to start, stop, and restart the service. The TDengine server process is taosd; by default it starts automatically after system boot. A DBA can stop, start, or restart the service manually through systemd/systemctl/service.
-
-Taking systemctl as an example, the commands are:
-
-- Start the service: `systemctl start taosd`
-- Stop the service: `systemctl stop taosd`
-- Restart the service: `systemctl restart taosd`
-- Check the service status: `systemctl status taosd`
-
-Note: since version 2.4, TDengine includes the standalone component taosAdapter, whose service must also be started and stopped with systemctl.
-
-If the service process is active, the status command shows:
-
-```
-Active: active (running)
-```
-
-If the background service process is stopped, the status command shows:
-
-```
-Active: inactive (dead)
-```

 ## Upgrading

 Upgrading happens at two levels: upgrading the installation package, and upgrading a running instance.
@@ -50,6 +50,7 @@ bool tNameIsValid(const SName* name);
 const char* tNameGetTableName(const SName* name);

 int32_t tNameGetDbName(const SName* name, char* dst);
+const char* tNameGetDbNameP(const SName* name);

 int32_t tNameGetFullDbName(const SName* name, char* dst);
@@ -193,7 +193,7 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code);

 void destroySendMsgInfo(SMsgSendInfo* pMsgBody);

-int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo,
+int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo,
                                 bool persistHandle, void* ctx);

 /**
@@ -205,7 +205,7 @@ int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTra
  * @param pInfo
  * @return
  */
-int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo);
+int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo);

 int32_t queryBuildUseDbOutput(SUseDbOutput* pOut, SUseDbRsp* usedbRsp);
@@ -260,6 +260,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t

 #define REQUEST_TOTAL_EXEC_TIMES 2

+#define IS_SYS_DBNAME(_dbname) (((*(_dbname) == 'i') && (0 == strcmp(_dbname, TSDB_INFORMATION_SCHEMA_DB))) || ((*(_dbname) == 'p') && (0 == strcmp(_dbname, TSDB_PERFORMANCE_SCHEMA_DB))))
+
 #define qFatal(...) \
   do { \
     if (qDebugFlag & DEBUG_FATAL) { \
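The shared IS_SYS_DBNAME macro checks the first character before calling strcmp, so names that cannot possibly match a system database are rejected with a single comparison. A minimal standalone sketch of the same pattern; the TSDB_* constant values are assumptions here, taken from the two system database names TDengine documents publicly:

```c
#include <stdio.h>
#include <string.h>

/* Assumed values, for illustration only. */
#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"

/* First-character fast path: strcmp only runs when the name could match. */
#define IS_SYS_DBNAME(_dbname)                                                     \
  (((*(_dbname) == 'i') && (0 == strcmp(_dbname, TSDB_INFORMATION_SCHEMA_DB))) ||  \
   ((*(_dbname) == 'p') && (0 == strcmp(_dbname, TSDB_PERFORMANCE_SCHEMA_DB))))

int main(void) {
  const char* names[] = {"information_schema", "performance_schema", "power"};
  for (int i = 0; i < 3; ++i) {
    printf("%s -> %d\n", names[i], IS_SYS_DBNAME(names[i]));  /* 1, 1, 0 */
  }
  return 0;
}
```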
@@ -33,11 +33,12 @@ typedef struct SUpdateInfo {
   int64_t      watermark;
   TSKEY        minTS;
   SScalableBf* pCloseWinSBF;
+  SHashObj*    pMap;
 } SUpdateInfo;

 SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
 SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
-bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts);
+bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
 void updateInfoDestroy(SUpdateInfo *pInfo);
 void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
 void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
@@ -124,18 +124,16 @@ void *rpcReallocCont(void *ptr, int32_t contLen);
 // Because taosd supports multi-process mode
 // These functions should not be used on the server side
 // Please use tmsg<xx> functions, which are defined in tmsgcb.h
-void rpcSendRequest(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid);
-void rpcSendResponse(const SRpcMsg *pMsg);
-void rpcRegisterBrokenLinkArg(SRpcMsg *msg);
-void rpcReleaseHandle(void *handle, int8_t type);  // just release conn to rpc instance, no close sock
+int rpcSendRequest(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid);
+int rpcSendResponse(const SRpcMsg *pMsg);
+int rpcRegisterBrokenLinkArg(SRpcMsg *msg);
+int rpcReleaseHandle(void *handle, int8_t type);  // just release conn to rpc instance, no close sock

 // These functions will not be called in the child process
 void    rpcSendRedirectRsp(void *pConn, const SEpSet *pEpSet);
-void    rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
 int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
-void    rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
-void    rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
-void*   rpcAllocHandle();
+int     rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
+int     rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
+int     rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
+void   *rpcAllocHandle();

 #ifdef __cplusplus
 }
@@ -33,7 +33,7 @@ typedef struct {
   SDiskSize size;
 } SDiskSpace;

-bool taosCheckSystemIsSmallEnd();
+bool taosCheckSystemIsLittleEnd();
 void taosGetSystemInfo();
 int32_t taosGetEmail(char *email, int32_t maxLen);
 int32_t taosGetOsReleaseName(char *releaseName, int32_t maxLen);
@@ -72,7 +72,6 @@ typedef struct SStmtBindInfo {
 typedef struct SStmtExecInfo {
   int32_t      affectedRows;
   SRequestObj* pRequest;
-  SHashObj*    pVgHash;
   SHashObj*    pBlockHash;
   bool         autoCreateTbl;
 } SStmtExecInfo;

@@ -88,6 +87,7 @@ typedef struct SStmtSQLInfo {
   SArray*           nodeList;
   SStmtQueryResInfo queryRes;
   bool              autoCreateTbl;
+  SHashObj*         pVgHash;
 } SStmtSQLInfo;

 typedef struct STscStmt {
@@ -88,7 +88,7 @@ void closeTransporter(SAppInstInfo *pAppInfo) {
 static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
   if (NEED_REDIRECT_ERROR(code)) {
     if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
-        msgType == TDMT_SCH_MERGE_FETCH) {
+        msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_QUERY_HEARTBEAT || msgType == TDMT_SCH_DROP_TASK) {
       return false;
     }
     return true;
@@ -590,6 +590,11 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
   return code;
 }

+void freeVgList(void *list) {
+  SArray* pList = *(SArray**)list;
+  taosArrayDestroy(pList);
+}
+
 int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList) {
   SArray* pDbVgList = NULL;
   SArray* pQnodeList = NULL;

@@ -641,7 +646,7 @@ int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray*

 _return:

-  taosArrayDestroy(pDbVgList);
+  taosArrayDestroyEx(pDbVgList, freeVgList);
   taosArrayDestroy(pQnodeList);

   return code;
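The element callback here receives a pointer to the array slot, not the element itself, which is why freeVgList dereferences once with `*(SArray**)list` before destroying. A generic, self-contained sketch of this "destroy with element callback" pattern, using plain malloc/free rather than TDengine's taosArray API:

```c
#include <stdlib.h>
#include <string.h>

typedef void (*elem_free_fn)(void *slot);

typedef struct {
  void  *data;     /* elemSize * size bytes */
  size_t elemSize;
  size_t size;
} array_t;

/* Invoke the callback on the ADDRESS of each slot, then free the storage. */
static void array_destroy_ex(array_t *a, elem_free_fn fp) {
  if (a == NULL) return;
  if (fp != NULL) {
    for (size_t i = 0; i < a->size; ++i) {
      fp((char *)a->data + i * a->elemSize);
    }
  }
  free(a->data);
  free(a);
}

/* Callback for an array whose slots hold char* pointers: deref, then free. */
static void free_str_slot(void *slot) { free(*(char **)slot); }

int main(void) {
  array_t *a = malloc(sizeof *a);
  a->elemSize = sizeof(char *);
  a->size = 1;
  a->data = malloc(a->elemSize);
  ((char **)a->data)[0] = malloc(6);
  memcpy(((char **)a->data)[0], "hello", 6);
  array_destroy_ex(a, free_str_slot);  /* frees "hello", the buffer, and a */
  return 0;
}
```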
@@ -6,11 +6,16 @@
 #include "clientStmt.h"

 static int32_t stmtCreateRequest(STscStmt* pStmt) {
+  int32_t code = 0;
+
   if (pStmt->exec.pRequest == NULL) {
-    return buildRequest(pStmt->taos->id, pStmt->sql.sqlStr, pStmt->sql.sqlLen, NULL, false, &pStmt->exec.pRequest);
-  } else {
-    return TSDB_CODE_SUCCESS;
+    code = buildRequest(pStmt->taos->id, pStmt->sql.sqlStr, pStmt->sql.sqlLen, NULL, false, &pStmt->exec.pRequest);
+    if (TSDB_CODE_SUCCESS == code) {
+      pStmt->exec.pRequest->syncQuery = true;
+    }
   }
+
+  return code;
 }

 int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
@@ -155,7 +160,7 @@ int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags,
 int32_t stmtUpdateExecInfo(TAOS_STMT* stmt, SHashObj* pVgHash, SHashObj* pBlockHash, bool autoCreateTbl) {
   STscStmt* pStmt = (STscStmt*)stmt;

-  pStmt->exec.pVgHash = pVgHash;
+  pStmt->sql.pVgHash = pVgHash;
   pStmt->exec.pBlockHash = pBlockHash;
   pStmt->exec.autoCreateTbl = autoCreateTbl;

@@ -177,7 +182,7 @@ int32_t stmtUpdateInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, char
 int32_t stmtGetExecInfo(TAOS_STMT* stmt, SHashObj** pVgHash, SHashObj** pBlockHash) {
   STscStmt* pStmt = (STscStmt*)stmt;

-  *pVgHash = pStmt->exec.pVgHash;
+  *pVgHash = pStmt->sql.pVgHash;
   *pBlockHash = pStmt->exec.pBlockHash;

   return TSDB_CODE_SUCCESS;

@@ -308,6 +313,8 @@ int32_t stmtCleanSQLInfo(STscStmt* pStmt) {
   taosMemoryFree(pStmt->sql.sqlStr);
   qDestroyQuery(pStmt->sql.pQuery);
   taosArrayDestroy(pStmt->sql.nodeList);
+  taosHashCleanup(pStmt->sql.pVgHash);
+  pStmt->sql.pVgHash = NULL;

   void* pIter = taosHashIterate(pStmt->sql.pTableCache, NULL);
   while (pIter) {
@@ -340,7 +347,7 @@ int32_t stmtRebuildDataBlock(STscStmt* pStmt, STableDataBlocks* pDataBlock, STab

   STMT_ERR_RET(catalogGetTableHashVgroup(pStmt->pCatalog, &conn, &pStmt->bInfo.sname, &vgInfo));
   STMT_ERR_RET(
-      taosHashPut(pStmt->exec.pVgHash, (const char*)&vgInfo.vgId, sizeof(vgInfo.vgId), (char*)&vgInfo, sizeof(vgInfo)));
+      taosHashPut(pStmt->sql.pVgHash, (const char*)&vgInfo.vgId, sizeof(vgInfo.vgId), (char*)&vgInfo, sizeof(vgInfo)));

   STMT_ERR_RET(qRebuildStmtDataBlock(newBlock, pDataBlock, uid, vgInfo.vgId));

@@ -680,6 +687,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
     if (pStmt->sql.pQuery->haveResultSet) {
       setResSchemaInfo(&pStmt->exec.pRequest->body.resInfo, pStmt->sql.pQuery->pResSchema,
                        pStmt->sql.pQuery->numOfResCols);
+      taosMemoryFreeClear(pStmt->sql.pQuery->pResSchema);
       setResPrecision(&pStmt->exec.pRequest->body.resInfo, pStmt->sql.pQuery->precision);
     }

@@ -804,7 +812,7 @@ int stmtExec(TAOS_STMT* stmt) {
   if (STMT_TYPE_QUERY == pStmt->sql.type) {
     launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, NULL);
   } else {
-    STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->exec.pVgHash, pStmt->exec.pBlockHash));
+    STMT_ERR_RET(qBuildStmtOutput(pStmt->sql.pQuery, pStmt->sql.pVgHash, pStmt->exec.pBlockHash));
     launchQueryImpl(pStmt->exec.pRequest, pStmt->sql.pQuery, true, (autoCreateTbl ? (void**)&pRsp : NULL));
   }
@@ -847,9 +855,10 @@ _return:
 int stmtClose(TAOS_STMT* stmt) {
   STscStmt* pStmt = (STscStmt*)stmt;

-  STMT_RET(stmtCleanSQLInfo(pStmt));
+  stmtCleanSQLInfo(pStmt);
+  taosMemoryFree(stmt);
+
+  return TSDB_CODE_SUCCESS;
 }

 const char* stmtErrstr(TAOS_STMT* stmt) {
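Taken together, these stmt hunks move pVgHash from the per-execution state to the per-SQL state: the vgroup routing table is now filled and read through `pStmt->sql` so it can be reused across executions of the same prepared statement, and it is released once, in stmtCleanSQLInfo. A rough sketch of the resulting ownership split; field sets are abridged and names simplified, so this is not the real STscStmt layout:

```c
/* Sketch only: illustrates the lifetime split, not TDengine's actual types. */
typedef struct {
  struct {
    char *sqlStr;     /* prepared SQL text: lives as long as the statement   */
    void *pVgHash;    /* vgroup routing table: moved here so it survives     */
                      /* between executions and is freed once, with the SQL  */
  } sql;
  struct {
    void *pRequest;   /* request object: rebuilt for every execution         */
    void *pBlockHash; /* data blocks: rebuilt for every execution            */
  } exec;
} stmt_state_sketch;

int main(void) {
  stmt_state_sketch s = {0};
  (void)s;
  return 0;
}
```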
@@ -51,15 +51,15 @@ int32_t tsNumOfShmThreads = 1;
 int32_t tsNumOfRpcThreads = 1;
 int32_t tsNumOfCommitThreads = 2;
 int32_t tsNumOfTaskQueueThreads = 1;
-int32_t tsNumOfMnodeQueryThreads = 2;
+int32_t tsNumOfMnodeQueryThreads = 4;
 int32_t tsNumOfMnodeFetchThreads = 1;
 int32_t tsNumOfMnodeReadThreads = 1;
-int32_t tsNumOfVnodeQueryThreads = 2;
+int32_t tsNumOfVnodeQueryThreads = 4;
 int32_t tsNumOfVnodeStreamThreads = 2;
 int32_t tsNumOfVnodeFetchThreads = 4;
 int32_t tsNumOfVnodeWriteThreads = 2;
 int32_t tsNumOfVnodeSyncThreads = 2;
-int32_t tsNumOfQnodeQueryThreads = 2;
+int32_t tsNumOfQnodeQueryThreads = 4;
 int32_t tsNumOfQnodeFetchThreads = 4;
 int32_t tsNumOfSnodeSharedThreads = 2;
 int32_t tsNumOfSnodeUniqueThreads = 2;
@@ -402,16 +402,16 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
   if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1;

-  tsNumOfMnodeQueryThreads = tsNumOfCores / 8;
-  tsNumOfMnodeQueryThreads = TRANGE(tsNumOfMnodeQueryThreads, 1, 4);
+  tsNumOfMnodeQueryThreads = tsNumOfCores * 2;
+  tsNumOfMnodeQueryThreads = TRANGE(tsNumOfMnodeQueryThreads, 4, 8);
   if (cfgAddInt32(pCfg, "numOfMnodeQueryThreads", tsNumOfMnodeQueryThreads, 1, 1024, 0) != 0) return -1;

   tsNumOfMnodeReadThreads = tsNumOfCores / 8;
   tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
   if (cfgAddInt32(pCfg, "numOfMnodeReadThreads", tsNumOfMnodeReadThreads, 1, 1024, 0) != 0) return -1;

-  tsNumOfVnodeQueryThreads = tsNumOfCores / 4;
-  tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 2);
+  tsNumOfVnodeQueryThreads = tsNumOfCores * 2;
+  tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
   if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;

   tsNumOfVnodeStreamThreads = tsNumOfCores / 4;

@@ -430,8 +430,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 1);
   if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;

-  tsNumOfQnodeQueryThreads = tsNumOfCores / 2;
-  tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 1);
+  tsNumOfQnodeQueryThreads = tsNumOfCores * 2;
+  tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
   if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;

   tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
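The query-thread pools thus grow from a fraction of the core count to a multiple of it. Assuming TMAX(a, b) returns the larger value and TRANGE(v, lo, hi) clamps v into [lo, hi], which is how the call sites above read, the new sizing works out as follows on, say, an 8-core host; the macro bodies below are assumptions for illustration:

```c
#include <stdio.h>

/* Assumed semantics, matching how the call sites above use these macros. */
#define TMAX(a, b)        ((a) > (b) ? (a) : (b))
#define TRANGE(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void) {
  int tsNumOfCores = 8;

  /* mnode query threads: cores * 2, clamped into [4, 8] -> 8 */
  int mnodeQuery = TRANGE(tsNumOfCores * 2, 4, 8);

  /* vnode query threads: cores * 2, at least 4 -> 16 */
  int vnodeQuery = TMAX(tsNumOfCores * 2, 4);

  printf("mnodeQuery=%d vnodeQuery=%d\n", mnodeQuery, vnodeQuery);
  return 0;
}
```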
@@ -190,6 +190,11 @@ int32_t tNameGetDbName(const SName* name, char* dst) {
   return 0;
 }

+const char* tNameGetDbNameP(const SName* name) {
+  return &name->dbname[0];
+}
+
 int32_t tNameGetFullDbName(const SName* name, char* dst) {
   assert(name != NULL && dst != NULL);
   snprintf(dst, TSDB_DB_FNAME_LEN, "%d.%s", name->acctId, name->dbname);
@@ -158,8 +158,8 @@ static void taosCleanupArgs() {
 }

 int main(int argc, char const *argv[]) {
-  if (!taosCheckSystemIsSmallEnd()) {
-    printf("failed to start since on non-small-end machines\n");
+  if (!taosCheckSystemIsLittleEnd()) {
+    printf("failed to start since on non-little-end machines\n");
     return -1;
   }
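The rename fixes terminology: "small end" was a literal translation of 小端, while "little-endian" is the standard English term. A portable sketch of such a runtime endianness probe; this is an illustration, not necessarily how taosCheckSystemIsLittleEnd is implemented:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* On a little-endian machine, the least significant byte of the
 * 16-bit value 0x0001 is stored at the lowest address. */
static bool check_system_is_little_endian(void) {
  uint16_t probe = 1;
  return *(const uint8_t *)&probe == 1;
}

int main(void) {
  printf("little-endian: %d\n", check_system_is_little_endian());
  return 0;
}
```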
@@ -233,7 +233,6 @@ struct SVnodeCfg {
 };

 typedef struct {
-  TSKEY    lastKey;
   uint64_t uid;
   uint64_t groupId;
 } STableKeyInfo;
@@ -270,7 +270,7 @@ int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) {
       break;
     }

-    STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = id};
+    STableKeyInfo info = {.uid = id};
     taosArrayPush(list, &info);
   }
@@ -460,8 +460,6 @@ typedef struct SCtgOperation {
 #define CTG_FLAG_MAKE_STB(_isStb) (((_isStb) == 1) ? CTG_FLAG_STB : ((_isStb) == 0 ? CTG_FLAG_NOT_STB : CTG_FLAG_UNKNOWN_STB))
 #define CTG_FLAG_MATCH_STB(_flag, tbType) (CTG_FLAG_IS_UNKNOWN_STB(_flag) || (CTG_FLAG_IS_STB(_flag) && (tbType) == TSDB_SUPER_TABLE) || (CTG_FLAG_IS_NOT_STB(_flag) && (tbType) != TSDB_SUPER_TABLE))

-#define CTG_IS_SYS_DBNAME(_dbname) (((*(_dbname) == 'i') && (0 == strcmp(_dbname, TSDB_INFORMATION_SCHEMA_DB))) || ((*(_dbname) == 'p') && (0 == strcmp(_dbname, TSDB_PERFORMANCE_SCHEMA_DB))))
-
 #define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema))

 #define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
@@ -865,7 +865,7 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray*

     tNameFromString(&name, pTb->tbFName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);

-    if (CTG_IS_SYS_DBNAME(name.dbname)) {
+    if (IS_SYS_DBNAME(name.dbname)) {
       continue;
     }

@@ -936,7 +936,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }

-  if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
+  if (IS_SYS_DBNAME(pTableName->dbname)) {
     ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }

@@ -947,7 +947,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
 int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
   CTG_API_ENTER();

-  if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
+  if (IS_SYS_DBNAME(pTableName->dbname)) {
     ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
     CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
   }
@@ -132,7 +132,7 @@ void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache) {

 int32_t ctgAcquireDBCacheImpl(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache, bool acquire) {
   char *p = strchr(dbFName, '.');
-  if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+  if (p && IS_SYS_DBNAME(p + 1)) {
     dbFName = p + 1;
   }

@@ -694,7 +694,7 @@ int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId)
   }

   char *p = strchr(dbFName, '.');
-  if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+  if (p && IS_SYS_DBNAME(p + 1)) {
     dbFName = p + 1;
   }

@@ -727,7 +727,7 @@ int32_t ctgDropDbVgroupEnqueue(SCatalog* pCtg, const char *dbFName, bool syncOp)
   }

   char *p = strchr(dbFName, '.');
-  if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+  if (p && IS_SYS_DBNAME(p + 1)) {
     dbFName = p + 1;
   }

@@ -823,7 +823,7 @@ int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId
   }

   char *p = strchr(dbFName, '.');
-  if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+  if (p && IS_SYS_DBNAME(p + 1)) {
     dbFName = p + 1;
   }

@@ -859,7 +859,7 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
   }

   char *p = strchr(output->dbFName, '.');
-  if (p && CTG_IS_SYS_DBNAME(p + 1)) {
+  if (p && IS_SYS_DBNAME(p + 1)) {
     memmove(output->dbFName, p + 1, strlen(p + 1));
   }

@@ -2123,7 +2123,7 @@ int32_t ctgStartUpdateThread() {

 int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) {
-  if (CTG_IS_SYS_DBNAME(ctx->pName->dbname)) {
+  if (IS_SYS_DBNAME(ctx->pName->dbname)) {
     CTG_FLAG_SET_SYS_DB(ctx->flag);
   }

@@ -2177,7 +2177,7 @@ _return:
 }

 int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVgroupInfo **pVgroup) {
-  if (CTG_IS_SYS_DBNAME(pTableName->dbname)) {
+  if (IS_SYS_DBNAME(pTableName->dbname)) {
     ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
     CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
   }
@@ -375,6 +375,8 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray

   CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL));

+  rpcFreeCont(rpcRsp.pCont);
+
   return TSDB_CODE_SUCCESS;
 }

The same two added lines appear in twelve more hunks of this file (at old offsets -408, -447, -485, -523, -564, -602, -640, -683, -740, -785, -825, and -859): after the CTG_ERR_RET(ctgProcessRspMsg(...)) call in ctgGetDnodeListFromMnode, ctgGetDBVgInfoFromMnode, ctgGetDBCfgFromMnode, ctgGetIndexInfoFromMnode, ctgGetTbIndexFromMnode, ctgGetUdfInfoFromMnode, ctgGetUserDbAuthFromMnode, ctgGetTbMetaFromMnodeImpl, ctgGetTbMetaFromVnode, ctgGetTableCfgFromVnode, ctgGetTableCfgFromMnode, and ctgGetSvrVerFromMnode, each response payload is now released with rpcFreeCont(rpcRsp.pCont) before returning success.
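All thirteen call sites share the same synchronous request/response shape, and the fix releases the response payload once it has been decoded. A generic sketch of the pattern with hypothetical stand-in names; note that, as in the diff above, the payload is freed only after a successful decode, since the error macro returns early:

```c
#include <stdlib.h>
#include <string.h>

typedef struct { void *pCont; int contLen; int code; } rsp_t;

/* Hypothetical stand-ins for the rpc layer used above. */
static void send_recv(rsp_t *rsp) {
  rsp->pCont = malloc(16);        /* response payload owned by the caller */
  rsp->contLen = rsp->pCont ? 16 : 0;
  rsp->code = rsp->pCont ? 0 : -1;
}
static int  process_rsp(const rsp_t *rsp, char *out) {
  memcpy(out, rsp->pCont, 4);     /* decode into caller-provided output  */
  return 0;
}
static void free_cont(void *pCont) { free(pCont); }

int fetch_something(char *out) {
  rsp_t rsp = {0};
  send_recv(&rsp);
  if (rsp.code != 0) return rsp.code;

  int code = process_rsp(&rsp, out);
  free_cont(rsp.pCont);           /* the added line: release the payload */
  return code;
}
```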
@@ -401,8 +401,6 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
     QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
     EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
   }
-  EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, pTagScanNode->pScanCols->length);
-  EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
   if (pTagScanNode->pScanPseudoCols) {
     EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length);
     EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
@@ -13,7 +13,6 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */

-#include "ttime.h"
 #include "function.h"
 #include "functionMgt.h"
 #include "index.h"

@@ -21,6 +20,7 @@
 #include "tdatablock.h"
 #include "thash.h"
 #include "tmsg.h"
+#include "ttime.h"

 #include "executil.h"
 #include "executorimpl.h"
@@ -72,7 +72,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
 void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
   assert(pGroupResInfo != NULL);

-  for(int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
+  for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
     SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i);
     taosMemoryFree(pRes);
   }
@@ -266,17 +266,24 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
 }

 int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified) {
+  int32_t     code = TSDB_CODE_SUCCESS;
   SMetaReader mr = {0};

   metaReaderInit(&mr, metaHandle, 0);
-  metaGetTableEntryByUid(&mr, info->uid);
+  code = metaGetTableEntryByUid(&mr, info->uid);
+  if (TSDB_CODE_SUCCESS != code) {
+    metaReaderClear(&mr);
+    return terrno;
+  }

   SNode* pTagCondTmp = nodesCloneNode(pTagCond);

   nodesRewriteExprPostOrder(&pTagCondTmp, doTranslateTagExpr, &mr);
   metaReaderClear(&mr);

-  SNode*  pNew = NULL;
-  int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew);
+  SNode* pNew = NULL;
+  code = scalarCalculateConstants(pTagCondTmp, &pNew);
   if (TSDB_CODE_SUCCESS != code) {
     terrno = code;
     nodesDestroyNode(pTagCondTmp);
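The fix follows the usual C early-return discipline: the lookup's return code is now checked, and any resource initialized before the failing call is released on that path. A compact standalone sketch of the idiom with hypothetical names:

```c
#include <stdlib.h>

typedef struct { void *buf; } reader_t;

static void reader_init(reader_t *r)  { r->buf = malloc(64); }
static void reader_clear(reader_t *r) { free(r->buf); r->buf = NULL; }
static int  lookup(reader_t *r, unsigned long uid) { (void)r; return uid ? 0 : -1; }

int is_table_ok(unsigned long uid) {
  reader_t mr;
  reader_init(&mr);

  int code = lookup(&mr, uid);
  if (code != 0) {
    reader_clear(&mr);  /* release before the early return, as in the fix */
    return code;
  }

  /* ... use the reader ... */
  reader_clear(&mr);
  return 0;
}

int main(void) { return is_table_ok(42); }
```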
@@ -295,7 +302,8 @@ int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool*
   return TSDB_CODE_SUCCESS;
 }

-int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo) {
+int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
+                     STableListInfo* pListInfo) {
   int32_t code = TSDB_CODE_SUCCESS;

   pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));

@@ -317,14 +325,14 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
     code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
     if (code != 0 || status == SFLT_NOT_INDEX) {
       qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
-      // code = TSDB_CODE_INDEX_REBUILDING;
+      //  code = TSDB_CODE_INDEX_REBUILDING;
       code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
     } else {
       qDebug("success to get tableIds, size:%d, suid:%" PRIu64, (int)taosArrayGetSize(res), tableUid);
     }

     for (int i = 0; i < taosArrayGetSize(res); i++) {
-      STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
+      STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
       taosArrayPush(pListInfo->pTableList, &info);
     }
     taosArrayDestroy(res);

@@ -338,7 +346,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
       return code;
     }
   } else {  // Create one table group.
-    STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0};
+    STableKeyInfo info = {.uid = tableUid, .groupId = 0};
     taosArrayPush(pListInfo->pTableList, &info);
   }
@@ -610,8 +618,7 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu

   for (int32_t i = 0; i < numOfOutput; ++i) {
     const char* pName = pCtx[i].pExpr->pExpr->_function.functionName;
-    if ((strcmp(pName, "_select_value") == 0) ||
-        (strcmp(pName, "_group_key") == 0)) {
+    if ((strcmp(pName, "_select_value") == 0) || (strcmp(pName, "_group_key") == 0)) {
       pValCtx[num++] = &pCtx[i];
     } else if (fmIsSelectFunc(pCtx[i].functionId)) {
       p = &pCtx[i];
@@ -747,11 +754,11 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
 SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
   SColumn c = {0};

-  c.slotId = pColNode->slotId;
-  c.colId = pColNode->colId;
-  c.type = pColNode->node.resType.type;
-  c.bytes = pColNode->node.resType.bytes;
-  c.scale = pColNode->node.resType.scale;
+  c.slotId    = pColNode->slotId;
+  c.colId     = pColNode->colId;
+  c.type      = pColNode->node.resType.type;
+  c.bytes     = pColNode->node.resType.bytes;
+  c.scale     = pColNode->node.resType.scale;
   c.precision = pColNode->node.resType.precision;
   return c;
 }
@@ -768,10 +775,10 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
   //  pCond->twindow = pTableScanNode->scanRange;
   //  TODO: get it from stable scan node
   pCond->twindows = pTableScanNode->scanRange;
-  pCond->suid = pTableScanNode->scan.suid;
-  pCond->type = BLOCK_LOAD_OFFSET_ORDER;
+  pCond->suid = pTableScanNode->scan.suid;
+  pCond->type = BLOCK_LOAD_OFFSET_ORDER;
   pCond->startVersion = -1;
-  pCond->endVersion = -1;
+  pCond->endVersion = -1;
   //  pCond->type = pTableScanNode->scanFlag;

   int32_t j = 0;
@@ -850,11 +857,11 @@ static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) {
 }

 STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order) {
-  int32_t factor = (order == TSDB_ORDER_ASC)? -1:1;
+  int32_t factor = (order == TSDB_ORDER_ASC) ? -1 : 1;

   STimeWindow win = *pWindow;
   STimeWindow save = win;
-  while(win.skey <= ts && win.ekey >= ts) {
+  while (win.skey <= ts && win.ekey >= ts) {
     save = win;
     win.skey = taosTimeAdd(win.skey, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
     win.ekey = taosTimeAdd(win.ekey, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);

@@ -894,7 +901,6 @@ bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
          pLimitInfo->slimit.offset != -1);
 }

-
 static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
 static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }

@@ -903,7 +909,7 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimit
   SLimit slimit = {.limit = getLimit(pSLimit), .offset = getOffset(pSLimit)};

   pLimitInfo->limit = limit;
-  pLimitInfo->slimit= slimit;
+  pLimitInfo->slimit = slimit;
   pLimitInfo->remainOffset = limit.offset;
   pLimitInfo->remainGroupOffset = slimit.offset;
 }
@@ -191,7 +191,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
   SMetaReader mr = {0};
   metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
   for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {
-    int64_t* id = (int64_t*)taosArrayGet(tableIdList, i);
+    uint64_t* id = (uint64_t*)taosArrayGet(tableIdList, i);

     int32_t code = metaGetTableEntryByUid(&mr, *id);
     if (code != TSDB_CODE_SUCCESS) {

@@ -206,7 +206,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons

     if (pScanInfo->pTagCond != NULL) {
       bool          qualified = false;
-      STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid, .lastKey = 0};
+      STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid};
       code = isTableOk(&info, pScanInfo->pTagCond, pScanInfo->readHandle.meta, &qualified);
       if (code != TSDB_CODE_SUCCESS) {
         qError("failed to filter new table, uid:0x%" PRIx64 ", %s", info.uid, idstr);

@@ -218,9 +218,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
       }
     }

-    /*pScanInfo->pStreamScanOp->pTaskInfo->tableqinfoList.*/
-    // handle multiple partition
-
+    // handle multiple partition
     taosArrayPush(qa, id);
   }
@@ -244,6 +242,19 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo

     qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));
     code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);
+    if (code != TSDB_CODE_SUCCESS) {
+      return code;
+    }
+
+    // add to qTaskInfo
+    // todo refactor STableList
+    for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
+      uint64_t* uid = taosArrayGet(qa, i);
+
+      STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
+      taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
+    }
+
     taosArrayDestroy(qa);
   } else {  // remove the table id in current list
     qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
@@ -3351,7 +3351,11 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
     // filter shall be applied after apply functions and limit/offset on the result
     doFilter(pProjectInfo->pFilterNode, pInfo->pRes);

-    if (status == PROJECT_RETRIEVE_CONTINUE) {
+    if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
+      break;
+    }
+
+    if (status == PROJECT_RETRIEVE_CONTINUE || pInfo->pRes->info.rows == 0) {
       continue;
     } else if (status == PROJECT_RETRIEVE_DONE) {
       break;
@@ -3957,7 +3961,7 @@ static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {

     doFilter(pIndefInfo->pCondition, pInfo->pRes);
     size_t rows = pInfo->pRes->info.rows;
-    if (rows >= 0) {
+    if (rows > 0 || pOperator->status == OP_EXEC_DONE) {
       break;
     }
   }
@@ -4287,7 +4291,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
         REPLACE_NODE(pNew);
       } else {
         taosMemoryFree(keyBuf);
-        nodesClearList(groupNew);
+        nodesDestroyList(groupNew);
         metaReaderClear(&mr);
         return code;
       }

@@ -4305,7 +4309,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
       if (tTagIsJson(data)) {
        terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
        taosMemoryFree(keyBuf);
-       nodesClearList(groupNew);
+       nodesDestroyList(groupNew);
        metaReaderClear(&mr);
        return terrno;
      }

@@ -4328,7 +4332,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
     info->groupId = groupId;
     groupNum++;

-    nodesClearList(groupNew);
+    nodesDestroyList(groupNew);
     metaReaderClear(&mr);
   }
   taosMemoryFree(keyBuf);

@@ -4457,7 +4461,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       return NULL;
     }
   } else {  // Create one table group.
-    STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid, .groupId = 0};
+    STableKeyInfo info = {.uid = pBlockNode->uid, .groupId = 0};
     taosArrayPush(pTableListInfo->pTableList, &info);
   }
@@ -359,6 +359,7 @@ void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* p

   SScalarParam param = {.columnData = pColInfoData};
   fpSet.process(&srcParam, 1, &param);
+  colDataDestroy(&infoData);
 }

 static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {

@@ -2045,8 +2046,8 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
         uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
         int32_t  code = metaGetTableEntryByUid(&mr, suid);
         if (code != TSDB_CODE_SUCCESS) {
-          qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
-                 GET_TASKID(pTaskInfo));
+          qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s",
+                 pInfo->pCur->mr.me.name, suid, tstrerror(terrno), GET_TASKID(pTaskInfo));
           metaReaderClear(&mr);
           metaCloseTbCursor(pInfo->pCur);
           pInfo->pCur = NULL;
@@ -2152,16 +2153,39 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
   }
 }

+static SSDataBlock* sysTableScanUserSTables(SOperatorInfo* pOperator) {
+  SExecTaskInfo*     pTaskInfo = pOperator->pTaskInfo;
+  SSysTableScanInfo* pInfo = pOperator->info;
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+
+  pInfo->pRes->info.rows = 0;
+  pOperator->status = OP_EXEC_DONE;
+
+  pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
+  return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
+}
+
 static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
   // build message and send to mnode to fetch the content of system tables.
   SExecTaskInfo*     pTaskInfo = pOperator->pTaskInfo;
   SSysTableScanInfo* pInfo = pOperator->info;

   const char* name = tNameGetTableName(&pInfo->name);
+  if (pInfo->showRewrite) {
+    char dbName[TSDB_DB_NAME_LEN] = {0};
+    getDBNameFromCondition(pInfo->pCondition, dbName);
+    sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
+  }
+
   if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
     return sysTableScanUserTables(pOperator);
   } else if (strncasecmp(name, TSDB_INS_TABLE_USER_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
     return sysTableScanUserTags(pOperator);
+  } else if (strncasecmp(name, TSDB_INS_TABLE_USER_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && IS_SYS_DBNAME(pInfo->req.db)) {
+    return sysTableScanUserSTables(pOperator);
   } else {  // load the meta from mnode of the given epset
     if (pOperator->status == OP_EXEC_DONE) {
       return NULL;

@@ -2172,12 +2196,6 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
     strncpy(pInfo->req.tb, tNameGetTableName(&pInfo->name), tListLen(pInfo->req.tb));
     strcpy(pInfo->req.user, pInfo->pUser);

-    if (pInfo->showRewrite) {
-      char dbName[TSDB_DB_NAME_LEN] = {0};
-      getDBNameFromCondition(pInfo->pCondition, dbName);
-      sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
-    }
-
     int32_t contLen = tSerializeSRetrieveTableReq(NULL, 0, &pInfo->req);
     char*   buf1 = taosMemoryCalloc(1, contLen);
     tSerializeSRetrieveTableReq(buf1, contLen, &pInfo->req);
@@ -2467,9 +2467,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
 int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
   int32_t              numOfElems = 0;
   SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
-
   SInputColumnInfoData* pInput = &pCtx->input;
-  // SColumnDataAgg*       pAgg = pInput->pColumnDataAgg[0];

   SColumnInfoData* pCol = pInput->pData[0];
   int32_t          type = pCol->info.type;

@@ -2502,6 +2500,9 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
       GET_TYPED_DATA(v, double, type, data);
       tHistogramAdd(&pInfo->pHisto, v);
     }
+
+    qDebug("add %d elements into histogram, total:%d, numOfEntry:%d, %p", numOfElems, pInfo->pHisto->numOfElems,
+           pInfo->pHisto->numOfEntries, pInfo->pHisto);
   }

   SET_VAL(pResInfo, numOfElems, 1);

@@ -2540,11 +2541,19 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
   if (pHisto->numOfElems <= 0) {
     memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
     pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
+
+    qDebug("merge histo, total:%" PRId64 ", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
   } else {
     pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
+    qDebug("input histogram, elem:%" PRId64 ", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
+           pInput->pHisto);
+
     SHistogramInfo* pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
     memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
     pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
+
+    qDebug("merge histo, total:%" PRId64 ", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
     tHistogramDestroy(&pRes);
   }
 }

@@ -2560,14 +2569,20 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {

   SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);

-  int32_t start = pInput->startRowIndex;
+  qDebug("total %d rows will merge, %p", pInput->numOfRows, pInfo->pHisto);
+
+  int32_t start = pInput->startRowIndex;
   for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
-    char* data = colDataGetData(pCol, i);
+    char*             data = colDataGetData(pCol, i);
     SAPercentileInfo* pInputInfo = (SAPercentileInfo*)varDataVal(data);
     apercentileTransferInfo(pInputInfo, pInfo);
   }

+  if (pInfo->algo != APERCT_ALGO_TDIGEST) {
+    qDebug("after merge, total:%d, numOfEntry:%d, %p", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries, pInfo->pHisto);
+  }
+
   SET_VAL(pResInfo, 1, 1);
   return TSDB_CODE_SUCCESS;
 }

@@ -2585,6 +2600,8 @@ int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
     }
   } else {
     if (pInfo->pHisto->numOfElems > 0) {
+      qDebug("get the final res:%d, elements:%" PRId64 ", entry:%d", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries);
+
       double  ratio[] = {pInfo->percent};
       double* res = tHistogramUniform(pInfo->pHisto, ratio, 1);
       pInfo->result = *res;

@@ -2638,6 +2655,9 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
   SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
   SAPercentileInfo*    pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
   ASSERT(pDBuf->algo == pSBuf->algo);

+  qDebug("start to combine apercentile, %p", pDBuf->pHisto);
+
   apercentileTransferInfo(pSBuf, pDBuf);
   pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
   return TSDB_CODE_SUCCESS;
@@ -913,8 +913,8 @@ void udfdConnectMnodeThreadFunc(void *args) {
 }

 int main(int argc, char *argv[]) {
-  if (!taosCheckSystemIsSmallEnd()) {
-    printf("failed to start since on non-small-end machines\n");
+  if (!taosCheckSystemIsLittleEnd()) {
+    printf("failed to start since on non-little-end machines\n");
     return -1;
   }
@@ -707,6 +707,8 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
     sifFreeParam(res);
     taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
   }
+  sifFreeRes(ctx.pRes);
+
   SIF_RET(code);
 }
@@ -369,6 +369,8 @@ static void destroyPhysiNode(SPhysiNode* pNode) {
   nodesDestroyList(pNode->pChildren);
   nodesDestroyNode(pNode->pConditions);
   nodesDestroyNode((SNode*)pNode->pOutputDataBlockDesc);
+  nodesDestroyNode(pNode->pLimit);
+  nodesDestroyNode(pNode->pSlimit);
 }

 static void destroyWinodwPhysiNode(SWinodwPhysiNode* pNode) {

@@ -389,11 +391,16 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode*

 static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); }

-static void nodesDestroyNodePointer(void* node) {
-  SNode* pNode = *(SNode**)node;
-  nodesDestroyNode(pNode);
+static void destroyTableCfg(STableCfg* pCfg) {
+  taosArrayDestroy(pCfg->pFuncs);
+  taosMemoryFree(pCfg->pComment);
+  taosMemoryFree(pCfg->pSchemas);
+  taosMemoryFree(pCfg->pTags);
+  taosMemoryFree(pCfg);
 }

+static void destroySmaIndex(void* pIndex) { taosMemoryFree(((STableIndexInfo*)pIndex)->expr); }
+
 void nodesDestroyNode(SNode* pNode) {
   if (NULL == pNode) {
     return;

@@ -431,6 +438,7 @@ void nodesDestroyNode(SNode* pNode) {
       SRealTableNode* pReal = (SRealTableNode*)pNode;
       taosMemoryFreeClear(pReal->pMeta);
      taosMemoryFreeClear(pReal->pVgroupList);
+      taosArrayDestroyEx(pReal->pSmaIndexes, destroySmaIndex);
       break;
     }
     case QUERY_NODE_TEMP_TABLE:

@@ -451,9 +459,12 @@ void nodesDestroyNode(SNode* pNode) {
       break;
     case QUERY_NODE_LIMIT:  // no pointer field
       break;
-    case QUERY_NODE_STATE_WINDOW:
-      nodesDestroyNode(((SStateWindowNode*)pNode)->pExpr);
+    case QUERY_NODE_STATE_WINDOW: {
+      SStateWindowNode* pState = (SStateWindowNode*)pNode;
+      nodesDestroyNode(pState->pCol);
+      nodesDestroyNode(pState->pExpr);
       break;
+    }
     case QUERY_NODE_SESSION_WINDOW: {
       SSessionWindowNode* pSession = (SSessionWindowNode*)pNode;
       nodesDestroyNode((SNode*)pSession->pCol);

@@ -500,8 +511,10 @@ void nodesDestroyNode(SNode* pNode) {
     }
     case QUERY_NODE_TABLE_OPTIONS: {
       STableOptions* pOptions = (STableOptions*)pNode;
-      nodesDestroyList(pOptions->pSma);
+      nodesDestroyList(pOptions->pMaxDelay);
+      nodesDestroyList(pOptions->pWatermark);
       nodesDestroyList(pOptions->pRollupFuncs);
+      nodesDestroyList(pOptions->pSma);
       break;
     }
     case QUERY_NODE_INDEX_OPTIONS: {

@@ -510,17 +523,22 @@ void nodesDestroyNode(SNode* pNode) {
       nodesDestroyNode(pOptions->pInterval);
       nodesDestroyNode(pOptions->pOffset);
       nodesDestroyNode(pOptions->pSliding);
+      nodesDestroyNode(pOptions->pStreamOptions);
       break;
     }
     case QUERY_NODE_EXPLAIN_OPTIONS:  // no pointer field
       break;
-    case QUERY_NODE_STREAM_OPTIONS:
-      nodesDestroyNode(((SStreamOptions*)pNode)->pWatermark);
+    case QUERY_NODE_STREAM_OPTIONS: {
+      SStreamOptions* pOptions = (SStreamOptions*)pNode;
+      nodesDestroyNode(pOptions->pDelay);
+      nodesDestroyNode(pOptions->pWatermark);
       break;
+    }
     case QUERY_NODE_LEFT_VALUE:  // no pointer field
       break;
     case QUERY_NODE_SET_OPERATOR: {
       SSetOperator* pStmt = (SSetOperator*)pNode;
       nodesDestroyList(pStmt->pProjectionList);
       nodesDestroyNode(pStmt->pLeft);
       nodesDestroyNode(pStmt->pRight);
       nodesDestroyList(pStmt->pOrderByList);

@@ -582,7 +600,8 @@ void nodesDestroyNode(SNode* pNode) {
       break;
     case QUERY_NODE_DROP_SUPER_TABLE_STMT:  // no pointer field
       break;
-    case QUERY_NODE_ALTER_TABLE_STMT: {
+    case QUERY_NODE_ALTER_TABLE_STMT:
+    case QUERY_NODE_ALTER_SUPER_TABLE_STMT: {
       SAlterTableStmt* pStmt = (SAlterTableStmt*)pNode;
       nodesDestroyNode((SNode*)pStmt->pOptions);
       nodesDestroyNode((SNode*)pStmt->pVal);

@@ -686,14 +705,15 @@ void nodesDestroyNode(SNode* pNode) {
       nodesDestroyNode(pStmt->pTbName);
       break;
     }
-    case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:  // no pointer field
+    case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
+      nodesDestroyNode(((SShowDnodeVariablesStmt*)pNode)->pDnodeId);
       break;
     case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
       taosMemoryFreeClear(((SShowCreateDatabaseStmt*)pNode)->pCfg);
       break;
     case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
     case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
-      taosMemoryFreeClear(((SShowCreateTableStmt*)pNode)->pCfg);
+      destroyTableCfg((STableCfg*)(((SShowCreateTableStmt*)pNode)->pCfg));
       break;
     case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:  // no pointer field
     case QUERY_NODE_KILL_CONNECTION_STMT:         // no pointer field

@@ -725,7 +745,8 @@ void nodesDestroyNode(SNode* pNode) {
       }
       taosArrayDestroy(pQuery->pDbList);
       taosArrayDestroy(pQuery->pTableList);
-      taosArrayDestroyEx(pQuery->pPlaceholderValues, nodesDestroyNodePointer);
+      taosArrayDestroy(pQuery->pPlaceholderValues);
       nodesDestroyNode(pQuery->pPrepareRoot);
       break;
     }
     case QUERY_NODE_LOGIC_PLAN_SCAN: {

@@ -737,7 +758,7 @@ void nodesDestroyNode(SNode* pNode) {
       nodesDestroyList(pLogicNode->pDynamicScanFuncs);
       nodesDestroyNode(pLogicNode->pTagCond);
       nodesDestroyNode(pLogicNode->pTagIndexCond);
-      taosArrayDestroy(pLogicNode->pSmaIndexes);
+      taosArrayDestroyEx(pLogicNode->pSmaIndexes, destroySmaIndex);
       nodesDestroyList(pLogicNode->pGroupTags);
       break;
     }

@@ -766,6 +787,9 @@ void nodesDestroyNode(SNode* pNode) {
       destroyLogicNode((SLogicNode*)pLogicNode);
       destroyVgDataBlockArray(pLogicNode->pDataBlocks);
+      // pVgDataBlocks is weak reference
       nodesDestroyNode(pLogicNode->pAffectedRows);
+      taosMemoryFreeClear(pLogicNode->pVgroupList);
+      nodesDestroyList(pLogicNode->pInsertCols);
       break;
     }
     case QUERY_NODE_LOGIC_PLAN_EXCHANGE:

@@ -784,6 +808,7 @@ void nodesDestroyNode(SNode* pNode) {
       nodesDestroyList(pLogicNode->pFuncs);
       nodesDestroyNode(pLogicNode->pTspk);
+      nodesDestroyNode(pLogicNode->pTsEnd);
       nodesDestroyNode(pLogicNode->pStateExpr);
       break;
     }
     case QUERY_NODE_LOGIC_PLAN_FILL: {

@@ -833,9 +858,14 @@ void nodesDestroyNode(SNode* pNode) {
     case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
     case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
     case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
-    case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
       destroyScanPhysiNode((SScanPhysiNode*)pNode);
       break;
+    case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: {
+      SLastRowScanPhysiNode* pPhyNode = (SLastRowScanPhysiNode*)pNode;
+      destroyScanPhysiNode((SScanPhysiNode*)pNode);
+      nodesDestroyList(pPhyNode->pGroupTags);
+      break;
+    }
     case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
     case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
    case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
@@ -462,7 +462,7 @@ explain_options(A) ::= explain_options(B) VERBOSE NK_BOOL(C).
explain_options(A) ::= explain_options(B) RATIO NK_FLOAT(C).                      { A = setExplainRatio(pCxt, B, &C); }

/************************************************ compact *************************************************************/
-cmd ::= COMPACT VNODES IN NK_LP integer_list(A) NK_RP.                            { pCxt->pRootNode = createCompactStmt(pCxt, A); }
+cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP.                               { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }

/************************************************ create/drop function ************************************************/
cmd ::= CREATE agg_func_opt(A) FUNCTION not_exists_opt(F) function_name(B)
@@ -387,6 +387,19 @@ SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType typ
   return (SNode*)cond;
 }

+static uint8_t getMinusDataType(uint8_t orgType) {
+  switch (orgType) {
+    case TSDB_DATA_TYPE_UTINYINT:
+    case TSDB_DATA_TYPE_USMALLINT:
+    case TSDB_DATA_TYPE_UINT:
+    case TSDB_DATA_TYPE_UBIGINT:
+      return TSDB_DATA_TYPE_BIGINT;
+    default:
+      break;
+  }
+  return orgType;
+}
+
 SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) {
   CHECK_PARSER_STATUS(pCxt);
   if (OP_TYPE_MINUS == type && QUERY_NODE_VALUE == nodeType(pLeft)) {

@@ -402,7 +415,7 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL
     }
     taosMemoryFree(pVal->literal);
     pVal->literal = pNewLiteral;
-    pVal->node.resType.type = TSDB_DATA_TYPE_BIGINT;
+    pVal->node.resType.type = getMinusDataType(pVal->node.resType.type);
     return pLeft;
   }
   SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR);
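The hunk above widens the result type of a unary minus on unsigned operands: negating the maximum UINT value cannot be represented in any same-width unsigned type, so the literal is re-typed as signed BIGINT. A minimal sketch of the idea, with local stand-ins for the TSDB_DATA_TYPE_* codes:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the TSDB_DATA_TYPE_* codes used above.
enum DataType { DT_INT, DT_UINT, DT_UBIGINT, DT_BIGINT };

// Mirrors the idea of getMinusDataType: unsigned operands of a unary
// minus are re-typed as signed BIGINT so the negated value still fits.
static DataType minusResultType(DataType t) {
  switch (t) {
    case DT_UINT:
    case DT_UBIGINT:
      return DT_BIGINT;
    default:
      return t;  // signed types keep their own type
  }
}

int main() {
  uint32_t v = 4294967295u;        // max UINT
  int64_t negated = -(int64_t)v;   // only representable once widened
  printf("-%u = %lld, type %d -> %d\n", v, (long long)negated,
         (int)DT_UINT, (int)minusResultType(DT_UINT));
  return 0;
}
```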
@@ -1497,7 +1497,6 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
   memset(&pCxt->tags, 0, sizeof(pCxt->tags));
   pCxt->pVgroupsHashObj = NULL;
   pCxt->pTableBlockHashObj = NULL;
-  pCxt->pTableMeta = NULL;

   return TSDB_CODE_SUCCESS;
 }

@@ -1554,7 +1553,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
     if (NULL == *pQuery) {
       return TSDB_CODE_OUT_OF_MEMORY;
     }
+  } else {
+    nodesDestroyNode((*pQuery)->pRoot);
+  }

   (*pQuery)->execMode = QUERY_EXEC_MODE_SCHEDULE;
   (*pQuery)->haveResultSet = false;
   (*pQuery)->msgType = TDMT_VND_SUBMIT;
@@ -678,6 +678,7 @@ void qFreeStmtDataBlock(void* pDataBlock) {
     return;
   }

+  taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pTableMeta);
   taosMemoryFreeClear(((STableDataBlocks*)pDataBlock)->pData);
   taosMemoryFreeClear(pDataBlock);
 }
@@ -1257,6 +1257,7 @@ static int32_t rewriteFuncToValue(STranslateContext* pCxt, char* pLiteral, SNode
     }
   }
   if (DEAL_RES_ERROR != translateValue(pCxt, pVal)) {
+    nodesDestroyNode(*pNode);
     *pNode = (SNode*)pVal;
   } else {
     nodesDestroyNode((SNode*)pVal);

@@ -4009,30 +4010,7 @@ static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) {
   return NULL;
 }

-static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
-  if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
-    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
-                                   "Set tag value only available for child table");
-  }
-
-  if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
-    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
-  }
-
-  if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
-    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
-  }
-
-  if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
-    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
-  }
-
-  STableMeta* pTableMeta = NULL;
-  int32_t     code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
-  if (TSDB_CODE_SUCCESS != code) {
-    return code;
-  }
-
+static int32_t checkAlterSuperTableImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta) {
   SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
   if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON &&
       (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||

@@ -4057,6 +4035,33 @@ static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pS
   return TSDB_CODE_SUCCESS;
 }

+static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
+  if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
+                                   "Set tag value only available for child table");
+  }
+
+  if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
+  }
+
+  if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
+  }
+
+  if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
+  }
+
+  STableMeta* pTableMeta = NULL;
+  int32_t     code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = checkAlterSuperTableImpl(pCxt, pStmt, pTableMeta);
+  }
+  taosMemoryFree(pTableMeta);
+  return code;
+}
+
 static int32_t translateAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
   SMAlterStbReq alterReq = {0};
   int32_t       code = checkAlterSuperTable(pCxt, pStmt);

@@ -6438,6 +6443,7 @@ static int32_t toMsgType(ENodeType type) {

 static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
   if (NULL != pCxt->pDbs) {
+    taosArrayDestroy(pQuery->pDbList);
     pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN);
     if (NULL == pQuery->pDbList) {
       return TSDB_CODE_OUT_OF_MEMORY;

@@ -6450,6 +6456,7 @@ static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
   }

   if (NULL != pCxt->pTables) {
+    taosArrayDestroy(pQuery->pTableList);
     pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName));
     if (NULL == pQuery->pTableList) {
       return TSDB_CODE_OUT_OF_MEMORY;

@@ -6521,6 +6528,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
   pQuery->stableQuery = pCxt->stableQuery;

   if (pQuery->haveResultSet) {
+    taosMemoryFreeClear(pQuery->pResSchema);
     if (TSDB_CODE_SUCCESS != extractResultSchema(pQuery->pRoot, &pQuery->numOfResCols, &pQuery->pResSchema)) {
       return TSDB_CODE_OUT_OF_MEMORY;
     }
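The checkAlterSuperTable refactor above separates validation from resource lifetime: the wrapper fetches the table metadata, delegates the schema checks to checkAlterSuperTableImpl, and frees the metadata on every path, which the early-return version leaked. A hedged sketch of the wrapper/impl shape, with simplified stand-in types:

```cpp
#include <cstdlib>

// Sketch of the wrapper/impl split above; types and calls are simplified
// stand-ins, not the real TDengine API.
struct TableMeta { int numOfTags; };

static int getMeta(TableMeta **out) {          // pretend catalog lookup
  *out = (TableMeta *)calloc(1, sizeof(TableMeta));
  return *out ? 0 : -1;
}

static int checkImpl(const TableMeta *meta) {  // pure validation, no I/O
  return meta->numOfTags >= 0 ? 0 : -1;
}

// The wrapper owns the metadata lifetime: fetch, validate, free, return.
static int check(void) {
  TableMeta *meta = NULL;
  int code = getMeta(&meta);
  if (code == 0) {
    code = checkImpl(meta);
  }
  free(meta);  // free(NULL) is safe, so one cleanup path suffices
  return code;
}

int main() { return check(); }
```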
@@ -865,12 +865,17 @@ STableCfg* tableCfgDup(STableCfg* pCfg) {
   STableCfg* pNew = taosMemoryMalloc(sizeof(*pNew));

   memcpy(pNew, pCfg, sizeof(*pNew));
-  if (pNew->pComment) {
-    pNew->pComment = strdup(pNew->pComment);
+  if (NULL != pNew->pComment) {
+    pNew->pComment = taosMemoryCalloc(pNew->commentLen + 1, 1);
+    memcpy(pNew->pComment, pCfg->pComment, pNew->commentLen);
   }
-  if (pNew->pFuncs) {
+  if (NULL != pNew->pFuncs) {
     pNew->pFuncs = taosArrayDup(pNew->pFuncs);
   }
+  if (NULL != pNew->pTags) {
+    pNew->pTags = taosMemoryCalloc(pNew->tagsLen + 1, 1);
+    memcpy(pNew->pTags, pCfg->pTags, pNew->tagsLen);
+  }

   int32_t schemaSize = (pCfg->numOfColumns + pCfg->numOfTags) * sizeof(SSchema);
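The tableCfgDup fix replaces strdup with length-based copies and adds the previously missing pTags duplication, so the duplicate owns all of its buffers instead of aliasing the source. A minimal sketch of the shallow-copy-then-reallocate pattern, with an illustrative struct:

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Minimal sketch of the deep-copy pattern in tableCfgDup: a shallow memcpy
// first, then every owned buffer is re-allocated so the copy never aliases
// the source. Struct and field names are illustrative.
struct Cfg {
  char   *pComment;
  int32_t commentLen;
};

static Cfg *cfgDup(const Cfg *src) {
  Cfg *dst = (Cfg *)malloc(sizeof(*dst));
  if (dst == NULL) return NULL;
  memcpy(dst, src, sizeof(*dst));  // shallow copy of scalars and pointers
  if (dst->pComment != NULL) {
    dst->pComment = (char *)calloc(src->commentLen + 1, 1);  // own buffer
    if (dst->pComment != NULL) {
      memcpy(dst->pComment, src->pComment, src->commentLen);
    }
  }
  return dst;
}

int main() {
  char text[] = "demo";
  Cfg  src = {text, 4};
  Cfg *dup = cfgDup(&src);
  if (dup) { free(dup->pComment); free(dup); }
  return 0;
}
```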
@@ -82,11 +82,16 @@ static int32_t parseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCa
 }

 static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
+  if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
+    taosMemoryFreeClear(pVal->datum.p);
+  }
+
   if (pParam->is_null && 1 == *(pParam->is_null)) {
     pVal->node.resType.type = TSDB_DATA_TYPE_NULL;
     pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_NULL].bytes;
     return TSDB_CODE_SUCCESS;
   }

   int32_t inputSize = (NULL != pParam->length ? *(pParam->length) : tDataTypes[pParam->buffer_type].bytes);
   pVal->node.resType.type = pParam->buffer_type;
   pVal->node.resType.bytes = inputSize;

@@ -239,6 +244,7 @@ int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx
   }

   if (TSDB_CODE_SUCCESS == code && (colIdx < 0 || colIdx + 1 == pQuery->placeholderNum)) {
+    nodesDestroyNode(pQuery->pRoot);
     pQuery->pRoot = nodesCloneNode(pQuery->pPrepareRoot);
     if (NULL == pQuery->pRoot) {
       code = TSDB_CODE_OUT_OF_MEMORY;
@@ -4117,7 +4117,8 @@ static YYACTIONTYPE yy_reduce(
         yymsp[-2].minor.yy616 = yylhsminor.yy616;
         break;
       case 254: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
-{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy356); }
+{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
+  yy_destructor(yypParser,273,&yymsp[-1].minor);
         break;
       case 255: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
 { pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy151, yymsp[-8].minor.yy151, &yymsp[-5].minor.yy361, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy600, yymsp[0].minor.yy734); }
@@ -93,6 +93,17 @@ class MockCatalogServiceImpl {

   MockCatalogServiceImpl() : id_(1) {}

+  ~MockCatalogServiceImpl() {
+    for (auto& cfg : dbCfg_) {
+      taosArrayDestroy(cfg.second.pRetensions);
+    }
+    for (auto& indexes : index_) {
+      for (auto& index : indexes.second) {
+        taosMemoryFree(index.expr);
+      }
+    }
+  }
+
   int32_t catalogGetHandle() const { return 0; }

   int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {

@@ -676,6 +687,7 @@ void MockCatalogService::destoryCatalogReq(SCatalogReq* pReq) {
   taosArrayDestroy(pReq->pIndex);
   taosArrayDestroy(pReq->pUser);
   taosArrayDestroy(pReq->pTableIndex);
+  taosArrayDestroy(pReq->pTableCfg);
   delete pReq;
 }

@@ -684,6 +696,11 @@ void MockCatalogService::destoryMetaRes(void* p) {
   taosMemoryFree(pRes->pRes);
 }

+void MockCatalogService::destoryMetaArrayRes(void* p) {
+  SMetaRes* pRes = (SMetaRes*)p;
+  taosArrayDestroy((SArray*)pRes->pRes);
+}
+
 void MockCatalogService::destoryMetaData(SMetaData* pData) {
   taosArrayDestroyEx(pData->pDbVgroup, destoryMetaRes);
   taosArrayDestroyEx(pData->pDbCfg, destoryMetaRes);

@@ -695,5 +712,8 @@ void MockCatalogService::destoryMetaData(SMetaData* pData) {
   taosArrayDestroyEx(pData->pIndex, destoryMetaRes);
   taosArrayDestroyEx(pData->pUser, destoryMetaRes);
   taosArrayDestroyEx(pData->pQnodeList, destoryMetaRes);
+  taosArrayDestroyEx(pData->pTableCfg, destoryMetaRes);
+  taosArrayDestroyEx(pData->pDnodeList, destoryMetaArrayRes);
+  taosMemoryFree(pData->pSvrVer);
   delete pData;
 }

@@ -52,6 +52,7 @@ class MockCatalogService {
  public:
   static void destoryCatalogReq(SCatalogReq* pReq);
   static void destoryMetaRes(void* p);
+  static void destoryMetaArrayRes(void* p);
   static void destoryMetaData(SMetaData* pData);

   MockCatalogService();
@@ -21,7 +21,11 @@ namespace ParserTest {

 class ParserInitialCTest : public ParserDdlTest {};

-// todo compact
+TEST_F(ParserInitialCTest, compact) {
+  useDb("root", "test");
+
+  run("COMPACT VNODES IN (1, 2)", TSDB_CODE_PAR_EXPRIE_STATEMENT, PARSER_STAGE_PARSE);
+}

 TEST_F(ParserInitialCTest, createAccount) {
   useDb("root", "test");

@@ -32,6 +36,19 @@ TEST_F(ParserInitialCTest, createAccount) {
 TEST_F(ParserInitialCTest, createBnode) {
   useDb("root", "test");

+  SMCreateQnodeReq expect = {0};
+
+  auto setCreateQnodeReq = [&](int32_t dnodeId) { expect.dnodeId = dnodeId; };
+
+  setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
+    ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_BNODE_STMT);
+    SMCreateQnodeReq req = {0};
+    ASSERT_TRUE(TSDB_CODE_SUCCESS ==
+                tDeserializeSCreateDropMQSBNodeReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+    ASSERT_EQ(req.dnodeId, expect.dnodeId);
+  });
+
+  setCreateQnodeReq(1);
   run("CREATE BNODE ON DNODE 1");
 }
@@ -123,6 +123,14 @@ class ParserTestBaseImpl {
     delete pMetaCache;
   }

+  static void _destroyQuery(SQuery** pQuery) {
+    if (nullptr == pQuery) {
+      return;
+    }
+    qDestroyQuery(*pQuery);
+    taosMemoryFree(pQuery);
+  }
+
   bool checkResultCode(const string& pFunc, int32_t resultCode) {
     return !(stmtEnv_.checkFunc_.empty())
                ? ((stmtEnv_.checkFunc_ == pFunc) ? stmtEnv_.expect_ == resultCode : TSDB_CODE_SUCCESS == resultCode)

@@ -278,9 +286,9 @@ class ParserTestBaseImpl {
     SParseContext cxt = {0};
     setParseContext(sql, &cxt);

-    SQuery* pQuery = nullptr;
-    doParse(&cxt, &pQuery);
-    unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
+    unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+    doParse(&cxt, query.get());
+    SQuery* pQuery = *(query.get());

     doAuthenticate(&cxt, pQuery, nullptr);

@@ -306,9 +314,9 @@ class ParserTestBaseImpl {
     SParseContext cxt = {0};
     setParseContext(sql, &cxt);

-    SQuery* pQuery = nullptr;
-    doParseSql(&cxt, &pQuery);
-    unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
+    unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+    doParseSql(&cxt, query.get());
+    SQuery* pQuery = *(query.get());

     if (g_dump) {
       dump();

@@ -328,9 +336,9 @@ class ParserTestBaseImpl {
     SParseContext cxt = {0};
     setParseContext(sql, &cxt, true);

-    SQuery* pQuery = nullptr;
-    doParse(&cxt, &pQuery);
-    unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
+    unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+    doParse(&cxt, query.get());
+    SQuery* pQuery = *(query.get());

     unique_ptr<SParseMetaCache, void (*)(SParseMetaCache*)> metaCache(new SParseMetaCache(), _destoryParseMetaCache);
     doCollectMetaKey(&cxt, pQuery, metaCache.get());

@@ -386,9 +394,9 @@ class ParserTestBaseImpl {

     unique_ptr<SCatalogReq, void (*)(SCatalogReq*)> catalogReq(new SCatalogReq(),
                                                                MockCatalogService::destoryCatalogReq);
-    SQuery* pQuery = nullptr;
-    doParseSqlSyntax(&cxt, &pQuery, catalogReq.get());
-    unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
+    unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+    doParseSqlSyntax(&cxt, query.get(), catalogReq.get());
+    SQuery* pQuery = *(query.get());

     string err;
     thread t1([&]() {
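The test-harness hunks above change what the smart pointer guards: instead of wrapping the parsed object after the call (which leaks if doParse throws or partially fills the output), a heap-allocated SQuery* slot is wrapped before the call, and the deleter destroys whatever the slot ends up holding. A self-contained sketch of the pattern, with stand-in types:

```cpp
#include <cstdlib>
#include <memory>

// Sketch of the test-harness change above: guard the *output slot* before
// the parse call, so whatever the callee stores there is freed even if a
// later step throws. Query/doParse are illustrative stand-ins.
struct Query { int dummy; };

static void destroyQuery(Query **slot) {
  if (slot == nullptr) return;
  delete *slot;  // destroy the parsed object, if any
  free(slot);    // then the slot itself
}

static void doParse(Query **out) { *out = new Query{42}; }

int main() {
  std::unique_ptr<Query *, void (*)(Query **)> query(
      (Query **)calloc(1, sizeof(Query *)), destroyQuery);
  doParse(query.get());          // callee fills the guarded slot
  Query *pQuery = *query.get();  // borrow for further checks
  return pQuery->dummy == 42 ? 0 : 1;
}
```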
@@ -1068,7 +1068,11 @@ static int32_t createExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogicNo
 }

 static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWinodwPhysiNode* pWindow,
-                                             SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
+                                             SWindowLogicNode* pWindowLogicNode) {
+  pWindow->triggerType = pWindowLogicNode->triggerType;
+  pWindow->watermark = pWindowLogicNode->watermark;
+  pWindow->igExpired = pWindowLogicNode->igExpired;
+
   SNodeList* pPrecalcExprs = NULL;
   SNodeList* pFuncs = NULL;
   int32_t    code = rewritePrecalcExprs(pCxt, pWindowLogicNode->pFuncs, &pPrecalcExprs, &pFuncs);

@@ -1100,16 +1104,6 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
     code = setConditionsSlotId(pCxt, (const SLogicNode*)pWindowLogicNode, (SPhysiNode*)pWindow);
   }

-  pWindow->triggerType = pWindowLogicNode->triggerType;
-  pWindow->watermark = pWindowLogicNode->watermark;
-  pWindow->igExpired = pWindowLogicNode->igExpired;
-
-  if (TSDB_CODE_SUCCESS == code) {
-    *pPhyNode = (SPhysiNode*)pWindow;
-  } else {
-    nodesDestroyNode((SNode*)pWindow);
-  }
-
   nodesDestroyList(pPrecalcExprs);
   nodesDestroyList(pFuncs);

@@ -1156,7 +1150,14 @@ static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil
   pInterval->intervalUnit = pWindowLogicNode->intervalUnit;
   pInterval->slidingUnit = pWindowLogicNode->slidingUnit;

-  return createWindowPhysiNodeFinalize(pCxt, pChildren, &pInterval->window, pWindowLogicNode, pPhyNode);
+  int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pInterval->window, pWindowLogicNode);
+  if (TSDB_CODE_SUCCESS == code) {
+    *pPhyNode = (SPhysiNode*)pInterval;
+  } else {
+    nodesDestroyNode((SNode*)pInterval);
+  }
+
+  return code;
 }

 static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,

@@ -1169,7 +1170,14 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList*

   pSession->gap = pWindowLogicNode->sessionGap;

-  return createWindowPhysiNodeFinalize(pCxt, pChildren, &pSession->window, pWindowLogicNode, pPhyNode);
+  int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pSession->window, pWindowLogicNode);
+  if (TSDB_CODE_SUCCESS == code) {
+    *pPhyNode = (SPhysiNode*)pSession;
+  } else {
+    nodesDestroyNode((SNode*)pSession);
+  }
+
+  return code;
 }

 static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,

@@ -1201,12 +1209,20 @@ static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pC
     }
   }

-  if (TSDB_CODE_SUCCESS != code) {
-    nodesDestroyNode((SNode*)pState);
-    return code;
+  if (TSDB_CODE_SUCCESS == code) {
+    code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pState->window, pWindowLogicNode);
   }

-  return createWindowPhysiNodeFinalize(pCxt, pChildren, &pState->window, pWindowLogicNode, pPhyNode);
+  if (TSDB_CODE_SUCCESS == code) {
+    *pPhyNode = (SPhysiNode*)pState;
+  } else {
+    nodesDestroyNode((SNode*)pState);
+  }
+
+  nodesDestroyList(pPrecalcExprs);
+  nodesDestroyNode(pStateKey);
+
+  return code;
 }

 static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode,
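The window-planner refactor above moves node publication out of the shared finalize helper: the helper only fills the common window fields and reports a code, while each caller assigns *pPhyNode on success or destroys its own concrete node on failure. A hedged sketch of that ownership split, with illustrative types:

```cpp
#include <cstdio>

// Sketch of the refactor above: the shared finalize helper fills in the
// common window fields and reports a code; each caller publishes its own
// concrete node on success or destroys it on failure. Types are
// illustrative, not the planner's real ones.
struct Window { int triggerType; };
struct IntervalNode { Window window; };

static int finalizeWindow(Window *w) {  // common part, no ownership
  w->triggerType = 1;
  return 0;                             // 0 == success
}

static int createIntervalNode(IntervalNode *node, IntervalNode **out) {
  int code = finalizeWindow(&node->window);
  if (code == 0) {
    *out = node;  // success: hand ownership to the caller
  } else {
    delete node;  // failure: destroy locally, never leak
  }
  return code;
}

int main() {
  IntervalNode *out = nullptr;
  int code = createIntervalNode(new IntervalNode(), &out);
  if (out) delete out;
  printf("code=%d\n", code);
  return code;
}
```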
@@ -867,10 +867,11 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
   if (TSDB_CODE_SUCCESS == code) {
     code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, pMergeKeys, pPartSort, groupSort);
   }
-  if (TSDB_CODE_SUCCESS == code && groupSort) {
-    stbSplSetScanPartSort(pPartSort);
-  }
   if (TSDB_CODE_SUCCESS == code) {
+    nodesDestroyNode((SNode*)pInfo->pSplitNode);
+    if (groupSort) {
+      stbSplSetScanPartSort(pPartSort);
+    }
     code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
                                      (SNode*)splCreateScanSubplan(pCxt, pPartSort, SPLIT_FLAG_STABLE_SPLIT));
   }
@@ -24,6 +24,16 @@ class PlanStmtTest : public PlannerTestBase {
     return (TAOS_MULTI_BIND*)taosMemoryCalloc(nParams, sizeof(TAOS_MULTI_BIND));
   }

+  void destoryBindParams(TAOS_MULTI_BIND* pParams, int32_t nParams) {
+    for (int32_t i = 0; i < nParams; ++i) {
+      TAOS_MULTI_BIND* pParam = pParams + i;
+      taosMemoryFree(pParam->buffer);
+      taosMemoryFree(pParam->length);
+      taosMemoryFree(pParam->is_null);
+    }
+    taosMemoryFree(pParams);
+  }
+
   TAOS_MULTI_BIND* buildIntegerParam(TAOS_MULTI_BIND* pBindParams, int32_t index, int64_t val, int32_t type) {
     TAOS_MULTI_BIND* pBindParam = initParam(pBindParams, index, type, 0);

@@ -127,8 +137,10 @@ TEST_F(PlanStmtTest, basic) {
   useDb("root", "test");

   prepare("SELECT * FROM t1 WHERE c1 = ?");
-  bindParams(buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT), 0);
+  TAOS_MULTI_BIND* pBindParams = buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT);
+  bindParams(pBindParams, 0);
   exec();
+  destoryBindParams(pBindParams, 1);

   {
     prepare("SELECT * FROM t1 WHERE c1 = ? AND c2 = ?");

@@ -137,7 +149,7 @@ TEST_F(PlanStmtTest, basic) {
     buildStringParam(pBindParams, 1, "abc", TSDB_DATA_TYPE_VARCHAR, strlen("abc"));
     bindParams(pBindParams, -1);
     exec();
-    taosMemoryFreeClear(pBindParams);
+    destoryBindParams(pBindParams, 2);
   }

   {

@@ -147,7 +159,7 @@ TEST_F(PlanStmtTest, basic) {
     buildIntegerParam(pBindParams, 1, 20, TSDB_DATA_TYPE_INT);
     bindParams(pBindParams, -1);
     exec();
-    taosMemoryFreeClear(pBindParams);
+    destoryBindParams(pBindParams, 2);
   }
 }

@@ -155,12 +167,16 @@ TEST_F(PlanStmtTest, multiExec) {
   useDb("root", "test");

   prepare("SELECT * FROM t1 WHERE c1 = ?");
-  bindParams(buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT), 0);
+  TAOS_MULTI_BIND* pBindParams = buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT);
+  bindParams(pBindParams, 0);
   exec();
-  bindParams(buildIntegerParam(createBindParams(1), 0, 20, TSDB_DATA_TYPE_INT), 0);
+  destoryBindParams(pBindParams, 1);
+  pBindParams = buildIntegerParam(createBindParams(1), 0, 20, TSDB_DATA_TYPE_INT);
+  bindParams(pBindParams, 0);
   exec();
-  bindParams(buildIntegerParam(createBindParams(1), 0, 30, TSDB_DATA_TYPE_INT), 0);
+  destoryBindParams(pBindParams, 1);
+  pBindParams = buildIntegerParam(createBindParams(1), 0, 30, TSDB_DATA_TYPE_INT);
+  bindParams(pBindParams, 0);
   exec();
+  destoryBindParams(pBindParams, 1);
 }

 TEST_F(PlanStmtTest, allDataType) { useDb("root", "test"); }
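destoryBindParams pairs with createBindParams/buildIntegerParam above: every bind slot owns its buffer, length, and is_null allocations, so those must be freed before the array itself; the previous taosMemoryFreeClear on the array alone leaked them. A minimal sketch with a stand-in struct:

```cpp
#include <cstdlib>

// Sketch of the destoryBindParams idea above: each bind slot owns three
// heap buffers, so teardown frees per-slot members before the array.
// The struct mirrors the shape of a multi-bind slot, not the real one.
struct Bind {
  void *buffer;
  int  *length;
  char *is_null;
};

static void destroyBinds(Bind *binds, int n) {
  for (int i = 0; i < n; ++i) {
    free(binds[i].buffer);
    free(binds[i].length);
    free(binds[i].is_null);
  }
  free(binds);  // finally release the array itself
}

int main() {
  Bind *binds = (Bind *)calloc(2, sizeof(Bind));
  binds[0].buffer = malloc(4);
  binds[1].length = (int *)malloc(sizeof(int));
  destroyBinds(binds, 2);
  return 0;
}
```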
@@ -126,9 +126,9 @@ class PlannerTestBaseImpl {
     reset();
     tsQueryPolicy = queryPolicy;
     try {
-      SQuery* pQuery = nullptr;
-      doParseSql(sql, &pQuery);
-      unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
+      unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+      doParseSql(sql, query.get());
+      SQuery* pQuery = *(query.get());

       SPlanContext cxt = {0};
       setPlanContext(pQuery, &cxt);

@@ -199,6 +199,8 @@ class PlannerTestBaseImpl {

       SLogicSubplan* pLogicSubplan = nullptr;
       doCreateLogicPlan(&cxt, &pLogicSubplan);
+      unique_ptr<SLogicSubplan, void (*)(SLogicSubplan*)> logicSubplan(pLogicSubplan,
+                                                                       (void (*)(SLogicSubplan*))nodesDestroyNode);

       doOptimizeLogicPlan(&cxt, pLogicSubplan);

@@ -206,9 +208,12 @@ class PlannerTestBaseImpl {

       SQueryLogicPlan* pLogicPlan = nullptr;
       doScaleOutLogicPlan(&cxt, pLogicSubplan, &pLogicPlan);
+      unique_ptr<SQueryLogicPlan, void (*)(SQueryLogicPlan*)> logicPlan(pLogicPlan,
+                                                                        (void (*)(SQueryLogicPlan*))nodesDestroyNode);

       SQueryPlan* pPlan = nullptr;
       doCreatePhysiPlan(&cxt, pLogicPlan, &pPlan);
+      unique_ptr<SQueryPlan, void (*)(SQueryPlan*)> plan(pPlan, (void (*)(SQueryPlan*))nodesDestroyNode);

       dump(g_dumpModule);
     } catch (...) {

@@ -249,6 +254,14 @@ class PlannerTestBaseImpl {
   vector<string> physiSubplans_;
 };

+  static void _destroyQuery(SQuery** pQuery) {
+    if (nullptr == pQuery) {
+      return;
+    }
+    qDestroyQuery(*pQuery);
+    taosMemoryFree(pQuery);
+  }
+
   void reset() {
     stmtEnv_.sql_.clear();
     stmtEnv_.msgBuf_.fill(0);

@@ -400,20 +413,30 @@ class PlannerTestBaseImpl {
     pCxt->queryId = 1;
     pCxt->pUser = caseEnv_.user_.c_str();
     if (QUERY_NODE_CREATE_TOPIC_STMT == nodeType(pQuery->pRoot)) {
-      pCxt->pAstRoot = ((SCreateTopicStmt*)pQuery->pRoot)->pQuery;
+      SCreateTopicStmt* pStmt = (SCreateTopicStmt*)pQuery->pRoot;
+      pCxt->pAstRoot = pStmt->pQuery;
+      pStmt->pQuery = nullptr;
+      nodesDestroyNode(pQuery->pRoot);
+      pQuery->pRoot = pCxt->pAstRoot;
       pCxt->topicQuery = true;
     } else if (QUERY_NODE_CREATE_INDEX_STMT == nodeType(pQuery->pRoot)) {
       SMCreateSmaReq req = {0};
       tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req);
       g_mockCatalogService->createSmaIndex(&req);
       nodesStringToNode(req.ast, &pCxt->pAstRoot);
+      tFreeSMCreateSmaReq(&req);
+      nodesDestroyNode(pQuery->pRoot);
+      pQuery->pRoot = pCxt->pAstRoot;
       pCxt->streamQuery = true;
     } else if (QUERY_NODE_CREATE_STREAM_STMT == nodeType(pQuery->pRoot)) {
       SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot;
       pCxt->pAstRoot = pStmt->pQuery;
+      pStmt->pQuery = nullptr;
       pCxt->streamQuery = true;
       pCxt->triggerType = pStmt->pOptions->triggerType;
       pCxt->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0);
+      nodesDestroyNode(pQuery->pRoot);
+      pQuery->pRoot = pCxt->pAstRoot;
     } else {
       pCxt->pAstRoot = pQuery->pRoot;
     }
@@ -148,11 +148,12 @@ void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
   taosMemoryFreeClear(pMsgBody);
 }

-int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo,
+int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo,
                                 bool persistHandle, void* rpcCtx) {
   char* pMsg = rpcMallocCont(pInfo->msgInfo.len);
   if (NULL == pMsg) {
     qError("0x%" PRIx64 " msg:%s malloc failed", pInfo->requestId, TMSG_INFO(pInfo->msgType));
+    destroySendMsgInfo(pInfo);
     terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return terrno;
   }

@@ -167,13 +168,15 @@ int32_t asyncSendMsgToServerExt(void* pTransporter, SEpSet* epSet, int64_t* pTra
       .info.persistHandle = persistHandle,
       .code = 0
   };
   assert(pInfo->fp != NULL);
   TRACE_SET_ROOTID(&rpcMsg.info.traceId, pInfo->requestId);
-  rpcSendRequestWithCtx(pTransporter, epSet, &rpcMsg, pTransporterId, rpcCtx);
-  return TSDB_CODE_SUCCESS;
+  int code = rpcSendRequestWithCtx(pTransporter, epSet, &rpcMsg, pTransporterId, rpcCtx);
+  if (code) {
+    destroySendMsgInfo(pInfo);
+  }
+  return code;
 }

-int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, const SMsgSendInfo* pInfo) {
+int32_t asyncSendMsgToServer(void* pTransporter, SEpSet* epSet, int64_t* pTransporterId, SMsgSendInfo* pInfo) {
   return asyncSendMsgToServerExt(pTransporter, epSet, pTransporterId, pInfo, false, NULL);
 }
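The hunks above establish a "send consumes the message" rule: asyncSendMsgToServerExt now frees pInfo on every failure path (which is also why the const qualifier is dropped), and callers must stop touching their pointer after the call. A hedged sketch of the convention, with stand-in types:

```cpp
#include <cstdio>
#include <cstdlib>

// Sketch of the ownership rule introduced above: once a message is handed
// to the send routine, the sender owns it on *every* path, including
// failures, so the caller must not touch (or double-free) it afterwards.
// sendMsg/MsgInfo are stand-ins for the real transport calls.
struct MsgInfo { char *payload; };

static void destroyMsgInfo(MsgInfo *m) {
  if (m) { free(m->payload); free(m); }
}

static int sendMsg(MsgInfo *m, bool simulateFailure) {
  if (simulateFailure) {
    destroyMsgInfo(m);  // consumed even on error
    return -1;
  }
  destroyMsgInfo(m);    // normally freed by the completion path
  return 0;
}

int main() {
  MsgInfo *m = (MsgInfo *)calloc(1, sizeof(MsgInfo));
  int code = sendMsg(m, true);
  m = nullptr;          // caller drops its reference unconditionally
  printf("send code=%d\n", code);
  return 0;
}
```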
@@ -509,7 +509,7 @@ int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, void *msg, uint3
   SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
   if (NULL == msgSendInfo) {
     SCH_TASK_ELOG("calloc %d failed", (int32_t)sizeof(SMsgSendInfo));
-    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
+    SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
   }

   msgSendInfo->paramFreeFp = taosMemoryFree;

@@ -535,7 +535,11 @@ int32_t schGenerateCallBackInfo(SSchJob *pJob, SSchTask *pTask, void *msg, uint3

 _return:

-  destroySendMsgInfo(msgSendInfo);
+  if (msgSendInfo) {
+    destroySendMsgInfo(msgSendInfo);
+  }

   taosMemoryFree(msg);

   SCH_RET(code);
 }

@@ -843,6 +847,7 @@ int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, SSchTrans *trans, SQuery

   int64_t transporterId = 0;
   code = asyncSendMsgToServerExt(trans->pTrans, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx);
+  pMsgSendInfo = NULL;
   if (code) {
     SCH_ERR_JRET(code);
   }

@@ -919,7 +924,9 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId, SArray *taskAction) {
   addr.epSet.numOfEps = 1;
   memcpy(&addr.epSet.eps[0], &nodeEpId->ep, sizeof(nodeEpId->ep));

-  SCH_ERR_JRET(schAsyncSendMsg(NULL, NULL, &trans, &addr, msgType, msg, msgSize, true, &rpcCtx));
+  code = schAsyncSendMsg(NULL, NULL, &trans, &addr, msgType, msg, msgSize, true, &rpcCtx);
+  msg = NULL;
+  SCH_ERR_JRET(code);

   return TSDB_CODE_SUCCESS;

@@ -1087,8 +1094,9 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
   }

   SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
-  SCH_ERR_JRET(
-      schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)));
+  code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
+  msg = NULL;
+  SCH_ERR_JRET(code);

   if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY) {
     SCH_ERR_RET(schAppendTaskExecNode(pJob, pTask, addr, pTask->execId));
@@ -102,14 +102,14 @@ int32_t schRecordTaskSucceedNode(SSchJob *pJob, SSchTask *pTask) {
 }

 int32_t schAppendTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t execId) {
-  SSchNodeInfo nodeInfo = {.addr = *addr, .handle = NULL};
+  SSchNodeInfo nodeInfo = {.addr = *addr, .handle = SCH_GET_TASK_HANDLE(pTask)};

   if (taosHashPut(pTask->execNodes, &execId, sizeof(execId), &nodeInfo, sizeof(nodeInfo))) {
     SCH_TASK_ELOG("taosHashPut nodeInfo to execNodes failed, errno:%d", errno);
     SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
   }

-  SCH_TASK_DLOG("task execNode added, execId:%d", execId);
+  SCH_TASK_DLOG("task execNode added, execId:%d, handle:%p", execId, nodeInfo.handle);

   return TSDB_CODE_SUCCESS;
 }

@@ -752,12 +752,18 @@ void schDropTaskOnExecNode(SSchJob *pJob, SSchTask *pTask) {
     return;
   }

+  int32_t       i = 0;
   SSchNodeInfo *nodeInfo = taosHashIterate(pTask->execNodes, NULL);
   while (nodeInfo) {
-    SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle);
-
-    schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_SCH_DROP_TASK);
+    if (nodeInfo->handle) {
+      SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle);
+      schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_SCH_DROP_TASK);
+      SCH_TASK_DLOG("start to drop task's %dth execNode", i);
+    } else {
+      SCH_TASK_DLOG("no need to drop task %dth execNode", i);
+    }

+    ++i;
     nodeInfo = taosHashIterate(pTask->execNodes, nodeInfo);
   }
@@ -15,9 +15,12 @@

 #include "tstreamUpdate.h"
 #include "ttime.h"
+#include "query.h"

 #define DEFAULT_FALSE_POSITIVE 0.01
-#define DEFAULT_BUCKET_SIZE    131072
+#define DEFAULT_BUCKET_SIZE    1310720
+#define DEFAULT_MAP_CAPACITY   1310720
+#define DEFAULT_MAP_SIZE       (DEFAULT_MAP_CAPACITY * 10)
 #define ROWS_PER_MILLISECOND   1
 #define MAX_NUM_SCALABLE_BF    100000
 #define MIN_NUM_SCALABLE_BF    10

@@ -120,6 +123,8 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
   }
   pInfo->numBuckets = DEFAULT_BUCKET_SIZE;
   pInfo->pCloseWinSBF = NULL;
+  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+  pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK);
   return pInfo;
 }

@@ -149,8 +154,9 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
   return res;
 }

-bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
+bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
   int32_t  res = TSDB_CODE_FAILED;
+  TSKEY   *pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t));
   uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
   TSKEY    maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
   if (ts < maxTs - pInfo->watermark) {

@@ -167,7 +173,13 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
     res = tScalableBfPut(pSBf, &ts, sizeof(TSKEY));
   }

-  if (maxTs < ts) {
+  int32_t size = taosHashGetSize(pInfo->pMap);
+  if ((!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) {
+    taosHashPut(pInfo->pMap, &tableId, sizeof(uint64_t), &ts, sizeof(TSKEY));
+    return false;
+  }
+
+  if (!pMapMaxTs && maxTs < ts) {
     taosArraySet(pInfo->pTsBuckets, index, &ts);
     return false;
   }

@@ -177,7 +189,7 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
   } else if (res == TSDB_CODE_SUCCESS) {
     return false;
   }

+  qDebug("===stream===bucket:%d, tableId:%" PRIu64 ", maxTs:%" PRIu64 ", maxMapTs:%" PRIu64 ", ts:%" PRIu64, index,
+         tableId, maxTs, *pMapMaxTs, ts);
   // check from tsdb api
   return true;
 }
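The stream-update change adds a per-table max-timestamp hash map in front of the coarse bucket array (and the scalable bloom filter), so out-of-order detection is exact for tracked tables and only falls back to the shared structures once the map is full. A simplified sketch of the decision order; constants and containers are illustrative:

```cpp
#include <cstdint>
#include <cstdio>
#include <map>

// Sketch of the per-table update check above: a hash map of table -> max
// timestamp is consulted first; only when the table is untracked does the
// coarse bucket array decide. The scalable bloom filter stage is omitted.
static const size_t kMapLimit = 4;           // stands in for DEFAULT_MAP_SIZE

static std::map<uint64_t, int64_t> g_maxTs;  // per-table max timestamp
static int64_t g_bucketMaxTs = 0;            // one shared bucket for brevity

// Returns true when (tableId, ts) must be treated as an update (replay).
static bool isUpdated(uint64_t tableId, int64_t ts) {
  auto it = g_maxTs.find(tableId);
  bool tracked = (it != g_maxTs.end());
  if ((!tracked && g_maxTs.size() < kMapLimit) || (tracked && it->second < ts)) {
    g_maxTs[tableId] = ts;                   // fresh max: not an update
    return false;
  }
  if (!tracked && g_bucketMaxTs < ts) {      // untracked table, new bucket max
    g_bucketMaxTs = ts;
    return false;
  }
  return true;                               // out-of-order or repeated ts
}

int main() {
  printf("%d %d\n", isUpdated(7, 100), isUpdated(7, 90));  // prints: 0 1
  return 0;
}
```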
@@ -473,13 +473,13 @@ static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEn

   SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
   if (pMsg->prevLogIndex > myLastIndex) {
-    sDebug("vgId:%d sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+    sDebug("vgId:%d, sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
     return false;
   }

   SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1);
   if (myPreLogTerm == SYNC_TERM_INVALID) {
-    sDebug("vgId:%d sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+    sDebug("vgId:%d, sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
     return false;
   }

@@ -487,7 +487,7 @@ static bool syncNodeOnAppendEntriesBatchLogOK(SSyncNode* pSyncNode, SyncAppendEn
     return true;
   }

-  sDebug("vgId:%d sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+  sDebug("vgId:%d, sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
   return false;
 }

@@ -500,13 +500,13 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries

   SyncIndex myLastIndex = syncNodeGetLastIndex(pSyncNode);
   if (pMsg->prevLogIndex > myLastIndex) {
-    sDebug("vgId:%d sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+    sDebug("vgId:%d, sync log not ok, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
     return false;
   }

   SyncTerm myPreLogTerm = syncNodeGetPreTerm(pSyncNode, pMsg->prevLogIndex + 1);
   if (myPreLogTerm == SYNC_TERM_INVALID) {
-    sDebug("vgId:%d sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+    sDebug("vgId:%d, sync log not ok2, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
     return false;
   }

@@ -514,7 +514,7 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries
     return true;
   }

-  sDebug("vgId:%d sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
+  sDebug("vgId:%d, sync log not ok3, preindex:%" PRId64, pSyncNode->vgId, pMsg->prevLogIndex);
   return false;
 }
@@ -559,10 +559,11 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) {
     snprintf(pEpSet->eps[i].fqdn, sizeof(pEpSet->eps[i].fqdn), "%s", (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodeFqdn);
     pEpSet->eps[i].port = (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodePort;
     (pEpSet->numOfEps)++;
-    sInfo("vgId:%d sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
+    sInfo("vgId:%d, sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn,
+          pEpSet->eps[i].port);
   }
   pEpSet->inUse = (pSyncNode->pRaftCfg->cfg.myIndex + 1) % pEpSet->numOfEps;
-  sInfo("vgId:%d sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
+  sInfo("vgId:%d, sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);

   taosReleaseRef(tsNodeRefId, pSyncNode->rid);
 }

@@ -2996,7 +2997,7 @@ void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMs
            "datalen:%d}, %s",
            host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->privateTerm,
            pMsg->dataLen, s);
-  syncNodeErrorLog(pSyncNode, logBuf);
+  syncNodeEventLog(pSyncNode, logBuf);
 }

 void syncLogSendAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntriesBatch* pMsg, const char* s) {

@@ -3022,7 +3023,7 @@ void syncLogRecvAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntries
            ", pterm:%" PRIu64 ", commit:%" PRId64 ", datalen:%d, count:%d}, %s",
            host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm, pMsg->commitIndex,
            pMsg->dataLen, pMsg->dataCount, s);
-  syncNodeErrorLog(pSyncNode, logBuf);
+  syncNodeEventLog(pSyncNode, logBuf);
 }

 void syncLogSendAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s) {

@@ -3046,5 +3047,5 @@ void syncLogRecvAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntries
            "recv sync-append-entries-reply from %s:%d {term:%" PRIu64 ", pterm:%" PRIu64 ", success:%d, match:%" PRId64
            "}, %s",
            host, port, pMsg->term, pMsg->privateTerm, pMsg->success, pMsg->matchIndex, s);
-  syncNodeErrorLog(pSyncNode, logBuf);
+  syncNodeEventLog(pSyncNode, logBuf);
 }
@@ -108,10 +108,10 @@ int32_t raftStoreSerialize(SRaftStore *pRaftStore, char *buf, size_t len) {
   cJSON *pRoot = cJSON_CreateObject();

   char u64Buf[128] = {0};
-  snprintf(u64Buf, sizeof(u64Buf), "" PRIu64 "", pRaftStore->currentTerm);
+  snprintf(u64Buf, sizeof(u64Buf), "%" PRIu64 "", pRaftStore->currentTerm);
   cJSON_AddStringToObject(pRoot, "current_term", u64Buf);

-  snprintf(u64Buf, sizeof(u64Buf), "" PRIu64 "", pRaftStore->voteFor.addr);
+  snprintf(u64Buf, sizeof(u64Buf), "%" PRIu64 "", pRaftStore->voteFor.addr);
   cJSON_AddStringToObject(pRoot, "vote_for_addr", u64Buf);

   cJSON_AddNumberToObject(pRoot, "vote_for_vgid", pRaftStore->voteFor.vgId);

@@ -142,11 +142,11 @@ int32_t raftStoreDeserialize(SRaftStore *pRaftStore, char *buf, size_t len) {

   cJSON *pCurrentTerm = cJSON_GetObjectItem(pRoot, "current_term");
   ASSERT(cJSON_IsString(pCurrentTerm));
-  sscanf(pCurrentTerm->valuestring, "" PRIu64 "", &(pRaftStore->currentTerm));
+  sscanf(pCurrentTerm->valuestring, "%" PRIu64 "", &(pRaftStore->currentTerm));

   cJSON *pVoteForAddr = cJSON_GetObjectItem(pRoot, "vote_for_addr");
   ASSERT(cJSON_IsString(pVoteForAddr));
-  sscanf(pVoteForAddr->valuestring, "" PRIu64 "", &(pRaftStore->voteFor.addr));
+  sscanf(pVoteForAddr->valuestring, "%" PRIu64 "", &(pRaftStore->voteFor.addr));

   cJSON *pVoteForVgid = cJSON_GetObjectItem(pRoot, "vote_for_vgid");
   pRaftStore->voteFor.vgId = pVoteForVgid->valueint;

@@ -188,11 +188,11 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
   cJSON *pRoot = cJSON_CreateObject();

   if (pRaftStore != NULL) {
-    snprintf(u64buf, sizeof(u64buf), "" PRIu64 "", pRaftStore->currentTerm);
+    snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pRaftStore->currentTerm);
     cJSON_AddStringToObject(pRoot, "currentTerm", u64buf);

     cJSON *pVoteFor = cJSON_CreateObject();
-    snprintf(u64buf, sizeof(u64buf), "" PRIu64 "", pRaftStore->voteFor.addr);
+    snprintf(u64buf, sizeof(u64buf), "%" PRIu64 "", pRaftStore->voteFor.addr);
     cJSON_AddStringToObject(pVoteFor, "addr", u64buf);
     {
       uint64_t u64 = pRaftStore->voteFor.addr;
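These raftStore fixes are all the same inttypes pitfall: PRIu64 expands only to the length and conversion letters (for example "llu"), so the '%' must come from the adjacent string literal. `"" PRIu64 ""` concatenates to a format that prints the letters literally and ignores the argument; `"%" PRIu64` is the correct conversion. A small demonstration:

```cpp
#include <cinttypes>
#include <cstdio>

// PRIu64 is only the length/conversion suffix; the '%' must be supplied
// by the adjacent literal via string concatenation.
int main() {
  uint64_t term = 42;
  char buf[128];
  snprintf(buf, sizeof(buf), "%" PRIu64, term);  // -> "42"
  printf("good: %s\n", buf);
  snprintf(buf, sizeof(buf), "" PRIu64, term);   // -> "llu" (term ignored)
  printf("bad:  %s\n", buf);
  return 0;
}
```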
@@ -0,0 +1,84 @@
#!/bin/bash

if [ $# != 1 ] ; then
  echo "Usage: $0 log-path"
  echo ""
  exit 1
fi

logpath=$1
echo "logpath: ${logpath}"

echo ""
echo "clean old log ..."
rm -f ${logpath}/log.*

echo ""
echo "generate log.dnode ..."
for dnode in `ls ${logpath} | grep dnode`;do
  echo "generate log.${dnode}"
  cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN > ${logpath}/log.${dnode}
done

echo ""
echo "generate vgId ..."
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq > ${logpath}/log.vgIds.tmp
echo "all vgIds:" > ${logpath}/log.vgIds
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
for dnode in `ls ${logpath} | grep dnode | grep -v log`;do
  echo "" >> ${logpath}/log.vgIds
  echo "" >> ${logpath}/log.vgIds
  echo "${dnode}:" >> ${logpath}/log.vgIds
  cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
done

echo ""
echo "generate log.dnode.vgId ..."
for logdnode in `ls ${logpath}/log.dnode*`;do
  for vgId in `cat ${logpath}/log.vgIds.tmp`;do
    rowNum=`cat ${logdnode} | grep "${vgId}" | awk 'BEGIN{rowNum=0}{rowNum++}END{print rowNum}'`
    #echo "-----${rowNum}"
    if [ $rowNum -gt 0 ] ; then
      echo "generate ${logdnode}.${vgId}"
      cat ${logdnode} | grep "${vgId}" > ${logdnode}.${vgId}
    fi
  done
done

echo ""
echo "generate log.dnode.main ..."
for file in `ls ${logpath}/log.dnode* | grep -v vgId`;do
  echo "generate ${file}.main"
  cat ${file} | awk '{ if(index($0, "sync open") > 0 || index($0, "sync close") > 0 || index($0, "become leader") > 0) {print $0} }' > ${file}.main
done

echo ""
echo "generate log.leader.term ..."
cat ${logpath}/*.main | grep "become leader" | grep -v "config change" | awk '{print $5,$0}' | awk -F, '{print $4"_"$0}' | sort -k1 > ${logpath}/log.leader.term

echo ""
echo "generate log.index, log.snapshot, log.records, log.actions ..."
for file in `ls ${logpath}/log.dnode*vgId*`;do
  destfile1=${file}.index
  echo "generate ${destfile1}"
  cat ${file} | awk '{ if(index($0, "write index:") > 0 || index($0, "wal truncate, from-index") > 0) {print $0} }' > ${destfile1}

  destfile2=${file}.snapshot
  echo "generate ${destfile2}"
  cat ${file} | awk '{ if(index($0, "snapshot sender") > 0 || index($0, "snapshot receiver") > 0) {print $0} }' | grep -v "save old" | grep -v "create new" | grep -v "udpate replicaIndex" | grep -v "delete old" | grep -v "reset for" > ${destfile2}

  destfile3=${file}.records
  echo "generate ${destfile3}"
  cat ${file} | awk '{ if(index($0, "write index:") > 0 || index($0, "wal truncate, from-index") > 0 || index($0, "snapshot sender") > 0 || index($0, "snapshot receiver") > 0) {print $0} }' | grep -v "save old" | grep -v "create new" | grep -v "udpate replicaIndex" | grep -v "delete old" | grep -v "reset for" > ${destfile3}

  destfile4=${file}.commit
  echo "generate ${destfile4}"
  cat ${file} | awk '{ if(index($0, "commit by") > 0) {print $0} }' > ${destfile4}

  destfile5=${file}.actions
  echo "generate ${destfile5}"
  cat ${file} | awk '{ if(index($0, "commit by") > 0 || index($0, "sync open") > 0 || index($0, "sync close") > 0 || index($0, "become leader") > 0 || index($0, "write index:") > 0 || index($0, "wal truncate, from-index") > 0 || index($0, "snapshot sender") > 0 || index($0, "snapshot receiver") > 0) {print $0} }' | grep -v "save old" | grep -v "create new" | grep -v "udpate replicaIndex" | grep -v "delete old" | grep -v "reset for" > ${destfile5}

done

exit 0
@@ -226,11 +226,13 @@ typedef struct {
   int         index;
   int         nAsync;
   uv_async_t* asyncs;
+  int8_t      stop;
 } SAsyncPool;

 SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb);
 void        transDestroyAsyncPool(SAsyncPool* pool);
 int         transAsyncSend(SAsyncPool* pool, queue* mq);
+bool        transAsyncPoolIsEmpty(SAsyncPool* pool);

 #define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \
   do {                                                        \

@@ -289,14 +291,14 @@ void transUnrefSrvHandle(void* handle);
 void transRefCliHandle(void* handle);
 void transUnrefCliHandle(void* handle);

-void transReleaseCliHandle(void* handle);
-void transReleaseSrvHandle(void* handle);
+int transReleaseCliHandle(void* handle);
+int transReleaseSrvHandle(void* handle);

-void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransCtx* pCtx);
-void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp);
-void transSendResponse(const STransMsg* msg);
-void transRegisterMsg(const STransMsg* msg);
-void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);
+int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransCtx* pCtx);
+int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pMsg, STransMsg* pRsp);
+int transSendResponse(const STransMsg* msg);
+int transRegisterMsg(const STransMsg* msg);
+int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);

 int64_t transAllocHandle();
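The header gains two shutdown aids: a stop flag so the pool can refuse new work once quit begins, and an emptiness probe so the quit path can wait for queued messages to drain (both are used by the client changes further down). A hedged sketch with standard containers standing in for the libuv handles and intrusive queues:

```cpp
#include <atomic>
#include <deque>
#include <vector>

// Sketch of the SAsyncPool additions above: a stop flag rejects work after
// shutdown begins, and an emptiness probe reports whether any per-async
// queue still holds messages. Containers are illustrative stand-ins.
struct AsyncPool {
  std::vector<std::deque<int>> queues;  // one pending queue per uv_async_t
  std::atomic<int8_t> stop{0};
};

static int asyncSend(AsyncPool *p, int msg) {
  if (p->stop.load() == 1) return -1;  // refuse work after stop
  p->queues[0].push_back(msg);         // real code round-robins an index
  return 0;
}

static bool asyncPoolIsEmpty(const AsyncPool *p) {
  for (const auto &q : p->queues) {
    if (!q.empty()) return false;
  }
  return true;
}

int main() {
  AsyncPool pool;
  pool.queues.resize(2);
  asyncSend(&pool, 1);
  pool.stop.store(1);
  return asyncSend(&pool, 2) == -1 && !asyncPoolIsEmpty(&pool) ? 0 : 1;
}
```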
@@ -25,7 +25,7 @@ void (*taosCloseHandle[])(void* arg) = {transCloseServer, transCloseClient};
 void (*taosRefHandle[])(void* handle) = {transRefSrvHandle, transRefCliHandle};
 void (*taosUnRefHandle[])(void* handle) = {transUnrefSrvHandle, transUnrefCliHandle};

-void (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle};
+int (*transReleaseHandle[])(void* handle) = {transReleaseSrvHandle, transReleaseCliHandle};

 static int32_t transValidLocalFqdn(const char* localFqdn, uint32_t* ip) {
   *ip = taosGetIpv4FromFqdn(localFqdn);

@@ -112,7 +112,7 @@ void* rpcMallocCont(int32_t contLen) {
 void rpcFreeCont(void* cont) {
   if (cont == NULL) return;
   taosMemoryFree((char*)cont - TRANS_MSG_OVERHEAD);
-  tTrace("free mem:%p", (char*)cont - TRANS_MSG_OVERHEAD);
+  tTrace("rpc free cont:%p", (char*)cont - TRANS_MSG_OVERHEAD);
 }

 void* rpcReallocCont(void* ptr, int32_t contLen) {

@@ -129,25 +129,20 @@ void* rpcReallocCont(void* ptr, int32_t contLen) {
   return st + TRANS_MSG_OVERHEAD;
 }

-void rpcSendRedirectRsp(void* thandle, const SEpSet* pEpSet) {
-  // deprecated api
-  assert(0);
-}
-
 int32_t rpcReportProgress(void* pConn, char* pCont, int32_t contLen) { return -1; }
 void    rpcCancelRequest(int64_t rid) { return; }

-void rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid) {
-  transSendRequest(shandle, pEpSet, pMsg, NULL);
+int rpcSendRequest(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid) {
+  return transSendRequest(shandle, pEpSet, pMsg, NULL);
 }
-void rpcSendRequestWithCtx(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid, SRpcCtx* pCtx) {
-  transSendRequest(shandle, pEpSet, pMsg, pCtx);
+int rpcSendRequestWithCtx(void* shandle, const SEpSet* pEpSet, SRpcMsg* pMsg, int64_t* pRid, SRpcCtx* pCtx) {
+  return transSendRequest(shandle, pEpSet, pMsg, pCtx);
 }
-void rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) {
-  transSendRecv(shandle, pEpSet, pMsg, pRsp);
+int rpcSendRecv(void* shandle, SEpSet* pEpSet, SRpcMsg* pMsg, SRpcMsg* pRsp) {
+  return transSendRecv(shandle, pEpSet, pMsg, pRsp);
 }

-void rpcSendResponse(const SRpcMsg* pMsg) { transSendResponse(pMsg); }
+int rpcSendResponse(const SRpcMsg* pMsg) { return transSendResponse(pMsg); }

 void rpcRefHandle(void* handle, int8_t type) {
   assert(type == TAOS_CONN_SERVER || type == TAOS_CONN_CLIENT);

@@ -159,15 +154,15 @@ void rpcUnrefHandle(void* handle, int8_t type) {
   (*taosUnRefHandle[type])(handle);
 }

-void rpcRegisterBrokenLinkArg(SRpcMsg* msg) { transRegisterMsg(msg); }
-void rpcReleaseHandle(void* handle, int8_t type) {
+int rpcRegisterBrokenLinkArg(SRpcMsg* msg) { return transRegisterMsg(msg); }
+int rpcReleaseHandle(void* handle, int8_t type) {
   assert(type == TAOS_CONN_SERVER || type == TAOS_CONN_CLIENT);
-  (*transReleaseHandle[type])(handle);
+  return (*transReleaseHandle[type])(handle);
 }

-void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
+int rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
   // later
-  transSetDefaultAddr(thandle, ip, fqdn);
+  return transSetDefaultAddr(thandle, ip, fqdn);
 }

 void* rpcAllocHandle() { return (void*)transAllocHandle(); }
@@ -70,6 +70,8 @@ typedef struct SCliThrd {

   SCvtAddr cvtAddr;

+  SCliMsg* stopMsg;
+
   bool quit;
 } SCliThrd;

@@ -761,14 +763,17 @@ void cliConnCb(uv_connect_t* req, int status) {
 }

 static void cliHandleQuit(SCliMsg* pMsg, SCliThrd* pThrd) {
+  if (!transAsyncPoolIsEmpty(pThrd->asyncPool)) {
+    pThrd->stopMsg = pMsg;
+    return;
+  }
+  pThrd->stopMsg = NULL;
   pThrd->quit = true;
   tDebug("cli work thread %p start to quit", pThrd);
   destroyCmsg(pMsg);
   destroyConnPool(pThrd->pool);
   uv_timer_stop(&pThrd->timer);
   uv_walk(pThrd->loop, cliWalkCb, NULL);
-
-  // uv_stop(pThrd->loop);
 }
 static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
   int64_t refId = (int64_t)(pMsg->msg.info.handle);

@@ -925,6 +930,7 @@ static void cliAsyncCb(uv_async_t* handle) {
   if (count >= 2) {
     tTrace("cli process batch size:%d", count);
   }
+  if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd);
 }

 static void* cliWorkThread(void* arg) {

@@ -1020,6 +1026,7 @@ void cliSendQuit(SCliThrd* thrd) {
   SCliMsg* msg = taosMemoryCalloc(1, sizeof(SCliMsg));
   msg->type = Quit;
   transAsyncSend(thrd->asyncPool, &msg->q);
+  atomic_store_8(&thrd->asyncPool->stop, 1);
 }
 void cliWalkCb(uv_handle_t* handle, void* arg) {
   if (!uv_is_closing(handle)) {

@@ -1225,33 +1232,38 @@ SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle, bool* validHandle) {
   }
   return pThrd;
 }
-void transReleaseCliHandle(void* handle) {
+int transReleaseCliHandle(void* handle) {
   int  idx = -1;
   bool valid = false;

   SCliThrd* pThrd = transGetWorkThrdFromHandle((int64_t)handle, &valid);
   if (pThrd == NULL) {
-    return;
+    return -1;
   }
   STransMsg tmsg = {.info.handle = handle};
   SCliMsg*  cmsg = taosMemoryCalloc(1, sizeof(SCliMsg));
   cmsg->msg = tmsg;
   cmsg->type = Release;

-  transAsyncSend(pThrd->asyncPool, &cmsg->q);
-  return;
+  if (0 != transAsyncSend(pThrd->asyncPool, &cmsg->q)) {
+    return -1;
+  }
+  return 0;
 }

-void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx) {
+int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx) {
   STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle);
-  if (pTransInst == NULL) return;
+  if (pTransInst == NULL) {
+    transFreeMsg(pReq->pCont);
+    return -1;
+  }

   bool      valid = false;
   SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
   if (pThrd == NULL && valid == false) {
     transFreeMsg(pReq->pCont);
     transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
-    return;
+    return -1;
   }

   TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64());

@@ -1276,21 +1288,28 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra
   STraceId* trace = &pReq->info.traceId;
   tGDebug("%s send request at thread:%08" PRId64 ", dst:%s:%d, app:%p", transLabel(pTransInst), pThrd->pid,
           EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->info.ahandle);
-  ASSERT(transAsyncSend(pThrd->asyncPool, &(cliMsg->q)) == 0);
+  if (0 != transAsyncSend(pThrd->asyncPool, &(cliMsg->q))) {
+    destroyCmsg(cliMsg);
+    transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+    return -1;
+  }
   transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
-  return;
+  return 0;
 }

-void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp) {
+int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMsg* pRsp) {
   STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle);
-  if (pTransInst == NULL) return;
+  if (pTransInst == NULL) {
+    transFreeMsg(pReq->pCont);
+    return -1;
+  }

   bool      valid = false;
   SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
   if (pThrd == NULL && valid == false) {
     transFreeMsg(pReq->pCont);
     transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
-    return;
+    return -1;
   }

   tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t));

@@ -1317,20 +1336,28 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM
   tGDebug("%s send request at thread:%08" PRId64 ", dst:%s:%d, app:%p", transLabel(pTransInst), pThrd->pid,
           EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->info.ahandle);

-  transAsyncSend(pThrd->asyncPool, &(cliMsg->q));
+  if (0 != transAsyncSend(pThrd->asyncPool, &cliMsg->q)) {
+    tsem_destroy(sem);
+    taosMemoryFree(sem);
+    destroyCmsg(cliMsg);
+    transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+    return -1;
+  }
   tsem_wait(sem);
   tsem_destroy(sem);
   taosMemoryFree(sem);

   transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
-  return;
+  return 0;
 }
 /*
  *
 **/
-void transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
+int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
   STrans* pTransInst = (STrans*)transAcquireExHandle(transGetInstMgt(), (int64_t)shandle);
-  if (pTransInst == NULL) return;
+  if (pTransInst == NULL) {
+    return -1;
+  }

   SCvtAddr cvtAddr = {0};
   if (ip != NULL && fqdn != NULL) {

@@ -1350,9 +1377,14 @@ int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn) {
     SCliThrd* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[i];
     tDebug("%s update epset at thread:%08" PRId64, pTransInst->label, thrd->pid);

-    transAsyncSend(thrd->asyncPool, &(cliMsg->q));
+    if (transAsyncSend(thrd->asyncPool, &(cliMsg->q)) != 0) {
+      destroyCmsg(cliMsg);
+      transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+      return -1;
+    }
   }
   transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
+  return 0;
 }

 int64_t transAllocHandle() {
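cliHandleQuit now defers shutdown while the async pool still holds messages: the quit request is parked in stopMsg and retried from cliAsyncCb after each processed batch, so teardown never drops in-flight work. A compact sketch of the deferred-quit cycle, with a plain loop standing in for libuv:

```cpp
#include <cstdio>
#include <deque>

// Sketch of the deferred-quit pattern in cliHandleQuit above: if work is
// still queued, the quit request is parked and retried from the async
// callback after each batch. The loop stands in for the libuv cycle.
struct Thread {
  std::deque<int> pending;  // queued client messages
  bool hasStopMsg = false;  // parked quit request
  bool quit = false;
};

static void handleQuit(Thread *t) {
  if (!t->pending.empty()) {  // not drained yet: park and come back later
    t->hasStopMsg = true;
    return;
  }
  t->hasStopMsg = false;
  t->quit = true;             // now it is safe to tear the loop down
}

int main() {
  Thread t;
  t.pending = {1, 2};
  handleQuit(&t);                      // parked: two messages in flight
  while (!t.quit) {                    // async callback cycle
    if (!t.pending.empty()) t.pending.pop_front();
    if (t.hasStopMsg) handleQuit(&t);  // retry the parked quit
  }
  printf("clean shutdown\n");
  return 0;
}
```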
@@ -124,6 +124,7 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
  SConnBuffer* p = connBuf;
  if (p->cap == 0) {
    p->buf = (char*)taosMemoryCalloc(CAPACITY, sizeof(char));
    tTrace("internal malloc mem:%p, size:%d", p->buf, CAPACITY);
    p->len = 0;
    p->cap = CAPACITY;
    p->total = -1;

@@ -136,7 +137,7 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) {
  } else {
    p->cap = p->total;
    p->buf = taosMemoryRealloc(p->buf, p->cap);
    tTrace("internal malloc mem:%p, size:%d", p->buf, p->cap);
    tTrace("internal realloc mem:%p, size:%d", p->buf, p->cap);

  uvBuf->base = p->buf + p->len;
  uvBuf->len = p->cap - p->len;
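
`transAllocBuffer` lazily allocates a fixed capacity on first use and, once the peer announces a larger total, reallocates up to that size before handing out the writable tail (the log message above is corrected to say "realloc" on that second path). A sketch of the same grow-on-demand scheme, with placeholder field names rather than the real `SConnBuffer` layout:

```c
#include <stdlib.h>
#include <string.h>

#define CAPACITY 4096  /* hypothetical initial size */

typedef struct {
  char *buf;
  int   len;    /* bytes already filled */
  int   cap;    /* current allocation */
  int   total;  /* announced message size, -1 if unknown */
} ConnBuf;

/* Hand out the writable tail of the buffer, growing it first if the
 * announced total exceeds the current capacity. */
static char *alloc_tail(ConnBuf *p, int *avail) {
  if (p->cap == 0) {
    p->buf   = calloc(CAPACITY, 1);  /* lazy first allocation */
    p->cap   = CAPACITY;
    p->len   = 0;
    p->total = -1;
  } else if (p->total > p->cap) {
    p->cap = p->total;               /* grow once, to the full expected size */
    p->buf = realloc(p->buf, p->cap);
  }
  *avail = p->cap - p->len;
  return p->buf + p->len;
}

int main(void) {
  ConnBuf b = {0};
  int avail;
  char *dst = alloc_tail(&b, &avail);  /* first call: lazy CAPACITY alloc */
  memcpy(dst, "hello", 5);
  b.len = 5;
  b.total = 2 * CAPACITY;              /* peer announced a bigger payload */
  dst = alloc_tail(&b, &avail);        /* second call: realloc up to total */
  free(b.buf);
  return 0;
}
```
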
@@ -176,7 +177,6 @@ int transSetConnOption(uv_tcp_t* stream) {

SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) {
  SAsyncPool* pool = taosMemoryCalloc(1, sizeof(SAsyncPool));
  pool->index = 0;
  pool->nAsync = sz;
  pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync);

@@ -206,6 +206,9 @@ void transDestroyAsyncPool(SAsyncPool* pool) {
  taosMemoryFree(pool);
}
int transAsyncSend(SAsyncPool* pool, queue* q) {
  if (atomic_load_8(&pool->stop) == 1) {
    return -1;
  }
  int idx = pool->index;
  idx = idx % pool->nAsync;
  // no need mutex here

@@ -225,6 +228,14 @@ int transAsyncSend(SAsyncPool* pool, queue* q) {
  }
  return uv_async_send(async);
}
bool transAsyncPoolIsEmpty(SAsyncPool* pool) {
  for (int i = 0; i < pool->nAsync; i++) {
    uv_async_t* async = &(pool->asyncs[i]);
    SAsyncItem* item = async->data;
    if (!QUEUE_IS_EMPTY(&item->qmsg)) return false;
  }
  return true;
}

void transCtxInit(STransCtx* ctx) {
  // init transCtx

@@ -240,7 +251,7 @@ void transCtxCleanup(STransCtx* ctx) {
    ctx->freeFunc(iter->val);
    iter = taosHashIterate(ctx->args, iter);
  }

  ctx->freeFunc(ctx->brokenVal.val);
  taosHashCleanup(ctx->args);
  ctx->args = NULL;
}
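
Two guards are added above: `transAsyncSend` refuses new work once the pool's `stop` flag is set, and `transAsyncPoolIsEmpty` lets shutdown code confirm that every per-handle queue has drained. Here is the same pair of checks reduced to a self-contained toy with C11 atomics, where a counter stands in for each `uv_async_t` queue; none of these names are the real pool API:

```c
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
  atomic_char stop;       /* set once when shutdown begins */
  int         nAsync;     /* number of async handles in the real pool */
  int         index;      /* round-robin cursor; races here are benign */
  int         pending[8]; /* stand-in: one "queue length" per handle */
} MiniPool;

static int pool_send(MiniPool *p, int work) {
  if (atomic_load(&p->stop) == 1) return -1;  /* refuse work after shutdown began */
  int idx = p->index++ % p->nAsync;           /* spread work across the handles */
  p->pending[idx] += work;
  return 0;
}

static bool pool_is_empty(const MiniPool *p) {
  for (int i = 0; i < p->nAsync; i++)
    if (p->pending[i] != 0) return false;     /* some queue still holds items */
  return true;
}

int main(void) {
  MiniPool p = {0};
  p.nAsync = 4;
  pool_send(&p, 1);
  atomic_store(&p.stop, 1);
  /* after stop: sends fail, but queued work is still visible to the drainer */
  return (pool_send(&p, 1) == -1 && !pool_is_empty(&p)) ? 0 : 1;
}
```
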
@@ -1034,7 +1034,7 @@ void transUnrefSrvHandle(void* handle) {
  }
}

void transReleaseSrvHandle(void* handle) {
int transReleaseSrvHandle(void* handle) {
  SRpcHandleInfo* info = handle;
  SExHandle*      exh = info->handle;
  int64_t         refId = info->refId;

@@ -1053,16 +1053,16 @@ void transReleaseSrvHandle(void* handle) {
  tTrace("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle);
  transAsyncSend(pThrd->asyncPool, &m->q);
  transReleaseExHandle(transGetRefMgt(), refId);
  return;
  return 0;
_return1:
  tTrace("handle %p failed to send to release handle", exh);
  transReleaseExHandle(transGetRefMgt(), refId);
  return;
  return -1;
_return2:
  tTrace("handle %p failed to send to release handle", exh);
  return;
  return -1;
}
void transSendResponse(const STransMsg* msg) {
int transSendResponse(const STransMsg* msg) {
  SExHandle* exh = msg->info.handle;
  int64_t    refId = msg->info.refId;
  ASYNC_CHECK_HANDLE(exh, refId);

@@ -1082,18 +1082,18 @@ void transSendResponse(const STransMsg* msg) {
  tGTrace("conn %p start to send resp (1/2)", exh->handle);
  transAsyncSend(pThrd->asyncPool, &m->q);
  transReleaseExHandle(transGetRefMgt(), refId);
  return;
  return 0;
_return1:
  tTrace("handle %p failed to send resp", exh);
  rpcFreeCont(msg->pCont);
  transReleaseExHandle(transGetRefMgt(), refId);
  return;
  return -1;
_return2:
  tTrace("handle %p failed to send resp", exh);
  rpcFreeCont(msg->pCont);
  return;
  return -1;
}
void transRegisterMsg(const STransMsg* msg) {
int transRegisterMsg(const STransMsg* msg) {
  SExHandle* exh = msg->info.handle;
  int64_t    refId = msg->info.refId;
  ASYNC_CHECK_HANDLE(exh, refId);

@@ -1112,16 +1112,17 @@ void transRegisterMsg(const STransMsg* msg) {
  tTrace("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle);
  transAsyncSend(pThrd->asyncPool, &m->q);
  transReleaseExHandle(transGetRefMgt(), refId);
  return;
  return 0;

_return1:
  tTrace("handle %p failed to register brokenlink", exh);
  rpcFreeCont(msg->pCont);
  transReleaseExHandle(transGetRefMgt(), refId);
  return;
  return -1;
_return2:
  tTrace("handle %p failed to register brokenlink", exh);
  rpcFreeCont(msg->pCont);
  return -1;
}

int transGetConnInfo(void* thandle, STransHandleInfo* pConnInfo) { return -1; }
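
`transReleaseSrvHandle`, `transSendResponse`, and `transRegisterMsg` share one structure: two failure labels, each unwinding only the resources acquired before the jump, now returning `-1` where the old code fell off the end of a `void` function. A generic sketch of that staged-cleanup idiom — all names below are hypothetical:

```c
#include <stdio.h>
#include <stdlib.h>

static int g_queue_closed = 0;  /* toggled to exercise the failure path */

/* Hypothetical stand-ins for the ref-counted handle and the async queue. */
static void *acquire(void)    { return malloc(1); }
static void  release(void *h) { free(h); }
static int   enqueue(void *h) { (void)h; return g_queue_closed ? -1 : 0; }

static int send_with_staged_cleanup(int have_handle) {
  void *handle = have_handle ? acquire() : NULL;
  if (handle == NULL) goto _return2;        /* nothing acquired yet */
  if (enqueue(handle) != 0) goto _return1;  /* acquired, but not queued */
  release(handle);
  return 0;

_return1:  /* the handle was acquired: release it before failing */
  release(handle);
  return -1;
_return2:  /* nothing to release on this path */
  return -1;
}

int main(void) {
  printf("%d\n", send_with_staged_cleanup(1));  /* 0: happy path */
  g_queue_closed = 1;
  printf("%d\n", send_with_staged_cleanup(1));  /* -1 via _return1 */
  printf("%d\n", send_with_staged_cleanup(0));  /* -1 via _return2 */
  return 0;
}
```
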
@@ -210,7 +210,7 @@ static int32_t taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) {
}


bool taosCheckSystemIsSmallEnd() {
bool taosCheckSystemIsLittleEnd() {
  union check {
    int16_t i;
    char ch[2];
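
The rename from `taosCheckSystemIsSmallEnd` to `taosCheckSystemIsLittleEnd` is cosmetic, but the union trick it relies on is worth spelling out: store 1 in an `int16_t` and inspect the first byte — a little-endian machine keeps the low-order byte first. A standalone version:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

bool isLittleEndian(void) {
  union {
    int16_t i;
    char    ch[2];
  } u = { .i = 1 };
  return u.ch[0] == 1;  /* low byte stored first => little-endian */
}

int main(void) {
  printf("little-endian: %s\n", isLittleEndian() ? "yes" : "no");
  return 0;
}
```
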
@@ -741,7 +741,10 @@ class AnyState:
                sCnt += 1
        if (sCnt >= 2):
            raise CrashGenError(
                "Unexpected more than 1 success with task: {}".format(cls))
                "Unexpected more than 1 success with task: {}, in task set: {}".format(
                    cls.__name__,  # verified just now that isinstance(task, cls)
                    [c.__class__.__name__ for c in tasks]
                ))

    def assertIfExistThenSuccess(self, tasks, cls):
        sCnt = 0
@@ -11,13 +11,13 @@

# -*- coding: utf-8 -*-

from collections import defaultdict
import random
import string
import requests
import time
import socket
import json
import toml
from .boundary import DataBoundary
import taos
from util.log import *

@@ -25,6 +25,79 @@ from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.constant import *
from dataclasses import dataclass,field
from typing import List

@dataclass
class DataSet:
    ts_data     : List[int]   = field(default_factory=list)
    int_data    : List[int]   = field(default_factory=list)
    bint_data   : List[int]   = field(default_factory=list)
    sint_data   : List[int]   = field(default_factory=list)
    tint_data   : List[int]   = field(default_factory=list)
    uint_data   : List[int]   = field(default_factory=list)
    ubint_data  : List[int]   = field(default_factory=list)
    usint_data  : List[int]   = field(default_factory=list)
    utint_data  : List[int]   = field(default_factory=list)
    float_data  : List[float] = field(default_factory=list)
    double_data : List[float] = field(default_factory=list)
    bool_data   : List[int]   = field(default_factory=list)
    vchar_data  : List[str]   = field(default_factory=list)
    nchar_data  : List[str]   = field(default_factory=list)

    def get_order_set(self,
            rows,
            int_step    :int   = 1,
            bint_step   :int   = 1,
            sint_step   :int   = 1,
            tint_step   :int   = 1,
            uint_step   :int   = 1,
            ubint_step  :int   = 1,
            usint_step  :int   = 1,
            utint_step  :int   = 1,
            float_step  :float = 1,
            double_step :float = 1,
            bool_start  :int   = 1,
            vchar_prefix:str   = "vachar_",
            vchar_step  :int   = 1,
            nchar_prefix:str   = "nchar_测试_",
            nchar_step  :int   = 1,
            ts_step     :int   = 1
        ):
        for i in range(rows):
            self.int_data.append( int(i * int_step % INT_MAX ))
            self.bint_data.append( int(i * bint_step % BIGINT_MAX ))
            self.sint_data.append( int(i * sint_step % SMALLINT_MAX ))
            self.tint_data.append( int(i * tint_step % TINYINT_MAX ))
            self.uint_data.append( int(i * uint_step % INT_UN_MAX ))
            self.ubint_data.append( int(i * ubint_step % BIGINT_UN_MAX ))
            self.usint_data.append( int(i * usint_step % SMALLINT_UN_MAX ))
            self.utint_data.append( int(i * utint_step % TINYINT_UN_MAX ))
            self.float_data.append( float(i * float_step % FLOAT_MAX ))
            self.double_data.append( float(i * double_step % DOUBLE_MAX ))
            self.bool_data.append( bool((i + bool_start) % 2 ))
            self.vchar_data.append( f"{vchar_prefix}_{i * vchar_step}" )
            self.nchar_data.append( f"{nchar_prefix}_{i * nchar_step}")
            self.ts_data.append( int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000 - i * ts_step))

    def get_disorder_set(self,
            rows,
            int_low   :int = INT_MIN,
            int_up    :int = INT_MAX,
            bint_low  :int = BIGINT_MIN,
            bint_up   :int = BIGINT_MAX,
            sint_low  :int = SMALLINT_MIN,
            sint_up   :int = SMALLINT_MAX,
            tint_low  :int = TINYINT_MIN,
            tint_up   :int = TINYINT_MAX,
            ubint_low :int = BIGINT_UN_MIN,
            ubint_up  :int = BIGINT_UN_MAX,


        ):
        pass


class TDCom:
    def __init__(self):

@@ -372,6 +445,7 @@ class TDCom:

    def getClientCfgPath(self):
        buildPath = self.getBuildPath()

        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:

@@ -680,4 +754,29 @@ def is_json(msg):
    else:
        return False

def get_path(tool="taosd"):
    selfPath = os.path.dirname(os.path.realpath(__file__))

    if ("community" in selfPath):
        projPath = selfPath[:selfPath.find("community")]
    else:
        projPath = selfPath[:selfPath.find("tests")]

    paths = []
    for root, dirs, files in os.walk(projPath):
        if ((tool) in files or ("%s.exe"%tool) in files):
            rootRealPath = os.path.dirname(os.path.realpath(root))
            if ("packaging" not in rootRealPath):
                paths.append(os.path.join(root, tool))
                break
    if (len(paths) == 0):
        return ""
    return paths[0]

def dict2toml(in_dict: dict, file:str):
    if not isinstance(in_dict, dict):
        return ""
    with open(file, 'w') as f:
        toml.dump(in_dict, f)

tdCom = TDCom()
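
`get_order_set` keeps every generated column inside its SQL type's range by reducing `i * step` modulo the type maximum, so long runs wrap instead of overflowing. The same rule in C, using the `<stdint.h>` limits in place of the framework's `INT_MAX`-style constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Generate n monotonically stepped values that wrap before overflowing
 * the target column type, mirroring "i * step % TYPE_MAX" above. */
static void fill_int_col(int32_t *out, int n, int64_t step) {
  for (int64_t i = 0; i < n; i++)
    out[i] = (int32_t)((i * step) % INT32_MAX);
}

int main(void) {
  int32_t col[4];
  fill_int_col(col, 4, 1500000000);  /* a large step forces a wrap */
  for (int i = 0; i < 4; i++) printf("%d\n", col[i]);
  return 0;
}
```
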
@@ -244,7 +244,6 @@ class TDDnode:
        # print(updatecfgDict)
        isFirstDir = 1
        if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
            print(updatecfgDict[0][0])
            for key, value in updatecfgDict[0][0].items():
                if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows':
                    continue

@@ -324,7 +323,6 @@ class TDDnode:
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
        print("dnode:%d is running with %s " % (self.index, cmd))
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
        if self.valgrind == 0:
            time.sleep(0.1)

@@ -407,7 +405,6 @@ class TDDnode:
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
        print("dnode:%d is running with %s " % (self.index, cmd))
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
        if self.valgrind == 0:
            time.sleep(0.1)

@@ -665,7 +662,6 @@ class TDDnodes:
        self.check(index)
        self.dnodes[index - 1].stoptaosd()


    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()
@@ -235,9 +235,17 @@ class TDSql:
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                           (self.sql, row, col, self.queryResult[row][col], data))
                return
            elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
                tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                           (self.sql, row, col, self.queryResult[row][col], data))
            elif isinstance(data, float):
                if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
                    tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                               (self.sql, row, col, self.queryResult[row][col], data))
                elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
                    tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
                               (self.sql, row, col, self.queryResult[row][col], data))
                else:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])

@@ -323,13 +331,32 @@ class TDSql:
            args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list)
            tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)

    def __check_equal(self, elm, expect_elm):
        if not type(elm) in(list, tuple) and elm == expect_elm:
            return True
        if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
            if len(elm) != len(expect_elm):
                return False
            if len(elm) == 0:
                return True
            for i in range(len(elm)):
                flag = self.__check_equal(elm[i], expect_elm[i])
                if not flag:
                    return False
            return True
        return False

    def checkEqual(self, elm, expect_elm):
        if elm == expect_elm:
            tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
        else:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
            tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
            return
        if self.__check_equal(elm, expect_elm):
            tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
            return

        caller = inspect.getframeinfo(inspect.stack()[1][0])
        args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
        tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)

    def checkNotEqual(self, elm, expect_elm):
        if elm != expect_elm:
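
The rewritten float branch of `checkData` switches tolerance modes at `|expected| = 1`: relative error is meaningless near zero, while a fixed absolute epsilon is far too strict for large magnitudes. The rule in isolation, with the same `1e-6` threshold as the test above:

```c
#include <math.h>
#include <stdbool.h>
#include <stdio.h>

#define EPS 1e-6

static bool float_matches(double actual, double expect) {
  if (fabs(expect) >= 1.0)
    return fabs((actual - expect) / expect) <= EPS;  /* relative for large values */
  return fabs(actual - expect) <= EPS;               /* absolute near zero */
}

int main(void) {
  printf("%d\n", float_matches(1000000.5, 1000000.4));  /* 1: tiny relative gap */
  printf("%d\n", float_matches(0.5, 0.4));              /* 0: large absolute gap */
  return 0;
}
```
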
@@ -0,0 +1,260 @@
import socket
from fabric2 import Connection
from util.log import *
from util.common import *


class TAdapter:
    def __init__(self):
        self.running = 0
        self.deployed = 0
        self.remoteIP = ""
        self.taosadapter_cfg_dict = {
            "debug"         : True,
            "taosConfigDir" : "",
            "port"          : 6041,
            "logLevel"      : "debug",
            "cors" : {
                "allowAllOrigins" : True,
            },
            "pool" : {
                "maxConnect"  : 4000,
                "maxIdle"     : 4000,
                "idleTimeout" : "1h"
            },
            "ssl" : {
                "enable"   : False,
                "certFile" : "",
                "keyFile"  : "",
            },
            "log" : {
                "path"                : "",
                "rotationCount"       : 30,
                "rotationTime"        : "24h",
                "rotationSize"        : "1GB",
                "enableRecordHttpSql" : True,
                "sqlRotationCount"    : 2,
                "sqlRotationTime"     : "24h",
                "sqlRotationSize"     : "1GB",
            },
            "monitor" : {
                "collectDuration"           : "3s",
                "incgroup"                  : False,
                "pauseQueryMemoryThreshold" : 70,
                "pauseAllMemoryThreshold"   : 80,
                "identity"                  : "",
                "writeToTD"                 : True,
                "user"                      : "root",
                "password"                  : "taosdata",
                "writeInterval"             : "30s"
            },
            "opentsdb" : {
                "enable" : False
            },
            "influxdb" : {
                "enable" : False
            },
            "statsd" : {
                "enable" : False
            },
            "collectd" : {
                "enable" : False
            },
            "opentsdb_telnet" : {
                "enable" : False
            },
            "node_exporter" : {
                "enable" : False
            },
            "prometheus" : {
                "enable" : False
            },
        }
        # TODO: add taosadapter env:
        # 1. init cfg.toml.dict : OK
        # 2. dump dict to toml : OK
        # 3. update cfg.toml.dict : OK
        # 4. check adapter exists : OK
        # 5. deploy adapter cfg : OK
        # 6. adapter start : OK
        # 7. adapter stop

    def init(self, path, remoteIP=""):
        self.path = path
        self.remoteIP = remoteIP
        binPath = get_path() + "/../../../"
        binPath = os.path.realpath(binPath)

        if path == "":
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        if self.remoteIP:
            try:
                self.config = eval(remoteIP)
                self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
            except Exception as e:
                tdLog.notice(e)

    def update_cfg(self, update_dict :dict):
        if not isinstance(update_dict, dict):
            return
        if "log" in update_dict and "path" in update_dict["log"]:
            del update_dict["log"]["path"]
        for key, value in update_dict.items():
            if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]:
                if isinstance(value, dict):
                    for k, v in value.items():
                        self.taosadapter_cfg_dict[key][k] = v
            else:
                self.taosadapter_cfg_dict[key] = value

    def check_adapter(self):
        if getPath(tool="taosadapter"):
            return False
        else:
            return True

    def remote_exec(self, updateCfgDict, execCmd):
        remoteCfgDict = copy.deepcopy(updateCfgDict)
        if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]:
            del remoteCfgDict["log"]["path"]

        remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode()
        execCmdStr = base64.b64encode(execCmd.encode()).decode()
        with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
            self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}")

    def cfg(self, option, value):
        cmd = f"echo {option} = {value} >> {self.cfg_path}"
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def deploy(self, *update_cfg_dict):
        self.log_dir = f"{self.path}/sim/dnode1/log"
        self.cfg_dir = f"{self.path}/sim/dnode1/cfg"
        self.cfg_path = f"{self.cfg_dir}/taosadapter.toml"

        cmd = f"touch {self.cfg_path}"
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        self.taosadapter_cfg_dict["log"]["path"] = self.log_dir
        if bool(update_cfg_dict):
            self.update_cfg(update_dict=update_cfg_dict)

        if (self.remoteIP == ""):
            dict2toml(self.taosadapter_cfg_dict, self.cfg_path)
        else:
            self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)")

        self.deployed = 1

        tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}")

    def start(self):
        bin_path = get_path(tool="taosadapter")

        if (bin_path == ""):
            tdLog.exit("taosadapter not found!")
        else:
            tdLog.info(f"taosadapter found: {bin_path}")

        if platform.system().lower() == 'windows':
            cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
        else:
            cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null 2>&1 & "

        if self.remoteIP:
            self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
            self.running = 1
        else:
            os.system(f"rm -rf {self.log_dir}/taosadapter*")
            if os.system(cmd) != 0:
                tdLog.exit(cmd)
            self.running = 1
            tdLog.debug(f"taosadapter is running with {cmd} ")

            time.sleep(0.1)

            taosadapter_port = self.taosadapter_cfg_dict["port"]
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(3)
            try:
                res = s.connect_ex((self.remoteIP, taosadapter_port))
                s.shutdown(2)
                if res == 0:
                    tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}")
                else:
                    tdLog.info(f"the taosadapter do not started!!!")
            except socket.error as e:
                tdLog.notice("socket connect error!")
            finally:
                if s:
                    s.close()
            # tdLog.debug("the taosadapter has been started.")
            time.sleep(1)

    def start_taosadapter(self):
        """
        use this method, must deploy taosadapter
        """
        bin_path = get_path(tool="taosadapter")

        if (bin_path == ""):
            tdLog.exit("taosadapter not found!")
        else:
            tdLog.info(f"taosadapter found: {bin_path}")

        if self.deployed == 0:
            tdLog.exit("taosadapter is not deployed")

        if platform.system().lower() == 'windows':
            cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
        else:
            cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null 2>&1 & "

        if self.remoteIP:
            self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
            self.running = 1
        else:
            if os.system(cmd) != 0:
                tdLog.exit(cmd)
            self.running = 1
            tdLog.debug(f"taosadapter is running with {cmd} ")

            time.sleep(0.1)

    def stop(self, force_kill=False):
        signal = "-SIGKILL" if force_kill else "-SIGTERM"

        if self.remoteIP:
            self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()")
            tdLog.info("stop taosadapter")
            return

        toBeKilled = "taosadapter"

        if self.running != 0:
            psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")

            while(processID):
                killCmd = f"kill {signal} {processID} > /dev/null 2>&1"
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
            if not platform.system().lower() == 'windows':
                for port in range(6030, 6041):
                    fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
                    os.system(fuserCmd)

            self.running = 0
            tdLog.debug(f"taosadapter is stopped by kill {signal}")


tAdapter = TAdapter()
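
`TAdapter.start` decides whether the adapter came up by attempting a TCP connection to its port with a short timeout. An equivalent probe in C with POSIX sockets — note that bounding `connect()` via `SO_SNDTIMEO` is Linux behavior, and a fully portable version would use a non-blocking connect plus `select()`; host and port below are just the defaults used above:

```c
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

/* Return 0 if something accepts TCP connections on host:port, -1 otherwise. */
static int probe_port(const char *host, int port) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) return -1;
  struct timeval tv = { .tv_sec = 3 };  /* 3-second budget, as in the test above */
  setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
  struct sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port   = htons(port);
  inet_pton(AF_INET, host, &addr.sin_addr);
  int rc = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
  close(fd);
  return rc;
}

int main(void) {
  puts(probe_port("127.0.0.1", 6041) == 0 ? "taosadapter is up"
                                          : "taosadapter not reachable");
  return 0;
}
```
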
@@ -218,7 +218,7 @@ typedef struct {
} CaseCtrl;

#if 0
CaseCtrl gCaseCtrl = { // default
CaseCtrl gCaseCtrl = {
  .precision = TIME_PRECISION_MICRO,
  .bindNullNum = 0,
  .printCreateTblSql = false,

@@ -251,7 +251,7 @@ CaseCtrl gCaseCtrl = { // default


#if 1
CaseCtrl gCaseCtrl = {
CaseCtrl gCaseCtrl = { // default
  .precision = TIME_PRECISION_MILLI,
  .bindNullNum = 0,
  .printCreateTblSql = false,

@@ -299,7 +299,7 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
  .printRes = true,
  .runTimes = 0,
  .caseRunIdx = -1,
  .caseIdx = 23,
  .caseIdx = 5,
  .caseNum = 1,
  .caseRunNum = 1,
};

@@ -328,7 +328,7 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
  //.optrIdxList = optrIdxList,
  //.bindColTypeNum = tListLen(bindColTypeList),
  //.bindColTypeList = bindColTypeList,
  .caseIdx = 24,
  .caseIdx = 8,
  .caseNum = 1,
  .caseRunNum = 1,
};

@@ -1384,6 +1384,7 @@ void bpCheckTagFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
  }

  bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindTagNum, pBind, BP_BIND_TAG);
  taosMemoryFree(pFields);
}

void bpCheckColFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {

@@ -1401,12 +1402,13 @@ void bpCheckColFields(TAOS_STMT *stmt, TAOS_MULTI_BIND* pBind) {
  }

  bpCheckColTagFields(stmt, fieldNum, pFields, gCurCase->bindColNum, pBind, BP_BIND_COL);
  taosMemoryFree(pFields);
}

void bpShowBindParam(TAOS_MULTI_BIND *bind, int32_t num) {
  for (int32_t i = 0; i < num; ++i) {
    TAOS_MULTI_BIND* b = &bind[i];
    printf("Bind %d: type[%d],buf[%p],buflen[%d],len[%],null[%d],num[%d]\n",
    printf("Bind %d: type[%d],buf[%p],buflen[%d],len[%d],null[%d],num[%d]\n",
           i, b->buffer_type, b->buffer, b->buffer_length, b->length ? *b->length : 0, b->is_null ? *b->is_null : 0, b->num);
  }
}

@@ -2596,6 +2598,7 @@ void runAll(TAOS *taos) {
  printf("%s Begin\n", gCaseCtrl.caseCatalog);
  runCaseList(taos);

#if 0
  strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
  printf("%s Begin\n", gCaseCtrl.caseCatalog);
  gCaseCtrl.precision = TIME_PRECISION_MICRO;

@@ -2626,7 +2629,6 @@ void runAll(TAOS *taos) {
  runCaseList(taos);
  gCaseCtrl.bindRowNum = 0;

#if 0
  strcpy(gCaseCtrl.caseCatalog, "Row Num Test");
  printf("%s Begin\n", gCaseCtrl.caseCatalog);
  gCaseCtrl.rowNum = 1000;

@@ -2640,7 +2642,6 @@ void runAll(TAOS *taos) {
  gCaseCtrl.runTimes = 2;
  runCaseList(taos);
  gCaseCtrl.runTimes = 0;
#endif

  strcpy(gCaseCtrl.caseCatalog, "Check Param Test");
  printf("%s Begin\n", gCaseCtrl.caseCatalog);

@@ -2648,19 +2649,20 @@ void runAll(TAOS *taos) {
  runCaseList(taos);
  gCaseCtrl.checkParamNum = false;

#if 0
  strcpy(gCaseCtrl.caseCatalog, "Bind Col Num Test");
  printf("%s Begin\n", gCaseCtrl.caseCatalog);
  gCaseCtrl.bindColNum = 6;
  runCaseList(taos);
  gCaseCtrl.bindColNum = 0;
#endif

/*
  strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
  printf("%s Begin\n", gCaseCtrl.caseCatalog);
  gCaseCtrl.bindColTypeNum = tListLen(bindColTypeList);
  gCaseCtrl.bindColTypeList = bindColTypeList;
  runCaseList(taos);
#endif
*/

  printf("All Test End\n");
}
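
The one-character repair in `bpShowBindParam` (`len[%]` becoming `len[%d]`) is exactly the class of bug `-Wformat` (enabled by `-Wall` in gcc and clang) reports at compile time: an unknown conversion character leaves an argument unmatched and the behavior undefined. A minimal reproduction to try under `-Wall`:

```c
#include <stdio.h>

int main(void) {
  int len = 7;
  /* Broken form: "%]" is not a conversion specifier, so `len` has no matching
   * directive and the behavior is undefined; gcc/clang flag this with -Wall. */
  printf("len[%], num[%d]\n", len, 3);
  /* Fixed form, as in the patch above: */
  printf("len[%d], num[%d]\n", len, 3);
  return 0;
}
```
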
@@ -31,7 +31,7 @@
./test.sh -f tsim/db/len.sim
./test.sh -f tsim/db/repeat.sim
./test.sh -f tsim/db/show_create_db.sim
./test.sh -f tsim/db/show_create_table.sim
# jira ./test.sh -f tsim/db/show_create_table.sim
./test.sh -f tsim/db/tables.sim
./test.sh -f tsim/db/taosdlog.sim

@@ -87,48 +87,47 @@
./test.sh -f tsim/parser/alter_column.sim
./test.sh -f tsim/parser/alter_stable.sim
./test.sh -f tsim/parser/alter.sim
# nojira ./test.sh -f tsim/parser/alter1.sim
# jira ./test.sh -f tsim/parser/alter1.sim
./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim
# jira ./test.sh -f tsim/parser/auto_create_tb.sim
./test.sh -f tsim/parser/between_and.sim
./test.sh -f tsim/parser/binary_escapeCharacter.sim
# nojira ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# nojira ./test.sh -f tsim/parser/columnValue.sim
## ./test.sh -f tsim/parser/commit.sim
## ./test.sh -f tsim/parser/condition.sim
## ./test.sh -f tsim/parser/constCol.sim
# ./test.sh -f tsim/parser/create_db.sim
## ./test.sh -f tsim/parser/create_db__for_community_version.sim
# ./test.sh -f tsim/parser/create_mt.sim
# ./test.sh -f tsim/parser/create_tb.sim
## ./test.sh -f tsim/parser/create_tb_with_tag_name.sim
# ./test.sh -f tsim/parser/dbtbnameValidate.sim
##./test.sh -f tsim/parser/distinct.sim
#./test.sh -f tsim/parser/fill_stb.sim
# jira ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# jira ./test.sh -f tsim/parser/columnValue.sim
./test.sh -f tsim/parser/commit.sim
# jira ./test.sh -f tsim/parser/condition.sim
./test.sh -f tsim/parser/constCol.sim
./test.sh -f tsim/parser/create_db.sim
./test.sh -f tsim/parser/create_mt.sim
# jira ./test.sh -f tsim/parser/create_tb_with_tag_name.sim
./test.sh -f tsim/parser/create_tb.sim
./test.sh -f tsim/parser/dbtbnameValidate.sim
./test.sh -f tsim/parser/distinct.sim
# jira ./test.sh -f tsim/parser/fill_stb.sim
./test.sh -f tsim/parser/fill_us.sim
./test.sh -f tsim/parser/fill.sim
./test.sh -f tsim/parser/first_last.sim
./test.sh -f tsim/parser/fourArithmetic-basic.sim
## ./test.sh -f tsim/parser/function.sim
# jira ./test.sh -f tsim/parser/function.sim
./test.sh -f tsim/parser/groupby-basic.sim
# ./test.sh -f tsim/parser/groupby.sim
# ./test.sh -f tsim/parser/having_child.sim
## ./test.sh -f tsim/parser/having.sim
## ./test.sh -f tsim/parser/import.sim
# ./test.sh -f tsim/parser/import_commit1.sim
# ./test.sh -f tsim/parser/import_commit2.sim
# ./test.sh -f tsim/parser/import_commit3.sim
## ./test.sh -f tsim/parser/import_file.sim
## ./test.sh -f tsim/parser/insert_multiTbl.sim
# ./test.sh -f tsim/parser/insert_tb.sim
## ./test.sh -f tsim/parser/interp.sim
./test.sh -f tsim/parser/import_commit1.sim
./test.sh -f tsim/parser/import_commit2.sim
./test.sh -f tsim/parser/import_commit3.sim
# jira ./test.sh -f tsim/parser/import_file.sim
./test.sh -f tsim/parser/import.sim
./test.sh -f tsim/parser/insert_multiTbl.sim
./test.sh -f tsim/parser/insert_tb.sim
# jira ./test.sh -f tsim/parser/interp.sim
# ./test.sh -f tsim/parser/join.sim
# ./test.sh -f tsim/parser/join_manyblocks.sim
## ./test.sh -f tsim/parser/join_multitables.sim
# ./test.sh -f tsim/parser/join_multivnode.sim
# ./test.sh -f tsim/parser/last_cache.sim
./test.sh -f tsim/parser/last_cache.sim
## ./test.sh -f tsim/parser/last_groupby.sim
# ./test.sh -f tsim/parser/lastrow.sim
# jira ./test.sh -f tsim/parser/lastrow.sim
## ./test.sh -f tsim/parser/like.sim
# ./test.sh -f tsim/parser/limit.sim
# ./test.sh -f tsim/parser/limit1.sim

@@ -160,7 +159,7 @@
./test.sh -f tsim/parser/stableOp.sim
# ./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
# ./test.sh -f tsim/parser/tags_filter.sim
# jira ./test.sh -f tsim/parser/tbnameIn.sim
./test.sh -f tsim/parser/tbnameIn.sim
./test.sh -f tsim/parser/timestamp.sim
./test.sh -f tsim/parser/top_groupby.sim
./test.sh -f tsim/parser/topbot.sim

@@ -197,7 +196,7 @@
./test.sh -f tsim/mnode/basic5.sim

# ---- show
./test.sh -f tsim/show/basic.sim
# jira ./test.sh -f tsim/show/basic.sim

# ---- table
./test.sh -f tsim/table/autocreate.sim

@@ -235,15 +234,15 @@
./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
# ./test.sh -f tsim/stream/distributesession0.sim
./test.sh -f tsim/stream/distributeSession0.sim
./test.sh -f tsim/stream/session0.sim
./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim
# ./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/partitionby.sim
./test.sh -f tsim/stream/partitionby1.sim
# ./test.sh -f tsim/stream/schedSnode.sim
# unsupport ./test.sh -f tsim/stream/schedSnode.sim
./test.sh -f tsim/stream/windowClose.sim
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/sliding.sim

@@ -294,12 +293,12 @@
./test.sh -f tsim/db/basic3.sim -m
./test.sh -f tsim/db/error1.sim -m
./test.sh -f tsim/insert/backquote.sim -m
# nojira ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
# unsupport ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
./test.sh -f tsim/query/interval-offset.sim -m
./test.sh -f tsim/tmq/basic3.sim -m
./test.sh -f tsim/stable/vnode3.sim -m
./test.sh -f tsim/qnode/basic1.sim -m
# nojira ./test.sh -f tsim/mnode/basic1.sim -m
# unsupport ./test.sh -f tsim/mnode/basic1.sim -m

# --- sma
./test.sh -f tsim/sma/drop_sma.sim

@@ -329,7 +328,7 @@
./test.sh -f tsim/vnode/stable_replica3_vnode3.sim

# --- sync
./test.sh -f tsim/sync/3Replica1VgElect.sim
# jira ./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/sync/oneReplica1VgElect.sim
./test.sh -f tsim/sync/oneReplica5VgElect.sim
@@ -20,7 +20,7 @@ $stb = $stbPrefix . $i

sql drop database $db -x step1
step1:
sql create database $db maxrows 255 ctime 3600
sql create database $db maxrows 255
print ====== create tables
sql use $db
sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int)

@@ -78,12 +78,9 @@ endw

print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 500
system sh/exec.sh -n dnode1 -s start
sleep 100
print ================== server restart completed
sql connect
sleep 100

print ====== select from table and check num of rows returned
sql use $db
@@ -2,11 +2,11 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect

sql drop database if exists cdb
sql create database if not exists cdb
sql use cdb
sql create table stb1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)

sql create table tb1 using stb1 tags(1,'1',1.0)
sql create table tb2 using stb1 tags(2,'2',2.0)
sql create table tb3 using stb1 tags(3,'3',3.0)

@@ -45,7 +45,6 @@ sql insert into tb6 values ('2021-05-05 18:19:27',64,64.0,64,64,64,64.0,false,'6
sql insert into tb6 values ('2021-05-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)

sql create table stb2 (ts timestamp, u1 int unsigned, u2 bigint unsigned, u3 smallint unsigned, u4 tinyint unsigned, ts2 timestamp) TAGS(t1 int unsigned, t2 bigint unsigned, t3 timestamp, t4 int)

sql create table tb2_1 using stb2 tags(1,1,'2021-05-05 18:38:38',1)
sql create table tb2_2 using stb2 tags(2,2,'2021-05-05 18:58:58',2)


@@ -67,7 +66,6 @@ sql insert into tb2_2 values ('2021-05-05 18:19:14',8,2,3,4,'2021-05-05 18:28:15
sql insert into tb2_2 values ('2021-05-05 18:19:15',5,6,7,8,'2021-05-05 18:28:16')

sql create table stb3 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)

sql create table tb3_1 using stb3 tags(1,'1',1.0)
sql create table tb3_2 using stb3 tags(2,'2',2.0)


@@ -78,7 +76,6 @@ sql insert into tb3_1 values ('2021-04-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4
sql insert into tb3_1 values ('2021-05-05 18:19:28',5,NULL,5,NULL,5,NULL,true,NULL,'5')
sql insert into tb3_1 values ('2021-06-05 18:19:28',NULL,6.0,NULL,6,NULL,6.0,NULL,'6',NULL)
sql insert into tb3_1 values ('2021-07-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)

sql insert into tb3_2 values ('2021-01-06 18:19:00',11,11.0,11,11,11,11.0,true ,'11','11')
sql insert into tb3_2 values ('2021-02-06 18:19:01',12,12.0,12,12,12,12.0,true ,'12','12')
sql insert into tb3_2 values ('2021-03-06 18:19:02',13,13.0,13,13,13,13.0,false,'13','13')

@@ -87,9 +84,7 @@ sql insert into tb3_2 values ('2021-05-06 18:19:28',15,NULL,15,NULL,15,NULL,true
sql insert into tb3_2 values ('2021-06-06 18:19:28',NULL,16.0,NULL,16,NULL,16.0,NULL,'16',NULL)
sql insert into tb3_2 values ('2021-07-06 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)


sql create table stb4 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9),c10 binary(16300)) TAGS(t1 int, t2 binary(10), t3 double)

sql create table tb4_0 using stb4 tags(0,'0',0.0)
sql create table tb4_1 using stb4 tags(1,'1',1.0)
sql create table tb4_2 using stb4 tags(2,'2',2.0)

@@ -128,19 +123,13 @@ while $i < $blockNum
$ts0 = $ts0 + 259200000
endw

sleep 100

sql connect

run tsim/parser/condition_query.sim

print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 100
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
sleep 100

run tsim/parser/condition_query.sim

@@ -11,14 +11,14 @@ if $rows != 28 then
return -1
endi

sql_error select * from stb1 where c8 > 0
sql_error select * from stb1 where c7 in (0,2,3,1);
sql_error select * from stb1 where c8 in (true);
sql_error select * from stb1 where c8 in (1,2);
sql_error select * from stb1 where t2 in (3.0);
sql_error select ts,c1,c7 from stb1 where c7 > false
sql_error select * from stb1 where c1 > NULL;
sql_error select * from stb1 where c1 = NULL;
sql select * from stb1 where c8 > 0
sql select * from stb1 where c7 in (0,2,3,1);
sql select * from stb1 where c8 in (true);
sql select * from stb1 where c8 in (1,2);
sql select * from stb1 where t2 in (3.0);
sql select ts,c1,c7 from stb1 where c7 > false
sql select * from stb1 where c1 > NULL;
sql select * from stb1 where c1 = NULL;
sql_error select * from stb1 where c1 LIKE '%1';
sql_error select * from stb1 where c2 LIKE '%1';
sql_error select * from stb1 where c3 LIKE '%1';

@@ -26,20 +26,20 @@ sql_error select * from stb1 where c4 LIKE '%1';
sql_error select * from stb1 where c5 LIKE '%1';
sql_error select * from stb1 where c6 LIKE '%1';
sql_error select * from stb1 where c7 LIKE '%1';
sql_error select * from stb1 where c1 = 'NULL';
sql_error select * from stb1 where c2 > 'NULL';
sql_error select * from stb1 where c3 <> 'NULL';
sql_error select * from stb1 where c4 != 'null';
sql_error select * from stb1 where c5 >= 'null';
sql_error select * from stb1 where c6 <= 'null';
sql_error select * from stb1 where c7 < 'nuLl';
sql_error select * from stb1 where c8 < 'nuLl';
sql_error select * from stb1 where c9 > 'nuLl';
sql select * from stb1 where c1 = 'NULL';
sql select * from stb1 where c2 > 'NULL';
sql select * from stb1 where c3 <> 'NULL';
sql select * from stb1 where c4 != 'null';
sql select * from stb1 where c5 >= 'null';
sql select * from stb1 where c6 <= 'null';
sql select * from stb1 where c7 < 'nuLl';
sql select * from stb1 where c8 < 'nuLl';
sql select * from stb1 where c9 > 'nuLl';
sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b;
sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 or b.c1 < 60;
sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60));
sql_error select * from stb1 where 'c2' is null;
sql_error select * from stb1 where 'c2' is not null;
sql select * from stb1 where 'c2' is null;
sql select * from stb1 where 'c2' is not null;

sql select * from stb1 where c2 > 3.0 or c2 < 60;
if $rows != 28 then

@@ -173,7 +173,6 @@ if $data32 != 0 then
return -1
endi


sql select ts,c1,c7 from stb1 where c7 = true
if $rows != 14 then
return -1
@@ -8,20 +8,16 @@ sql use db;
sql create table t (ts timestamp, i int);
sql create table st1 (ts timestamp, f1 int) tags(t1 int);
sql create table st2 (ts timestamp, f2 int) tags(t2 int);

sql create table t1 using st1 tags(1);
sql create table t2 using st2 tags(1);

sql insert into t1 values(1575880055000, 1);
sql insert into t1 values(1575880059000, 1);
sql insert into t1 values(1575880069000, 1);

sql insert into t2 values(1575880055000, 2);

sql select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts

system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select st1.ts, st1.f1, st2.f2 from db.st1, db.st2 where st1.t1=st2.t2 and st1.ts=st2.ts' 127.0.0.1:7111/restful/sql

print ==============select with user-defined columns
sql select 'abc' as f, ts,f1 from t1
if $rows != 3 then

@@ -301,13 +297,13 @@ if $data04 != 1.982700000 then
endi

print ======================udc with interval
sql select count(*), 'uuu' from t1 interval(1s) order by ts desc;
sql select count(*), 'uuu' from t1 interval(1s);
if $rows != 3 then
return -1
endi

print ======================udc with tags
sql select t1,'abc',tbname from st1
sql select distinct t1,'abc',tbname from st1
if $rows != 1 then
return -1
endi

@@ -343,31 +339,26 @@ if $rows != 0 then
return -1
endi


print ======================udc with normal column group by

sql_error select from t1
sql_error select abc from t1
sql_error select abc as tu from t1

print ========================> td-1756
sql_error select * from t1 where ts>now-1y
sql_error select * from t1 where ts>now-1n
sql select * from t1 where ts>now-1y
sql select * from t1 where ts>now-1n

print ========================> td-1752
sql select * from db.st2 where t2 < 200 and t2 is not null;
if $rows != 1 then
return -1
endi

if $data00 != @19-12-09 16:27:35.000@ then
return -1
endi

if $data01 != 2 then
return -1
endi

if $data02 != 1 then
return -1
endi

@@ -376,7 +367,6 @@ sql select * from db.st2 where t2 > 200 or t2 is null;
if $rows != 0 then
return -1
endi

sql select * from st2 where t2 < 200 and t2 is null;
if $rows != 0 then
return -1
@@ -23,10 +23,10 @@ sql create database $db
sql use $db
sql show databases

if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data00 != $db then
if $data20 != $db then
return -1
endi
sql drop database $db

@@ -38,10 +38,10 @@ sql CREATE DATABASE $db
sql use $db
sql show databases

if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data00 != $db then
if $data20 != $db then
return -1
endi
sql drop database $db

@@ -87,7 +87,7 @@ print create_db.sim case4: db_already_exists
sql create database db0
sql create database db0
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
sql drop database db0

@@ -107,29 +107,21 @@ $ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding

sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db wal $wal comp $comp
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data00 != $db then
if $data20 != $db then
return -1
endi
if $data04 != $replica then
if $data24 != $replica then
return -1
endi
if $data06 != $duration then
if $data26 != 14400m then
return -1
endi
if $data07 != 365,365,365 then
return -1
endi
print data08 = $data07
if $data08 != $cache then
print expect $cache, actual:$data08
return -1
endi
if $data09 != 4 then
if $data27 != 525600m,525600m,525600m then
return -1
endi


@@ -160,56 +152,56 @@ sql_error create database $db keep 12,11
sql_error create database $db keep 365001,365001,365001
sql create database dbk0 keep 19
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 19,19,19 then
if $data27 != 27360m,27360m,27360m then
return -1
endi
sql drop database dbk0
sql create database dbka keep 19,20
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 19,20,20 then
if $data27 != 27360m,28800m,28800m then
return -1
endi
sql drop database dbka

sql create database dbk1 keep 11,11,11
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,11,11 then
if $data27 != 15840m,15840m,15840m then
return -1
endi
sql drop database dbk1
sql create database dbk2 keep 11,12,13
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,12,13 then
if $data27 != 15840m,17280m,18720m then
return -1
endi
sql drop database dbk2
sql create database dbk3 keep 11,11,13
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,11,13 then
if $data27 != 15840m,15840m,18720m then
return -1
endi
sql drop database dbk3
sql create database dbk4 keep 11,13,13
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,13,13 then
if $data27 != 15840m,18720m,18720m then
return -1
endi
sql drop database dbk4

@@ -233,38 +225,31 @@ sql_error create database $db ctime 29
sql_error create database $db ctime 40961

# wal {0, 2}
sql create database testwal wal 0
sql_error create database testwal wal 0
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi

sql show databases
print wallevel $data12_testwal
if $data12_testwal != 0 then
return -1
endi
sql drop database testwal

sql create database testwal wal 1
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
sql show databases
print wallevel $data12_testwal
if $data12_testwal != 1 then
print wallevel $data13_testwal
if $data13_testwal != 1 then
return -1
endi
sql drop database testwal

sql create database testwal wal 2
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
print wallevel $data12_testwal
if $data12_testwal != 2 then
print wallevel $data13_testwal
if $data13_testwal != 2 then
return -1
endi
sql drop database testwal

@@ -278,7 +263,7 @@ sql_error create database $db comp 3

sql_error drop database $db
sql show databases
if $rows != 0 then
if $rows != 2 then
return -1
endi

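
The rewritten assertions pass because `show databases` in 3.0 reports `keep` and `duration` in minutes rather than days: 19 days x 1440 = 27360m, 365 days = 525600m, and the `duration 10` value surfaces as 14400m. A two-line helper to verify the conversions used in the script:

```c
#include <stdio.h>

static long days_to_minutes(long days) { return days * 24 * 60; }

int main(void) {
  /* the keep/duration values asserted in the updated script */
  printf("19  -> %ldm\n", days_to_minutes(19));   /* 27360m  */
  printf("365 -> %ldm\n", days_to_minutes(365));  /* 525600m */
  printf("10  -> %ldm\n", days_to_minutes(10));   /* 14400m  */
  return 0;
}
```
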
@@ -1,234 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect

print ======================== dnode1 start

$dbPrefix = fi_in_db
$tbPrefix = fi_in_tb
$mtPrefix = fi_in_mt
$tbNum = 10
$rowNum = 20
$totalNum = 200

print excuting test script create_db.sim
print =============== set up
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i

sql_error createdatabase $db
sql create database $db
sql use $db
sql show databases

if $rows != 1 then
return -1
endi
if $data00 != $db then
return -1
endi
sql drop database $db

# case1: case_insensitivity test
print =========== create_db.sim case1: case insensitivity test
sql_error CREATEDATABASE $db
sql CREATE DATABASE $db
sql use $db
sql show databases

if $rows != 1 then
return -1
endi
if $data00 != $db then
return -1
endi
sql drop database $db
print case_insensitivity test passed

# case2: illegal_db_name test
print =========== create_db.sim case2: illegal_db_name test
$illegal_db1 = 1db
$illegal_db2 = d@b

sql_error create database $illegal_db1
sql_error create database $illegal_db2
print illegal_db_name test passed

# case3: chinese_char_in_db_name test
print ========== create_db.sim case3: chinese_char_in_db_name test
$CN_db1 = 数据库
$CN_db2 = 数据库1
$CN_db3 = db数据库1
sql_error create database $CN_db1
sql_error create database $CN_db2
sql_error create database $CN_db3
#sql show databases
#if $rows != 3 then
# return -1
#endi
#if $data00 != $CN_db1 then
# return -1
#endi
#if $data10 != $CN_db2 then
# return -1
#endi
#if $data20 != $CN_db3 then
# return -1
#endi
#sql drop database $CN_db1
#sql drop database $CN_db2
#sql drop database $CN_db3
print case_chinese_char_in_db_name test passed

# case4: db_already_exists
print create_db.sim case4: db_already_exists
sql create database db0
sql create database db0
sql show databases
if $rows != 1 then
return -1
endi
sql drop database db0
print db_already_exists test passed

# case5: db_meta_data
print create_db.sim case5: db_meta_data test
# cfg params
$replica = 1 # max=3
$duration = 10
$keep = 365
$rows_db = 1000
$cache = 16 # 16MB
$ablocks = 100
$tblocks = 32 # max=512, automatically trimmed when exceeding
$ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding

sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql show databases
if $rows != 1 then
return -1
endi
if $data00 != $db then
return -1
endi
if $data04 != $replica then
return -1
endi
if $data06 != $duration then
return -1
endi
if $data07 != 365 then
return -1
endi
print data08 = $data07
if $data08 != $cache then
print expect $cache, actual:$data08
return -1
endi
if $data09 != 4 then
return -1
endi

sql drop database $db

## param range tests
# replica [1,3]
#sql_error create database $db replica 0
sql_error create database $db replica 4

# day [1, 3650]
sql_error create database $db day 0
sql_error create database $db day 3651

# keep [1, infinity]
sql_error create database $db keep 0
sql_error create database $db keep 0,0,0
sql_error create database $db keep 3,3,3
sql_error create database $db keep 3
sql_error create database $db keep 11.0
sql_error create database $db keep 11.0,11.0,11.0
sql_error create database $db keep "11","11","11"
sql_error create database $db keep "11"
sql_error create database $db keep 13,12,11
sql_error create database $db keep 11,12,11
sql_error create database $db keep 12,11,12
sql_error create database $db keep 11,12,13
sql_error create database $db keep 11,12,13,14
sql_error create database $db keep 11,11
sql_error create database $db keep 365001,365001,365001
sql_error create database $db keep 365001
sql create database dbk1 keep 11
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 11 then
return -1
endi
sql drop database dbk1
sql create database dbk2 keep 12
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 12 then
return -1
endi
sql drop database dbk2
sql create database dbk3 keep 11
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 11 then
return -1
endi
sql drop database dbk3
sql create database dbk4 keep 13
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 13 then
return -1
endi
sql drop database dbk4
#sql_error create database $db keep 3651

# rows [200, 10000]
sql_error create database $db maxrows 199
#sql_error create database $db maxrows 10001

# cache [100, 10485760]
sql_error create database $db cache 0
#sql_error create database $db cache 10485761


# blocks [32, 4096 overwriten by 4096 if exceeds, Note added:2018-10-24]
#sql_error create database $db tblocks 31
#sql_error create database $db tblocks 4097

# ctime [30, 40960]
sql_error create database $db ctime 29
sql_error create database $db ctime 40961

# wal {0, 2}
#sql_error create database $db wal 0
sql_error create database $db wal -1
sql_error create database $db wal 3

# comp {0, 1, 2}
sql_error create database $db comp -1
sql_error create database $db comp 3

sql_error drop database $db
sql show databases
if $rows != 0 then
return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT
@ -69,7 +69,8 @@ sql_error create table $mt (ts $i_ts , col int) tags (tag1 int)
|
|||
sql_error create table $mt (ts timestamp, col $i_binary ) tags (tag1 int)
|
||||
sql_error create table $mt (ts timestamp, col $i_bigint ) tags (tag1 int)
|
||||
sql_error create table $mt (ts timestamp, col $i_smallint ) tags (tag1 int)
|
||||
sql_error create table $mt (ts timestamp, col $i_binary2 ) tags (tag1 int)
|
||||
sql create table $mt (ts timestamp, col $i_binary2 ) tags (tag1 int)
|
||||
sql drop table $mt
|
||||
sql_error create table $mt (ts timestamp, col $i_tinyint ) tags (tag1 int)
|
||||
sql_error create table $mt (ts timestamp, col $i_nchar ) tags (tag1 int)
|
||||
|
||||
|
@@ -101,7 +102,8 @@ sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_binary )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_bigint )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_smallint )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_tinyint )
-sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_binary2 )
+sql create table $mt (ts timestamp, col int) tags (tag1 $i_binary2 )
+sql drop table $mt
sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_bool )
sql_error create table $mt (ts timestamp, col int) tags (tag1 $nchar )
# correct use of nchar in tags
@@ -144,7 +146,8 @@ sql_error create table $mt (ts timestamp, col1 int) tags ( $ses int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $int int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $bint int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $binary int)
-sql_error create table $mt (ts timestamp, col1 int) tags ( $str int)
+sql create table $mt (ts timestamp, col1 int) tags ( $str int)
+sql drop table $mt
sql_error create table $mt (ts timestamp, col1 int) tags ( $tag int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $tags int)
sql_error create table $mt (ts timestamp, col1 int) tags ( $sint int)
@@ -162,8 +165,8 @@ sql create table $tb using $mt tags (-1)
# -x ng_tag_v
# return -1
#ng_tag_v:
-sql select tg from $tb
-if $data00 != -1 then
+sql show tags from $tb
+if $data05 != -1 then
  return -1
endi
sql drop table $tb
@@ -172,28 +175,21 @@ sql drop table $tb
print create_mt.sim unmatched_tag_types
sql reset query cache
sql create table $tb using $mt tags ('123')
-sql select tg from $tb
-print data00 = $data00
-if $data00 != 123 then
+sql show tags from $tb
+print data05 = $data05
+if $data05 != 123 then
  return -1
endi
sql drop table $tb

sql_error create table $tb using $mt tags (abc)
#the case below might need more consideration
sql_error create table $tb using $mt tags ('abc')
sql drop table if exists $tb
sql reset query cache
-sql create table $tb using $mt tags (1e1)
-sql select tg from $tb
-if $data00 != 10 then
-  return -1
-endi
-sql drop table $tb
-sql create table $tb using $mt tags ('1e1')
-sql select tg from $tb
-if $data00 != 10 then
-  return -1
-endi
+sql_error create table $tb using $mt tags (1e1)
+sql_error create table $tb using $mt tags ('1e1')
sql_error create table $tb using $mt tags (2147483649)

## case: chinese_char_in_metric
@@ -245,7 +241,7 @@ print chinese_char_in_metrics test passed

sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
  return -1
endi
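# a note on the unmatched_tag_types cases earlier in this file: the removed
# lines suggest numeric-looking tag literals were coerced to the int tag type
# ('123' -> 123, 1e1 -> 10), while the added lines expect 1e1 and '1e1' to be
# rejected now; non-numeric strings such as 'abc' and out-of-range values such
# as 2147483649 fail in both versions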
@@ -66,7 +66,8 @@ sql_error create table $tb (ts timestamp, col $i_binary )
sql_error create table $tb (ts timestamp, col $i_bigint )
sql_error create table $tb (ts timestamp, col $i_smallint )
sql_error create table $tb (ts timestamp, col $i_tinyint )
-sql_error create table $tb (ts timestamp, col $i_binary2 )
+sql create table $tb (ts timestamp, col $i_binary2 )
+sql drop table $tb
sql_error create table $tb (ts timestamp, col $nchar )
sql create table $tb (ts timestamp, col nchar(20))
sql show tables
@@ -105,7 +106,8 @@ sql_error create table $tb (ts timestamp, $ses int)
sql_error create table $tb (ts timestamp, $int int)
sql_error create table $tb (ts timestamp, $bint int)
sql_error create table $tb (ts timestamp, $binary int)
-sql_error create table $tb (ts timestamp, $str int)
+sql create table $tb (ts timestamp, $str int)
+sql drop table $tb
sql_error create table $tb (ts timestamp, $tag int)
sql_error create table $tb (ts timestamp, $tags int)
sql_error create table $tb (ts timestamp, $sint int)
@@ -157,7 +159,7 @@ print chinese_char_in_table_support test passed
print ========== create_tb.sim case6: table_already_exists
sql create table tbs (ts timestamp, col int)
sql insert into tbs values (now, 1)
-sql create table tbs (ts timestamp, col bool)
+sql_error create table tbs (ts timestamp, col bool)
#sql_error create table tb (ts timestamp, col bool)
print table_already_exists test passed
@@ -179,7 +181,7 @@ print table_already_exists test passed

sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
  return -1
endi
@@ -4,23 +4,17 @@ system sh/exec.sh -n dnode1 -s start
sql connect

print ======================== dnode1 start

$db = testdb

sql create database $db
sql use $db

sql create stable st2 (ts timestamp, f1 int) tags (id int, t1 int, t2 nchar(4), t3 double)

sql insert into tb1 using st2 (id, t1) tags(1,2) values (now, 1)

sql select id,t1,t2,t3 from tb1

if $rows != 1 then
  return -1
endi

if $data00 != 1 then
  return -1
endi
@@ -35,124 +29,101 @@ if $data03 != NULL then
endi

sql create table tb2 using st2 (t2,t3) tags ("12",22.0)

-sql select id,t1,t2,t3 from tb2;
-if $rows != 1 then
+sql show tags from tb2
+if $rows != 4 then
  return -1
endi

-if $data00 != NULL then
+if $data05 != NULL then
  return -1
endi
-if $data01 != NULL then
+if $data15 != NULL then
  return -1
endi
-if $data02 != 12 then
+if $data25 != 12 then
  return -1
endi
-if $data03 != 22.000000000 then
+if $data35 != 22.000000000 then
  return -1
endi

sql create table tb3 using st2 tags (1,2,"3",33.0);

-sql select id,t1,t2,t3 from tb3;
-if $rows != 1 then
+sql show tags from tb3;
+if $rows != 4 then
  return -1
endi

-if $data00 != 1 then
+if $data05 != 1 then
  return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
  return -1
endi
-if $data02 != 3 then
+if $data25 != 3 then
  return -1
endi
-if $data03 != 33.000000000 then
+if $data35 != 33.000000000 then
  return -1
endi

sql insert into tb4 using st2 tags(1,2,"33",44.0) values (now, 1);

-sql select id,t1,t2,t3 from tb4;
-if $rows != 1 then
+sql show tags from tb4;
+if $rows != 4 then
  return -1
endi

-if $data00 != 1 then
+if $data05 != 1 then
  return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
  return -1
endi
-if $data02 != 33 then
+if $data25 != 33 then
  return -1
endi
-if $data03 != 44.000000000 then
+if $data35 != 44.000000000 then
  return -1
endi

sql_error create table tb5 using st2() tags (3,3,"3",33.0);

sql_error create table tb6 using st2 (id,t1) tags (3,3,"3",33.0);

sql_error create table tb7 using st2 (id,t1) tags (3);

sql_error create table tb8 using st2 (ide) tags (3);

sql_error create table tb9 using st2 (id);

sql_error create table tb10 using st2 (id t1) tags (1,1);

sql_error create table tb10 using st2 (id,,t1) tags (1,1,1);

sql_error create table tb11 using st2 (id,t1,) tags (1,1,1);

sql create table tb12 using st2 (t1,id) tags (2,1);

-sql select id,t1,t2,t3 from tb12;
-if $rows != 1 then
+sql show tags from tb12;
+if $rows != 5 then
  return -1
endi

-if $data00 != 1 then
+if $data05 != 1 then
  return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
  return -1
endi
-if $data02 != NULL then
+if $data25 != NULL then
  return -1
endi
-if $data03 != NULL then
+if $data35 != NULL then
  return -1
endi

sql create table tb13 using st2 ("t1",'id') tags (2,1);

-sql select id,t1,t2,t3 from tb13;
-if $rows != 1 then
+sql show tags from tb13;
+if $rows != 2 then
  return -1
endi

-if $data00 != 1 then
+if $data05 != 1 then
  return -1
endi
-if $data01 != 2 then
+if $data15 != 2 then
  return -1
endi
-if $data02 != NULL then
+if $data25 != NULL then
  return -1
endi
-if $data03 != NULL then
+if $data35 != NULL then
  return -1
endi
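# the tb2/tb12 cases above rely on partial tag binding: tags left out of the
# column-style list after the super table name are stored as NULL; a commented
# minimal sketch against st2 as defined above (tbx is a hypothetical name)
#sql create table tbx using st2 (id) tags (9)
#sql show tags from tbx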
@@ -5,77 +5,72 @@ sql connect

print ========== db name and table name check in create and drop, describe
sql create database abc keep 36500
-sql create database 'abc123'
-sql create database '_ab1234'
-sql create database 'ABC123'
-sql create database '_ABC123'
+sql_error create database 'abc123'
+sql_error create database '_ab1234'
+sql_error create database 'ABC123'
+sql_error create database '_ABC123'
sql_error create database 'aABb123 '
sql_error create database ' xyz '
sql_error create database ' XYZ '

-sql use 'abc123'
-sql use '_ab1234'
-sql use 'ABC123'
-sql use '_ABC123'
+sql_error use 'abc123'
+sql_error use '_ab1234'
+sql_error use 'ABC123'
+sql_error use '_ABC123'
sql_error use 'aABb123'
sql_error use ' xyz '
sql_error use ' XYZ '

-sql drop database 'abc123'
-sql drop database '_ab1234'
-sql_error drop database 'ABC123'
-sql drop database '_ABC123'
-sql_error drop database 'aABb123'
-sql_error drop database ' xyz '
-sql_error drop database ' XYZ '
+sql_error drop database if exists 'abc123'
+sql_error drop database if exists '_ab1234'
+sql_error drop database if exists 'ABC123'
+sql_error drop database if exists '_ABC123'
+sql_error drop database if exists 'aABb123'
+sql_error drop database if exists ' xyz '
+sql_error drop database if exists ' XYZ '

sql use abc

sql create table abc.cc (ts timestamp, c int)
-sql create table 'abc.Dd' (ts timestamp, c int)
-sql create table 'abc'.ee (ts timestamp, c int)
-sql create table 'abc'.'FF' (ts timestamp, c int)
-sql create table abc.'gG' (ts timestamp, c int)
+sql_error create table 'abc.Dd' (ts timestamp, c int)
+sql_error create table 'abc'.ee (ts timestamp, c int)
+sql_error create table 'abc'.'FF' (ts timestamp, c int)
+sql_error create table abc.'gG' (ts timestamp, c int)
sql_error create table table.'a1' (ts timestamp, c int)
sql_error create table 'table'.'b1' (ts timestamp, c int)
sql_error create table 'table'.'b1' (ts timestamp, c int)

sql create table mt (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int, t2 nchar(20), t3 binary(20), t4 bigint, t5 smallint, t6 double)
sql create table sub_001 using mt tags ( 1 , 'tag_nchar' , 'tag_binary' , 4 , 5 , 6.1 )
sql_error create table sub_002 using mt tags( 2 , tag_nchar , tag_binary , 4 , 5 , 6.2 )
sql insert into sub_dy_tbl using mt tags ( 3 , 'tag_nchar' , 'tag_binary' , 4 , 5 , 6.3 ) values (now, 1, 2, 3.01, 4.02, 5, 6, true, 'binary_8', 'nchar_9')

sql describe abc.cc
-sql describe 'abc.Dd'
-sql describe 'abc'.ee
-sql describe 'abc'.'FF'
-sql describe abc.'gG'
+sql_error describe 'abc.Dd'
+sql_error describe 'abc'.ee
+sql_error describe 'abc'.'FF'
+sql_error describe abc.'gG'

sql describe cc
-sql describe 'Dd'
-sql describe ee
-sql describe 'FF'
-sql describe 'gG'
+sql_error describe 'Dd'
+sql_error describe ee
+sql_error describe 'FF'
+sql_error describe 'gG'

sql describe mt
sql describe sub_001
sql describe sub_dy_tbl

-sql describe Dd
-sql describe FF
-sql describe gG
+sql_error describe Dd
+sql_error describe FF
+sql_error describe gG

sql drop table abc.cc
-sql drop table 'abc.Dd'
-sql drop table 'abc'.ee
-sql drop table 'abc'.'FF'
-sql drop table abc.'gG'
+sql_error drop table 'abc.Dd'
+sql_error drop table 'abc'.ee
+sql_error drop table 'abc'.'FF'
+sql_error drop table abc.'gG'

sql drop table sub_001

sql drop table sub_dy_tbl
sql drop table mt
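# reading the hunk above: every single-quoted database or table name that the
# old test accepted (create, use, describe, drop) is now expected to error, so
# quoting identifiers this way appears to be rejected in the new version; only
# the plain unquoted path, e.g. abc.cc, keeps working end to end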
@@ -73,11 +73,10 @@ if $rows != 6 then
  return -1
endi

### select distinct
sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
  return -1
endi
File diff suppressed because it is too large
@@ -22,15 +22,10 @@ sql use $db
sql create table tb (ts timestamp, c1 int, c2 timestamp)
sql insert into tb values ('2019-05-05 11:30:00.000', 1, now)
sql insert into tb values ('2019-05-05 12:00:00.000', 1, now)
-sleep 500
sql import into tb values ('2019-05-05 11:00:00.000', -1, now)
-sleep 500
sql import into tb values ('2019-05-05 11:59:00.000', -1, now)
-sleep 500
sql import into tb values ('2019-05-04 08:00:00.000', -1, now)
-sleep 500
sql import into tb values ('2019-05-04 07:59:00.000', -1, now)
-sleep 500

sql select * from tb
if $rows != 6 then
@@ -57,11 +52,9 @@ endi

print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
-sleep 100

sql use $db
sql select * from tb
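# import is used above for rows whose timestamps predate data already in tb,
# while insert appends the in-order rows; a commented sketch of the same split
# (tb schema as above, timestamps chosen to land around the existing rows)
#sql insert into tb values ('2019-05-05 13:00:00.000', 1, now)
#sql import into tb values ('2019-05-03 00:00:00.000', -1, now)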
@@ -19,7 +19,7 @@ $stb = $stbPrefix . $i

sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db
print ====== create tables
sql use $db

@@ -36,8 +36,6 @@ while $x < $rowNum
endw
print ====== tables created

-sleep 500
-
$ts = $ts0 + $delta
$ts = $ts + 1
sql import into $tb values ( $ts , -1)
@@ -18,7 +18,7 @@ $stb = $stbPrefix . $i

sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db
print ====== create tables
sql use $db

@@ -35,8 +35,6 @@ while $x < $rowNum
endw
print ====== tables created

-sleep 500
-
$ts = $ts0 + $delta
$ts = $ts + 1
sql import into $tb values ( $ts , -1)
@@ -18,7 +18,7 @@ $stb = $stbPrefix . $i

sql drop database $db -x step1
step1:
-sql create database $db cache 16
+sql create database $db
print ====== create tables
sql use $db
sql reset query cache

@@ -35,16 +35,12 @@ while $x < $rowNum
endw
print ====== tables created

-sleep 500
-
$ts = $ts + 1
sql insert into $tb values ( $ts , -1, -1, -1, -1, -1)
$ts = $ts0 + $delta
$ts = $ts + 1
sql import into $tb values ( $ts , -2, -2, -2, -2, -2)

-sleep 500
-
sql show databases

sql select count(*) from $tb
@@ -14,9 +14,9 @@ system tsim/parser/gendata.sh
sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) tags(a int, b binary(12));

sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2))
-print ====== create tables success, starting import data
+print ====== create tables success, starting insert data

-sql import into tbx file '~/data.sql'
+sql insert into tbx file '~/data.sql'
+sql import into tbx file '~/data.sql'

sql select count(*) from tbx
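# both statements above load rows from the file produced by gendata.sh; the new
# version runs insert first and then import over the same '~/data.sql',
# presumably so the following count(*) can show that re-loading identical rows
# does not change the total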
@@ -11,9 +11,9 @@ sql create table mul_st (ts timestamp, col1 int) tags (tag1 int)

-# case: insert multiple recordes for multiple table in a query
+print =========== insert_multiTbl.sim case: insert multiple records for multiple table in a query
-$ts = 1500000000000
+$ts = 1600000000000
sql insert into mul_t0 using mul_st tags(0) values ( $ts , 0) ( $ts + 1s, 1) ( $ts + 2s, 2) mul_t1 using mul_st tags(1) values ( $ts , 10) ( $ts + 1s, 11) ( $ts + 2s, 12) mul_t2 using mul_st tags(2) values ( $ts , 20) ( $ts + 1s, 21) ( $ts + 2s, 22) mul_t3 using mul_st tags(3) values ( $ts , 30) ( $ts + 1s, 31) ( $ts + 2s, 32)
-sql select * from mul_st
+sql select * from mul_st order by ts, col1 ;
print rows = $rows
if $rows != 12 then
  return -1

@@ -40,10 +40,10 @@ endi
# insert values for specified columns
sql create table mul_st1 (ts timestamp, col1 int, col2 float, col3 binary(10)) tags (tag1 int, tag2 int, tag3 binary(8))
print =========== insert values for specified columns for multiple table in a query
-$ts = 1500000000000
+$ts = 1600000000000
sql insert into mul_t10 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(0, 'tag3-0') values ( $ts , 00, 'binary00') ( $ts + 1s, 01, 'binary01') ( $ts + 2s, 02, 'binary02') mul_t11 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(1, 'tag3-0') values ( $ts , 10, 'binary10') ( $ts + 1s, 11, 'binary11') ( $ts + 2s, 12, 'binary12') mul_t12 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(2, 'tag3-0') values ( $ts , 20, 'binary20') ( $ts + 1s, 21, 'binary21') ( $ts + 2s, 22, 'binary22') mul_t13 (ts, col1, col3) using mul_st1 (tag1, tag3) tags(3, 'tag3-0') values ( $ts , 30, 'binary30') ( $ts + 1s, 31, 'binary31') ( $ts + 2s, 32, 'binary32')

-sql select * from mul_st1
+sql select * from mul_st1 order by ts, col1 ;
print rows = $rows
if $rows != 12 then
  return -1

@@ -58,7 +58,7 @@ endi
if $data92 != NULL then
  return -1
endi
-if $data93 != @binary30@ then
+if $data93 != @binary12@ then
  return -1
endi
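# the long statements above batch several child tables into one insert; the
# general shape, sketched here with hypothetical names, is
#   insert into t1 using stb tags(...) values (...) (...)
#             t2 using stb tags(...) values (...) (...)
# and, as mul_t10 shows, each child table may also carry an explicit column
# list such as (ts, col1, col3) and a tag subset such as (tag1, tag3)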
@@ -53,7 +53,7 @@ endi
$col1 = 2
$col3 = 3
$col5 = 5
-sql create table $tb using $mt tags( $tag1 )
+sql create table if not exists $tb using $mt tags( $tag1 )
sql insert into $tb ( ts, col1, col3, col5) values ( $ts + 2000a, $col1 , $col3 , $col5 )
sql select * from $tb order by ts desc
if $rows != 3 then
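# the change above swaps a bare create, which fails once $tb already exists,
# for create table if not exists, which silently no-ops on an existing table,
# so the insert that follows keeps appending to the same child table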
@@ -99,7 +99,6 @@ if $rows != 1 then
endi

sql drop database $db
-sleep 100
sql create database $db
sql use $db
sql create table stb1 (ts timestamp, c1 int) tags(t1 int)

@@ -132,7 +131,6 @@ if $data21 != 1.000000000 then
endi

sql drop database $db
-sleep 100
sql create database $db
sql use $db
sql create table stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 nchar(10), c6 binary(20)) tags(t1 int, t2 bigint, t3 double, t4 float, t5 nchar(10))

@@ -146,7 +144,7 @@ sql insert into tb1 values ('2018-09-17 09:00:00.000', '1', 1, 1, 1, '涛思ncha
sql insert into tb2 values ('2018-09-17 09:00:00.000', 1, '1', 1, 1, '涛思nchar', 'quoted bigint')
sql insert into tb3 values ('2018-09-17 09:00:00.000', 1, 1, '1', 1, '涛思nchar', 'quoted float')
sql insert into tb4 values ('2018-09-17 09:00:00.000', 1, 1, 1, '1', '涛思nchar', 'quoted double')
-sql select * from stb
+sql select * from stb order by t1
if $rows != 5 then
  return -1
endi

@@ -228,5 +226,4 @@ endi
# return -1
#endi

-
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -59,7 +59,6 @@ run tsim/parser/interp_test.sim

print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-sleep 500
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
@@ -21,7 +21,7 @@ print ====== use db
sql use $db

##### select interp from table
-print ====== select intp from table
+print ====== select interp from table
$tb = $tbPrefix . 0
## interp(*) from tb
sql select interp(*) from $tb where ts = $ts0
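# interp(*) asks for the value of every column at one exact instant; with
# ts = $ts0 landing on a stored row, it is expected to return that row as-is,
# which is what the checks that follow in interp_test.sim presumably verify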
Some files were not shown because too many files have changed in this diff.