merge 3.0

Xiaoyu Wang 2022-08-09 11:36:08 +08:00
commit 07ef00e89f
95 changed files with 1701 additions and 912 deletions

View File

@ -161,12 +161,7 @@ git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
The Go connector and Grafana plugin are in separate repositories. To install them, run this command in the TDengine directory:
```bash
git submodule update --init --recursive
```
The Go connector and Grafana plugin have been moved to separate repositories.
If downloading over the https protocol is slow, you can add the following two lines to ~/.gitconfig to download over the ssh protocol instead. You need to upload your ssh public key to GitHub first; see the official GitHub documentation for details.
```
@ -187,7 +182,6 @@ git submodule update --init --recursive
This script is equivalent to executing the following commands:
```bash
git submodule update --init --recursive
mkdir debug
cd debug
cmake .. -DBUILD_TOOLS=true

View File

@ -164,12 +164,7 @@ git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
The connectors for Go & Grafana and some tools have been moved to separate repositories,
so you should run this command in the TDengine directory to install them:
```bash
git submodule update --init --recursive
```
The connectors for Go & Grafana and some tools have been moved to separate repositories.
You can modify ~/.gitconfig to use the ssh protocol instead of https for better download speed. You need to upload your ssh public key to GitHub first. Please refer to the official GitHub documentation for details.
@ -191,7 +186,6 @@ You can run the bash script `build.sh` to build both TDengine and taosTools incl
It is equivalent to executing the following commands:
```bash
git submodule update --init --recursive
mkdir debug
cd debug
cmake .. -DBUILD_TOOLS=true

8
build.sh Normal file
View File

@ -0,0 +1,8 @@
#!/bin/bash
if [ ! -d debug ]; then
mkdir debug || echo -e "failed to make directory for build"
fi
cd debug && cmake .. -DBUILD_TOOLS=true && make

View File

@ -115,9 +115,7 @@ ELSE ()
ENDIF ()
MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
ADD_DEFINITIONS("-D_TD_ARM_")
ELSE ()
IF (TD_INTEL_64 OR TD_INTEL_32)
ADD_DEFINITIONS("-msse4.2")
IF("${FMA_SUPPORT}" MATCHES "true")
MESSAGE(STATUS "turn fma function support on")

View File

@ -85,10 +85,14 @@ IF ("${CPUTYPE}" STREQUAL "")
MESSAGE(STATUS "The current platform is aarch32")
SET(PLATFORM_ARCH_STR "arm")
SET(TD_ARM_32 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
MESSAGE(STATUS "The current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
SET(TD_ARM_64 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_64")
ENDIF ()
ELSE ()
# if generate ARM version:
@ -96,15 +100,21 @@ ELSE ()
IF (${CPUTYPE} MATCHES "aarch32")
SET(PLATFORM_ARCH_STR "arm")
MESSAGE(STATUS "input cpuType: aarch32")
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
SET(TD_ARM_32 TRUE)
ELSEIF (${CPUTYPE} MATCHES "aarch64")
SET(PLATFORM_ARCH_STR "arm64")
MESSAGE(STATUS "input cpuType: aarch64")
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_64")
SET(TD_ARM_64 TRUE)
ELSEIF (${CPUTYPE} MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS("-D_TD_MIPS_")
ADD_DEFINITIONS("-D_TD_MIPS_64")
ELSEIF (${CPUTYPE} MATCHES "x64")
SET(PLATFORM_ARCH_STR "amd64")
MESSAGE(STATUS "input cpuType: x64")

44
docs/assets/tdengine.svg Normal file
View File

@ -0,0 +1,44 @@
<?xml version="1.0" encoding="utf-8"?>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 500 105" style="enable-background:new 0 0 1000 210;" xml:space="preserve">
<style type="text/css">
.st0{fill:#18499D;}
.st1{fill-rule:evenodd;clip-rule:evenodd;fill:#18499D;}
</style>
<g>
<path class="st0" d="M153.2,27.1h-16v49h-11.1v-49h-15.9v-9.7h43L153.2,27.1z"/>
<path class="st0" d="M180.6,17.4c17.1,0,28.5,4.9,28.5,29.4c0,22.7-11.4,29.4-28.5,29.4h-19V17.4H180.6z M180.1,66.5
c11.3,0,17.6-4.3,17.6-19.8c0-16.3-6.5-20-17.6-20h-7.4v39.8L180.1,66.5z"/>
<path class="st0" d="M259.4,55.3l-30,2c0.2,7.7,4,11.2,13.1,11.2c5.1-0.1,10.2-1.1,14.9-3.1V74c-2.7,1.6-9.3,3-16.8,3
c-13.9,0-21.9-5.2-21.9-23.7s8-23.7,21.9-23.7c15.8,0,19.1,9,19.1,19.7C259.8,51.3,259.7,53.3,259.4,55.3z M250,48.2
c0-5.7-1.2-10.5-9.3-10.5c-8.5,0-11,4.1-11.3,12L250,48.2z"/>
<path class="st0" d="M310.8,44.8v31.3h-10.6V47.8c0-6.4-1.4-9.4-7.9-9.4c-5.4,0-9.2,1.5-11.2,7.3v30.5h-10.6V30.4h10.6v5.8
c2.6-4.7,8-6.6,14.4-6.6C307.1,29.6,310.8,35.1,310.8,44.8z"/>
<path class="st0" d="M364.8,30.4v42.2c0,12.6-6.5,20.4-24.3,20.4c-4.1,0-8.3-0.4-12.4-1.1v-8.5c3.9,0.9,7.8,1.3,11.8,1.3
c10.1,0,14.4-3.3,14.4-12.5v-3.2c-2,4.5-5.7,7.2-14.1,7.2c-15,0-18.7-9.4-18.7-23.3c0-12.6,3.7-23.3,18.7-23.3
c8.6,0,12.4,3.1,14.1,7.5v-6.7H364.8z M354.7,52.8c0-9.2-2-15.1-11.9-15.1c-9.4,0-10.4,7.3-10.4,15.1c0,8.5,1.3,15.3,10.4,15.3
C352.7,68.2,354.7,62.6,354.7,52.8z"/>
<path class="st0" d="M377.3,17.6c0-5.2,1.3-5.7,6-5.7s6,0.5,6,5.7s-1.3,5.8-6,5.8S377.3,22.7,377.3,17.6z M378,30.4h10.6v45.7H378
L378,30.4z"/>
<path class="st0" d="M442,44.8v31.3h-10.5V47.8c0-6.4-1.4-9.4-7.9-9.4c-5.4,0-9.2,1.5-11.2,7.3v30.5h-10.6V30.4h10.6v5.8
c2.6-4.7,8-6.6,14.4-6.6C438.3,29.6,442,35.1,442,44.8z"/>
<path class="st0" d="M493.3,55.3l-30,2c0.3,7.7,4,11.2,13.1,11.2c5.1-0.1,10.2-1.1,14.9-3.1V74c-2.7,1.6-9.3,3-16.8,3
c-13.9,0-21.9-5.2-21.9-23.7s8-23.7,21.9-23.7c15.8,0,19.1,9,19.1,19.7C493.8,51.3,493.6,53.3,493.3,55.3z M483.9,48.2
c0-5.7-1.2-10.5-9.3-10.5c-8.5,0-11,4.1-11.3,12L483.9,48.2z"/>
<path class="st1" d="M48.8,11.8c3.1,0,5.6,2.5,5.6,5.6c0,3.1-2.5,5.6-5.6,5.6s-5.6-2.5-5.6-5.6c0,0,0,0,0,0
C43.3,14.3,45.7,11.8,48.8,11.8z M11.8,38.5c3.1,0,5.6,2.5,5.6,5.6c0,3.1-2.5,5.6-5.6,5.6s-5.6-2.5-5.6-5.6
C6.3,41,8.8,38.5,11.8,38.5z M26.3,82.1c3.1,0,5.6,2.5,5.6,5.6s-2.5,5.6-5.6,5.6c-3.1,0-5.6-2.5-5.6-5.6
C20.7,84.6,23.2,82.1,26.3,82.1z M71.3,82.1c3.1,0,5.6,2.5,5.6,5.6s-2.5,5.6-5.6,5.6s-5.6-2.5-5.6-5.6
C65.7,84.6,68.2,82.1,71.3,82.1z M57,44.9H40.6l-5,15.5l13.2,9.6L62,60.5L57,44.9L57,44.9z M41.1,43.6L46.8,26
c-0.4-0.1-0.9-0.2-1.3-0.4l-5.8,18h-19c0,0.2,0,0.3,0,0.5c0,0.3,0,0.6,0,0.8h18.5l-4.8,14.7l-15-10.9c-0.2,0.4-0.5,0.7-0.8,1.1
l12.8,9.3l0,0l2.6,1.9l-0.9,2.5l0,0l-5,15.5c0.4,0.1,0.9,0.2,1.3,0.4l5.7-17.5l12.5,9.1L32.9,81.7c0.3,0.3,0.6,0.7,0.8,1l15.1-11
l15.1,11c0.2-0.4,0.5-0.7,0.8-1L49.9,70.9l12.5-9.1l0.6,1.7l0,0l5.2,15.9c0.4-0.2,0.8-0.3,1.3-0.4l-5.8-18l2.6-1.9L79,49.8
c-0.3-0.3-0.5-0.7-0.8-1.1l-12.4,9L65.4,58l-2.3,1.6l-4.8-14.7h18.5c0-0.3,0-0.6,0-0.9c0-0.2,0-0.3,0-0.5h-19l-5.8-18
c-0.4,0.2-0.8,0.3-1.3,0.4l5.7,17.6L41.1,43.6z M18.9,38.6l22.5-16.4c0.2,0.4,0.5,0.7,0.8,1L19.6,39.7C19.4,39.3,19.1,39,18.9,38.6
L18.9,38.6z M22.9,79.4l-8.7-26.8c0.4-0.1,0.8-0.3,1.2-0.4L24.2,79C23.7,79.1,23.3,79.3,22.9,79.4L22.9,79.4z M62.5,88H35.2
c0-0.1,0-0.2,0-0.4c0-0.3,0-0.6,0-1h27.3c0,0.3,0,0.6,0,1C62.4,87.8,62.4,87.9,62.5,88L62.5,88z M83.5,52.7l-8.7,26.8
c-0.4-0.2-0.8-0.3-1.2-0.4l8.7-26.8C82.6,52.4,83,52.6,83.5,52.7z M56.3,22.2l22.5,16.4c-0.3,0.3-0.5,0.7-0.7,1.1L55.4,23.3
C55.7,22.9,56,22.6,56.3,22.2z M85.8,38.5c3.1,0,5.6,2.5,5.6,5.6c0,3.1-2.5,5.6-5.6,5.6s-5.6-2.5-5.6-5.6
C80.2,41,82.7,38.5,85.8,38.5z"/>
</g>
</svg>

Size: 3.7 KiB

View File

@ -31,6 +31,6 @@ func main() {
log.Fatalln("scan error:\n", err)
return
}
log.Fatalln(r.ts, r.current)
log.Println(r.ts, r.current)
}
}

View File

@ -1,13 +1,13 @@
import taos
lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3",
"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"]
lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2",
"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3",
"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2",
"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3",
"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3",
"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2",
"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2",
"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2"]
def get_connection() -> taos.TaosConnection:

View File

@ -51,35 +51,16 @@ $ taos
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
Server is Enterprise trial Edition, ver:3.0.0.0 and will expire at 2022-09-24 15:29:46.
Server is Community Edition.
taos>
```
## Start the REST service
taosAdapter is the TDengine component that provides the REST service. The following command starts both the `taosd` and `taosadapter` services in the container:
```bash
docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine
```
To start only `taosadapter`:
```bash
docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:3.0.0.0 taosadapter
```
To start only `taosd`:
```bash
docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:3.0.0.0
```
## Access the REST API
taosAdapter is the TDengine component that provides the REST service. By default, the Docker image starts both the TDengine daemon taosd and taosAdapter.
You can use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```
@ -96,6 +77,22 @@ curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
For details on the TDengine REST API, see the [official documentation](/reference/rest-api/).
## Start the REST service separately
To start only `taosadapter`:
```bash
docker run -d --network=host --name tdengine-taosa -e TAOS_FIRST_EP=tdengine-taosd tdengine/tdengine:3.0.0.0 taosadapter
```
To start only `taosd`:
```bash
docker run -d --network=host --name tdengine-taosd -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:3.0.0.0
```
Note that the commands above deploy taosAdapter separately with the container in host network mode. For other network access modes, set the hostname, DNS, and any other necessary network configuration.
## Insert data
You can use taosBenchmark, the tool bundled with TDengine, to quickly try out data ingestion in TDengine.
@ -107,7 +104,7 @@ For details on the TDengine REST API, see the [official documentation](/reference/rest-api/).
```
This command automatically creates a supertable meters in the database test, with 10,000 tables under it named "d0" through "d9999". Each table has 10,000 records, and every record has four fields (ts, current, voltage, phase), with timestamps from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId: groupId is set to 1 through 10, and location is set to "California.SanFrancisco" or "California.LosAngeles".
This command automatically creates a supertable meters in the database test, with 10,000 tables under it named "d0" through "d9999". Each table has 10,000 records, and every record has four fields (ts, current, voltage, phase), with timestamps from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupId: groupId is set to 1 through 10, and location is set to city names such as "San Francisco" or "Los Angeles".
This command finishes inserting 100 million records quickly; the exact time depends on hardware performance.
@ -129,10 +126,10 @@ taos> select count(*) from test.meters;
taos> select avg(current), max(voltage), min(phase) from test.meters;
```
Query the total number of records with location="California.SanFrancisco":
Query the total number of records with location="San Francisco":
```sql
taos> select count(*) from test.meters where location="California.SanFrancisco";
taos> select count(*) from test.meters where location="San Francisco";
```
Query the average, maximum, minimum, and other aggregates over all records with groupId=10:

View File

@ -46,19 +46,19 @@ apt-get installation applies only to Debian or Ubuntu systems
</TabItem>
<TabItem label="Deb 安装" value="debinst">
1. Download the deb package from the official website, e.g. TDengine-server-2.4.0.7-Linux-x64.deb
2. Go to the directory containing the TDengine-server-2.4.0.7-Linux-x64.deb package and run the following install command:
1. Download the deb package from the official website, e.g. TDengine-server-3.0.0.10002-Linux-x64.deb
2. Go to the directory containing the TDengine-server-3.0.0.10002-Linux-x64.deb package and run the following install command:
```
$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb
(Reading database ... 137504 files and directories currently installed.)
Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ...
TDengine is removed successfully!
Unpacking tdengine (2.4.0.7) over (2.4.0.7) ...
Setting up tdengine (2.4.0.7) ...
$ sudo dpkg -i TDengine-server-3.0.0.10002-Linux-x64.deb
Selecting previously unselected package tdengine.
(Reading database ... 119653 files and directories currently installed.)
Preparing to unpack TDengine-server-3.0.0.10002-Linux-x64.deb ...
Unpacking tdengine (3.0.0.10002) ...
Setting up tdengine (3.0.0.10002) ...
Start to install TDengine...
System hostname is: ubuntu-1804
System hostname is: v3cluster-0002
Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
OR leave it blank to build one:
@ -68,92 +68,100 @@ Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /e
To configure TDengine : edit /etc/taos/taos.cfg
To start TDengine : sudo systemctl start taosd
To access TDengine : taos -h ubuntu-1804 to login into TDengine server
To access TDengine : taos -h v3cluster-0002 to login into TDengine server
TDengine is installed successfully!
```
</TabItem>
<TabItem label="RPM 安装" value="rpminst">
1. Download the rpm package from the official website, e.g. TDengine-server-2.4.0.7-Linux-x64.rpm
2. Go to the directory containing the TDengine-server-2.4.0.7-Linux-x64.rpm package and run the following install command:
1. Download the rpm package from the official website, e.g. TDengine-server-3.0.0.10002-Linux-x64.rpm
2. Go to the directory containing the TDengine-server-3.0.0.10002-Linux-x64.rpm package and run the following install command:
```
$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm
$ sudo rpm -ivh TDengine-server-3.0.0.10002-Linux-x64.rpm
Preparing... ################################# [100%]
Stop taosd service success!
Updating / installing...
1:tdengine-2.4.0.7-3 ################################# [100%]
1:tdengine-3.0.0.10002-3 ################################# [100%]
Start to install TDengine...
System hostname is: centos7
System hostname is: chenhaoran01
Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
OR leave it blank to build one:
Enter your email address for priority support or enter empty to skip:
Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service.
To configure TDengine : edit /etc/taos/taos.cfg
To start TDengine : sudo systemctl start taosd
To access TDengine : taos -h centos7 to login into TDengine server
To access TDengine : taos -h chenhaoran01 to login into TDengine server
TDengine is installed successfully!
```
</TabItem>
<TabItem label="tar.gz 安装" value="tarinst">
1. Download the tar.gz package from the official website, e.g. TDengine-server-2.4.0.7-Linux-x64.tar.gz
2. Go to the directory containing the TDengine-server-2.4.0.7-Linux-x64.tar.gz package, extract it, enter the subdirectory, and run the install.sh script inside:
1. Download the tar.gz package from the official website, e.g. TDengine-server-3.0.0.10002-Linux-x64.tar.gz
2. Go to the directory containing the TDengine-server-3.0.0.10002-Linux-x64.tar.gz package, extract it, enter the subdirectory, and run the install.sh script inside:
```
$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
TDengine-enterprise-server-2.4.0.7/
TDengine-enterprise-server-2.4.0.7/driver/
TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt
TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7
TDengine-enterprise-server-2.4.0.7/install.sh
TDengine-enterprise-server-2.4.0.7/examples/
$ tar -zxvf TDengine-server-3.0.0.10002-Linux-x64.tar.gz
TDengine-server-3.0.0.10002/
TDengine-server-3.0.0.10002/driver/
TDengine-server-3.0.0.10002/driver/libtaos.so.3.0.0.10002
TDengine-server-3.0.0.10002/driver/vercomp.txt
TDengine-server-3.0.0.10002/release_note
TDengine-server-3.0.0.10002/taos.tar.gz
TDengine-server-3.0.0.10002/install.sh
...
$ ll
total 43816
drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./
drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../
drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/
-rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz
total 56832
drwxr-xr-x 3 root root 4096 Aug 8 10:29 ./
drwxrwxrwx 6 root root 4096 Aug 5 16:45 ../
drwxr-xr-x 4 root root 4096 Aug 4 18:03 TDengine-server-3.0.0.10002/
-rwxr-xr-x 1 root root 58183066 Aug 8 10:28 TDengine-server-3.0.0.10002-Linux-x64.tar.gz*
$ cd TDengine-enterprise-server-2.4.0.7/
$ cd TDengine-server-3.0.0.10002/
$ ll
total 40784
drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./
drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../
drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/
drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/
-rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh*
-rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz
total 51612
drwxr-xr-x 4 root root 4096 Aug 4 18:03 ./
drwxr-xr-x 3 root root 4096 Aug 8 10:29 ../
drwxr-xr-x 2 root root 4096 Aug 4 18:03 driver/
drwxr-xr-x 11 root root 4096 Aug 4 18:03 examples/
-rwxr-xr-x 1 root root 30980 Aug 4 18:03 install.sh*
-rw-r--r-- 1 root root 6724 Aug 4 18:03 release_note
-rw-r--r-- 1 root root 52793079 Aug 4 18:03 taos.tar.gz
$ sudo ./install.sh
Start to update TDengine...
Start to install TDengine...
Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service.
Nginx for TDengine is updated successfully!
System hostname is: v3cluster-0002
Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join
OR leave it blank to build one:
Enter your email address for priority support or enter empty to skip:
To configure TDengine : edit /etc/taos/taos.cfg
To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml
To configure taosadapter (if has) : edit /etc/taos/taosadapter.toml
To start TDengine : sudo systemctl start taosd
To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060
To access TDengine : taos -h v3cluster-0002 to login into TDengine server
TDengine is updated successfully!
Install taoskeeper as a standalone service
taoskeeper is installed, enable it by `systemctl enable taoskeeper`
TDengine is installed successfully!
```
:::info

View File

@ -8,32 +8,118 @@ description: "Supports user-coded aggregate and scalar functions, embedded in queries
Since version 2.2.0.0, TDengine has supported defining UDFs in C/C++. The following sections explain how to use UDFs with examples.
Users can implement two kinds of functions through UDFs: scalar functions and aggregate functions.
Users can implement two kinds of functions through UDFs: scalar functions and aggregate functions. A scalar function returns one value per row of data, e.g. absolute value abs, sine sin, string concatenation concat. An aggregate function returns one value for multiple rows of data, e.g. average avg, maximum max. To implement a UDF you implement a prescribed set of interface functions, whose names are either the UDF name itself or the UDF name joined with a specific suffix (_start, _finish, _init, _destroy); scalarfn, aggfn, and udf below must be replaced with the actual UDF function name.
- A scalar function must implement the scalar interface function scalarfn.
- An aggregate function must implement the aggregate interface functions aggfn_start, aggfn, and aggfn_finish.
- For both scalar and aggregate functions, implement udf_init if initialization is needed and udf_destroy if cleanup is needed.
## Defining UDFs in C/C++
## Implementing a scalar function
The template for implementing a scalar function is as follows:
```c
#include "taos.h"
#include "taoserror.h"
#include "taosudf.h"
### Scalar functions
// initialization function. if no initialization, we can skip definition of it. The initialization function shall be concatenation of the udf name and _init suffix
// @return error number defined in taoserror.h
int32_t scalarfn_init() {
// initialization.
return TSDB_CODE_SUCCESS;
}
Users can define their own scalar computation function according to the following function template:
// scalar function main computation function
// @param inputDataBlock, input data block composed of multiple columns with each column defined by SUdfColumn
// @param resultColumn, output column
// @return error number defined in taoserror.h
int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn* resultColumn) {
// read data from inputDataBlock and process, then output to resultColumn.
return TSDB_CODE_SUCCESS;
}
`int32_t udf(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
// cleanup function. if no cleanup related processing, we can skip definition of it. The destroy function shall be concatenation of the udf name and _destroy suffix.
// @return error number defined in taoserror.h
int32_t scalarfn_destroy() {
// clean up
return TSDB_CODE_SUCCESS;
}
```
scalarfn is a placeholder for the function name and must be replaced with your function name, e.g. bit_and.
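As an illustration only, here is a minimal sketch of a complete scalar interface function. The function name my_and and the assumption that every input column is INT are hypothetical, and the sketch assumes the udfColDataIsNull/udfColDataGetData/udfColDataSet helpers provided by taosudf.h (see the data structure section below):
```c
#include <stdbool.h>
#include "taos.h"
#include "taoserror.h"
#include "taosudf.h"

// Hypothetical scalar UDF "my_and": per-row bitwise AND of all INT input columns.
int32_t my_and(SUdfDataBlock* inputDataBlock, SUdfColumn* resultColumn) {
  for (int32_t r = 0; r < inputDataBlock->numOfRows; ++r) {
    int32_t result = -1;  // all bits set
    bool    isNull = false;
    for (int32_t c = 0; c < inputDataBlock->numCols; ++c) {
      SUdfColumn* col = inputDataBlock->udfCols[c];
      if (udfColDataIsNull(col, r)) {  // unlike bit_and below, this sketch yields NULL on any NULL input
        isNull = true;
        break;
      }
      result &= *(int32_t*)udfColDataGetData(col, r);
    }
    udfColDataSet(resultColumn, r, (char*)&result, isNull);
  }
  resultColumn->colData.numOfRows = inputDataBlock->numOfRows;
  return TSDB_CODE_SUCCESS;
}
```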
## Implementing an aggregate function
The template for implementing an aggregate function is as follows:
```c
#include "taos.h"
#include "taoserror.h"
#include "taosudf.h"
// Initialization function. if no initialization, we can skip definition of it. The initialization function shall be concatenation of the udf name and _init suffix
// @return error number defined in taoserror.h
int32_t aggfn_init() {
// initialization.
return TSDB_CODE_SUCCESS;
}
// aggregate start function. The intermediate value or the state(@interBuf) is initialized in this function. The function name shall be concatenation of udf name and _start suffix
// @param interbuf intermediate value to initialize
// @return error number defined in taoserror.h
int32_t aggfn_start(SUdfInterBuf* interBuf) {
// initialize intermediate value in interBuf
return TSDB_CODE_SUCCESS;
}
// aggregate reduce function. This function aggregate old state(@interbuf) and one data bock(inputBlock) and output a new state(@newInterBuf).
// @param inputBlock input data block
// @param interBuf old state
// @param newInterBuf new state
// @return error number defined in taoserror.h
int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
// read from inputBlock and interBuf and output to newInterBuf
return TSDB_CODE_SUCCESS;
}
// aggregate function finish function. This function transforms the intermediate value(@interBuf) into the final output(@result). The function name must be concatenation of aggfn and _finish suffix.
// @interBuf : intermediate value
// @result: final result
// @return error number defined in taoserror.h
int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result) {
// read data from inputDataBlock and process, then output to result
return TSDB_CODE_SUCCESS;
}
// cleanup function. if no cleanup related processing, we can skip definition of it. The destroy function shall be concatenation of the udf name and _destroy suffix.
// @return error number defined in taoserror.h
int32_t aggfn_destroy() {
// clean up
return TSDB_CODE_SUCCESS;
}
```
aggfn is a placeholder for the function name and must be replaced with your own function name, e.g. l2norm.
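As an illustration only, here is a minimal sketch of the three aggregate interface functions for a hypothetical my_l2norm over DOUBLE input; it assumes the buf/bufLen/numOfResult members of SUdfInterBuf and the column helpers from taosudf.h shown later in this document:
```c
#include <math.h>
#include "taos.h"
#include "taoserror.h"
#include "taosudf.h"

// Hypothetical aggregate "my_l2norm": the intermediate state is one double
// holding the running sum of squares.
int32_t my_l2norm_start(SUdfInterBuf* interBuf) {
  *(double*)interBuf->buf = 0;
  interBuf->bufLen = sizeof(double);
  interBuf->numOfResult = 0;  // nothing produced yet
  return TSDB_CODE_SUCCESS;
}

int32_t my_l2norm(SUdfDataBlock* inputBlock, SUdfInterBuf* interBuf, SUdfInterBuf* newInterBuf) {
  double sum = *(double*)interBuf->buf;  // old state
  for (int32_t c = 0; c < inputBlock->numCols; ++c) {
    SUdfColumn* col = inputBlock->udfCols[c];
    for (int32_t r = 0; r < inputBlock->numOfRows; ++r) {
      if (udfColDataIsNull(col, r)) continue;          // aggregate skips NULLs
      double v = *(double*)udfColDataGetData(col, r);  // assumes DOUBLE columns
      sum += v * v;
    }
  }
  *(double*)newInterBuf->buf = sum;  // new state
  newInterBuf->bufLen = sizeof(double);
  newInterBuf->numOfResult = 1;
  return TSDB_CODE_SUCCESS;
}

int32_t my_l2norm_finish(SUdfInterBuf* interBuf, SUdfInterBuf* result) {
  *(double*)result->buf = sqrt(*(double*)interBuf->buf);
  result->bufLen = sizeof(double);
  result->numOfResult = 1;  // the final result holds 0 or 1 row
  return TSDB_CODE_SUCCESS;
}
```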
## Interface function definitions
The name of an interface function is the UDF name, or the UDF name joined with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf must be replaced with the actual UDF function name.
The return value of an interface function indicates success or failure: on error it returns an error code, as defined in taoserror.h.
### Scalar interface function
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
Here udf is a placeholder for the function name; a function implemented from the template above performs a scalar computation over a block of rows.
- The parameters of scalarFunction have the following meanings:
- The parameters have the following meanings:
- inputDataBlock: the input data block
- resultColumn: the output column
### Aggregate functions
### Aggregate interface functions
Users can define their own aggregate function according to the following function templates.
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
`int32_t udf_start(SUdfInterBuf *interBuf)`
`int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)`
`int32_t udf(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)`
`int32_t udf_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
Here udf is a placeholder for the function name. The parameters have the following meanings:
`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
Here aggfn is a placeholder for the function name. The parameters have the following meanings:
- interBuf: the intermediate-result buffer.
- inputBlock: the input data block.
@ -41,21 +127,17 @@ description: "Supports user-coded aggregate and scalar functions, embedded in queries
- result: the final result.
The computation proceeds as follows: udf_start is called first to generate the result buffer; the relevant data is then divided into multiple row blocks, and udf is called on each row block to update the intermediate result; finally udf_finish is called to produce the final result from the intermediate result, and the final result can contain only 0 or 1 row of data.
The computation proceeds as follows: aggfn_start is called first to generate the result buffer; the relevant data is then divided into multiple row blocks, and aggfn is called on each row block to update the intermediate result; finally aggfn_finish is called to produce the final result from the intermediate result, and the final result can contain only 0 or 1 row of data.
### UDF initialization and cleanup
`int32_t udf_init()`
`int32_t udf_destroy()`
Here udf is a placeholder for the function name. udf_init performs initialization and udf_destroy performs cleanup.
Here udf is a placeholder for the function name and can be replaced with your own function name. udf_init performs initialization and udf_destroy performs cleanup. If there is no initialization work, udf_init need not be defined; if there is no cleanup work, udf_destroy need not be defined.
:::note
Even if the corresponding function needs no actual functionality, an empty function must still be implemented.
:::
### UDF data structures
## UDF data structures
```c
typedef struct SUdfColumnMeta {
int16_t type;
@ -103,6 +185,13 @@ typedef struct SUdfInterBuf {
int8_t numOfResult; //zero or one
} SUdfInterBuf;
```
The data structures are explained as follows:
- SUdfDataBlock contains the number of rows numOfRows and the number of columns numCols; udfCols[i] (0 <= i <= numCols-1) is each column, of type SUdfColumn*.
- SUdfColumn contains the column's data type definition colMeta and the column data colData.
- The members of SUdfColumnMeta are defined the same way as the data types in taos.h.
- SUdfColumnData can hold variable-length data: varLenCol describes variable-length data and fixLenCol describes fixed-length data.
- SUdfInterBuf defines the intermediate buffer and numOfResult, the number of results in the buffer.
To make these data structures easier to work with, convenience functions are provided, defined in taosudf.h.
@ -151,10 +240,10 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
- output_type: the data type of this function's result. Unlike the itype parameter of udfNormalFunc above, this is written as a type name rather than in numeric notation;
- buffer_size: the buffer size for intermediate results, in bytes. It may be left unset if not used.
For example, the following statement creates libsqrsum.so as a UDF usable in the system:
For example, the following statement creates libl2norm.so as a UDF usable in the system:
```sql
CREATE AGGREGATE FUNCTION sqr_sum AS "/home/taos/udf_example/libsqrsum.so" OUTPUTTYPE DOUBLE bufsize 8;
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```
### Managing UDFs
@ -187,6 +276,8 @@ SELECT X(c1,c2) FROM table/stable;
### Scalar function example: [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
bit_and implements the bitwise AND of multiple columns. If there is only one column, that column is returned. bit_and ignores null values.
<details>
<summary>bit_and.c</summary>
@ -196,13 +287,15 @@ SELECT X(c1,c2) FROM table/stable;
</details>
### Aggregate function example: [sqr_sum](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sqr_sum.c)
### Aggregate function example: [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
l2norm computes the L2 norm over all data in the input columns: each value is squared, the squares are summed, and the square root of the sum is taken.
<details>
<summary>sqr_sum.c</summary>
<summary>l2norm.c</summary>
```c
{{#include tests/script/sh/sqr_sum.c}}
{{#include tests/script/sh/l2norm.c}}
```
</details>

View File

@ -56,8 +56,8 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (2.4.0.7) ...
(Reading database ... 120119 files and directories currently installed.)
Removing tdengine (3.0.0.10002) ...
TDengine is removed successfully!
```
@ -81,10 +81,7 @@ TDengine is removed successfully!
```
$ rmtaos
Nginx for TDengine is running, stopping it...
TDengine is removed successfully!
taosKeeper is removed successfully!
```
</TabItem>

View File

@ -7,49 +7,47 @@ title: Frequently Asked Questions & Feedback
If the information in this FAQ does not help you and you need technical support and assistance from the TDengine team, please package the contents of the following two directories:
1. /var/log/taos (if the default path has not been changed)
2. /etc/taos
2. /etc/taos (if no other configuration file path has been specified)
Attach the necessary problem description, including the TDengine version, the platform and environment, the operations performed when the problem occurred, the symptoms, and the approximate time, and submit an issue on [GitHub](https://github.com/taosdata/TDengine).
To ensure there is enough debug information, if the problem is reproducible, append the line "debugFlag 135" (without the quotes) to the end of /etc/taos/taos.cfg, restart taosd, reproduce the problem, and then submit. You can also temporarily set taosd's log level with the following SQL statement:
```
alter dnode <dnode_id> debugFlag 135;
alter dnode <dnode_id> 'debugFlag' '135';
```
Get the dnode_id from the output of the show dnodes; command.
When the system is running normally, however, be sure to set debugFlag back to 131; otherwise large volumes of log output will be generated and system performance will degrade.
## List of frequently asked questions
### 1. What should I pay attention to when upgrading from a pre-2.0 version of TDengine to 2.0 or later? ☆☆☆
### 1. What should I pay attention to when upgrading from a pre-3.0 version of TDengine to 3.0 or later?
Version 2.0 is a complete rewrite of earlier versions; its configuration files and data files are incompatible with them. Be sure to do the following before upgrading:
Version 3.0 is a complete rewrite of earlier versions; its configuration files and data files are incompatible with them. Be sure to do the following before upgrading:
1. Delete the configuration files: run `sudo rm -rf /etc/taos/taos.cfg`
2. Delete the log files: run `sudo rm -rf /var/log/taos/`
3. Provided the data is definitely no longer needed, delete the data files: run `sudo rm -rf /var/lib/taos/`
4. Install the latest stable version of TDengine
4. Install the latest stable 3.0 version of TDengine
5. If data needs to be migrated, or if the data files are damaged, contact the official TAOS Data technical support team for assistance
### 2. What can I do when JDBCDriver cannot find the dynamic link library on Windows?
See the [technical blog post](https://www.taosdata.com/blog/2019/12/03/950.html) written for this issue.
### 3. Creating a table reports the error "more dnodes are needed"
See the [technical blog post](https://www.taosdata.com/blog/2019/12/03/965.html) written for this issue.
### 4. How do I make TDengine generate a core file when it crashes?
### 3. How do I make TDengine generate a core file when it crashes?
See the [technical blog post](https://www.taosdata.com/blog/2019/12/06/974.html) written for this issue.
### 5. What should I do about the error "Unable to establish connection"?
### 4. What should I do about the error "Unable to establish connection"?
When the client hits a connection failure, check the following:
1. Check the network environment
- Cloud server: check whether the cloud server's security group opens access to TCP/UDP ports 6030-6042
- Cloud server: check whether the cloud server's security group opens access to TCP/UDP ports 6030/6041
- Local virtual machine: check that the host can be pinged; avoid using `localhost` as the hostname
- Corporate server: in a NAT network environment, be sure to check whether the server can send messages back to the client
@ -61,7 +59,7 @@ title: Frequently Asked Questions & Feedback
5. Ping the server FQDN. If there is no response, check your network and DNS settings, or the system hosts file on the client machine. If a TDengine cluster is deployed, the client must be able to ping the FQDN of every cluster node.
6. Check the firewall settings (Ubuntu: ufw status; CentOS: firewall-cmd --list-port) and make sure TCP/UDP on ports 6030-6042 can pass between all hosts in the cluster.
6. Check the firewall settings (Ubuntu: ufw status; CentOS: firewall-cmd --list-port) and make sure TCP/UDP on ports 6030/6041 can pass between all hosts in the cluster.
7. For JDBC (and similarly ODBC, Python, Go, etc.) connections on Linux, make sure *libtaos.so* is in the directory */usr/local/taos/driver* and that */usr/local/taos/driver* is in the system library search path *LD_LIBRARY_PATH*
@ -76,9 +74,9 @@ title: Frequently Asked Questions & Feedback
- On Windows, use the PowerShell command Test-NetConnection -ComputerName {fqdn} -Port {port} to check whether the server-side port is reachable
10. You can also use the network connectivity test embedded in the taos program to verify that the specified ports between server and client are open, for both TCP and UDP: [guide to TDengine's embedded network test tool](https://www.taosdata.com/blog/2020/09/08/1816.html).
10. You can also use the network connectivity test embedded in the taos program to verify that the specified ports between server and client are open, for both TCP and UDP: [Diagnostics and more](https://docs.taosdata.com/3.0-preview/operation/diagnose/).
### 6. What should I do about the errors "Unexpected generic error in RPC" or "Unable to resolve FQDN"?
### 5. What should I do about the error "Unable to resolve FQDN"?
This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS shell or client applications, check the following:
@ -86,38 +84,14 @@ title: Frequently Asked Questions & Feedback
2. If the network has a DNS server configured, check that it is working properly
3. If the network has no DNS server configured, check the hosts file on the client machine to confirm that the FQDN is configured with the correct IP address
4. If the network configuration is fine, you must be able to ping the connection's FQDN from the client machine; otherwise the client cannot connect to the server
5. If TDengine has previously run on the server and the hostname has since changed, check whether dnodeEps.json in the data directory matches the currently configured EP (default path /var/lib/taos/dnode). Normally it is recommended to switch to a new data directory, or to back up and then delete the old one, which avoids this problem.
5. If TDengine has previously run on the server and the hostname has since changed, check whether dnode.json in the data directory matches the currently configured EP (default path /var/lib/taos/dnode). Normally it is recommended to switch to a new data directory, or to back up and then delete the old one, which avoids this problem.
6. Check whether /etc/hosts and /etc/hostname contain the preconfigured FQDN
### 7. Why do I still get an "Invalid SQL" error even though the syntax is correct?
If you are sure the syntax is correct, in versions before 2.0 check whether the SQL statement is longer than 64K; exceeding that limit also returns this error.
### 8. Are validation queries supported?
TDengine does not yet have a dedicated set of validation queries. However, it is recommended to use the system-monitoring database "log" for this.
<a class="anchor" id="update"></a>
### 9. Can I delete or update a record?
TDengine does not currently support deletion; it may be supported in the future depending on user demand.
Starting with 2.0.8.0, TDengine supports updating previously written data. To use updates, the database must be created with the UPDATE 1 parameter; INSERT INTO can then be used to update data with an already-written timestamp. The UPDATE parameter cannot be changed via ALTER DATABASE. For databases created without UPDATE 1, writing data with an existing timestamp neither modifies the earlier data nor reports an error.
Also note that with UPDATE set to 0, later data with the same timestamp is silently discarded without error, yet still counts toward affected rows (so the INSERT return value cannot be used to detect duplicate timestamps). The rationale is that TDengine treats written data as a stream: whether or not timestamps collide, TDengine assumes the originating device really produced that data. The UPDATE parameter only controls how such a stream is persisted: with UPDATE 0, data written first overrides data written later; with UPDATE 1, data written later overrides data written first. Which to choose depends on whether downstream use and statistics should prefer the earlier or the later data.
Furthermore, since version 2.1.7.0, UPDATE can be set to 2, meaning "partial column update": with UPDATE 1, updating a row sets any columns without supplied values to NULL, whereas with UPDATE 2 such columns keep the values of the existing row.
### 10. How do I create a table with more than 1024 columns?
With version 2.0 and above, 1024 columns are supported by default; before 2.0, TDengine allowed at most 250 columns per table. If you genuinely exceed the limit, it is recommended to split the wide table logically into several smaller tables according to the data's characteristics. (Since version 2.1.7.0, the maximum number of columns is 4096.)
### 11. What is the most efficient way to write data?
### 6. What is the most efficient way to write data?
Batch insertion. A single insert statement can write multiple records into one table, or multiple records into multiple tables at once.
### 12. Chinese characters in nchar data inserted on Windows are parsed as garbage. How do I fix this?
### 7. Chinese characters in nchar data inserted on Windows are parsed as garbage. How do I fix this?
If nchar data inserted on Windows contains Chinese characters, first confirm that the system region is set to China (configurable in Control Panel); the `taos` client in cmd should then work normally. If you are developing a Java application in an IDE such as Eclipse or IntelliJ, confirm that the IDE's file encoding is GBK (Java's default encoding type), then initialize the client configuration when creating the Connection, with statements such as:
@ -128,74 +102,61 @@ properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection = DriverManager.getConnection(url, properties);
```
### 13. The client cannot display Chinese characters correctly on Windows?
### 8. The client cannot display Chinese characters correctly on Windows?
Windows systems generally store Chinese characters as GBK/GB18030, while TDengine's default character set is UTF-8. When the TDengine client is used on Windows, the client driver converts all characters to UTF-8 encoding before sending them to the server for storage, so during application development you only need to configure the current Chinese character set correctly when calling the APIs.
[v2.2.1.5 and later] When running the TDengine command-line tool taos on Windows 10, if Chinese characters cannot be entered or displayed correctly, apply the following settings in the client's taos.cfg:
When running the TDengine command-line tool taos on Windows 10, if Chinese characters cannot be entered or displayed correctly, apply the following settings in the client's taos.cfg:
```
locale C
charset UTF-8
```
### 14. JDBC reports the error: the executed SQL is not a DML or a DDL
Update to the latest JDBC driver; see the [Java connector](/reference/connector/java)
### 15. taos connect failed, reason&#58; invalid timestamp
A common cause is that the server and client clocks are not synchronized. They can be synchronized against a time server (ntpdate on Linux; automatic time synchronization in the Windows system time settings).
### 16. Table names are not displayed in full
### 9. Table names are not displayed in full
Because the taos shell has limited display width in the terminal, longer table names may be shown incompletely, and operating on the incomplete name triggers a Table does not exist error. The workaround is to set the maxBinaryDisplayWidth option in taos.cfg, run the command set max_binary_display_width 100, or append \G to the statement to change how results are displayed.
### 17. How do I migrate data?
### 10. How do I migrate data?
TDengine uniquely identifies a machine by its hostname. When data files are moved from machine A to machine B, pay attention to the following two points:
TDengine uniquely identifies a machine by its hostname. For version 3.0, when moving data files from machine A to machine B, reconfigure machine B's hostname to be machine A's hostname.
- For versions 2.0.0.0 through 2.0.6.x, reconfigure machine B's hostname to be machine A's hostname.
- For 2.0.7.0 and later, go to /var/lib/taos/dnode, fix the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines.
- The storage formats of 1.x and 2.x are incompatible; use a migration tool or develop your own application to export and re-import the data.
The storage format of 3.x is incompatible with the earlier 1.x and 2.x versions; use a migration tool or develop your own application to export and re-import the data.
### 18. How do I temporarily adjust the log level in the command-line program taos?
### 11. How do I temporarily adjust the log level in the command-line program taos?
For easier debugging, starting with version 2.0.16 the command-line program taos gained two logging-related commands:
For easier debugging, the command-line program taos provides logging-related commands:
```sql
ALTER LOCAL flag_name flag_value;
ALTER LOCAL local_option
local_option: {
'resetLog'
| 'rpcDebugFlag' value
| 'tmrDebugFlag' value
| 'cDebugFlag' value
| 'uDebugFlag' value
| 'debugFlag' value
}
```
This modifies, within the current command-line program, the log level of a particular module (effective only for the current command-line program; if taos is restarted, it must be set again):
This clears, within the current command-line program, all log files generated by clients on this machine (resetLog), or modifies the log level of a particular module (effective only for the current command-line program; if taos is restarted, it must be set again):
- flag_name can be: debugFlag, cDebugFlag, tmrDebugFlag, uDebugFlag, rpcDebugFlag
- flag_value can be: 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs)
- value can be: 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs).
```sql
ALTER LOCAL RESETLOG;
```
### 12. How do I resolve build failures of the component written in Go?
This clears all log files on this machine generated by clients.
<a class="anchor" id="timezone"></a>
### 19. How do I resolve build failures of the component written in Go?
TDengine 2.3.0.0 and later include taosAdapter, a standalone component developed in Go that must be run separately. It replaces the httpd previously built into taosd, providing the original httpd functionality plus data ingestion support for a variety of other software (Prometheus, Telegraf, collectd, StatsD, etc.).
TDengine 3.0 includes taosAdapter, a standalone component developed in Go that runs separately, providing RESTful access and data ingestion support for a variety of other software (Prometheus, Telegraf, collectd, StatsD, etc.).
To build from the latest develop branch code, first run `git submodule update --init --recursive` to download the taosAdapter repository code, then build.
The build currently compiles taosAdapter automatically by default. Go 1.14 or later is required; Go build errors are usually caused by problems reaching go mod from within China and can be resolved by setting the Go environment variables:
Go 1.14 or later is required; Go build errors are usually caused by problems reaching go mod from within China and can be resolved by setting the Go environment variables:
```sh
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
If you want to keep using the previously built-in httpd, you can disable the taosAdapter build and use
`cmake .. -DBUILD_HTTP=true` to use the original built-in httpd.
### 20. How do I check how much storage space the data occupies?
### 13. How do I check how much storage space the data occupies?
By default, TDengine's data files are stored in /var/lib/taos and its log files in /var/log/taos.
@ -203,13 +164,11 @@ go env -w GOPROXY=https://goproxy.cn,direct
To check the size occupied by a single database, select the database to inspect in the command-line program taos and run `show vgroups;`, then use the returned VGroup ids to inspect the sizes of the corresponding folders under /var/lib/taos/vnode.
To check only the data block distribution and size of a given (super) table, see the [_block_dist function](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)
### 21. How can the client connection string ensure high availability?
### 14. How can the client connection string ensure high availability?
See the [technical blog post](https://www.taosdata.com/blog/2021/04/16/2287.html) written for this issue
### 22. How is time-zone information in timestamps handled?
### 15. How is time-zone information in timestamps handled?
In TDengine, the time zone of a timestamp is always handled by the client and never involves the server. Specifically, the client converts timestamps in SQL statements to the UTC time zone (i.e. Unix timestamps) before handing them to the server for writing and querying; when reading data, the server likewise serves the raw data in UTC, and the client then converts the timestamps to the time zone required by the local system for display.
@ -220,22 +179,22 @@ In TDengine, the time zone of a timestamp is always handled by the client, not the server
3. If a timezone is explicitly specified when establishing the database connection in the connector drivers for C/C++/Java/Python and other languages, that specified time-zone setting prevails; the Java connector's JDBC URL, for example, has a timezone parameter.
4. When writing SQL statements, you can also use Unix timestamps directly (e.g. `1554984068000`) or timestamp strings carrying a time zone, i.e. RFC 3339 format (e.g. `2013-04-12T15:52:01.123+08:00`) or ISO-8601 format (e.g. `2013-04-12T15:52:01.123+0800`); the values of such timestamps are then unaffected by any other time-zone settings.
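As a sketch of rule 3 for the C connector (the connection parameters below are placeholders), the client time zone can be pinned with taos_options() before the first connection:
```c
#include <stddef.h>
#include "taos.h"

int main(void) {
  // Set the client time zone before connecting (rule 3 above).
  taos_options(TSDB_OPTION_TIMEZONE, "UTC-8");
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;
  // Timestamps written with an explicit offset, e.g. '2013-04-12T15:52:01.123+08:00',
  // remain unaffected by this setting (rule 4 above).
  taos_close(conn);
  return 0;
}
```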
### 23. Which network ports does TDengine 2.0 use?
### 16. Which network ports does TDengine 3.0 use?
For the network ports used, see the documentation: [serverport](/reference/config/#serverport)
For the network ports used, see the documentation: [serverport](../../reference/config/#serverport)
Note that the port numbers listed in the documentation assume the default port 6030; if the setting in the configuration file is changed, the listed ports all shift accordingly. Administrators can use the information above to adjust firewall settings.
### 24. Why does the RESTful API not respond, why can't Grafana add TDengine as a data source, and why does TDengineGUI fail to connect even with port 6041 selected?
### 17. Why does the RESTful API not respond, why can't Grafana add TDengine as a data source, and why does TDengineGUI fail to connect even with port 6041 selected?
taosAdapter became part of the TDengine server software as of TDengine 2.4.0.0, acting as the bridge and adapter between a TDengine cluster and applications. Before that, the RESTful API and related features were provided by the HTTP service built into taosd; to get them now, run ```systemctl start taosadapter``` to start the taosAdapter service.
Note that taosAdapter's log path (path) is configured separately, defaulting to /var/log/taos, and its log level (logLevel) has 8 levels, defaulting to info; setting it to panic disables log output. Mind the free space of the OS / directory. The configuration can be changed via command-line arguments, environment variables, or the configuration file; the default configuration file is /etc/taos/taosadapter.toml.
For a detailed introduction to the taosAdapter component, see the documentation: [taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
For a detailed introduction to the taosAdapter component, see the documentation: [taosAdapter](../../reference/taosadapter/)
### 25. What should I do when an OOM occurs?
### 18. What should I do when an OOM occurs?
OOM is an operating-system protection mechanism: when OS memory (including SWAP) runs low, the OS kills certain processes to keep itself running stably. Memory shortages usually have one of two causes: the remaining memory is below vm.min_free_kbytes, or a program requests more memory than remains. There is also the case where memory is sufficient but a program occupies special memory addresses, which can likewise trigger an OOM.
TDengine pre-allocates memory for each VNode. The number of VNodes per database is influenced by maxVgroupsPerDb, and the memory occupied by each VNode is influenced by Blocks and Cache. To prevent OOM, plan memory properly at the start of a project and set SWAP sensibly. Also, querying excessive amounts of data may cause memory usage to spike, depending on the specific query. TDengine Enterprise optimizes memory management with a new allocator; users with higher stability requirements may consider the Enterprise edition.
TDengine pre-allocates memory for each VNode. The number of VNodes per database is determined by the vgroups parameter given at database creation, and the memory occupied by each VNode is determined by the buffer parameter. To prevent OOM, plan memory properly at the start of a project and set SWAP sensibly. Also, querying excessive amounts of data may cause memory usage to spike, depending on the specific query. TDengine Enterprise optimizes memory management with a new allocator; users with higher stability requirements may consider the Enterprise edition.

View File

@ -246,7 +246,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);

View File

@ -2659,7 +2659,6 @@ typedef struct {
} SVgEpSet;
typedef struct {
int64_t refId;
int64_t suid;
int8_t level;
} SRSmaFetchMsg;
@ -2667,7 +2666,6 @@ typedef struct {
static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI64(pCoder, pReq->refId) < 0) return -1;
if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->level) < 0) return -1;
@ -2678,7 +2676,6 @@ static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFe
static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->refId) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->level) < 0) return -1;

View File

@ -200,6 +200,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)

View File

@ -354,8 +354,6 @@ void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
void *getDataMin(int32_t type);
void *getDataMax(int32_t type);
#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL)
#define SET_BIGINT_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_BIGINT_NULL)
#ifdef __cplusplus
}

View File

@ -67,7 +67,7 @@ typedef struct SResultRowEntryInfo {
bool initialized:1; // output buffer has been initialized
bool complete:1; // query has completed
uint8_t isNullRes:6; // the result is null
uint16_t numOfRes; // num of output result in current buffer
uint16_t numOfRes; // num of output results in the current buffer (non-NULL results only)
} SResultRowEntryInfo;
// determine the real data need to calculated the result

View File

@ -121,6 +121,7 @@ typedef struct SProjectLogicNode {
SLogicNode node;
SNodeList* pProjections;
char stmtName[TSDB_TABLE_NAME_LEN];
bool ignoreGroupId;
} SProjectLogicNode;
typedef struct SIndefRowsFuncLogicNode {
@ -344,6 +345,7 @@ typedef struct SProjectPhysiNode {
SPhysiNode node;
SNodeList* pProjections;
bool mergeDataBlock;
bool ignoreGroupId;
} SProjectPhysiNode;
typedef struct SIndefRowsFuncPhysiNode {

View File

@ -226,11 +226,36 @@ typedef struct {
int32_t nodeId;
int32_t childId;
int32_t taskId;
int64_t checkpointVer;
int64_t processedVer;
// int64_t checkpointVer;
// int64_t processedVer;
SEpSet epSet;
} SStreamChildEpInfo;
typedef struct {
int32_t nodeId;
int32_t childId;
int64_t stateSaveVer;
int64_t stateProcessedVer;
} SStreamCheckpointInfo;
typedef struct {
int64_t streamId;
int64_t checkTs;
int32_t checkpointId; // incremental
int32_t taskId;
SArray* checkpointVer; // SArray<SStreamCheckpointInfo>
} SStreamMultiVgCheckpointInfo;
typedef struct {
int32_t taskId;
int32_t checkpointId; // incremental
} SStreamCheckpointKey;
typedef struct {
int32_t taskId;
SArray* checkpointVer;
} SStreamRecoveringState;
typedef struct SStreamTask {
int64_t streamId;
int32_t taskId;
@ -256,6 +281,8 @@ typedef struct SStreamTask {
// children info
SArray* childEpInfo; // SArray<SStreamChildEpInfo*>
int32_t nextCheckId;
SArray* checkpointInfo; // SArray<SStreamCheckpointInfo>
// exec
STaskExec exec;
@ -445,6 +472,7 @@ typedef struct {
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tFreeStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupTrigger(SStreamTask* pTask);
@ -468,6 +496,7 @@ typedef struct SStreamMeta {
TTB* pTaskDb;
TTB* pStateDb;
SHashObj* pTasks;
SHashObj* pRecoveringState;
void* ahandle;
TXN txn;
FTaskExpand* expandFunc;
@ -484,6 +513,7 @@ SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta);
#ifdef __cplusplus
}

View File

@ -27,6 +27,9 @@ extern "C" {
extern bool gRaftDetailLog;
#define SYNC_RESP_TTL_MS 10000000
#define SYNC_SPEED_UP_HB_TIMER 400
#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
#define SYNC_SLOW_DOWN_RANGE 100
#define SYNC_MAX_BATCH_SIZE 1
#define SYNC_INDEX_BEGIN 0
@ -205,6 +208,7 @@ int32_t syncSetStandby(int64_t rid);
ESyncState syncGetMyRole(int64_t rid);
bool syncIsReady(int64_t rid);
const char* syncGetMyRoleStr(int64_t rid);
bool syncRestoreFinish(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);

View File

@ -610,6 +610,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152)
#define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153)
#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)

View File

@ -359,11 +359,11 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD 0
#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD (24 * 60 * 60 * 2)
#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE 0
#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE -1
#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD 0
#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0

View File

@ -126,7 +126,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
rpcInit.numOfThreads = numOfThread;
rpcInit.cfp = processMsgFromServer;
rpcInit.rfp = clientRpcRfp;
rpcInit.tfp = clientRpcTfp;
// rpcInit.tfp = clientRpcTfp;
rpcInit.sessions = 1024;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.user = (char *)user;

View File

@ -327,7 +327,13 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
while (pIter != NULL) {
int64_t *rid = pIter;
SRequestObj *pRequest = acquireRequest(*rid);
if (NULL == pRequest || pRequest->killed) {
if (NULL == pRequest) {
pIter = taosHashIterate(pObj->pRequests, pIter);
continue;
}
if (pRequest->killed) {
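// the request was successfully acquired above, so release the reference before skipping it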
releaseRequest(*rid);
pIter = taosHashIterate(pObj->pRequests, pIter);
continue;
}

View File

@ -283,7 +283,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, false);
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;

View File

@ -182,6 +182,7 @@ void taos_free_result(TAOS_RES *res) {
if (TD_RES_QUERY(res)) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
@ -482,7 +483,7 @@ void taos_stop_query(TAOS_RES *res) {
int32_t numOfFields = taos_num_fields(pRequest);
// It is not a query, no need to stop.
if (numOfFields == 0) {
tscDebug("request %" PRIx64 " no need to be killed since not query", pRequest->requestId);
tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
return;
}
@ -657,12 +658,17 @@ typedef struct SqlParseWrapper {
SQuery *pQuery;
} SqlParseWrapper;
static void destoryTablesReq(void *p) {
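// helper for taosArrayDestroyEx below: frees the pTables list owned by each STablesReq element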
STablesReq *pRes = (STablesReq *)p;
taosArrayDestroy(pRes->pTables);
}
static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
taosArrayDestroy(pWrapper->catalogReq.pDbVgroup);
taosArrayDestroy(pWrapper->catalogReq.pDbCfg);
taosArrayDestroy(pWrapper->catalogReq.pDbInfo);
taosArrayDestroy(pWrapper->catalogReq.pTableMeta);
taosArrayDestroy(pWrapper->catalogReq.pTableHash);
taosArrayDestroyEx(pWrapper->catalogReq.pTableMeta, destoryTablesReq);
taosArrayDestroyEx(pWrapper->catalogReq.pTableHash, destoryTablesReq);
taosArrayDestroy(pWrapper->catalogReq.pUdf);
taosArrayDestroy(pWrapper->catalogReq.pIndex);
taosArrayDestroy(pWrapper->catalogReq.pUser);
@ -847,7 +853,7 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
}
pRequest->code =
setQueryResultFromRsp(pResultInfo, (SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, false);
setQueryResultFromRsp(pResultInfo, (SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
pRequest->code = code;

View File

@ -389,7 +389,7 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) {
code = buildShowVariablesRsp(rsp.variables, &pRes);
}
if (TSDB_CODE_SUCCESS == code) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRes, false, false);
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRes, false, true);
}
tFreeSShowVariablesRsp(&rsp);

View File

@ -24,7 +24,7 @@
#define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
static const SSysDbTableSchema dnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
@ -66,7 +66,7 @@ static const SSysDbTableSchema bnodesSchema[] = {
};
static const SSysDbTableSchema clusterSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};

View File

@ -1874,21 +1874,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
* @brief TODO: Assume that the final generated result is less than 3M
*
* @param pReq
* @param pDataBlocks
* @param pDataBlock
* @param vgId
* @param suid // TODO: check with Liao whether suid response is reasonable
* @param suid
*
* TODO: colId should be set
*/
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid) {
int32_t sz = taosArrayGetSize(pDataBlocks);
int32_t bufSize = sizeof(SSubmitReq);
int32_t sz = 1;
for (int32_t i = 0; i < sz; ++i) {
SDataBlockInfo* pBlkInfo = &((SSDataBlock*)taosArrayGet(pDataBlocks, i))->info;
const SDataBlockInfo* pBlkInfo = &pDataBlock->info;
int32_t numOfCols = taosArrayGetSize(pDataBlocks);
bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(numOfCols));
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(colNum));
bufSize += sizeof(SSubmitBlk);
}
@ -1905,7 +1904,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
tdSRowInit(&rb, pTSchema->version);
for (int32_t i = 0; i < sz; ++i) {
SSDataBlock* pDataBlock = taosArrayGet(pDataBlocks, i);
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
// int32_t rowSize = pDataBlock->info.rowSize;

View File

@ -423,7 +423,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 2, 1024, 0) != 0) return -1;
tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, TSDB_MAX_MSG_SIZE * 10000L);
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, 0) != 0)
return -1;
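The 10L → 10LL change above matters on platforms where long is 32 bits (e.g. LLP64 Windows): there the multiplication happens in 32-bit arithmetic and can overflow before the value is widened to int64_t. A standalone sketch of the difference follows; the message-size constant is made up for illustration:
```c
#include <stdint.h>
#include <stdio.h>

#define FAKE_MSG_SIZE (100 * 1024 * 1024)  // stand-in for TSDB_MAX_MSG_SIZE

int main(void) {
  // Where long is 32-bit, FAKE_MSG_SIZE * 10000L wraps before the assignment
  // widens it; the LL suffix forces 64-bit arithmetic from the start.
  int64_t risky = FAKE_MSG_SIZE * 10000L;
  int64_t safe  = FAKE_MSG_SIZE * 10000LL;
  printf("%lld vs %lld\n", (long long)risky, (long long)safe);
  return 0;
}
```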

View File

@ -347,6 +347,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -255,7 +255,8 @@ static inline void dmReleaseHandle(SRpcHandleInfo *pHandle, int8_t type) {
static bool rpcRfp(int32_t code, tmsg_t msgType) {
if (code == TSDB_CODE_RPC_REDIRECT || code == TSDB_CODE_RPC_NETWORK_UNAVAIL || code == TSDB_CODE_NODE_NOT_DEPLOYED ||
code == TSDB_CODE_SYN_NOT_LEADER || code == TSDB_CODE_APP_NOT_READY || code == TSDB_CODE_RPC_BROKEN_LINK) {
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH || msgType == TDMT_SCH_MERGE_FETCH) {
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
msgType == TDMT_SCH_MERGE_FETCH) {
return false;
}
return true;

View File

@ -187,6 +187,7 @@ int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
int32_t smaProcessFetch(SSma *pSma, void* pMsg);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);

View File

@ -481,7 +481,7 @@ int64_t metaGetTbNum(SMeta *pMeta) {
/* int64_t num = 0; */
/* vnodeGetAllCtbNum(pMeta->pVnode, &num); */
return pMeta->pVnode->config.vndStats.numOfCTables;
return pMeta->pVnode->config.vndStats.numOfCTables + pMeta->pVnode->config.vndStats.numOfNTables;
}
// N.B. Called by statusReq per second

View File

@ -36,16 +36,14 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputT
int8_t level);
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid);
static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid,
SRSmaStat *pStat, int8_t blkType);
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid, int8_t blkType);
static void tdRSmaFetchTrigger(void *param, void *tmrId);
static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level);
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish);
static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter);
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem);
static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer);
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma);
@ -604,11 +602,8 @@ _end:
return code;
}
static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid,
SRSmaStat *pStat, int8_t blkType) {
SArray *pResult = NULL;
SSma *pSma = pStat->pSma;
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid, int8_t blkType) {
while (1) {
SSDataBlock *output = NULL;
uint64_t ts;
@ -619,30 +614,20 @@ static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *p
pItem->level, terrstr(code));
goto _err;
}
if (!output) {
break;
}
if (!pResult) {
pResult = taosArrayInit(1, sizeof(SSDataBlock));
if (!pResult) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
}
taosArrayPush(pResult, output);
if (taosArrayGetSize(pResult) > 0) {
#if 1
if (output) {
#if 0
char flag[10] = {0};
snprintf(flag, 10, "level %" PRIi8, pItem->level);
SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock));
taosArrayPush(pResult, output);
blockDebugShowDataBlocks(pResult, flag);
taosArrayDestroy(pResult);
#endif
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
SSubmitReq *pReq = NULL;
// TODO: the schema update should be handled later(TD-17965)
if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) {
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
SMA_VID(pSma), suid, pItem->level, terrstr());
goto _err;
@ -659,18 +644,17 @@ static int32_t tdRSmaFetchAndSubmitResult(qTaskInfo_t taskInfo, SRSmaInfoItem *p
SMA_VID(pSma), suid, pItem->level, output->info.version);
taosMemoryFreeClear(pReq);
taosArrayClear(pResult);
} else if (terrno == 0) {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
break;
} else {
smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
}
tdDestroySDataBlockArray(pResult);
return TSDB_CODE_SUCCESS;
_err:
tdDestroySDataBlockArray(pResult);
return TSDB_CODE_FAILED;
}
@ -694,11 +678,9 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
return TSDB_CODE_FAILED;
}
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = SMA_RSMA_STAT(pEnv->pStat);
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx);
tdRSmaFetchAndSubmitResult(RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid, pStat,
tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid,
STREAM_INPUT__DATA_SUBMIT);
atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
@ -724,11 +706,13 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
SRSmaInfo *pRSmaInfo = NULL;
if (!pEnv) {
terrno = TSDB_CODE_RSMA_INVALID_ENV;
return NULL;
}
pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
if (!pStat || !RSMA_INFO_HASH(pStat)) {
terrno = TSDB_CODE_RSMA_INVALID_STAT;
return NULL;
}
@ -743,12 +727,12 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return pRSmaInfo;
}
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
// clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
SRSmaInfo *pCowRSmaInfo = NULL;
@ -1323,7 +1307,7 @@ _err:
}
/**
* @brief trigger to get rsma result
* @brief trigger to get rsma result in async mode
*
* @param param
* @param tmrId
@ -1357,8 +1341,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
" refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle,
&pItem->tmrId);
taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
return;
}
@ -1372,16 +1355,8 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
case TASK_TRIGGER_STAT_ACTIVE: {
smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma),
pItem->level, pRSmaInfo->suid);
// sync procedure => async process
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
qTaskInfo_t taskInfo = pRSmaInfo->taskInfo[pItem->level - 1];
qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK);
tdRSmaFetchAndSubmitResult(taskInfo, pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat,
STREAM_INPUT__DATA_BLOCK);
tdCleanupStreamInputDataBlock(taskInfo);
// async process
tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused",
@ -1404,3 +1379,118 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
_end:
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
}
/**
* @brief put rsma fetch msg to fetch queue
*
* @param pSma
* @param pInfo
* @param level
* @return int32_t
*/
int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
SRSmaFetchMsg fetchMsg = { .suid = pInfo->suid, .level = level};
int32_t ret = 0;
int32_t contLen = 0;
SEncoder encoder = {0};
tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret);
if (ret < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tEncoderClear(&encoder);
goto _err;
}
void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead));
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen);
if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tEncoderClear(&encoder);
rpcFreeCont(pBuf);
goto _err;
}
tEncoderClear(&encoder);
((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);
SRpcMsg rpcMsg = {
.code = 0,
.msgType = TDMT_VND_FETCH_RSMA,
.pCont = pBuf,
.contLen = contLen,
};
if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, FETCH_QUEUE, &rpcMsg)) != 0) {
smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s",
SMA_VID(pSma), pInfo->suid, level, terrstr());
goto _err;
}
smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma),
pInfo->suid, level);
return TSDB_CODE_SUCCESS;
_err:
return TSDB_CODE_FAILED;
}
/**
* @brief fetch rsma data of level 2/3 and submit
*
* @param pSma
* @param pMsg
* @return int32_t
*/
int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
SRSmaFetchMsg req = {0};
SDecoder decoder = {0};
void *pBuf = NULL;
SRSmaInfo *pInfo = NULL;
SRSmaInfoItem *pItem = NULL;
if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
return -1;
}
pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead));
tDecoderInit(&decoder, pBuf, pRpcMsg->contLen);
if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _err;
}
pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid);
if (!pInfo) {
if (terrno == TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_RSMA_EMPTY_INFO;
}
smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma),
req.suid, req.level, terrstr());
goto _err;
}
pItem = RSMA_INFO_ITEM(pInfo, req.level - 1);
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1);
if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
goto _err;
}
if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) {
goto _err;
}
tdCleanupStreamInputDataBlock(taskInfo);
tdReleaseRSmaInfo(pSma, pInfo);
tDecoderClear(&decoder);
smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid,
req.level);
return TSDB_CODE_SUCCESS;
_err:
tdReleaseRSmaInfo(pSma, pInfo);
tDecoderClear(&decoder);
smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr());
return TSDB_CODE_FAILED;
}
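Together, tdRSmaFetchSend and smaProcessFetch form an encode/dispatch/decode pair. The sketch below is a minimal illustration of the two-pass serialization idiom they rely on: size the payload first via tEncodeSize, encode into an exactly-sized buffer, then decode it back on the receiving side. The round-trip helper itself is hypothetical (it is not part of this commit) and omits the SMsgHead/rpc plumbing; it assumes the encoder/decoder helpers visible in this diff are in scope.

```c
// Hypothetical round-trip helper: illustrates the pattern, not shipped code.
static int32_t rsmaFetchMsgRoundTrip(SRSmaFetchMsg *pIn, SRSmaFetchMsg *pOut) {
  int32_t contLen = 0, ret = 0;

  // pass 1: dry-run encode to learn the payload size
  tEncodeSize(tEncodeSRSmaFetchMsg, pIn, contLen, ret);
  if (ret < 0) return TSDB_CODE_FAILED;

  void *buf = taosMemoryCalloc(1, contLen);
  if (buf == NULL) return TSDB_CODE_FAILED;

  // pass 2: real encode into the exactly-sized buffer
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, contLen);
  ret = tEncodeSRSmaFetchMsg(&encoder, pIn);
  tEncoderClear(&encoder);

  if (ret >= 0) {
    // receiving side: decode the payload back into a struct
    SDecoder decoder = {0};
    tDecoderInit(&decoder, buf, contLen);
    ret = tDecodeSRSmaFetchMsg(&decoder, pOut);
    tDecoderClear(&decoder);
  }

  taosMemoryFree(buf);
  return ret < 0 ? TSDB_CODE_FAILED : TSDB_CODE_SUCCESS;
}
```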
View File
@ -859,8 +859,10 @@ void vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
tDecoderInit(&decoder, msgBody, msgLen);
if (tDecodeStreamDispatchReq(&decoder, &req) < 0) {
code = TSDB_CODE_MSG_DECODE_ERROR;
tDecoderClear(&decoder);
goto FAIL;
}
tDecoderClear(&decoder);
int32_t taskId = req.taskId;
View File
@ -473,7 +473,7 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
int numOfCols = 0;
vnodeGetStbColumnNum(pVnode, id, &numOfCols);
*num += ctbNum * numOfCols;
*num += ctbNum * (numOfCols - 1);
}
metaCloseStbCursor(pCur);
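As a worked example of the corrected count: assuming a super table with one timestamp column and three metric columns (numOfCols = 4) and ctbNum = 10 child tables, the old expression reported 40 time series while ctbNum * (numOfCols - 1) reports 30, since the shared timestamp column is not itself a time series.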
View File
@ -325,6 +325,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetTableCfg(pVnode, pMsg, true);
case TDMT_VND_BATCH_META:
return vnodeGetBatchMeta(pVnode, pMsg);
case TDMT_VND_FETCH_RSMA:
return smaProcessFetch(pVnode->pSma, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
View File
@ -141,6 +141,10 @@ static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
}
if (rsp.info.handle != NULL) {
tmsgSendRsp(&rsp);
} else {
if (rsp.pCont) {
rpcFreeCont(rsp.pCont);
}
}
}
@ -299,6 +303,10 @@ void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
vnodePostBlockMsg(pVnode, pMsg);
if (rsp.info.handle != NULL) {
tmsgSendRsp(&rsp);
} else {
if (rsp.pCont) {
rpcFreeCont(rsp.pCont);
}
}
vGTrace("vgId:%d, msg:%p is freed, code:0x%x index:%" PRId64, vgId, pMsg, rsp.code, pMsg->info.conn.applyIndex);
@ -722,7 +730,8 @@ void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }
bool vnodeIsLeader(SVnode *pVnode) {
if (!syncIsReady(pVnode->sync)) {
vDebug("vgId:%d, vnode not ready", pVnode->config.vgId);
vDebug("vgId:%d, vnode not ready, state:%s, restore:%d", pVnode->config.vgId, syncGetMyRoleStr(pVnode->sync),
syncRestoreFinish(pVnode->sync));
return false;
}
View File
@ -279,6 +279,7 @@ typedef struct SCtgMsgCtx {
void* lastOut;
void* out;
char* target;
SHashObj* pBatchs;
} SCtgMsgCtx;
@ -315,7 +316,6 @@ typedef struct SCtgTask {
SRWLatch lock;
SArray* pParents;
SCtgSubRes subRes;
SHashObj* pBatchs;
} SCtgTask;
typedef struct SCtgTaskReq {
View File
@ -855,6 +855,7 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
int32_t parentNum = taosArrayGetSize(pTask->pParents);
for (int32_t i = 0; i < parentNum; ++i) {
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgTask* pParent = taosArrayGetP(pTask->pParents, i);
pParent->subRes.code = pTask->code;
@ -865,7 +866,9 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
}
}
pParent->pBatchs = pTask->pBatchs;
SCtgMsgCtx *pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1);
pParMsgCtx->pBatchs = pMsgCtx->pBatchs;
CTG_ERR_JRET(pParent->subRes.fp(pParent));
}
@ -1082,7 +1085,7 @@ _return:
ctgReleaseVgInfoToCache(pCtg, dbCache);
}
if (pTask->res) {
if (pTask->res || code) {
ctgHandleTaskEnd(pTask, code);
}
@ -1625,6 +1628,11 @@ _return:
int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, pConn, (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res));
if (pTask->res) {
@ -1645,6 +1653,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
int32_t dbNum = taosArrayGetSize(pCtx->pNames);
int32_t fetchIdx = 0;
@ -1670,6 +1679,10 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
SCtgTaskReq tReq;
tReq.pTask = pTask;
@ -1686,6 +1699,11 @@ int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDBCache *dbCache = NULL;
SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
if (NULL != dbCache) {
@ -1722,6 +1740,11 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDBCache *dbCache = NULL;
SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
if (NULL != dbCache) {
@ -1761,6 +1784,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx;
SCtgDBCache *dbCache = NULL;
SCtgJob* pJob = pTask->pJob;
int32_t dbNum = taosArrayGetSize(pCtx->pNames);
int32_t fetchIdx = 0;
int32_t baseResIdx = 0;
@ -1803,6 +1827,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
STablesReq* pReq = taosArrayGet(pCtx->pNames, pFetch->dbIdx);
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
SBuildUseDBInput input = {0};
strcpy(input.db, pReq->dbFName);
@ -1831,6 +1859,11 @@ int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx;
SArray* pRes = NULL;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgReadTbIndexFromCache(pCtg, pCtx->pName, &pRes));
if (pRes) {
@ -1852,6 +1885,11 @@ int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) {
SArray* pRes = NULL;
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(pCtx->pName, dbFName);
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
if (pCtx->tbType <= 0) {
CTG_ERR_JRET(ctgReadTbTypeFromCache(pCtg, dbFName, pCtx->pName->tname, &pCtx->tbType));
@ -1890,6 +1928,11 @@ _return:
int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetQnodeListFromMnode(pCtg, pConn, NULL, pTask));
return TSDB_CODE_SUCCESS;
@ -1898,6 +1941,11 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetDnodeListFromMnode(pCtg, pConn, NULL, pTask));
return TSDB_CODE_SUCCESS;
@ -1908,6 +1956,11 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetDBCfgFromMnode(pCtg, pConn, pCtx->dbFName, NULL, pTask));
@ -1919,6 +1972,11 @@ int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SCtgDBCache *dbCache = NULL;
SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo));
if (NULL == pTask->res) {
@ -1953,6 +2011,11 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetIndexInfoFromMnode(pCtg, pConn, pCtx->indexFName, NULL, pTask));
@ -1963,6 +2026,11 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetUdfInfoFromMnode(pCtg, pConn, pCtx->udfName, NULL, pTask));
@ -1975,6 +2043,11 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx;
bool inCache = false;
bool pass = false;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgChkAuthFromCache(pCtg, pCtx->user.user, pCtx->user.dbFName, pCtx->user.type, &inCache, &pass));
if (inCache) {
@ -1996,6 +2069,11 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetSvrVerFromMnode(pCtg, pConn, NULL, pTask));
@ -2129,7 +2207,10 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
if (CTG_TASK_DONE == pSub->status) {
pTask->subRes.code = pSub->code;
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].cloneFp)(pSub, &pTask->subRes.res));
pTask->pBatchs = pSub->pBatchs;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
pMsgCtx->pBatchs = pSubMsgCtx->pBatchs;
CTG_ERR_JRET(pTask->subRes.fp(pTask));
} else {
if (NULL == pSub->pParents) {
@ -2167,7 +2248,10 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
if (newTask) {
pSub->pBatchs = pTask->pBatchs;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
pSubMsgCtx->pBatchs = pMsgCtx->pBatchs;
CTG_ERR_RET((*gCtgAsyncFps[pSub->type].launchFp)(pSub));
pSub->status = CTG_TASK_LAUNCHED;
}
@ -2180,7 +2264,6 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
pTask->pBatchs = pJob->pBatchs;
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
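The same lazy-init block now appears in every ctgLaunchGet*Task function above. A small helper along these lines could fold the repetition; the function below is a hypothetical sketch built only from the macros and fields visible in this diff, not part of the commit:

```c
// Hypothetical helper: bind the job-level batch hash to a task's message
// context on first use, mirroring the repeated pattern in ctgLaunchGet*Task.
static void ctgLazyInitMsgCtxBatchs(SCtgTask *pTask, int32_t msgIdx) {
  SCtgJob    *pJob    = pTask->pJob;
  SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, msgIdx);
  if (NULL == pMsgCtx->pBatchs) {
    pMsgCtx->pBatchs = pJob->pBatchs;
  }
}
```

Moving pBatchs from SCtgTask into SCtgMsgCtx lets each fetch (msgIdx) carry its own batch handle, which is why the per-index variant CTG_GET_TASK_MSGCTX(pTask, i) shows up in the multi-fetch launchers.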
View File
@ -69,13 +69,13 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
taskMsg.len = 0;
}
pTask->pBatchs = pBatchs;
SCtgTaskReq tReq;
tReq.pTask = pTask;
tReq.msgIdx = rsp.msgIdx;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq.msgIdx);
pMsgCtx->pBatchs = pBatchs;
ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s", pJob->queryId, pTask->taskId, rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1));
ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId, rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs);
(*gCtgAsyncFps[pTask->type].handleRspFp)(&tReq, rsp.reqType, &taskMsg, (rsp.rspCode ? rsp.rspCode : rspCode));
}
@ -343,7 +343,9 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) {
ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
pTask->pBatchs = pBatchs;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
pMsgCtx->pBatchs = pBatchs;
#endif
SCtgTaskReq tReq;
@ -444,7 +446,8 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
uint32_t msgSize) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SHashObj* pBatchs = pTask->pBatchs;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SHashObj* pBatchs = pMsgCtx->pBatchs;
SCtgJob* pJob = pTask->pJob;
SCtgBatch* pBatch = taosHashGet(pBatchs, &vgId, sizeof(vgId));
SCtgBatch newBatch = {0};

View File

@ -60,6 +60,7 @@ extern "C" {
#define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f"
#define EXPLAIN_MERGE_FORMAT "SortMerge"
#define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: "
#define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s"
#define EXPLAIN_PARTITION_KETS_FORMAT "Partition Key: "
#define EXPLAIN_INTERP_FORMAT "Interp"
View File
@ -619,6 +619,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_IGNORE_GROUPID_FORMAT, pPrjNode->ignoreGroupId ? "true" : "false");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGEBLOCKS_FORMAT, pPrjNode->mergeDataBlock? "True":"False");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
View File
@ -740,6 +740,7 @@ typedef struct STimeSliceOperatorInfo {
int64_t current;
SArray* pPrevRow; // SArray<SGroupValue>
SArray* pNextRow; // SArray<SGroupValue>
SArray* pLinearInfo; // SArray<SFillLinearInfo>
bool isPrevRowSet;
bool isNextRowSet;
int32_t fillType; // fill type
View File
@ -33,6 +33,15 @@ typedef struct SFillColInfo {
SVariant fillVal;
} SFillColInfo;
typedef struct SFillLinearInfo {
SPoint start;
SPoint end;
bool hasNull;
bool fillLastPoint;
int16_t type;
int32_t bytes;
} SFillLinearInfo;
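SFillLinearInfo keeps the two bracketing sample points (start/end) that genInterpolationResult later feeds to taosGetLinearInterpolationVal. The underlying math is plain linear interpolation; a minimal sketch, assuming double-typed values for readability:

```c
// Value at time t on the segment between (t1, v1) and (t2, v2), with t1 < t2.
static double linearFillAt(int64_t t, int64_t t1, double v1, int64_t t2, double v2) {
  return v1 + (v2 - v1) * (double)(t - t1) / (double)(t2 - t1);
}
```

For example, with start.key = 8 (value 1.0) and end.key = 14 (value 4.0), a fill row at t = 10 gets 1.0 + 3.0 * (2.0 / 6.0) = 2.0.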
typedef struct {
SSchema col;
char* tagVal;
View File
@ -256,7 +256,7 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t
SArray* rightRowLocations = taosArrayInit(8, sizeof(SRowLocation));
SArray* rightCreatedBlocks = taosArrayInit(8, POINTER_BYTES);
int32_t code = TSDB_CODE_SUCCESS;
mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 0, pJoinInfo->leftCol.slotId, pJoinInfo->pLeft,
pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks);
mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight,
@ -264,6 +264,11 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t
size_t leftNumJoin = taosArrayGetSize(leftRowLocations);
size_t rightNumJoin = taosArrayGetSize(rightRowLocations);
code = blockDataEnsureCapacity(pRes, *nRows + leftNumJoin * rightNumJoin);
if (code != TSDB_CODE_SUCCESS) {
qError("%s can not ensure block capacity for join. left: %zu, right: %zu", GET_TASKID(pOperator->pTaskInfo), leftNumJoin, rightNumJoin);
}
if (code == TSDB_CODE_SUCCESS) {
for (int32_t i = 0; i < leftNumJoin; ++i) {
for (int32_t j = 0; j < rightNumJoin; ++j) {
SRowLocation *leftRow = taosArrayGet(leftRowLocations, i);
@ -273,6 +278,7 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t
++*nRows;
}
}
}
for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) {
SSDataBlock* pBlock = taosArrayGetP(rightCreatedBlocks, i);
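As a sizing example: if 3 left rows and 4 right rows share one timestamp, the nested loops append 3 * 4 = 12 joined rows, so the result block must be grown to at least *nRows + 12 up front; guarding the loops on the blockDataEnsureCapacity return code keeps a failed allocation from turning into out-of-bounds writes.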
View File
@ -2236,10 +2236,10 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// build message and send to mnode to fetch the content of system tables.
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
char dbName[TSDB_DB_NAME_LEN] = {0};
const char* name = tNameGetTableName(&pInfo->name);
if (pInfo->showRewrite) {
char dbName[TSDB_DB_NAME_LEN] = {0};
getDBNameFromCondition(pInfo->pCondition, dbName);
sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
}
@ -2249,7 +2249,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
} else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
return sysTableScanUserTags(pOperator);
} else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 &&
IS_SYS_DBNAME(pInfo->req.db)) {
pInfo->showRewrite && IS_SYS_DBNAME(dbName)) {
return sysTableScanUserSTables(pOperator);
} else { // load the meta from mnode of the given epset
if (pOperator->status == OP_EXEC_DONE) {
View File
@ -2088,6 +2088,34 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
pSliceInfo->isNextRowSet = true;
}
static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId);
SFillLinearInfo* pLinearInfo = taosArrayGet(pSliceInfo->pLinearInfo, i);
// null data should not be kept since it cannot be used to perform interpolation
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
int64_t startKey = *(int64_t*)colDataGetData(pTsCol, rowIndex);
int64_t endKey = *(int64_t*)colDataGetData(pTsCol, rowIndex + 1);
pLinearInfo->start.key = startKey;
pLinearInfo->end.key = endKey;
char* val;
val = colDataGetData(pColInfoData, rowIndex);
memcpy(pLinearInfo->start.val, val, pLinearInfo->bytes);
val = colDataGetData(pColInfoData, rowIndex + 1);
memcpy(pLinearInfo->end.val, val, pLinearInfo->bytes);
pLinearInfo->hasNull = false;
} else {
pLinearInfo->hasNull = true;
}
}
}
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pBlock,
SSDataBlock* pResBlock) {
int32_t rows = pResBlock->info.rows;
@ -2131,37 +2159,26 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
case TSDB_FILL_LINEAR: {
#if 0
if (pCtx->start.key == INT64_MIN || pCtx->start.key > pCtx->startTs
|| pCtx->end.key == INT64_MIN || pCtx->end.key < pCtx->startTs) {
// goto interp_exit;
SFillLinearInfo* pLinearInfo = taosArrayGet(pSliceInfo->pLinearInfo, srcSlot);
SPoint start = pLinearInfo->start;
SPoint end = pLinearInfo->end;
SPoint current = {.key = pSliceInfo->current};
current.val = taosMemoryCalloc(pLinearInfo->bytes, 1);
// before interp range, do not fill
if (start.key == INT64_MIN || end.key == INT64_MAX) {
break;
}
double v1 = -1, v2 = -1;
GET_TYPED_DATA(v1, double, pCtx->inputType, &pCtx->start.val);
GET_TYPED_DATA(v2, double, pCtx->inputType, &pCtx->end.val);
SPoint point1 = {.key = ts, .val = &v1};
SPoint point2 = {.key = nextTs, .val = &v2};
SPoint point = {.key = pCtx->startTs, .val = pCtx->pOutput};
int32_t srcType = pCtx->inputType;
if (isNull((char *)&pCtx->start.val, srcType) || isNull((char *)&pCtx->end.val, srcType)) {
setNull(pCtx->pOutput, srcType, pCtx->inputBytes);
if (pLinearInfo->hasNull) {
colDataAppendNULL(pDst, rows);
} else {
bool exceedMax = false, exceedMin = false;
taosGetLinearInterpolationVal(&point, pCtx->outputType, &point1, &point2, TSDB_DATA_TYPE_DOUBLE, &exceedMax, &exceedMin);
if (exceedMax || exceedMin) {
__compar_fn_t func = getComparFunc((int32_t)pCtx->inputType, 0);
if (func(&pCtx->start.val, &pCtx->end.val) <= 0) {
COPY_TYPED_DATA(pCtx->pOutput, pCtx->inputType, exceedMax ? &pCtx->start.val : &pCtx->end.val);
} else {
COPY_TYPED_DATA(pCtx->pOutput, pCtx->inputType, exceedMax ? &pCtx->end.val : &pCtx->start.val);
taosGetLinearInterpolationVal(&current, pLinearInfo->type, &start, &end, pLinearInfo->type);
colDataAppend(pDst, rows, (char *)current.val, false);
}
}
}
#endif
// TODO: pResBlock->info.rows += 1;
pResBlock->info.rows += 1;
break;
}
case TSDB_FILL_PREV: {
@ -2247,6 +2264,55 @@ static int32_t initNextRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB
return TSDB_CODE_SUCCESS;
}
static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
if (pInfo->pLinearInfo != NULL) {
return TSDB_CODE_SUCCESS;
}
pInfo->pLinearInfo = taosArrayInit(4, sizeof(SFillLinearInfo));
if (pInfo->pLinearInfo == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i);
SFillLinearInfo linearInfo = {0};
linearInfo.start.key = INT64_MIN;
linearInfo.end.key = INT64_MAX;
linearInfo.start.val = taosMemoryCalloc(1, pColInfo->info.bytes);
linearInfo.end.val = taosMemoryCalloc(1, pColInfo->info.bytes);
linearInfo.hasNull = false;
linearInfo.fillLastPoint = false;
linearInfo.type = pColInfo->info.type;
linearInfo.bytes = pColInfo->info.bytes;
taosArrayPush(pInfo->pLinearInfo, &linearInfo);
}
return TSDB_CODE_SUCCESS;
}
static int32_t initKeeperInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
int32_t code;
code = initPrevRowsKeeper(pInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
code = initNextRowsKeeper(pInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
code = initFillLinearInfo(pInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
return TSDB_CODE_SUCCESS;
}
static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@ -2279,13 +2345,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
break;
}
int32_t code;
code = initPrevRowsKeeper(pSliceInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
code = initNextRowsKeeper(pSliceInfo, pBlock);
int32_t code = initKeeperInfo(pSliceInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
@ -2313,6 +2373,35 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
pResBlock->info.rows += 1;
doKeepPrevRows(pSliceInfo, pBlock, i);
// for linear interpolation, always fill value between this and next points;
// if it's the first point in data block, also fill values between previous (if there's any) and this point;
// if it's the last point in data block, no need to fill, but reserve this point as the start value for next data block.
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
doKeepLinearInfo(pSliceInfo, pBlock, i);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (i < pBlock->info.rows - 1) {
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
break;
}
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
doSetOperatorCompleted(pOperator);
break;
}
} else {
// ignore current row, and do nothing
}
} else { // it is the last row of current block
}
} else { // non-linear interpolation
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pSliceInfo->current > pSliceInfo->win.ekey) {
@ -2323,12 +2412,39 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
if (pResBlock->info.rows >= pResBlock->info.capacity) {
break;
}
}
} else if (ts < pSliceInfo->current) {
// in case interpolation window starts and ends between two datapoints, fill(prev) need to interpolate
// in case the interpolation window starts and ends between two datapoints, fill(prev) needs to interpolate
doKeepPrevRows(pSliceInfo, pBlock, i);
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
doKeepLinearInfo(pSliceInfo, pBlock, i);
//pSliceInfo->current =
// taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (i < pBlock->info.rows - 1) {
// in case the interpolation window starts and ends between two datapoints, fill(next) needs to interpolate
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
break;
}
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
doSetOperatorCompleted(pOperator);
break;
}
} else {
// ignore current row, and do nothing
}
} else { // it is the last row of current block
}
} else { // non-linear interpolation
if (i < pBlock->info.rows - 1) {
// in case the interpolation window starts and ends between two datapoints, fill(next) needs to interpolate
doKeepNextRows(pSliceInfo, pBlock, i + 1);
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
@ -2351,8 +2467,9 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
} else { // it is the last row of current block
doKeepPrevRows(pSliceInfo, pBlock, i);
}
}
} else { // ts > pSliceInfo->current
// in case interpolation window starts and ends between two datapoints, fill(next) need to interpolate
// in case the interpolation window starts and ends between two datapoints, fill(next) needs to interpolate
doKeepNextRows(pSliceInfo, pBlock, i);
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
@ -2381,6 +2498,33 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
pResBlock->info.rows += 1;
doKeepPrevRows(pSliceInfo, pBlock, i);
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
doKeepLinearInfo(pSliceInfo, pBlock, i);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (i < pBlock->info.rows - 1) {
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
if (pResBlock->info.rows >= pResBlock->info.capacity) {
break;
}
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
doSetOperatorCompleted(pOperator);
break;
}
} else {
// ignore current row, and do nothing
}
} else { // it is the last row of current block
}
} else { // non-linear interpolation
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
@ -2388,6 +2532,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
break;
}
}
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
doSetOperatorCompleted(pOperator);
@ -2447,6 +2592,9 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pInfo->fillType = convertFillType(pInterpPhyNode->fillMode);
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pPrevRow = NULL;
pInfo->pNextRow = NULL;
pInfo->pLinearInfo = NULL;
pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, (SNodeListNode*)pInterpPhyNode->pFillValues);
pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
pInfo->win = pInterpPhyNode->timeRange;
View File
@ -3848,14 +3848,17 @@ int32_t spreadFunctionMerge(SqlFunctionCtx* pCtx) {
SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t start = pInput->startRowIndex;
for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i);
SSpreadInfo* pInputInfo = (SSpreadInfo*)varDataVal(data);
if (pInputInfo->hasResult) {
spreadTransferInfo(pInputInfo, pInfo);
}
}
SET_VAL(GET_RES_INFO(pCtx), 1, 1);
if (pInfo->hasResult) {
GET_RES_INFO(pCtx)->numOfRes = 1;
}
return TSDB_CODE_SUCCESS;
}
@ -3864,6 +3867,8 @@ int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
if (pInfo->hasResult == true) {
SET_DOUBLE_VAL(&pInfo->result, pInfo->max - pInfo->min);
} else {
GET_RES_INFO(pCtx)->isNullRes = 1;
}
return functionFinalize(pCtx, pBlock);
}
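A worked example of the merge/finalize pair: merging partials {min: 2, max: 9} and {min: 5, max: 20} yields {min: 2, max: 20}, so the finalized spread is 20 - 2 = 18; and when no partial had hasResult set, the result row is now emitted as NULL rather than a misleading 0.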
View File
@ -390,6 +390,7 @@ static int32_t logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
CLONE_NODE_LIST_FIELD(pProjections);
COPY_CHAR_ARRAY_FIELD(stmtName);
COPY_SCALAR_FIELD(ignoreGroupId);
return TSDB_CODE_SUCCESS;
}
View File
@ -655,6 +655,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
}
static const char* jkProjectLogicPlanProjections = "Projections";
static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId";
static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj;
@ -663,6 +664,9 @@ static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkProjectLogicPlanProjections, pNode->pProjections);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId);
}
return code;
}
@ -674,6 +678,9 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkProjectLogicPlanProjections, &pNode->pProjections);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId);
}
return code;
}
@ -1689,6 +1696,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
static const char* jkProjectPhysiPlanProjections = "Projections";
static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock";
static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId";
static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj;
@ -1700,6 +1708,9 @@ static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanMergeDataBlock, pNode->mergeDataBlock);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId);
}
return code;
}
@ -1714,6 +1725,9 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanMergeDataBlock, &pNode->mergeDataBlock);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId);
}
return code;
}
View File
@ -392,6 +392,9 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode*
static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); }
static void destroyTableCfg(STableCfg* pCfg) {
if (NULL == pCfg) {
return;
}
taosArrayDestroy(pCfg->pFuncs);
taosMemoryFree(pCfg->pComment);
taosMemoryFree(pCfg->pSchemas);
View File
@ -339,6 +339,11 @@ static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt*
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowCluster(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES,
pCxt->pMetaCache);
@ -547,6 +552,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_BNODES_STMT:
return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_CLUSTER_STMT:
return collectMetaKeyFromShowCluster(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_DATABASES_STMT:
return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_FUNCTIONS_STMT:
View File
@ -525,7 +525,7 @@ static int sortMergeDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* p
SBlockRowMerger** ppBlkRowMerger) {
SSubmitBlk* pBlocks = (SSubmitBlk*)dataBuf->pData;
STableMeta* pTableMeta = dataBuf->pTableMeta;
int16_t nRows = pBlocks->numOfRows;
int32_t nRows = pBlocks->numOfRows;
// size is less than the total size, since duplicated rows may be removed.
@ -546,7 +546,7 @@ static int sortMergeDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* p
int32_t extendedRowSize = getExtendedRowSize(dataBuf);
SBlockKeyTuple* pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
char* pBlockData = pBlocks->data + pBlocks->schemaLen;
int n = 0;
int32_t n = 0;
while (n < nRows) {
pBlkKeyTuple->skey = TD_ROW_KEY((STSRow*)pBlockData);
pBlkKeyTuple->payloadAddr = pBlockData;
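The int16_t-to-int32_t widening guards against truncation: if pBlocks->numOfRows exceeds INT16_MAX (32767), a narrower copy wraps negative and the while (n < nRows) dedup loop never runs for large blocks.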
View File
@ -119,6 +119,12 @@ void generateInformationSchema(MockCatalogService* mcs) {
.addColumn("dnode_id", TSDB_DATA_TYPE_INT);
builder.done();
}
{
ITableBuilder& builder =
mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1)
.addColumn("id", TSDB_DATA_TYPE_BIGINT);
builder.done();
}
}
void generatePerformanceSchema(MockCatalogService* mcs) {
View File
@ -25,6 +25,15 @@ class ParserShowToUseTest : public ParserDdlTest {};
// todo SHOW apps
// todo SHOW connections
TEST_F(ParserShowToUseTest, showCluster) {
useDb("root", "test");
setCheckDdlFunc(
[&](const SQuery* pQuery, ParserStage stage) { ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_SELECT_STMT); });
run("SHOW CLUSTER");
}
TEST_F(ParserShowToUseTest, showConsumers) {
useDb("root", "test");
View File
@ -881,6 +881,7 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel
TSWAP(pProject->node.pLimit, pSelect->pLimit);
TSWAP(pProject->node.pSlimit, pSelect->pSlimit);
pProject->ignoreGroupId = (NULL == pSelect->pPartitionByList);
pProject->node.groupAction =
(!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
@ -1094,6 +1095,7 @@ static int32_t createSetOpProjectLogicNode(SLogicPlanContext* pCxt, SSetOperator
if (NULL == pSetOperator->pOrderByList) {
TSWAP(pProject->node.pLimit, pSetOperator->pLimit);
}
pProject->ignoreGroupId = true;
int32_t code = TSDB_CODE_SUCCESS;
View File
@ -1000,6 +1000,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild
}
pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode);
pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId;
int32_t code = TSDB_CODE_SUCCESS;
if (0 == LIST_LENGTH(pChildren)) {
View File
@ -415,7 +415,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
return TSDB_CODE_SUCCESS;
}
int32_t metaSize = (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
int32_t metaSize = sizeof(STableMeta) + (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
*pDst = taosMemoryMalloc(metaSize);
if (NULL == *pDst) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
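The corrected metaSize accounts for the fixed STableMeta header as well as the schema entries, because the clone copies a struct whose schema array lives inline right after the header. A minimal sketch of the pattern (field names taken from the call above):

```c
// Header plus inline schema array must be allocated and copied as one block;
// omitting sizeof(STableMeta) undersizes the clone and corrupts the copy.
static STableMeta* cloneMetaSketch(const STableMeta *pSrc) {
  int32_t metaSize = sizeof(STableMeta) +
                     (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema);
  STableMeta *pClone = taosMemoryMalloc(metaSize);
  if (pClone != NULL) {
    memcpy(pClone, pSrc, metaSize);  // header and trailing schemas in one shot
  }
  return pClone;
}
```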
View File
@ -411,7 +411,7 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
if (pJob->fetched) {
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
SCH_TASK_ELOG("already fetched while got error %s", tstrerror(rspCode));
SCH_ERR_RET(rspCode);
SCH_ERR_JRET(rspCode);
}
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
View File
@ -154,6 +154,8 @@ void schedulerFreeJob(int64_t* jobId, int32_t errCode) {
return;
}
SCH_JOB_DLOG("start to free job 0x%" PRIx64 ", errCode:0x%x", *jobId, errCode);
schHandleJobDrop(pJob, errCode);
schReleaseJob(*jobId);
View File
@ -136,6 +136,7 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg*
pRsp->pCont = buf;
pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
tmsgSendRsp(pRsp);
tFreeStreamDispatchReq(pReq);
return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1;
}
View File
@ -62,6 +62,11 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
return 0;
}
void tFreeStreamDispatchReq(SStreamDispatchReq* pReq) {
taosArrayDestroyP(pReq->data, taosMemoryFree);
taosArrayDestroy(pReq->dataLen);
}
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@ -279,7 +284,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
}
code = 0;
FAIL_FIXED_DISPATCH:
taosArrayDestroy(req.data);
taosArrayDestroyP(req.data, taosMemoryFree);
taosArrayDestroy(req.dataLen);
return code;
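Both changes apply the same ownership rule: req.data is an array of heap-allocated payload buffers, so plain taosArrayDestroy would free the array but leak every element. A minimal sketch of the rule, with a hypothetical payload buffer:

```c
// taosArrayDestroyP frees each stored pointer with the supplied callback
// first, then destroys the array itself; taosArrayDestroy would leak `blob`.
static void arrayOwnershipSketch(void) {
  SArray *data = taosArrayInit(8, POINTER_BYTES);
  void   *blob = taosMemoryCalloc(1, 64);   // hypothetical payload buffer
  taosArrayPush(data, &blob);               // the array stores only the pointer
  taosArrayDestroyP(data, taosMemoryFree);  // frees blob, then the array
}
```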
View File
@ -15,26 +15,26 @@
#include "streamInc.h"
static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes) {
static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray* pRes) {
void* exec = pTask->exec.executor;
// set input
SStreamQueueItem* pItem = (SStreamQueueItem*)data;
const SStreamQueueItem* pItem = (const SStreamQueueItem*)data;
if (pItem->type == STREAM_INPUT__GET_RES) {
SStreamTrigger* pTrigger = (SStreamTrigger*)data;
const SStreamTrigger* pTrigger = (const SStreamTrigger*)data;
qSetMultiStreamInput(exec, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)data;
qDebug("task %d %p set submit input %p %p %d 1", pTask->taskId, pTask, pSubmit, pSubmit->data, *pSubmit->dataRef);
qSetMultiStreamInput(exec, pSubmit->data, 1, STREAM_INPUT__DATA_SUBMIT);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
const SStreamDataBlock* pBlock = (const SStreamDataBlock*)data;
SArray* blocks = pBlock->blocks;
qDebug("task %d %p set ssdata input", pTask->taskId, pTask);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)data;
const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)data;
SArray* blocks = pMerged->reqs;
qDebug("task %d %p set submit input (merged), batch num: %d", pTask->taskId, pTask, (int32_t)blocks->size);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__MERGED_SUBMIT);
@ -52,7 +52,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
if (output == NULL) {
if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
SSDataBlock block = {0};
SStreamDataBlock* pRetrieveBlock = (SStreamDataBlock*)data;
const SStreamDataBlock* pRetrieveBlock = (const SStreamDataBlock*)data;
ASSERT(taosArrayGetSize(pRetrieveBlock->blocks) == 1);
assignOneDataBlock(&block, taosArrayGet(pRetrieveBlock->blocks, 0));
block.info.type = STREAM_PULL_OVER;
@ -82,14 +82,16 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
return 0;
}
#if 0
static FORCE_INLINE int32_t streamUpdateVer(SStreamTask* pTask, SStreamDataBlock* pBlock) {
ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK);
int32_t childId = pBlock->childId;
int64_t ver = pBlock->sourceVer;
SStreamChildEpInfo* pChildInfo = taosArrayGetP(pTask->childEpInfo, childId);
pChildInfo->processedVer = ver;
/*pChildInfo-> = ver;*/
return 0;
}
#endif
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum) {
ASSERT(pTask->taskLevel != TASK_LEVEL__SINK);
@ -198,6 +200,8 @@ int32_t streamExecForAll(SStreamTask* pTask) {
streamTaskExecImpl(pTask, data, pRes);
qDebug("stream task %d exec end", pTask->taskId);
streamFreeQitem(data);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
if (qRes == NULL) {
View File
@ -48,8 +48,18 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
pMeta->ahandle = ahandle;
pMeta->expandFunc = expandFunc;
if (streamLoadTasks(pMeta) < 0) {
goto _err;
}
return pMeta;
_err:
if (pMeta->path) taosMemoryFree(pMeta->path);
if (pMeta->pTasks) taosHashCleanup(pMeta->pTasks);
if (pMeta->pStateDb) tdbTbClose(pMeta->pStateDb);
if (pMeta->pTaskDb) tdbTbClose(pMeta->pTaskDb);
if (pMeta->db) tdbClose(pMeta->db);
taosMemoryFree(pMeta);
return NULL;
}
View File
@ -87,66 +87,102 @@ int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp
return 0;
}
typedef struct {
int32_t vgId;
int32_t childId;
int64_t ver;
} SStreamVgVerCheckpoint;
int32_t tEncodeSStreamVgVerCheckpoint(SEncoder* pEncoder, const SStreamVgVerCheckpoint* pCheckpoint) {
if (tEncodeI32(pEncoder, pCheckpoint->vgId) < 0) return -1;
int32_t tEncodeSStreamCheckpointInfo(SEncoder* pEncoder, const SStreamCheckpointInfo* pCheckpoint) {
if (tEncodeI32(pEncoder, pCheckpoint->nodeId) < 0) return -1;
if (tEncodeI32(pEncoder, pCheckpoint->childId) < 0) return -1;
if (tEncodeI64(pEncoder, pCheckpoint->ver) < 0) return -1;
if (tEncodeI64(pEncoder, pCheckpoint->stateProcessedVer) < 0) return -1;
return 0;
}
int32_t tDecodeSStreamVgVerCheckpoint(SDecoder* pDecoder, SStreamVgVerCheckpoint* pCheckpoint) {
if (tDecodeI32(pDecoder, &pCheckpoint->vgId) < 0) return -1;
int32_t tDecodeSStreamCheckpointInfo(SDecoder* pDecoder, SStreamCheckpointInfo* pCheckpoint) {
if (tDecodeI32(pDecoder, &pCheckpoint->nodeId) < 0) return -1;
if (tDecodeI32(pDecoder, &pCheckpoint->childId) < 0) return -1;
if (tDecodeI64(pDecoder, &pCheckpoint->ver) < 0) return -1;
if (tDecodeI64(pDecoder, &pCheckpoint->stateProcessedVer) < 0) return -1;
return 0;
}
typedef struct {
int64_t streamId;
int64_t checkTs;
int64_t checkpointId;
int32_t taskId;
SArray* checkpointVer; // SArray<SStreamVgCheckpointVer>
} SStreamAggVerCheckpoint;
int32_t tEncodeSStreamAggVerCheckpoint(SEncoder* pEncoder, const SStreamAggVerCheckpoint* pCheckpoint) {
int32_t tEncodeSStreamMultiVgCheckpointInfo(SEncoder* pEncoder, const SStreamMultiVgCheckpointInfo* pCheckpoint) {
if (tEncodeI64(pEncoder, pCheckpoint->streamId) < 0) return -1;
if (tEncodeI64(pEncoder, pCheckpoint->checkTs) < 0) return -1;
if (tEncodeI64(pEncoder, pCheckpoint->checkpointId) < 0) return -1;
if (tEncodeI32(pEncoder, pCheckpoint->checkpointId) < 0) return -1;
if (tEncodeI32(pEncoder, pCheckpoint->taskId) < 0) return -1;
int32_t sz = taosArrayGetSize(pCheckpoint->checkpointVer);
if (tEncodeI32(pEncoder, sz) < 0) return -1;
for (int32_t i = 0; i < sz; i++) {
SStreamVgVerCheckpoint* pOneVgCkpoint = taosArrayGet(pCheckpoint->checkpointVer, i);
if (tEncodeSStreamVgVerCheckpoint(pEncoder, pOneVgCkpoint) < 0) return -1;
SStreamCheckpointInfo* pOneVgCkpoint = taosArrayGet(pCheckpoint->checkpointVer, i);
if (tEncodeSStreamCheckpointInfo(pEncoder, pOneVgCkpoint) < 0) return -1;
}
return 0;
}
int32_t tDecodeSStreamAggVerCheckpoint(SDecoder* pDecoder, SStreamAggVerCheckpoint* pCheckpoint) {
int32_t tDecodeSStreamMultiVgCheckpointInfo(SDecoder* pDecoder, SStreamMultiVgCheckpointInfo* pCheckpoint) {
if (tDecodeI64(pDecoder, &pCheckpoint->streamId) < 0) return -1;
if (tDecodeI64(pDecoder, &pCheckpoint->checkTs) < 0) return -1;
if (tDecodeI64(pDecoder, &pCheckpoint->checkpointId) < 0) return -1;
if (tDecodeI32(pDecoder, &pCheckpoint->checkpointId) < 0) return -1;
if (tDecodeI32(pDecoder, &pCheckpoint->taskId) < 0) return -1;
int32_t sz;
if (tDecodeI32(pDecoder, &sz) < 0) return -1;
for (int32_t i = 0; i < sz; i++) {
SStreamVgVerCheckpoint oneVgCheckpoint;
if (tDecodeSStreamVgVerCheckpoint(pDecoder, &oneVgCheckpoint) < 0) return -1;
SStreamCheckpointInfo oneVgCheckpoint;
if (tDecodeSStreamCheckpointInfo(pDecoder, &oneVgCheckpoint) < 0) return -1;
taosArrayPush(pCheckpoint->checkpointVer, &oneVgCheckpoint);
}
return 0;
}
int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
void* buf = NULL;
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
// load status
SStreamMultiVgCheckpointInfo checkpoint;
checkpoint.checkpointId = atomic_fetch_add_32(&pTask->nextCheckId, 1);
checkpoint.checkTs = taosGetTimestampMs();
checkpoint.streamId = pTask->streamId;
checkpoint.taskId = pTask->taskId;
checkpoint.checkpointVer = pTask->checkpointInfo;
int32_t len;
int32_t code;
tEncodeSize(tEncodeSStreamMultiVgCheckpointInfo, &checkpoint, len, code);
if (code < 0) {
return -1;
}
buf = taosMemoryCalloc(1, len);
if (buf == NULL) {
return -1;
}
SEncoder encoder;
tEncoderInit(&encoder, buf, len);
tEncodeSStreamMultiVgCheckpointInfo(&encoder, &checkpoint);
tEncoderClear(&encoder);
SStreamCheckpointKey key = {
.taskId = pTask->taskId,
.checkpointId = checkpoint.checkpointId,
};
if (tdbTbUpsert(pMeta->pStateDb, &key, sizeof(SStreamCheckpointKey), buf, len, &pMeta->txn) < 0) {
ASSERT(0);
goto FAIL;
}
int32_t sz = taosArrayGetSize(pTask->checkpointInfo);
for (int32_t i = 0; i < sz; i++) {
SStreamCheckpointInfo* pCheck = taosArrayGet(pTask->checkpointInfo, i);
pCheck->stateSaveVer = pCheck->stateProcessedVer;
}
taosMemoryFree(buf);
return 0;
FAIL:
if (buf) taosMemoryFree(buf);
return -1;
return 0;
}
int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
void* pVal = NULL;
int32_t vLen = 0;
if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) {
@ -154,9 +190,81 @@ int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
}
SDecoder decoder;
tDecoderInit(&decoder, pVal, vLen);
SStreamAggVerCheckpoint aggCheckpoint;
tDecodeSStreamAggVerCheckpoint(&decoder, &aggCheckpoint);
/*pTask->*/
SStreamMultiVgCheckpointInfo aggCheckpoint;
tDecodeSStreamMultiVgCheckpointInfo(&decoder, &aggCheckpoint);
tDecoderClear(&decoder);
pTask->nextCheckId = aggCheckpoint.checkpointId + 1;
pTask->checkpointInfo = aggCheckpoint.checkpointVer;
return 0;
}
int32_t streamSaveSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
return streamSaveStateInfo(pMeta, pTask);
}
int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
return streamLoadStateInfo(pMeta, pTask);
}
int32_t streamSaveAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
// TODO save and copy state
// save state info
if (streamSaveStateInfo(pMeta, pTask) < 0) {
return -1;
}
return 0;
}
int32_t streamFetchSinkStatus(SStreamTask* pTask) {
ASSERT(pTask->taskLevel != TASK_LEVEL__SINK);
// set self status to recover_phase1
// build fetch status msg
// send fetch msg
return 0;
}
int32_t streamProcessFetchStatusRsp(SStreamMeta* pMeta, SStreamTask* pTask, void* msg) {
// if failed, set timer and retry
// if successful
// add rsp state to partial recover hash
// if complete, begin actual recover
return 0;
}
int32_t streamRecoverAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
// recover sink level
// after all sink level recovered
// choose suitable state to recover
return 0;
}
int32_t streamSaveSourceLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
// TODO: save and copy state
return 0;
}
int32_t streamRecoverSourceLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
// if totLevel == 3
// fetch agg state
// recover from local state to agg state, not send msg
// recover from agg state to most recent log v1
// enable input queue, set status recover_phase2
// recover from v1 to queue msg v2, set status normal
// if totLevel == 2
// fetch sink state
// recover from local state to sink state v1, send msg
// enable input queue, set status recover_phase2
// recover from v1 to queue msg v2, set status normal
return 0;
}
View File
@ -34,7 +34,7 @@ int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo)
if (tEncodeI32(pEncoder, pInfo->taskId) < 0) return -1;
if (tEncodeI32(pEncoder, pInfo->nodeId) < 0) return -1;
if (tEncodeI32(pEncoder, pInfo->childId) < 0) return -1;
if (tEncodeI64(pEncoder, pInfo->processedVer) < 0) return -1;
/*if (tEncodeI64(pEncoder, pInfo->processedVer) < 0) return -1;*/
if (tEncodeSEpSet(pEncoder, &pInfo->epSet) < 0) return -1;
return 0;
}
@ -43,7 +43,7 @@ int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo) {
if (tDecodeI32(pDecoder, &pInfo->taskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pInfo->nodeId) < 0) return -1;
if (tDecodeI32(pDecoder, &pInfo->childId) < 0) return -1;
if (tDecodeI64(pDecoder, &pInfo->processedVer) < 0) return -1;
/*if (tDecodeI64(pDecoder, &pInfo->processedVer) < 0) return -1;*/
if (tDecodeSEpSet(pDecoder, &pInfo->epSet) < 0) return -1;
return 0;
}
View File
@ -162,6 +162,9 @@ typedef struct SSyncNode {
// is config changing
bool changing;
int64_t startTime;
int64_t lastReplicateTime;
} SSyncNode;
// open/close --------------
@ -186,15 +189,19 @@ int32_t syncNodePingAll(SSyncNode* pSyncNode);
// timer control --------------
int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode);
int32_t syncNodeStopPingTimer(SSyncNode* pSyncNode);
int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms);
int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode);
int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms);
int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode);
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode);
int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode);
int32_t syncNodeStartHeartbeatTimerNow(SSyncNode* pSyncNode);
int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms);
int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode);
int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode);
int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode);
int32_t syncNodeRestartHeartbeatTimerNow(SSyncNode* pSyncNode);
int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms);
// utils --------------
int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg);
View File
@ -55,7 +55,7 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode);
int32_t syncNodeReplicate(SSyncNode* pSyncNode);
int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer);
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg);
int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg);

View File
return s;
}
bool syncRestoreFinish(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
return false;
}
ASSERT(rid == pSyncNode->rid);
bool restoreFinish = pSyncNode->restoreFinish;
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
return restoreFinish;
}
SyncTerm syncGetMyTerm(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@ -1086,6 +1098,10 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
// start raft
// syncNodeBecomeFollower(pSyncNode);
int64_t timeNow = taosGetTimestampMs();
pSyncNode->startTime = timeNow;
pSyncNode->lastReplicateTime = timeNow;
syncNodeEventLog(pSyncNode, "sync open");
return pSyncNode;
@ -1303,7 +1319,7 @@ int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode) {
return ret;
}
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
static int32_t syncNodeDoStartHeartbeatTimer(SSyncNode* pSyncNode) {
int32_t ret = 0;
if (syncEnvIsStart()) {
taosTmrReset(pSyncNode->FpHeartbeatTimerCB, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager,
@ -1322,21 +1338,21 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
return ret;
}
int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) {
int32_t ret = 0;
if (syncEnvIsStart()) {
taosTmrReset(pSyncNode->FpHeartbeatTimerCB, 1, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer);
atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
} else {
sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
pSyncNode->heartbeatTimerMS = pSyncNode->hbBaseLine;
int32_t ret = syncNodeDoStartHeartbeatTimer(pSyncNode);
return ret;
}
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", 1);
syncNodeEventLog(pSyncNode, logBuf);
} while (0);
int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) {
pSyncNode->heartbeatTimerMS = ms;
int32_t ret = syncNodeDoStartHeartbeatTimer(pSyncNode);
return ret;
}
int32_t syncNodeStartHeartbeatTimerNow(SSyncNode* pSyncNode) {
pSyncNode->heartbeatTimerMS = 1;
int32_t ret = syncNodeDoStartHeartbeatTimer(pSyncNode);
return ret;
}
@ -1357,9 +1373,15 @@ int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode) {
return 0;
}
int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode) {
int32_t syncNodeRestartHeartbeatTimerNow(SSyncNode* pSyncNode) {
syncNodeStopHeartbeatTimer(pSyncNode);
syncNodeStartNowHeartbeatTimer(pSyncNode);
syncNodeStartHeartbeatTimerNow(pSyncNode);
return 0;
}
int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) {
syncNodeStopHeartbeatTimer(pSyncNode);
syncNodeStartHeartbeatTimerMS(pSyncNode, ms);
return 0;
}
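
The heartbeat-timer refactor above funnels every start variant through one worker: `syncNodeDoStartHeartbeatTimer` arms the timer from `pSyncNode->heartbeatTimerMS`, and the public entry points differ only in the interval they store first (the `hbBaseLine` baseline, an explicit `ms`, or 1 ms for "now"). A minimal standalone sketch of that shape — hypothetical `Node` type, timer arming stubbed out with a print:

```c
#include <stdio.h>

/* Stand-in for SSyncNode; only the fields the helper reads. */
typedef struct {
  int heartbeatTimerMS;
  int hbBaseLine;
} Node;

/* The single place that actually arms the timer (stubbed here). */
static int doStartHeartbeatTimer(Node *n) {
  printf("arm heartbeat timer: %d ms\n", n->heartbeatTimerMS);
  return 0;
}

/* Public entry points just pick the interval, then delegate. */
int startHeartbeatTimer(Node *n)           { n->heartbeatTimerMS = n->hbBaseLine; return doStartHeartbeatTimer(n); }
int startHeartbeatTimerMS(Node *n, int ms) { n->heartbeatTimerMS = ms;            return doStartHeartbeatTimer(n); }
int startHeartbeatTimerNow(Node *n)        { n->heartbeatTimerMS = 1;             return doStartHeartbeatTimer(n); }

int main(void) {
  Node n = {.heartbeatTimerMS = 0, .hbBaseLine = 100};
  startHeartbeatTimer(&n);       /* baseline: 100 ms         */
  startHeartbeatTimerMS(&n, 20); /* explicit interval: 20 ms */
  startHeartbeatTimerNow(&n);    /* fire almost immediately  */
  return 0;
}
```

The payoff appears below in `syncNodeReplicate`, which can now switch between a sped-up and a baseline interval through the same helper.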
@ -1931,9 +1953,6 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
// Raft 3.6.2 Committing entries from previous terms
syncNodeAppendNoop(pSyncNode);
#if 0 // simon
syncNodeReplicate(pSyncNode);
#endif
syncMaybeAdvanceCommitIndex(pSyncNode);
} else {
@ -2115,9 +2134,6 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {
// Raft 3.6.2 Committing entries from previous terms
syncNodeAppendNoop(pSyncNode);
#if 0 // simon
syncNodeReplicate(pSyncNode);
#endif
syncMaybeAdvanceCommitIndex(pSyncNode);
}
@ -2488,7 +2504,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
if (ths->state == TAOS_SYNC_STATE_LEADER) {
int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
ASSERT(code == 0);
syncNodeReplicate(ths);
syncNodeReplicate(ths, false);
}
syncEntryDestory(pEntry);
@ -2561,7 +2577,7 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI
// if multi replica, start replicate right now
if (ths->replicaNum > 1) {
syncNodeReplicate(ths);
syncNodeReplicate(ths, false);
// pre commit
syncNodePreCommit(ths, pEntry, 0);
@ -2630,7 +2646,7 @@ int32_t syncNodeOnClientRequestBatchCb(SSyncNode* ths, SyncClientRequestBatch* p
if (ths->replicaNum > 1) {
// if multi replica, start replicate right now
syncNodeReplicate(ths);
syncNodeReplicate(ths, false);
} else if (ths->replicaNum == 1) {
// one replica

View File

@ -200,9 +200,25 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
// send msg
syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);
syncAppendEntriesBatchDestroy(pMsg);
// speed up
if (pMsg->dataCount > 0 && pSyncNode->commitIndex - pMsg->prevLogIndex > SYNC_SLOW_DOWN_RANGE) {
ret = 1;
#if 0
do {
char logBuf[128];
char host[64];
uint16_t port;
syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
snprintf(logBuf, sizeof(logBuf), "maybe speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex);
syncNodeEventLog(pSyncNode, logBuf);
} while (0);
#endif
}
}
return 0;
return ret;
}
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
@ -287,7 +303,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
return ret;
}
int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer) {
// start replicate
int32_t ret = 0;
@ -309,8 +325,40 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
break;
}
// start delay
int64_t timeNow = taosGetTimestampMs();
int64_t startDelay = timeNow - pSyncNode->startTime;
// replicate delay
int64_t replicateDelay = timeNow - pSyncNode->lastReplicateTime;
pSyncNode->lastReplicateTime = timeNow;
if (ret > 0 && isTimer && startDelay > SYNC_SPEED_UP_AFTER_MS) {
// speed up replicate
int32_t ms =
pSyncNode->heartbeatTimerMS < SYNC_SPEED_UP_HB_TIMER ? pSyncNode->heartbeatTimerMS : SYNC_SPEED_UP_HB_TIMER;
syncNodeRestartNowHeartbeatTimerMS(pSyncNode, ms);
#if 0
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "replicate speed up");
syncNodeEventLog(pSyncNode, logBuf);
} while (0);
#endif
} else {
syncNodeRestartHeartbeatTimer(pSyncNode);
#if 0
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "replicate slow down");
syncNodeEventLog(pSyncNode, logBuf);
} while (0);
#endif
}
return ret;
}
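
The new tail of `syncNodeReplicate` amounts to an adaptive heartbeat policy: shorten the interval only when a peer is lagging (`ret > 0` from the batch sender), the call came from the timer (`isTimer`), and the node has been up longer than `SYNC_SPEED_UP_AFTER_MS`; otherwise restart at the baseline. A sketch of just that decision — the threshold values here are assumed for illustration, the real ones live in the sync headers:

```c
#include <stdbool.h>
#include <stdio.h>

#define SYNC_SPEED_UP_AFTER_MS 1000 /* assumed value, for the example only */
#define SYNC_SPEED_UP_HB_TIMER 5    /* assumed value, for the example only */

/* Returns the heartbeat interval to arm next, mirroring the branch above:
 * speed up only if a peer lagged, the timer drove this call, and enough
 * time has passed since the node started. */
static int nextHeartbeatMS(int lagging, bool isTimer, long long startDelayMS,
                           int currentHbMS, int baselineMS) {
  if (lagging > 0 && isTimer && startDelayMS > SYNC_SPEED_UP_AFTER_MS) {
    return currentHbMS < SYNC_SPEED_UP_HB_TIMER ? currentHbMS : SYNC_SPEED_UP_HB_TIMER;
  }
  return baselineMS; /* slow back down to the configured baseline */
}

int main(void) {
  printf("%d\n", nextHeartbeatMS(1, true, 5000, 100, 100));  /* 5: speed up           */
  printf("%d\n", nextHeartbeatMS(0, true, 5000, 100, 100));  /* 100: nobody lagging   */
  printf("%d\n", nextHeartbeatMS(1, false, 5000, 100, 100)); /* 100: not timer-driven */
  return 0;
}
```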

View File

@ -58,7 +58,7 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
++(ths->heartbeatTimerCounter);
sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
syncNodeReplicate(ths);
syncNodeReplicate(ths, true);
}
} else {
sError("vgId:%d, unknown timeout-type:%d", ths->vgId, pMsg->timeoutType);

View File

@ -105,13 +105,13 @@ typedef SRpcCtxVal STransCtxVal;
typedef SRpcInfo STrans;
typedef SRpcConnInfo STransHandleInfo;
// ref mgt
// handle
// ref mgt handle
typedef struct SExHandle {
void* handle;
int64_t refId;
void* pThrd;
} SExHandle;
/* convert from fqdn to ip */
typedef struct SCvtAddr {
char ip[TSDB_FQDN_LEN];

View File

@ -1381,6 +1381,7 @@ int transReleaseCliHandle(void* handle) {
tGDebug("send release request at thread:%08" PRId64 "", pThrd->pid);
if (0 != transAsyncSend(pThrd->asyncPool, &cmsg->q)) {
taosMemoryFree(cmsg);
return -1;
}
return 0;
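
The added `return -1` above tightens the ownership contract: if `transAsyncSend` refuses the message, this function frees it and must report failure instead of the old unconditional `return 0`. The pattern in miniature, with stand-in names:

```c
#include <stdlib.h>

typedef struct { int payload; } Msg;

/* Stand-in for transAsyncSend: 0 on success, non-zero on failure. */
static int asyncSend(Msg *m) { (void)m; return -1; /* pretend the queue is gone */ }

int releaseHandle(void) {
  Msg *m = calloc(1, sizeof(Msg));
  if (m == NULL) return -1;
  if (asyncSend(m) != 0) {
    free(m);   /* we still own it: the queue never took it */
    return -1; /* the fix: surface the failure to the caller */
  }
  return 0;    /* the queue owns m now */
}
```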

View File

@ -222,14 +222,13 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync);
for (int i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);
uv_async_init(loop, async, cb);
SAsyncItem* item = taosMemoryCalloc(1, sizeof(SAsyncItem));
item->pThrd = arg;
QUEUE_INIT(&item->qmsg);
taosThreadMutexInit(&item->mtx, NULL);
uv_async_t* async = &(pool->asyncs[i]);
uv_async_init(loop, async, cb);
async->data = item;
}
return pool;
@ -238,7 +237,7 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
void transAsyncPoolDestroy(SAsyncPool* pool) {
for (int i = 0; i < pool->nAsync; i++) {
uv_async_t* async = &(pool->asyncs[i]);
// uv_close((uv_handle_t*)async, NULL);
SAsyncItem* item = async->data;
taosThreadMutexDestroy(&item->mtx);
taosMemoryFree(item);
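
The reordering in `transAsyncPoolCreate` matters because a `uv_async_t` can fire as soon as any thread calls `uv_async_send` on it; building the `SAsyncItem` first guarantees the callback never observes a half-constructed `async->data`. A self-contained libuv sketch of the safe ordering (build the payload, init the handle, publish `data`, only then send); build with `gcc demo.c -luv`:

```c
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

typedef struct { int id; } Item;

static void onAsync(uv_async_t *handle) {
  Item *item = handle->data; /* safe: fully built before the first send */
  printf("async fired for item %d\n", item->id);
  uv_close((uv_handle_t *)handle, NULL); /* let the loop drain and exit */
}

int main(void) {
  uv_loop_t *loop = uv_default_loop();
  uv_async_t async;

  Item *item = calloc(1, sizeof(Item)); /* 1. fully construct the payload */
  item->id = 42;

  uv_async_init(loop, &async, onAsync); /* 2. init the handle             */
  async.data = item;                    /* 3. publish the payload         */
  uv_async_send(&async);                /* 4. only now can it fire        */

  uv_run(loop, UV_RUN_DEFAULT);
  free(item);
  return 0;
}
```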

View File

@ -703,7 +703,11 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in
int64_t sentbytes;
while (leftbytes > 0) {
#ifdef _TD_ARM_32
sentbytes = sendfile(pFileOut->fd, pFileIn->fd, (long int*)offset, leftbytes);
#else
sentbytes = sendfile(pFileOut->fd, pFileIn->fd, offset, leftbytes);
#endif
if (sentbytes == -1) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
continue;
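
The `_TD_ARM_32` branch above exists because on 32-bit ARM builds `sendfile(2)` takes a narrower `long int *` offset than the 64-bit value the caller holds, hence the cast. The surrounding retry loop is the standard shape for `sendfile`; a minimal Linux-only sketch with a hypothetical `copyRange` helper:

```c
#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>
#include <sys/sendfile.h>

/* Copy `count` bytes from fdIn (starting at *offset) to fdOut.
 * Returns the bytes copied, or -1 on a non-retryable error. */
int64_t copyRange(int fdOut, int fdIn, off_t *offset, int64_t count) {
  int64_t left = count;
  while (left > 0) {
    ssize_t sent = sendfile(fdOut, fdIn, offset, (size_t)left);
    if (sent == -1) {
      if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
        continue; /* transient: retry, exactly like taosFSendFile does */
      }
      return -1;
    }
    if (sent == 0) break; /* EOF on the input file */
    left -= sent;
  }
  return count - left;
}
```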

View File

@ -35,7 +35,7 @@ typedef struct SCacheNode {
uint64_t addedTime; // the added time when this element is added or updated into cache
uint64_t lifespan; // life duration when this element should be remove from cache
int64_t expireTime; // expire time
uint64_t signature;
void* signature;
struct STrashElem *pTNodeHeader; // point to trash node head
uint16_t keyLen : 15; // max key size: 32kb
bool inTrashcan : 1; // denote if it is in trash or not
@ -208,7 +208,7 @@ static void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force);
* @param pNode data node
*/
static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheNode *pNode) {
if (pNode->signature != (uint64_t)pNode) {
if (pNode->signature != pNode) {
uError("key:%s, %p data is invalid, or has been released", pNode->key, pNode);
return;
}
@ -226,7 +226,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheNode *
}
static FORCE_INLINE STrashElem *doRemoveElemInTrashcan(SCacheObj *pCacheObj, STrashElem *pElem) {
if (pElem->pData->signature != (uint64_t)pElem->pData) {
if (pElem->pData->signature != pElem->pData) {
uWarn("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData);
return NULL;
}
@ -494,7 +494,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
if (pCacheObj == NULL || data == NULL) return NULL;
SCacheNode *ptNode = (SCacheNode *)((char *)data - sizeof(SCacheNode));
if (ptNode->signature != (uint64_t)ptNode) {
if (ptNode->signature != ptNode) {
uError("cache:%s, key: %p the data from cache is invalid", pCacheObj->name, ptNode);
return NULL;
}
@ -511,7 +511,7 @@ void *taosCacheTransferData(SCacheObj *pCacheObj, void **data) {
if (pCacheObj == NULL || data == NULL || (*data) == NULL) return NULL;
SCacheNode *ptNode = (SCacheNode *)((char *)(*data) - sizeof(SCacheNode));
if (ptNode->signature != (uint64_t)ptNode) {
if (ptNode->signature != ptNode) {
uError("cache:%s, key: %p the data from cache is invalid", pCacheObj->name, ptNode);
return NULL;
}
@ -539,7 +539,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
// It happens when there is only one object in the cache, and two threads that have referenced this object
// start to free it simultaneously [TD-1569].
SCacheNode *pNode = (SCacheNode *)((char *)(*data) - sizeof(SCacheNode));
if (pNode->signature != (uint64_t)pNode) {
if (pNode->signature != pNode) {
uError("cache:%s, %p, release invalid cache data", pCacheObj->name, pNode);
return;
}
@ -728,7 +728,7 @@ SCacheNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pDat
pNewNode->addedTime = (uint64_t)taosGetTimestampMs();
pNewNode->lifespan = duration;
pNewNode->expireTime = pNewNode->addedTime + pNewNode->lifespan;
pNewNode->signature = (uint64_t)pNewNode;
pNewNode->signature = pNewNode;
pNewNode->size = (uint32_t)sizeInBytes;
return pNewNode;
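
Every hunk in this file is the same one-line idea: a cache node stamps itself with its own address at creation, and each later access validates the stamp by direct pointer comparison instead of round-tripping through `uint64_t` (which is noisy on 32-bit builds and loses the type). The guard in miniature:

```c
#include <stdio.h>
#include <stdlib.h>

/* A node stamps itself with its own address; later accesses check the
 * stamp to catch released or foreign pointers (a best-effort guard). */
typedef struct Node {
  void *signature; /* == the node's own address while it is live */
  int   value;
} Node;

Node *nodeCreate(int value) {
  Node *n = malloc(sizeof(Node));
  if (n == NULL) return NULL;
  n->signature = n; /* self-pointer stamp */
  n->value = value;
  return n;
}

int nodeIsValid(const Node *n) { return n != NULL && n->signature == n; }

int main(void) {
  Node *n = nodeCreate(7);
  printf("valid: %d\n", nodeIsValid(n)); /* 1 */
  n->signature = NULL;                   /* simulate a release */
  printf("valid: %d\n", nodeIsValid(n)); /* 0 */
  free(n);
  return 0;
}
```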

View File

@ -614,6 +614,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_STAT, "Invalid rsma state"
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_QTASKINFO_CREATE, "Rsma qtaskinfo creation error")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty")
//index
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")

View File

@ -4,7 +4,7 @@ set -e
taosd >>/dev/null 2>&1 &
taosadapter >>/dev/null 2>&1 &
sleep 10
cd ../../docs/examples/go
go mod tidy

View File

@ -0,0 +1,47 @@
#!/bin/bash
set -e
taosd >>/dev/null 2>&1 &
taosadapter >>/dev/null 2>&1 &
sleep 10
cd ../../docs/examples/python
# 1
taos -s "create database if not exists log"
python3 connect_example.py
# 2
taos -s "drop database if exists power"
python3 native_insert_example.py
# 3
taos -s "drop database power"
python3 bind_param_example.py
# 4
taos -s "drop database power"
python3 multi_bind_example.py
# 5
python3 query_example.py
# 6
python3 async_query_example.py
# 7
taos -s "drop database if exists test"
python3 line_protocol_example.py
# 8
taos -s "drop database test"
python3 telnet_line_protocol_example.py
# 9
taos -s "drop database test"
python3 json_protocol_example.py
# 10
# python3 subscribe_demo.py

View File

@ -41,7 +41,7 @@ fi
cat ../script/jenkins/basic.txt |grep -v "^#"|grep -v "^$"|sed "s/^/,,script,/" >>$case_file
grep "^python" ../system-test/fulltest.sh |sed "s/^/,,system-test,/" >>$case_file
grep "^python" ../develop-test/fulltest.sh |sed "s/^/,,develop-test,/" >>$case_file
find ../docs-examples-test/ -name "*.sh" -printf '%f\n' | xargs -I {} echo ",,docs-examples-test,bash {}" >> $case_file
# tar source code for run.sh to use
# if [ $ent -eq 0 ]; then
# cd ../../../

View File

@ -50,12 +50,14 @@ if [ $ent -eq 0 ]; then
export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null
CONTAINER_TESTDIR=/home/TDengine
else
export PATH=$PATH:/home/TDinternal/debug/build/bin
export LD_LIBRARY_PATH=/home/TDinternal/debug/build/lib
ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
ln -s /home/TDinternal/community/include/client/taos.h /usr/include/taos.h 2>/dev/null
CONTAINER_TESTDIR=/home/TDinternal/community
fi
mkdir -p /var/lib/taos/subscribe

View File

@ -4,7 +4,7 @@ rm -rf /tmp/udf/libbitand.so /tmp/udf/libsqrsum.so
mkdir -p /tmp/udf
echo "compile udf bit_and and sqr_sum"
gcc -fPIC -shared sh/bit_and.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libbitand.so
gcc -fPIC -shared sh/sqr_sum.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libsqrsum.so
gcc -fPIC -shared sh/l2norm.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libl2norm.so
echo "debug show /tmp/udf/*.so"
ls /tmp/udf/*.so

View File

@ -5,22 +5,22 @@
#include "taosudf.h"
DLL_EXPORT int32_t sqr_sum_init() {
DLL_EXPORT int32_t l2norm_init() {
return 0;
}
DLL_EXPORT int32_t sqr_sum_destroy() {
DLL_EXPORT int32_t l2norm_destroy() {
return 0;
}
DLL_EXPORT int32_t sqr_sum_start(SUdfInterBuf *buf) {
DLL_EXPORT int32_t l2norm_start(SUdfInterBuf *buf) {
*(int64_t*)(buf->buf) = 0;
buf->bufLen = sizeof(double);
buf->numOfResult = 0;
return 0;
}
DLL_EXPORT int32_t sqr_sum(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
DLL_EXPORT int32_t l2norm(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
double sumSquares = *(double*)interBuf->buf;
int8_t numNotNull = 0;
for (int32_t i = 0; i < block->numOfCols; ++i) {
@ -67,7 +67,7 @@ DLL_EXPORT int32_t sqr_sum(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInt
return 0;
}
DLL_EXPORT int32_t sqr_sum_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) {
DLL_EXPORT int32_t l2norm_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) {
if (buf->numOfResult == 0) {
resultData->numOfResult = 0;
return 0;
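
The rename has to touch every symbol because TDengine resolves aggregate-UDF entry points by name: for a function `f`, the shared object must export `f_init`, `f_destroy`, `f_start`, `f`, and `f_finish`, as the hunks above show. A skeleton under a hypothetical `myagg` name, using only the `SUdfInterBuf` fields visible here (`buf`, `bufLen`, `numOfResult`); the per-block fold is elided:

```c
#include "taosudf.h"

DLL_EXPORT int32_t myagg_init(void) { return 0; }
DLL_EXPORT int32_t myagg_destroy(void) { return 0; }

/* Zero the running accumulator in the intermediate buffer. */
DLL_EXPORT int32_t myagg_start(SUdfInterBuf *buf) {
  *(double *)(buf->buf) = 0;
  buf->bufLen = sizeof(double);
  buf->numOfResult = 0;
  return 0;
}

/* Fold one data block into the accumulator (iteration elided). */
DLL_EXPORT int32_t myagg(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
  double acc = *(double *)interBuf->buf;
  /* ... walk block's columns and rows here, updating acc ... */
  *(double *)(newInterBuf->buf) = acc;
  newInterBuf->bufLen = sizeof(double);
  newInterBuf->numOfResult = 1;
  return 0;
}

/* Emit the final value, or no rows if nothing was accumulated. */
DLL_EXPORT int32_t myagg_finish(SUdfInterBuf *buf, SUdfInterBuf *resultData) {
  if (buf->numOfResult == 0) {
    resultData->numOfResult = 0;
    return 0;
  }
  *(double *)(resultData->buf) = *(double *)(buf->buf);
  resultData->bufLen = sizeof(double);
  resultData->numOfResult = 1;
  return 0;
}
```

It would be built and registered the same way the l2norm tests below do it: `gcc -fPIC -shared` into a `.so`, then `create aggregate function myagg as '/tmp/udf/libmyagg.so' outputtype double bufSize 8`.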

View File

@ -24,10 +24,10 @@ if $system_content == Windows_NT then
endi
if $system_content == Windows_NT then
sql create function bit_and as 'C:\\Windows\\Temp\\bitand.dll' outputtype int bufSize 8;
sql create aggregate function sqr_sum as 'C:\\Windows\\Temp\\sqrsum.dll' outputtype double bufSize 8;
sql create aggregate function l2norm as 'C:\\Windows\\Temp\\l2norm.dll' outputtype double bufSize 8;
else
sql create function bit_and as '/tmp/udf/libbitand.so' outputtype int bufSize 8;
sql create aggregate function sqr_sum as '/tmp/udf/libsqrsum.so' outputtype double bufSize 8;
sql create aggregate function l2norm as '/tmp/udf/libl2norm.so' outputtype double bufSize 8;
endi
sql show functions;
if $rows != 2 then
@ -44,7 +44,7 @@ if $data10 != 2 then
return -1
endi
sql select sqr_sum(f) from t;
sql select l2norm(f) from t;
if $rows != 1 then
print expect 1, actual $rows
return -1
@ -66,7 +66,7 @@ if $data10 != 1 then
return -1
endi
sql select sqr_sum(f1, f2) from t2;
sql select l2norm(f1, f2) from t2;
if $rows != 1 then
return -1
endi
@ -95,7 +95,7 @@ if $data30 != NULL then
return -1
endi
sql select sqr_sum(f1, f2) from t2;
sql select l2norm(f1, f2) from t2;
print $rows, $data00
if $rows != 1 then
return -1
@ -105,7 +105,7 @@ if $data00 != 2.645751311 then
endi
sql insert into t2 values(now+4s, 4, 8)(now+5s, 5, 9);
sql select sqr_sum(f1-f2), sqr_sum(f1+f2) from t2;
sql select l2norm(f1-f2), l2norm(f1+f2) from t2;
print $rows , $data00 , $data01
if $rows != 1 then
return -1;
@ -117,7 +117,7 @@ if $data01 != 18.547236991 then
return -1
endi
sql select sqr_sum(bit_and(f2, f1)), sqr_sum(bit_and(f1, f2)) from t2;
sql select l2norm(bit_and(f2, f1)), l2norm(bit_and(f1, f2)) from t2;
print $rows , $data00 , $data01
if $rows != 1 then
return -1
@ -129,7 +129,7 @@ if $data01 != 1.414213562 then
return -1
endi
sql select sqr_sum(f2) from udf.t2 group by 1-bit_and(f1, f2) order by 1-bit_and(f1,f2);
sql select l2norm(f2) from udf.t2 group by 1-bit_and(f1, f2) order by 1-bit_and(f1,f2);
print $rows , $data00 , $data10 , $data20
if $rows != 3 then
return -1
@ -149,10 +149,10 @@ sql show functions;
if $rows != 1 then
return -1
endi
if $data00 != @sqr_sum@ then
if $data00 != @l2norm@ then
return -1
endi
sql drop function sqr_sum;
sql drop function l2norm;
sql show functions;
if $rows != 0 then
return -1

View File

@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10);
sql insert into ct1 values(now+1s, 1);
sql insert into ct1 values(now+2s, 100);
print =============== wait maxdelay 15+1 seconds for results
sleep 16000
print =============== wait maxdelay 15+2 seconds for results
sleep 17000
print =============== select * from retention level 2 from memory
sql select * from ct1;

View File

@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10, 10.0);
sql insert into ct1 values(now+1s, 1, 1.0);
sql insert into ct1 values(now+2s, 100, 100.0);
print =============== wait maxdelay 5+1 seconds for results
sleep 6000
print =============== wait maxdelay 5+2 seconds for results
sleep 7000
print =============== select * from retention level 2 from memory
sql select * from ct1;
@ -135,8 +135,8 @@ print =============== insert after rsma qtaskinfo recovery
sql insert into ct1 values(now, 50, 500.0);
sql insert into ct1 values(now+1s, 40, 40.0);
print =============== wait maxdelay 5+1 seconds for results
sleep 6000
print =============== wait maxdelay 5+2 seconds for results
sleep 7000
print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery
sql select * from ct1;

View File

@ -187,7 +187,7 @@ class TDTestCase:
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, rsma=False, rsma_type="sum"):
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
tdLog.printNoPrefix("==========step: start insert data into tables now.....")
# from ...pytest.util.common import DataSet
data = DataSet()
data.get_order_set(rows)

View File

@ -6,13 +6,10 @@ import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())
self.vnode_disbutes = None
self.ts = 1537146000000
@ -31,60 +28,61 @@ class TDTestCase:
same_result = tdSql.queryResult
if spread_result !=same_result:
tdLog.exit(" max function work not as expected, sql : %s "% spread_sql)
tdLog.exit(f" max function work not as expected, sql : {spread_sql} ")
else:
tdLog.info(" max function work as expected, sql : %s "% spread_sql)
tdLog.info(f" max function work as expected, sql : {spread_sql} ")
def prepare_datas_of_distribute(self):
def prepare_datas_of_distribute(self, dbname="testdb"):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(f" use {dbname}")
tdSql.execute(
'''create table stb1
f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
f'''
create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
tbname = f"ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@ -100,11 +98,11 @@ class TDTestCase:
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
tdLog.info(f" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
tdSql.query("show vgroups ")
tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@ -112,9 +110,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check sub_tables per vnode, make sure sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@ -126,9 +123,9 @@ class TDTestCase:
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ")
def check_spread_distribute_diff_vnode(self,col_name):
def check_spread_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@ -142,13 +139,13 @@ class TDTestCase:
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
tbname_ins = ""
for tbname in distribute_tbnames:
tbname_ins += "'%s' ,"%tbname
tbname_ins += f"'{tbname}' ,"
tbname_filters = tbname_ins[:-1]
spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})"
spread_sql = f"select spread({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})"
same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})"
same_sql = f"select max({col_name}) - min({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})"
tdSql.query(spread_sql)
spread_result = tdSql.queryResult
@ -157,20 +154,20 @@ class TDTestCase:
same_result = tdSql.queryResult
if spread_result !=same_result:
tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql)
tdLog.exit(f" spread function work not as expected, sql : {spread_sql} ")
else:
tdLog.info(" spread function work as expected, sql : %s "% spread_sql)
tdLog.info(f" spread function work as expected, sql : {spread_sql} ")
def check_spread_status(self):
def check_spread_status(self, dbname="testdb"):
# check max function work status
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tablenames.append(f"{dbname}.{table_name[0]}")
tdSql.query("desc stb1")
tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@ -185,80 +182,76 @@ class TDTestCase:
# check max function for different vnode
for colname in colnames:
if colname.startswith("c"):
if colname.startswith(f"c"):
self.check_spread_distribute_diff_vnode(colname)
else:
# self.check_spread_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
def distribute_agg_query(self, dbname="testdb"):
# basic filter
tdSql.query("select spread(c1) from stb1 where c1 is null")
tdSql.query(f"select spread(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkRows(1)
tdSql.query("select spread(c1) from stb1 where t1=1")
tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,8.000000000)
tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ")
tdSql.query(f"select spread(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,0.000000000)
tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"")
tdSql.query(f"select spread(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,8.000000000)
tdSql.query("select spread(c1) from stb1 partition by tbname")
tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname")
tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ")
tdSql.query(f"select spread(c1) from {dbname}.stb1 union all select max(c1)-min(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,28.000000000)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
tdSql.execute(f" create database if not exists db ")
tdSql.execute(f" use db ")
tdSql.execute(f" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(f" create table db.tb1 using db.st tags(1) ")
tdSql.execute(f" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.query(f"select spread(tb1.c1), spread(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,9.000000000)
tdSql.checkData(0,0,9.00000)
# group by
tdSql.execute(" use testdb ")
tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
tdSql.execute(f" use {dbname} ")
tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by t1 ")
tdSql.checkRows(20)
tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by c1 ")
tdSql.checkRows(30)
tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
tdSql.query(f" select max(c1),c2 from {dbname}.stb1 group by c2 ")
tdSql.checkRows(31)
# partition by tbname or partition by tag
tdSql.query("select spread(c1) from stb1 partition by tbname")
tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname")
query_data = tdSql.queryResult
# nest query for support max
tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)")
tdSql.query(f"select spread(c2+2)+1 from (select max(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,1.000000000)
tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.query(f"select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,29.000000000)
tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.query(f"select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,29.000000000)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1")
tdSql.query(f"select max(c1),count(c1),last(c2,c3),spread(c1) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)
@ -275,7 +268,7 @@ class TDTestCase:
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdLog.success(f"{__file__} successfully executed")
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -7,10 +7,7 @@ import platform
import math
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@ -45,55 +42,56 @@ class TDTestCase:
else:
tdLog.exit(" sql:%s; row:0 col:0 data:%d , expect:%d"%(stddev_sql,tdSql.queryResult[0][0],stddev_result))
def prepare_datas_of_distribute(self):
def prepare_datas_of_distribute(self, dbname="testdb"):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(f" use {dbname}")
tdSql.execute(
'''create table stb1
f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
f'''
create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
tbname = f"ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@ -109,11 +107,11 @@ class TDTestCase:
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
tdLog.info(f" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
tdSql.query("show vgroups ")
tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@ -121,9 +119,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check sub_tables per vnode, make sure sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@ -135,9 +132,9 @@ class TDTestCase:
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ")
def check_stddev_distribute_diff_vnode(self,col_name):
def check_stddev_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@ -155,9 +152,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
stddev_sql = f"select stddev({col_name}) from stb1 where tbname in ({tbname_filters});"
stddev_sql = f"select stddev({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
@ -175,17 +172,16 @@ class TDTestCase:
tdSql.query(stddev_sql)
tdSql.checkData(0,0,stddev_result)
def check_stddev_status(self):
def check_stddev_status(self, dbname="testdb"):
# check max function work status
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tablenames.append(f"{dbname}.{table_name[0]}")
tdSql.query("desc stb1")
tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@ -197,50 +193,42 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
self.check_stddev_functions(tablename,colname)
else:
# self.check_stddev_functions(tablename,colname)
pass
# check max function for different vnode
for colname in colnames:
if colname.startswith("c"):
self.check_stddev_distribute_diff_vnode(colname)
else:
# self.check_stddev_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
def distribute_agg_query(self, dbname="testdb"):
# basic filter
tdSql.query(" select stddev(c1) from stb1 ")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,6.694663959)
tdSql.query(" select stddev(a) from (select stddev(c1) a from stb1 partition by tbname) ")
tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 partition by tbname) ")
tdSql.checkData(0,0,0.156797505)
tdSql.query(" select stddev(c1) from stb1 where t1=1")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,2.581988897)
tdSql.query("select stddev(c1+c2) from stb1 where c1 =1 ")
tdSql.query(f"select stddev(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,0.000000000)
tdSql.query("select stddev(c1) from stb1 where tbname=\"ct2\"")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,2.581988897)
tdSql.query("select stddev(c1) from stb1 partition by tbname")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select stddev(c1) from stb1 where t1> 4 partition by tbname")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select stddev(c1) from stb1 union all select stddev(c1) from stb1 ")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 union all select stddev(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,6.694663959)
tdSql.query("select stddev(a) from (select stddev(c1) a from stb1 union all select stddev(c1) a from stb1)")
tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 union all select stddev(c1) a from {dbname}.stb1)")
tdSql.checkRows(1)
tdSql.checkData(0,0,0.000000000)
@ -248,38 +236,38 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table db.tb1 using db.st tags(1) ")
tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,2.872281323)
tdSql.checkData(0,1,2.872281323)
# group by
tdSql.execute(" use testdb ")
tdSql.execute(f" use {dbname} ")
# partition by tbname or partition by tag
tdSql.query("select stddev(c1) from stb1 partition by tbname")
tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
# nest query for support max
tdSql.query("select stddev(c2+2)+1 from (select stddev(c1) c2 from stb1)")
tdSql.query(f"select stddev(c2+2)+1 from (select stddev(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,1.000000000)
tdSql.query("select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.query(f"select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,6.694663959)
tdSql.query("select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.query(f"select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,6.694663959)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from stb1")
tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)

View File

@ -7,10 +7,7 @@ import platform
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@ -34,55 +31,56 @@ class TDTestCase:
tdSql.query(sum_sql)
tdSql.checkData(0,0,pre_sum)
def prepare_datas_of_distribute(self):
def prepare_datas_of_distribute(self, dbname="testdb"):
# prepare data for 20 tables distributed across different vgroups
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
tdSql.execute(" use testdb ")
tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(f" use {dbname}")
tdSql.execute(
'''create table stb1
f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
f'''
create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
tbname = "ct"+f'{i}'
tbname = f"ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@ -98,11 +96,11 @@ class TDTestCase:
'''
)
tdLog.info(" prepare data for distributed_aggregate done! ")
tdLog.info(f" prepare data for distributed_aggregate done! ")
def check_distribute_datas(self):
def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
tdSql.query("show vgroups ")
tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@ -110,9 +108,8 @@ class TDTestCase:
for vgroup_id in vgroups:
vnode_tables[vgroup_id[0]]=[]
# check sub_tables per vnode, make sure sub_tables have been distributed
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@ -124,9 +121,9 @@ class TDTestCase:
if len(v)>=2:
count+=1
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ")
def check_sum_distribute_diff_vnode(self,col_name):
def check_sum_distribute_diff_vnode(self,col_name, dbname="testdb"):
vgroup_ids = []
for k ,v in self.vnode_disbutes.items():
@ -144,9 +141,9 @@ class TDTestCase:
tbname_filters = tbname_ins[:-1]
sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});"
sum_sql = f"select sum({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});"
same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
tdSql.query(same_sql)
pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
@ -157,16 +154,16 @@ class TDTestCase:
tdSql.query(sum_sql)
tdSql.checkData(0,0,pre_sum)
def check_sum_status(self):
def check_sum_status(self, dbname="testdb"):
# check max function work status
tdSql.query("show tables like 'ct%'")
tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
tablenames.append(f"{dbname}.{table_name[0]}")
tdSql.query("desc stb1")
tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@ -183,79 +180,75 @@ class TDTestCase:
for colname in colnames:
if colname.startswith("c"):
self.check_sum_distribute_diff_vnode(colname)
else:
# self.check_sum_distribute_diff_vnode(colname) # bug for tag
pass
def distribute_agg_query(self):
def distribute_agg_query(self, dbname="testdb"):
# basic filter
tdSql.query(" select sum(c1) from stb1 ")
tdSql.query(f"select sum(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,2592)
tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ")
tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 partition by tbname) ")
tdSql.checkData(0,0,2592)
tdSql.query(" select sum(c1) from stb1 where t1=1")
tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1=1")
tdSql.checkData(0,0,54)
tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ")
tdSql.query(f"select sum(c1+c2) from {dbname}.stb1 where c1 =1 ")
tdSql.checkData(0,0,22224.000000000)
tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"")
tdSql.query(f"select sum(c1) from {dbname}.stb1 where tbname=\"ct2\"")
tdSql.checkData(0,0,54)
tdSql.query("select sum(c1) from stb1 partition by tbname")
tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname")
tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1> 4 partition by tbname")
tdSql.checkRows(15)
# union all
tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
tdSql.query(f"select sum(c1) from {dbname}.stb1 union all select sum(c1) from {dbname}.stb1 ")
tdSql.checkRows(2)
tdSql.checkData(0,0,2592)
tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)")
tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 union all select sum(c1) a from {dbname}.stb1)")
tdSql.checkRows(1)
tdSql.checkData(0,0,5184)
# join
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table tb1 using st tags(1) ")
tdSql.execute(" create table tb2 using st tags(2) ")
tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
tdSql.execute(" create table db.tb1 using db.st tags(1) ")
tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
tdSql.query("select sum(tb1.c1), sum(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
tdSql.checkRows(1)
tdSql.checkData(0,0,45)
tdSql.checkData(0,1,45.000000000)
# group by
tdSql.execute(" use testdb ")
tdSql.execute(f"use {dbname} ")
# partition by tbname or partition by tag
tdSql.query("select sum(c1) from stb1 partition by tbname")
tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
# nest query for support max
tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)")
tdSql.query(f"select abs(c2+2)+1 from (select sum(c1) c2 from {dbname}.stb1)")
tdSql.checkData(0,0,2595.000000000)
tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
tdSql.query(f"select sum(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,2960.000000000)
tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
tdSql.query(f"select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,2960.000000000)
# mixup with other functions
tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1")
tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2) from {dbname}.stb1")
tdSql.checkData(0,0,28)
tdSql.checkData(0,1,184)
tdSql.checkData(0,2,-99999)

View File

@ -90,6 +90,12 @@ python3 ./test.py -f 2-query/distribute_agg_max.py
python3 ./test.py -f 2-query/distribute_agg_max.py -R
python3 ./test.py -f 2-query/distribute_agg_min.py
python3 ./test.py -f 2-query/distribute_agg_min.py -R
python3 ./test.py -f 2-query/distribute_agg_spread.py
python3 ./test.py -f 2-query/distribute_agg_spread.py -R
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py -R
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_sum.py -R
@ -156,9 +162,6 @@ python3 ./test.py -f 2-query/function_stateduration.py
python3 ./test.py -f 2-query/statecount.py
python3 ./test.py -f 2-query/tail.py
python3 ./test.py -f 2-query/ttl_comment.py
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_spread.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/function_null.py