Merge branch '3.0' into 3.0test/jcy

jiacy-jcy 2022-08-11 16:29:40 +08:00
commit ed51c1a565
41 changed files with 994 additions and 382 deletions

View File

@ -38,40 +38,21 @@ def pre_test(){
sh ''' sh '''
cd ${WK} cd ${WK}
git reset --hard git reset --hard
git remote prune origin
git fetch
cd ${WKC} cd ${WKC}
git reset --hard git reset --hard
git clean -fxd git clean -fxd
git remote prune origin
git fetch
''' '''
script { script {
if (env.CHANGE_TARGET == 'master') {
sh ''' sh '''
cd ${WK} cd ${WK}
git checkout master git checkout ''' + env.CHANGE_TARGET + '''
cd ${WKC} cd ${WKC}
git checkout master git checkout ''' + env.CHANGE_TARGET + '''
''' '''
} else if(env.CHANGE_TARGET == '2.0') {
sh '''
cd ${WK}
git checkout 2.0
cd ${WKC}
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
sh '''
cd ${WK}
git checkout 3.0
cd ${WKC}
git checkout 3.0
'''
} else {
sh '''
cd ${WK}
git checkout develop
cd ${WKC}
git checkout develop
'''
}
} }
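For illustration, the consolidated checkout step above generates the same shell commands for any target branch; a sketch of what it runs when CHANGE_TARGET happens to be 3.0 (the branch value here is only an example):

```bash
# Sketch of the generated shell: ${WK} and ${WKC} are the workspace paths already
# used in this Jenkinsfile, and "3.0" stands in for env.CHANGE_TARGET.
cd ${WK}
git checkout 3.0
cd ${WKC}
git checkout 3.0
```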
if (env.CHANGE_URL =~ /\/TDengine\//) { if (env.CHANGE_URL =~ /\/TDengine\//) {
sh ''' sh '''
@ -169,49 +150,24 @@ def pre_test_win(){
bat ''' bat '''
cd %WIN_INTERNAL_ROOT% cd %WIN_INTERNAL_ROOT%
git reset --hard git reset --hard
git remote prune origin
git fetch
''' '''
bat ''' bat '''
cd %WIN_COMMUNITY_ROOT% cd %WIN_COMMUNITY_ROOT%
git reset --hard git reset --hard
git remote prune origin
git fetch
''' '''
script { script {
if (env.CHANGE_TARGET == 'master') {
bat ''' bat '''
cd %WIN_INTERNAL_ROOT% cd %WIN_INTERNAL_ROOT%
git checkout master git checkout ''' + env.CHANGE_TARGET + '''
''' '''
bat ''' bat '''
cd %WIN_COMMUNITY_ROOT% cd %WIN_COMMUNITY_ROOT%
git checkout master git checkout ''' + env.CHANGE_TARGET + '''
''' '''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout 3.0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout 3.0
'''
} else {
bat '''
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
} }
script { script {
if (env.CHANGE_URL =~ /\/TDengine\//) { if (env.CHANGE_URL =~ /\/TDengine\//) {

View File

@ -188,7 +188,7 @@ apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true cmake .. -DJEMALLOC_ENABLED=true
``` ```
On X86-64, X86, arm64, arm32, and mips64 platforms, the TDengine build script can detect the machine architecture automatically. You can also set the CPUTYPE parameter manually to specify the CPU type, such as aarch64 or aarch32. On X86-64, X86, and arm64 platforms, the TDengine build script can detect the machine architecture automatically. You can also set the CPUTYPE parameter manually to specify the CPU type, such as aarch64.
aarch64 aarch64
@ -196,18 +196,6 @@ aarch64
cmake .. -DCPUTYPE=aarch64 && cmake --build . cmake .. -DCPUTYPE=aarch64 && cmake --build .
``` ```
aarch32
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### Windows
If you are using Visual Studio 2013:

View File

@ -205,8 +205,8 @@ apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true cmake .. -DJEMALLOC_ENABLED=true
``` ```
The TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32, and mips64 platforms. The TDengine build script can detect the host machine's architecture on X86-64, X86, and arm64 platforms.
You can also specify the CPUTYPE option, such as aarch64 or aarch32, if the detection result is not correct: You can also specify the CPUTYPE option, such as aarch64, if the detection result is not correct:
aarch64: aarch64:
@ -214,18 +214,6 @@ aarch64:
cmake .. -DCPUTYPE=aarch64 && cmake --build . cmake .. -DCPUTYPE=aarch64 && cmake --build .
``` ```
aarch32:
```bash
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### On Windows platform ### On Windows platform
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".

View File

@ -22,7 +22,9 @@ ELSEIF (TD_WINDOWS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
IF (BUILD_TOOLS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
ENDIF ()
IF (TD_MVN_INSTALLED) IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)
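As a hedged aside, the `BUILD_TOOLS` switch guarding the taosBenchmark install above would typically be set when configuring the build; a minimal sketch (the flag name comes from the snippet above, the value and invocation are assumptions):

```bash
# Configure with tool building enabled so taosBenchmark.exe is produced and installed.
# BUILD_TOOLS appears in the CMake snippet above; enabling it this way is an assumption.
cmake .. -DBUILD_TOOLS=true
cmake --build .
```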

View File

@ -2,7 +2,7 @@
# taos-tools # taos-tools
ExternalProject_Add(taos-tools ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 2d68404 GIT_TAG 57bdfbf
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR "" BINARY_DIR ""
#BUILD_IN_SOURCE TRUE #BUILD_IN_SOURCE TRUE

View File

@ -17,7 +17,6 @@ Currently, TDengine's native interface connectors can support platforms such as
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● | | **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● | | **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● | | **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **ARM32** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **MIPS** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ | | **MIPS** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |
Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance. Where ● means the official test verification passed, ○ means the unofficial test verification passed, -- means no assurance.

View File

@ -19,15 +19,15 @@ TDengine's connector can support a wide range of platforms, including X64/X86/AR
The comparison matrix is as follows. The comparison matrix is as follows.
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS** | **Alpha** | | **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **MIPS** | **Alpha** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------- | --------- | | ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | --------- |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | | **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | | **C/C++** | ● | ● | ● | ○ | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | | **JDBC** | ● | ● | ● | ○ | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | | **Python** | ● | ● | ● | ○ | ● | ● | -- |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | | **Go** | ● | ● | ● | ○ | ● | ○ | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | | **NodeJs** | ● | ● | ○ | ○ | ● | ○ | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | | **C#** | ● | ● | ○ | ○ | ○ | ○ | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | | **RESTful** | ● | ● | ● | ● | ● | ● | ● |
Note: ● means verified by official testing, ○ means verified by unofficial testing, and -- means not verified.

View File

@ -4,7 +4,7 @@ sidebar_label: 文档首页
slug: / slug: /
--- ---
TDengine是一款开源、[高性能](https://www.taosdata.com/fast)、云原生的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。 TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
TDengine 充分利用了时序数据的特点提出了“一个数据采集点一张表”与“超级表”的概念设计了创新的存储引擎让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。 TDengine 充分利用了时序数据的特点提出了“一个数据采集点一张表”与“超级表”的概念设计了创新的存储引擎让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。

View File

@ -3,7 +3,7 @@ title: 产品简介
toc_max_heading_level: 2 toc_max_heading_level: 2
--- ---
TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。 TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)[高性能](https://www.taosdata.com/tdengine/fast)[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外TDengine 还提供[缓存](/develop/cache/)、[数据订阅](/develop/subscribe)、[流式计算](/develop/continuous-query)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等让大家对TDengine有个整体的了解。 本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等让大家对TDengine有个整体的了解。
@ -33,17 +33,17 @@ TDengine的主要功能如下
由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html)比如结构化、无需事务、很少删除或更新、写多读少等等设计了全新的针对时序数据的存储引擎和计算引擎因此与其他时序数据库相比TDengine 有以下特点: 由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html)比如结构化、无需事务、很少删除或更新、写多读少等等设计了全新的针对时序数据的存储引擎和计算引擎因此与其他时序数据库相比TDengine 有以下特点:
- **高性能**通过创新的存储引擎设计无论是数据写入还是查询TDengine 的性能比通用数据库快 10 倍以上也远超其他时序数据库存储空间不及通用数据库的1/10。 - **[高性能](https://www.taosdata.com/tdengine/fast)**通过创新的存储引擎设计无论是数据写入还是查询TDengine 的性能比通用数据库快 10 倍以上也远超其他时序数据库存储空间不及通用数据库的1/10。
- **云原生**通过原生分布式的设计充分利用云平台的优势TDengine 提供了水平扩展能力具备弹性、韧性和可观测性支持k8s部署可运行在公有云、私有云和混合云上。 - **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**通过原生分布式的设计充分利用云平台的优势TDengine 提供了水平扩展能力具备弹性、韧性和可观测性支持k8s部署可运行在公有云、私有云和混合云上。
- **极简时序数据平台**TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。 - **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
- **分析能力**:支持 SQL同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术TDengine 具备强大的分析能力。 - **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术TDengine 具备强大的分析能力。
- **简单易用**无任何依赖安装、集群几秒搞定提供REST以及各种语言连接器与众多第三方工具无缝集成提供命令行程序便于管理和即席查询提供各种运维工具。 - **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**无任何依赖安装、集群几秒搞定提供REST以及各种语言连接器与众多第三方工具无缝集成提供命令行程序便于管理和即席查询提供各种运维工具。
- **核心开源**TDengine 的核心代码包括集群功能全部开源截止到2022年8月1日全球超过 135.9k 个运行实例GitHub Star 18.7kFork 4.4k,社区活跃。 - **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**TDengine 的核心代码包括集群功能全部开源截止到2022年8月1日全球超过 135.9k 个运行实例GitHub Star 18.7kFork 4.4k,社区活跃。
采用 TDengine可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面 采用 TDengine可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面

View File

@ -47,9 +47,7 @@ Docker version 20.10.3, build 48d30b5
## Run the TDengine CLI
There are two ways to access TDengine with the TDengine CLI (taos) in a Docker environment. Enter the container and run taos
- Enter the container, then run taos
- Access from the host through the port mapped from the container: `taos -h <hostname> -P <port>`
``` ```
$ taos $ taos
@ -62,47 +60,11 @@ taos>
``` ```
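A minimal sketch of entering a running container to reach the CLI, assuming the container was started with the name `tdengine` (the container name is an assumption, not taken from this page):

```bash
# Hypothetical container name "tdengine"; replace it with the name used at docker run time.
docker exec -it tdengine taos
```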
## Access the REST interface
taosAdapter is the component that provides the REST service in TDengine. The command below starts both the `taosd` and `taosadapter` services in the container. By default the Docker image starts the TDengine daemon taosd together with taosAdapter.
You can use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
Sample output:
```
{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["wal","TINYINT",1],["fsync","INT",4],["comp","TINYINT",1],["cacheModel","VARCHAR",11],["precision","VARCHAR",2],["single_stable","BOOL",1],["status","VARCHAR",10],["retention","VARCHAR",60]],"data":[["information_schema",null,null,14,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"ready"],["performance_schema",null,null,3,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,"ready"]],"rows":2}
```
This command accesses the TDengine server through the REST API, connecting to port 6041 mapped from the container to the host.
For details on the TDengine REST API, see the [official documentation](/reference/rest-api/).
## Start the REST service separately
To start only `taosadapter`:
```bash
docker run -d --network=host --name tdengine-taosa -e TAOS_FIRST_EP=tdengine-taosd tdengine/tdengine:3.0.0.0 taosadapter
```
To start only `taosd`:
```bash
docker run -d --network=host --name tdengine-taosd -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:3.0.0.0
```
Note: the command lines above deploy taosAdapter separately with the container using host networking. For other network access modes, set the hostname, DNS, and other required network configuration.
## Insert data
You can use taosBenchmark, a tool bundled with TDengine, to quickly experience data ingestion into TDengine.
Assuming port 6030 of the container was mapped to port 6030 on the host when the container was started, you can run taosBenchmark directly from the host command line, or enter the container and run it there. Enter the container and start taosBenchmark
```bash ```bash
$ taosBenchmark $ taosBenchmark
@ -117,7 +79,7 @@ docker run -d --network=host --name tdengine-taosd -e TAOS_DISABLE_ADAPTER=true
## Run queries
After inserting data with taosBenchmark as described above, you can enter query statements in the TDengine CLI to experience the query performance, either directly on the host or inside the container. After inserting data with taosBenchmark as described above, you can enter query statements in the TDengine CLI to experience the query performance.
Query the total number of records under the supertable:
@ -148,3 +110,7 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group
```sql ```sql
taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
``` ```
## Other
For more details on using TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker)

View File

@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem";
::: :::
The TDengine open-source edition provides installation packages in deb and rpm formats; choose the package that matches your environment. The deb package supports Debian, Ubuntu, and derivatives, and the rpm package supports CentOS, RHEL, SUSE, and derivatives. A tar.gz package is also provided for enterprise users, and online installation through `apt-get` is supported as well.
## Installation
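Purely as an illustration (the package file names below are hypothetical and depend on the release you download), installing a local package might look like this:

```bash
# deb package on Debian/Ubuntu and derivatives (hypothetical file name)
sudo dpkg -i TDengine-server-3.0.0.0-Linux-x64.deb

# rpm package on CentOS/RHEL/SUSE and derivatives (hypothetical file name)
sudo rpm -ivh TDengine-server-3.0.0.0-Linux-x64.rpm
```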

View File

@ -1,6 +1,6 @@
--- ---
sidebar_label: 元数据 sidebar_label: 元数据
title: 元数据库 title: 存储数据的 Information_Schema 数据库
--- ---
TDengine includes a built-in database named `INFORMATION_SCHEMA`, which provides access to database metadata and to system information and status, such as the names of databases and tables or the SQL statements currently being executed. It stores information about all other databases maintained by TDengine and contains a number of read-only tables. These tables are actually views rather than base tables, so no files are associated with them; they can only be queried, and write operations such as INSERT are not allowed. The `INFORMATION_SCHEMA` database is intended to provide, in a more consistent way, the information exposed by the various SHOW statements supported by TDengine (such as SHOW TABLES and SHOW DATABASES). Compared with SHOW statements, using SELECT ... FROM INFORMATION_SCHEMA.tablename has the following advantages:
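For orientation, a brief sketch of such a query issued from the shell; the table name `ins_databases` and the selected columns are assumptions (loosely based on the SHOW DATABASES output elsewhere on this page) and may differ from the actual schema:

```bash
# Query INFORMATION_SCHEMA instead of running SHOW DATABASES.
# The table name ins_databases and the column list are assumptions for illustration.
taos -s "SELECT name, ntables, vgroups FROM information_schema.ins_databases;"
```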

View File

@ -1,9 +1,9 @@
--- ---
sidebar_label: 性能数据库 sidebar_label: 统计数据
title: 性能数据库 title: 存储统计数据的 Performance_Schema 数据库
--- ---
Starting with version 3.0, TDengine provides a built-in database named `performance_schema`, which stores performance-related statistics. This section describes its tables and their structure in detail.
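A hedged sketch of reading one of these tables from the shell; the table name `perf_app` is inferred from the PERF_APP section heading below and may not match the actual name:

```bash
# List application statistics from performance_schema.
# The table name perf_app is an assumption taken from the PERF_APP heading.
taos -s "SELECT * FROM performance_schema.perf_app;"
```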
## PERF_APP ## PERF_APP

View File

@ -17,7 +17,6 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
| **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● | | **X86 64bit** | **Win32** | ● | ● | ● | ● | ○ | ○ | ● |
| **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● | | **X86 32bit** | **Win32** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● | | **ARM64** | **Linux** | ● | ● | ● | ● | ○ | ○ | ● |
| **ARM32** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ● |
| **MIPS 龙芯** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ | | **MIPS 龙芯** | **Linux** | ○ | ○ | ○ | ○ | ○ | ○ | ○ |
| **Alpha 申威** | **Linux** | ○ | ○ | -- | -- | -- | -- | ○ | | **Alpha 申威** | **Linux** | ○ | ○ | -- | -- | -- | -- | ○ |
| **X86 海光** | **Linux** | ○ | ○ | ○ | -- | -- | -- | ○ | | **X86 海光** | **Linux** | ○ | ○ | ○ | -- | -- | -- | ○ |
@ -49,7 +48,6 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- | | -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **连续查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| ** TMQ ** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | ** TMQ ** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |

View File

@ -19,15 +19,15 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
The comparison matrix is as follows:
| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** | | **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** |
| ----------- | ------------- | --------- | --------- | ------------- | --------- | --------- | ------------- | -------------- | ------------ | | ----------- | ------------- | --------- | --------- | ------------- | --------- | ------------- | -------------- | ------------ |
| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | | **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** |
| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | | **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● |
| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | | **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● |
| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● | | **Python** | ● | ● | ● | ○ | ● | ● | -- | ● |
| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | | **Go** | ● | ● | ● | ○ | ● | ○ | -- | -- |
| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | | **NodeJs** | ● | ● | ○ | ○ | ● | ○ | -- | -- |
| **C#** | ● | ● | ○ | ○ | ○ | ○ | ○ | -- | -- | | **C#** | ● | ● | ○ | ○ | ○ | ○ | -- | -- |
| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● | | **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● |
Note: ● means verified by official testing, ○ means verified by unofficial testing, and -- means not verified.

View File

@ -268,6 +268,15 @@ typedef struct SSortExecInfo {
int32_t readBytes; // read io bytes int32_t readBytes; // read io bytes
} SSortExecInfo; } SSortExecInfo;
// stream special block column
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX 3
#define CALCULATE_START_TS_COLUMN_INDEX 4
#define CALCULATE_END_TS_COLUMN_INDEX 5
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -34,6 +34,8 @@ typedef struct SStreamTask SStreamTask;
enum { enum {
STREAM_STATUS__NORMAL = 0, STREAM_STATUS__NORMAL = 0,
STREAM_STATUS__STOP,
STREAM_STATUS__FAILED,
STREAM_STATUS__RECOVER, STREAM_STATUS__RECOVER,
}; };

View File

@ -194,6 +194,9 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || :
${csudo}rm -f ${bin_link_dir}/${adapterName} || : ${csudo}rm -f ${bin_link_dir}/${adapterName} || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || : ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/${demoName} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
@ -205,7 +208,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || : [ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || : [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || : [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
[ -x ${install_main_dir}/bin/${tmqName} ] && ${csudo}ln -s ${install_main_dir}/bin/${tmqName} ${bin_link_dir}/${tmqName} || :
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || : [ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
@ -964,6 +966,10 @@ function installProduct() {
## ==============================Main program starts from here============================ ## ==============================Main program starts from here============================
serverFqdn=$(hostname) serverFqdn=$(hostname)
if [ "$verType" == "server" ]; then if [ "$verType" == "server" ]; then
# Check default 2.x data file.
if [ -x ${data_dir}/dnode/dnodeCfg.json ]; then
echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m"
else
# Install server and client # Install server and client
if [ -x ${bin_dir}/${serverName} ]; then if [ -x ${bin_dir}/${serverName} ]; then
update_flag=1 update_flag=1
@ -971,6 +977,7 @@ if [ "$verType" == "server" ]; then
else else
installProduct installProduct
fi fi
fi
elif [ "$verType" == "client" ]; then elif [ "$verType" == "client" ]; then
interactiveFqdn=no interactiveFqdn=no
# Only install client # Only install client

View File

@ -135,12 +135,12 @@ static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
}; };
static const SSysDbTableSchema userTblsSchema[] = { static const SSysDbTableSchema userTblsSchema[] = {

View File

@ -53,14 +53,27 @@ static bool dmCheckDiskSpace() {
osUpdate(); osUpdate();
if (!osDataSpaceAvailable()) { if (!osDataSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least at least , quit", (double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0); dError("free disk size: %f GB, too little, require %f GB at least at least , quit", (double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false; return false;
} }
if (!osLogSpaceAvailable()) { if (!osLogSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least at least, quit", (double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0); dError("free disk size: %f GB, too little, require %f GB at least at least, quit", (double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false; return false;
} }
if (!osTempSpaceAvailable()) { if (!osTempSpaceAvailable()) {
dError("free disk size: %f GB, too little, require %f GB at least at least, quit", (double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0); dError("free disk size: %f GB, too little, require %f GB at least at least, quit", (double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0, (double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0);
terrno = TSDB_CODE_NO_AVAIL_DISK;
return false;
}
return true;
}
static bool dmCheckDataDirVersion() {
char checkDataDirJsonFileName[PATH_MAX];
snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir);
if (taosCheckExistFile(checkDataDirJsonFileName)) {
dError("The default data directory %s contains old data of tdengine 2.x, please clear it before running!", tsDataDir);
return false; return false;
} }
return true; return true;
@ -68,6 +81,7 @@ static bool dmCheckDiskSpace() {
int32_t dmInit(int8_t rtype) { int32_t dmInit(int8_t rtype) {
dInfo("start to init dnode env"); dInfo("start to init dnode env");
if (!dmCheckDataDirVersion()) return -1;
if (!dmCheckDiskSpace()) return -1; if (!dmCheckDiskSpace()) return -1;
if (dmCheckRepeatInit(dmInstance()) != 0) return -1; if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
if (dmInitSystem() != 0) return -1; if (dmInitSystem() != 0) return -1;

View File

@ -197,6 +197,30 @@ void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream) {
sdbRelease(pSdb, pStream); sdbRelease(pSdb, pStream);
} }
static void mndShowStreamStatus(char *dst, SStreamObj *pStream) {
int8_t status = atomic_load_8(&pStream->status);
if (status == STREAM_STATUS__NORMAL) {
strcpy(dst, "normal");
} else if (status == STREAM_STATUS__STOP) {
strcpy(dst, "stop");
} else if (status == STREAM_STATUS__FAILED) {
strcpy(dst, "failed");
} else if (status == STREAM_STATUS__RECOVER) {
strcpy(dst, "recover");
}
}
static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) {
int8_t trigger = pStream->trigger;
if (trigger == STREAM_TRIGGER_AT_ONCE) {
strcpy(dst, "at once");
} else if (trigger == STREAM_TRIGGER_WINDOW_CLOSE) {
strcpy(dst, "window close");
} else if (trigger == STREAM_TRIGGER_MAX_DELAY) {
strcpy(dst, "max delay");
}
}
static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) { static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) {
if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 || if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 ||
pCreate->targetStbFullName[0] == 0) { pCreate->targetStbFullName[0] == 0) {
@ -926,8 +950,11 @@ static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)sql, false); colDataAppend(pColInfo, numOfRows, (const char *)sql, false);
char status[20 + VARSTR_HEADER_SIZE] = {0};
mndShowStreamStatus(&status[VARSTR_HEADER_SIZE], pStream);
varDataSetLen(status, strlen(varDataVal(status)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->status, true); colDataAppend(pColInfo, numOfRows, (const char *)&status, false);
char sourceDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; char sourceDB[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&n, pStream->sourceDb, T_NAME_ACCT | T_NAME_DB); tNameFromString(&n, pStream->sourceDb, T_NAME_ACCT | T_NAME_DB);
@ -958,8 +985,11 @@ static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->watermark, false); colDataAppend(pColInfo, numOfRows, (const char *)&pStream->watermark, false);
char trigger[20 + VARSTR_HEADER_SIZE] = {0};
mndShowStreamTrigger(&trigger[VARSTR_HEADER_SIZE], pStream);
varDataSetLen(trigger, strlen(varDataVal(trigger)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pStream->trigger, false); colDataAppend(pColInfo, numOfRows, (const char *)&trigger, false);
numOfRows++; numOfRows++;
sdbRelease(pSdb, pStream); sdbRelease(pSdb, pStream);

View File

@ -171,7 +171,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list); int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid, SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq); const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq);
// sma // sma

View File

@ -201,7 +201,8 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
} }
SBatchDeleteReq deleteReq; SBatchDeleteReq deleteReq;
SSubmitReq *pSubmitReq = tdBlockToSubmit((const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid, SSubmitReq *pSubmitReq =
tdBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq); pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);
if (!pSubmitReq) { if (!pSubmitReq) {

View File

@ -13,10 +13,44 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "tcommon.h"
#include "tmsg.h"
#include "tq.h" #include "tq.h"
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid, int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
const char* stbFullName, int32_t vgId, SBatchDeleteReq* deleteReq) { SBatchDeleteReq* deleteReq) {
ASSERT(pDataBlock->info.type == STREAM_DELETE_RESULT);
int32_t totRow = pDataBlock->info.rows;
SColumnInfoData* pTsCol = taosArrayGet(pDataBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
for (int32_t row = 0; row < totRow; row++) {
int64_t ts = *(int64_t*)colDataGetData(pTsCol, row);
/*int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);*/
int64_t groupId = 0;
char* name = buildCtbNameByGroupId(stbFullName, groupId);
tqDebug("stream delete msg: groupId :%ld, name: %s", groupId, name);
SMetaReader mr = {0};
metaReaderInit(&mr, pVnode->pMeta, 0);
if (metaGetTableEntryByName(&mr, name) < 0) {
metaReaderClear(&mr);
taosMemoryFree(name);
return -1;
}
int64_t uid = mr.me.uid;
metaReaderClear(&mr);
taosMemoryFree(name);
SSingleDeleteReq req = {
.ts = ts,
.uid = uid,
};
taosArrayPush(deleteReq->deleteReqs, &req);
}
return 0;
}
SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
int64_t suid, const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq) {
SSubmitReq* ret = NULL; SSubmitReq* ret = NULL;
SArray* schemaReqs = NULL; SArray* schemaReqs = NULL;
SArray* schemaReqSz = NULL; SArray* schemaReqSz = NULL;
@ -33,9 +67,13 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
schemaReqSz = taosArrayInit(sz, sizeof(int32_t)); schemaReqSz = taosArrayInit(sz, sizeof(int32_t));
for (int32_t i = 0; i < sz; i++) { for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_DATA) { if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
// int32_t padding1 = 0;
void* padding2 = taosMemoryMalloc(1);
taosArrayPush(schemaReqSz, &padding1);
taosArrayPush(schemaReqs, &padding2);
} }
STagVal tagVal = { STagVal tagVal = {
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1, .cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
.type = TSDB_DATA_TYPE_UBIGINT, .type = TSDB_DATA_TYPE_UBIGINT,
@ -97,6 +135,9 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
int32_t cap = sizeof(SSubmitReq); int32_t cap = sizeof(SSubmitReq);
for (int32_t i = 0; i < sz; i++) { for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
continue;
}
int32_t rows = pDataBlock->info.rows; int32_t rows = pDataBlock->info.rows;
// TODO min // TODO min
int32_t rowSize = pDataBlock->info.rowSize; int32_t rowSize = pDataBlock->info.rowSize;
@ -119,6 +160,11 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
SSubmitBlk* blkHead = POINTER_SHIFT(ret, sizeof(SSubmitReq)); SSubmitBlk* blkHead = POINTER_SHIFT(ret, sizeof(SSubmitReq));
for (int32_t i = 0; i < sz; i++) { for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i); SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
pDeleteReq->suid = suid;
tdBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
continue;
}
blkHead->numOfRows = htonl(pDataBlock->info.rows); blkHead->numOfRows = htonl(pDataBlock->info.rows);
blkHead->sversion = htonl(pTSchema->version); blkHead->sversion = htonl(pTSchema->version);
@ -188,7 +234,7 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(pTask->tbSink.pTSchema); ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq)); deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
SSubmitReq* pReq = tdBlockToSubmit(pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid, SSubmitReq* pReq = tdBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq); pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq);
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId); tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
@ -201,12 +247,14 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(0); ASSERT(0);
} }
SEncoder encoder; SEncoder encoder;
void* buf = taosMemoryCalloc(1, len + sizeof(SMsgHead)); void* buf = rpcMallocCont(len + sizeof(SMsgHead));
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, len); tEncoderInit(&encoder, abuf, len);
tEncodeSBatchDeleteReq(&encoder, &deleteReq); tEncodeSBatchDeleteReq(&encoder, &deleteReq);
tEncoderClear(&encoder); tEncoderClear(&encoder);
((SMsgHead*)buf)->vgId = pVnode->config.vgId;
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) { if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
SRpcMsg msg = { SRpcMsg msg = {
.msgType = TDMT_VND_BATCH_DEL, .msgType = TDMT_VND_BATCH_DEL,

View File

@ -145,7 +145,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
int32_t len; int32_t len;
int32_t ret; int32_t ret;
vTrace("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
version); version);
pVnode->state.applied = version; pVnode->state.applied = version;
@ -1071,6 +1071,7 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
// TODO // TODO
} }
} }
taosArrayDestroy(deleteReq.deleteReqs);
return 0; return 0;
} }

View File

@ -883,6 +883,32 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName
CTG_RET(code); CTG_RET(code);
} }
int32_t ctgHashValueComp(void const *lp, void const *rp) {
uint32_t *key = (uint32_t *)lp;
SVgroupInfo *pVg = *(SVgroupInfo **)rp;
if (*key < pVg->hashBegin) {
return -1;
} else if (*key > pVg->hashEnd) {
return 1;
}
return 0;
}
int ctgVgInfoComp(const void* lp, const void* rp) {
SVgroupInfo *pLeft = *(SVgroupInfo **)lp;
SVgroupInfo *pRight = *(SVgroupInfo **)rp;
if (pLeft->hashBegin < pRight->hashBegin) {
return -1;
} else if (pLeft->hashBegin > pRight->hashBegin) {
return 1;
}
return 0;
}
int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo *dbInfo, SCtgTbHashsCtx *pCtx, char* dbFName, SArray* pNames, bool update) { int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo *dbInfo, SCtgTbHashsCtx *pCtx, char* dbFName, SArray* pNames, bool update) {
int32_t code = 0; int32_t code = 0;
SCtgTask* pTask = tReq->pTask; SCtgTask* pTask = tReq->pTask;
@ -923,9 +949,19 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
} }
} }
taosHashCancelIterate(dbInfo->vgHash, pIter);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SArray* pVgList = taosArrayInit(vgNum, POINTER_BYTES);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
taosArrayPush(pVgList, &pIter);
pIter = taosHashIterate(dbInfo->vgHash, pIter);
}
taosArraySort(pVgList, ctgVgInfoComp);
char tbFullName[TSDB_TABLE_FNAME_LEN]; char tbFullName[TSDB_TABLE_FNAME_LEN];
sprintf(tbFullName, "%s.", dbFName); sprintf(tbFullName, "%s.", dbFName);
int32_t offset = strlen(tbFullName); int32_t offset = strlen(tbFullName);
@ -940,25 +976,20 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen); uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL); SVgroupInfo **p = taosArraySearch(pVgList, &hashValue, ctgHashValueComp, TD_EQ);
while (pIter) {
vgInfo = pIter;
if (hashValue >= vgInfo->hashBegin && hashValue <= vgInfo->hashEnd) {
taosHashCancelIterate(dbInfo->vgHash, pIter);
break;
}
pIter = taosHashIterate(dbInfo->vgHash, pIter); if (NULL == p) {
vgInfo = NULL;
}
if (NULL == vgInfo) {
ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, dbFName, taosHashGetSize(dbInfo->vgHash)); ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, dbFName, taosHashGetSize(dbInfo->vgHash));
ASSERT(0);
taosArrayDestroy(pVgList);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
} }
vgInfo = *p;
SVgroupInfo* pNewVg = taosMemoryMalloc(sizeof(SVgroupInfo)); SVgroupInfo* pNewVg = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == pNewVg) { if (NULL == pNewVg) {
taosArrayDestroy(pVgList);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
} }
@ -977,6 +1008,8 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
} }
} }
taosArrayDestroy(pVgList);
CTG_RET(code); CTG_RET(code);
} }

View File

@ -52,13 +52,6 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0) #define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX 3
#define CALCULATE_START_TS_COLUMN_INDEX 4
#define CALCULATE_END_TS_COLUMN_INDEX 5
enum { enum {
// when this task starts to execute, this status will set // when this task starts to execute, this status will set
TASK_NOT_COMPLETED = 0x1u, TASK_NOT_COMPLETED = 0x1u,
@ -701,6 +694,7 @@ typedef struct SSessionAggOperatorInfo {
typedef struct SResultWindowInfo { typedef struct SResultWindowInfo {
SResultRowPosition pos; SResultRowPosition pos;
STimeWindow win; STimeWindow win;
uint64_t groupId;
bool isOutput; bool isOutput;
bool isClosed; bool isClosed;
} SResultWindowInfo; } SResultWindowInfo;
@ -1015,9 +1009,8 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex); TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap); bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs,
TSKEY* pEndTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted);
bool functionNeedToExecute(SqlFunctionCtx* pCtx); bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup); bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup); bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid); void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid);

View File

@ -1277,8 +1277,12 @@ void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
} }
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) { void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
bool init = false;
for (int32_t i = 0; i < numOfOutput; ++i) { for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].resultInfo = getResultEntryInfo(pResult, i, rowEntryInfoOffset); pCtx[i].resultInfo = getResultEntryInfo(pResult, i, rowEntryInfoOffset);
if (init) {
continue;
}
struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo; struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo;
if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) { if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) {
@ -1295,6 +1299,8 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
} else { } else {
pResInfo->initialized = true; pResInfo->initialized = true;
} }
} else {
init = true;
} }
} }
} }
@ -1943,6 +1949,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pWrapper->exchangeId); SExchangeInfo* pExchangeInfo = taosAcquireRef(exchangeObjRefPool, pWrapper->exchangeId);
if (pExchangeInfo == NULL) { if (pExchangeInfo == NULL) {
qWarn("failed to acquire exchange operator, since it may have been released"); qWarn("failed to acquire exchange operator, since it may have been released");
taosMemoryFree(pMsg->pData);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -1963,6 +1970,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfBlocks, qDebug("%s fetch rsp received, index:%d, blocks:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfBlocks,
pRsp->numOfRows); pRsp->numOfRows);
} else { } else {
taosMemoryFree(pMsg->pData);
pSourceDataInfo->code = code; pSourceDataInfo->code = code;
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code)); qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
} }

View File

@ -1174,10 +1174,15 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) { for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
SResultRowInfo dumyInfo; SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1; dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC); bool isClosed = false;
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
if (isOverdue(tsCol[rowId], &pInfo->twAggSup)) {
win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
isClosed = isCloseWindow(&win, &pInfo->twAggSup);
}
// must check update info first. // must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]); bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
if ((update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup) && if ((update || (isSignleIntervalWindow(pInfo) && isClosed &&
isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) { isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) {
appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid); appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid);
} }

View File

@ -851,23 +851,34 @@ static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t g
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SHashObj* pUpdatedMap) {
SResKeyPos* newPos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
if (newPos == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
newPos->groupId = groupId;
newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset};
*(int64_t*)newPos->key = ts;
SWinRes key = {.ts = ts, .groupId = groupId};
if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
taosMemoryFree(newPos);
}
return TSDB_CODE_SUCCESS;
}
static int32_t saveWinResultRow(SResultRow* result, uint64_t groupId, SHashObj* pUpdatedMap) {
return saveWinResult(result->win.skey, result->pageId, result->offset, groupId, pUpdatedMap);;
}
static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpdated) { static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpdated) {
return saveResult(result->win.skey, result->pageId, result->offset, groupId, pUpdated); return saveResult(result->win.skey, result->pageId, result->offset, groupId, pUpdated);
} }
static void removeResult(SArray* pUpdated, SWinRes* pKey) { static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) {
int32_t size = taosArrayGetSize(pUpdated);
int32_t index = binarySearchCom(pUpdated, size, pKey, TSDB_ORDER_DESC, compareResKey);
if (index >= 0 && 0 == compareResKey(pKey, pUpdated, index)) {
taosArrayRemove(pUpdated, index);
}
}
static void removeResults(SArray* pWins, SArray* pUpdated) {
int32_t size = taosArrayGetSize(pWins); int32_t size = taosArrayGetSize(pWins);
for (int32_t i = 0; i < size; i++) { for (int32_t i = 0; i < size; i++) {
SWinRes* pW = taosArrayGet(pWins, i); SWinRes* pW = taosArrayGet(pWins, i);
removeResult(pUpdated, pW); taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes));
} }
} }
@ -894,11 +905,14 @@ int32_t compareWinRes(void* pKey, void* data, int32_t index) {
return -1; return -1;
} }
static void removeDeleteResults(SArray* pUpdated, SArray* pDelWins) { static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
int32_t upSize = taosArrayGetSize(pUpdated); if (!pUpdatedMap || taosHashGetSize(pUpdatedMap) == 0) {
return;
}
int32_t delSize = taosArrayGetSize(pDelWins); int32_t delSize = taosArrayGetSize(pDelWins);
for (int32_t i = 0; i < upSize; i++) { void* pIte = NULL;
SResKeyPos* pResKey = taosArrayGetP(pUpdated, i); while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
SResKeyPos* pResKey = (SResKeyPos*)pIte;
int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes); int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) { if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
taosArrayRemove(pDelWins, index); taosArrayRemove(pDelWins, index);
@ -914,7 +928,7 @@ bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup) {
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) { return isOverdue(pWin->ekey, pSup); } bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) { return isOverdue(pWin->ekey, pSup); }
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
int32_t scanFlag, SArray* pUpdated) { int32_t scanFlag, SHashObj* pUpdatedMap) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@ -940,7 +954,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
} }
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
saveResultRow(pResult, tableGroupId, pUpdated); saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur); setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
} }
} }
@ -997,7 +1011,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
} }
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
saveResultRow(pResult, tableGroupId, pUpdated); saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur); setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
} }
@ -1437,7 +1451,7 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
} }
} }
static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) { static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
void* pIte = NULL; void* pIte = NULL;
size_t keyLen = 0; size_t keyLen = 0;
while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
@ -1446,7 +1460,7 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) {
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))); ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t)); TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
SResultRowPosition* pPos = (SResultRowPosition*)pIte; SResultRowPosition* pPos = (SResultRowPosition*)pIte;
int32_t code = saveResult(ts, pPos->pageId, pPos->offset, groupId, resWins); int32_t code = saveWinResult(ts, pPos->pageId, pPos->offset, groupId, resWins);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -1455,7 +1469,7 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) {
} }
static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval, static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
SHashObj* pPullDataMap, SArray* closeWins, SArray* pRecyPages, SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pRecyPages,
SDiskbasedBuf* pDiscBuf) { SDiskbasedBuf* pDiscBuf) {
qDebug("===stream===close interval window"); qDebug("===stream===close interval window");
void* pIte = NULL; void* pIte = NULL;
@ -1487,7 +1501,7 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
} }
SResultRowPosition* pPos = (SResultRowPosition*)pIte; SResultRowPosition* pPos = (SResultRowPosition*)pIte;
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
int32_t code = saveResult(ts, pPos->pageId, pPos->offset, groupId, closeWins); int32_t code = saveWinResult(ts, pPos->pageId, pPos->offset, groupId, closeWins);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -1577,11 +1591,14 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0]; SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
while (1) { while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) { if (pBlock == NULL) {
break; break;
} }
// qInfo("===stream===%ld", pBlock->info.version);
printDataBlock(pBlock, "single interval recv"); printDataBlock(pBlock, "single interval recv");
if (pBlock->info.type == STREAM_CLEAR) { if (pBlock->info.type == STREAM_CLEAR) {
@ -1594,7 +1611,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval); doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
continue; continue;
} else if (pBlock->info.type == STREAM_GET_ALL) { } else if (pBlock->info.type == STREAM_GET_ALL) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated); getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
continue; continue;
} }
@ -1617,17 +1634,24 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
} }
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated); hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
} }
pOperator->status = OP_RES_TO_RETURN; pOperator->status = OP_RES_TO_RETURN;
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdated, closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
taosArrayPush(pUpdated, pIte);
}
taosHashCleanup(pUpdatedMap);
taosArraySort(pUpdated, resultrowComparAsc);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset); finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
removeDeleteResults(pUpdated, pInfo->pDelWins); removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes); doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows > 0) { if (pInfo->pDelRes->info.rows > 0) {
return pInfo->pDelRes; return pInfo->pDelRes;
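Editor's note: the hunk above switches doStreamIntervalAgg from pushing updated windows straight into the pUpdated array to collecting them in a timestamp-keyed hash map (pUpdatedMap), so a window hit by several incoming blocks is recorded only once; after all blocks are consumed the map is drained back into the array and re-sorted. A minimal sketch of that drain-and-sort step, using only the calls visible in the hunk (the helper name is illustrative, not part of the patch):

```c
// Sketch only: move every entry of the update map into the result array,
// release the map, and restore timestamp order before finalization.
static void drainUpdateMapToArray(SHashObj* pUpdatedMap, SArray* pUpdated) {
  void* pIte = NULL;
  while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
    taosArrayPush(pUpdated, pIte);               // copy the stored record into the array
  }
  taosHashCleanup(pUpdatedMap);                  // map contents are no longer needed
  taosArraySort(pUpdated, resultrowComparAsc);   // finalize expects ascending order
}
```

The same pattern is introduced again below in doStreamFinalIntervalAgg.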
@ -2867,7 +2891,7 @@ STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
} }
static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId, static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
SArray* pUpdated) { SHashObj* pUpdatedMap) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info; SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo); SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@ -2949,8 +2973,8 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey,
NULL, TSDB_ORDER_ASC); NULL, TSDB_ORDER_ASC);
} }
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) { if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdatedMap) {
saveResultRow(pResult, tableGroupId, pUpdated); saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur); setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
} }
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
@ -3056,6 +3080,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info; SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
SOperatorInfo* downstream = pOperator->pDownstream[0]; SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
TSKEY maxTs = INT64_MIN; TSKEY maxTs = INT64_MIN;
SExprSupp* pSup = &pOperator->exprSupp; SExprSupp* pSup = &pOperator->exprSupp;
@ -3113,7 +3139,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream); SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) { if (pBlock == NULL) {
clearSpecialDataBlock(pInfo->pUpdateRes); clearSpecialDataBlock(pInfo->pUpdateRes);
removeDeleteResults(pUpdated, pInfo->pDelWins); removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
pOperator->status = OP_RES_TO_RETURN; pOperator->status = OP_RES_TO_RETURN;
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval final" : "interval semi"); qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
break; break;
@ -3140,7 +3166,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
taosArrayDestroy(pUpWins); taosArrayDestroy(pUpWins);
continue; continue;
} }
removeResults(pUpWins, pUpdated); removeResults(pUpWins, pUpdatedMap);
copyDataBlock(pInfo->pUpdateRes, pBlock); copyDataBlock(pInfo->pUpdateRes, pBlock);
// copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex); // copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
pInfo->returnUpdate = true; pInfo->returnUpdate = true;
@ -3158,15 +3184,15 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated); pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated);
continue; continue;
} }
removeResults(pInfo->pDelWins, pUpdated); removeResults(pInfo->pDelWins, pUpdatedMap);
break; break;
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) { } else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated); getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
continue; continue;
} else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) { } else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) {
SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes)); SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes));
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins); doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins);
removeResults(pUpWins, pUpdated); removeResults(pUpWins, pUpdatedMap);
taosArrayDestroy(pUpWins); taosArrayDestroy(pUpWins);
if (taosArrayGetSize(pUpdated) > 0) { if (taosArrayGetSize(pUpdated) > 0) {
break; break;
@ -3182,7 +3208,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL); projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
} }
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true); setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdated); doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) { if (IS_FINAL_OP(pInfo)) {
int32_t chIndex = getChildIndex(pBlock); int32_t chIndex = getChildIndex(pBlock);
int32_t size = taosArrayGetSize(pInfo->pChildren); int32_t size = taosArrayGetSize(pInfo->pChildren);
@ -3207,12 +3233,19 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
if (IS_FINAL_OP(pInfo)) { if (IS_FINAL_OP(pInfo)) {
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pInfo->pPullDataMap, closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pInfo->pPullDataMap,
pUpdated, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf); pUpdatedMap, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
closeChildIntervalWindow(pInfo->pChildren, pInfo->twAggSup.maxTs); closeChildIntervalWindow(pInfo->pChildren, pInfo->twAggSup.maxTs);
} else { } else {
pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs; pInfo->binfo.pRes->info.watermark = pInfo->twAggSup.maxTs;
} }
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
taosArrayPush(pUpdated, pIte);
}
taosHashCleanup(pUpdatedMap);
taosArraySort(pUpdated, resultrowComparAsc);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset); finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
@ -3701,7 +3734,7 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
return insertNewSessionWindow(pWinInfos, startTs, index + 1); return insertNewSessionWindow(pWinInfos, startTs, index + 1);
} }
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, int32_t rows, int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t groupId,int32_t rows,
int32_t start, int64_t gap, SHashObj* pStDeleted) { int32_t start, int64_t gap, SHashObj* pStDeleted) {
for (int32_t i = start; i < rows; ++i) { for (int32_t i = start; i < rows; ++i) {
if (!isInWindow(pWinInfo, pStartTs[i], gap) && (!pEndTs || !isInWindow(pWinInfo, pEndTs[i], gap))) { if (!isInWindow(pWinInfo, pStartTs[i], gap) && (!pEndTs || !isInWindow(pWinInfo, pEndTs[i], gap))) {
@ -3709,7 +3742,8 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS
} }
if (pWinInfo->win.skey > pStartTs[i]) { if (pWinInfo->win.skey > pStartTs[i]) {
if (pStDeleted && pWinInfo->isOutput) { if (pStDeleted && pWinInfo->isOutput) {
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY)); SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
pWinInfo->isOutput = false; pWinInfo->isOutput = false;
} }
pWinInfo->win.skey = pStartTs[i]; pWinInfo->win.skey = pStartTs[i];
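Editor's note: with this change the entry recorded in pStDeleted for a shifted session window carries the owning group id as well as the window start, via an SWinRes value rather than a bare TSKEY. A sketch of the pattern, with the SWinRes layout inferred from the designated initializer above and the helper name invented for illustration:

```c
// Assumed two-field layout of SWinRes, inferred from the initializer in the
// hunk; the real definition lives in the executor headers.
typedef struct SWinRes {
  TSKEY    ts;       // start timestamp of the deleted window
  uint64_t groupId;  // table group the window belonged to
} SWinRes;

// Hypothetical helper showing the write side: key by result-row position,
// store (ts, groupId) so the delete block can later emit both columns.
static void markSessionWindowDeleted(SHashObj* pStDeleted, SResultWindowInfo* pWinInfo,
                                     uint64_t groupId) {
  SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
  taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
  pWinInfo->isOutput = false;  // avoid reporting the same window twice
}
```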
@ -3828,7 +3862,8 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex,
compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo); compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo);
taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition)); taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition));
if (pWinInfo->isOutput) { if (pWinInfo->isOutput) {
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY)); SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
pWinInfo->isOutput = false; pWinInfo->isOutput = false;
} }
taosArrayRemove(pInfo->streamAggSup.pCurWins, i); taosArrayRemove(pInfo->streamAggSup.pCurWins, i);
@ -3878,7 +3913,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
int32_t winIndex = 0; int32_t winIndex = 0;
SResultWindowInfo* pCurWin = getSessionTimeWindow(pAggSup, startTsCols[i], endTsCols[i], groupId, gap, &winIndex); SResultWindowInfo* pCurWin = getSessionTimeWindow(pAggSup, startTsCols[i], endTsCols[i], groupId, gap, &winIndex);
winRows = winRows =
updateSessionWindowInfo(pCurWin, startTsCols, endTsCols, pSDataBlock->info.rows, i, pInfo->gap, pStDeleted); updateSessionWindowInfo(pCurWin, startTsCols, endTsCols, groupId, pSDataBlock->info.rows, i, pInfo->gap, pStDeleted);
code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator); code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) { if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY); longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -3927,6 +3962,7 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
} }
deleteWindow(pAggSup->pCurWins, winIndex, fp); deleteWindow(pAggSup->pCurWins, winIndex, fp);
if (result) { if (result) {
pCurWin->groupId = gpDatas[i];
taosArrayPush(result, pCurWin); taosArrayPush(result, pCurWin);
} }
} }
@ -3947,7 +3983,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
step = 1; step = 1;
continue; continue;
} }
step = updateSessionWindowInfo(pCurWin, tsCols, NULL, pBlock->info.rows, i, gap, NULL); step = updateSessionWindowInfo(pCurWin, tsCols, NULL, 0, pBlock->info.rows, i, gap, NULL);
ASSERT(isInWindow(pCurWin, tsCols[i], gap)); ASSERT(isInWindow(pCurWin, tsCols[i], gap));
doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pSup, numOfOutput); doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pSup, numOfOutput);
if (result) { if (result) {
@ -3984,12 +4020,11 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
blockDataEnsureCapacity(pBlock, size); blockDataEnsureCapacity(pBlock, size);
size_t keyLen = 0; size_t keyLen = 0;
while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) { while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); SWinRes* res = *Ite;
colDataAppend(pColInfoData, pBlock->info.rows, *Ite, false); SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
for (int32_t i = 1; i < taosArrayGetSize(pBlock->pDataBlock); i++) { colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false);
pColInfoData = taosArrayGet(pBlock->pDataBlock, i); SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppendNULL(pColInfoData, pBlock->info.rows); colDataAppend(pGpCol, pBlock->info.rows, (const char*)&res->groupId, false);
}
pBlock->info.rows += 1; pBlock->info.rows += 1;
if (pBlock->info.rows + 1 >= pBlock->info.capacity) { if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
break; break;
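Editor's note: doBuildDeleteDataBlock is the read side of the same record. Instead of appending the raw hash value as a timestamp and NULL-filling every other column, it now unpacks the SWinRes and fills both the start-timestamp and group-id columns of the delete block. In the patch the iterator position (*Ite) is owned by the caller, so when the block fills up the loop stops and resumes from the same entry on the next invocation; the sketch below uses a local iterator only to stay self-contained:

```c
// Sketch of the consumer loop added by the patch.
void* pIte = NULL;
while ((pIte = taosHashIterate(pStDeleted, pIte)) != NULL) {
  SWinRes*         res    = (SWinRes*)pIte;
  SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
  colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false);       // window start
  SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
  colDataAppend(pGpCol, pBlock->info.rows, (const char*)&res->groupId, false);  // owning group
  pBlock->info.rows += 1;
  if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
    break;  // block full; remaining deletions are emitted on a later call
  }
}
```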
@ -4116,7 +4151,8 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) {
int32_t size = taosArrayGetSize(pResWins); int32_t size = taosArrayGetSize(pResWins);
for (int32_t i = 0; i < size; i++) { for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i); SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i);
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &pWinInfo->win.skey, sizeof(TSKEY)); SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
} }
} }
@ -498,8 +498,7 @@ int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock,
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0; pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0);;
cleanupResultRowEntry(pResInfo);
char* in = finalResult; char* in = finalResult;
colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes); colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes);
@ -37,17 +37,6 @@ int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock
int32_t maxRows = 0; int32_t maxRows = 0;
for (int32_t j = 0; j < num; ++j) { for (int32_t j = 0; j < num; ++j) {
#if 0
int32_t id = pCtx[j].functionId;
/*
* ts, tag, tagprj function can not decide the output number of current query
* the number of output result is decided by main output
*/
if (id == FUNCTION_TS || id == FUNCTION_TAG || id == FUNCTION_TAGPRJ) {
continue;
}
#endif
SResultRowEntryInfo *pResInfo = GET_RES_INFO(&pCtx[j]); SResultRowEntryInfo *pResInfo = GET_RES_INFO(&pCtx[j]);
if (pResInfo != NULL && maxRows < pResInfo->numOfRes) { if (pResInfo != NULL && maxRows < pResInfo->numOfRes) {
maxRows = pResInfo->numOfRes; maxRows = pResInfo->numOfRes;
@ -124,7 +124,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
} }
pInfo->numBuckets = DEFAULT_BUCKET_SIZE; pInfo->numBuckets = DEFAULT_BUCKET_SIZE;
pInfo->pCloseWinSBF = NULL; pInfo->pCloseWinSBF = NULL;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT);
pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK); pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK);
pInfo->maxVersion = 0; pInfo->maxVersion = 0;
pInfo->scanGroupId = 0; pInfo->scanGroupId = 0;
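Editor's note: the update-info map in updateInfoInit switches from the generic BINARY hash to the UBIGINT one, matching keys that are fixed-width 64-bit unsigned values (presumably table uids) rather than variable-length byte strings. A sketch mirroring the new initialization; the remaining flags passed to taosHashInit are simply carried over from the surrounding code:

```c
// Pick the hash function that matches the key type: unsigned 64-bit integers.
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT);
SHashObj*  pMap   = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK);
```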
@ -237,8 +237,8 @@
./test.sh -f tsim/stream/distributeInterval0.sim ./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
./test.sh -f tsim/stream/distributeSession0.sim ./test.sh -f tsim/stream/distributeSession0.sim
./test.sh -f tsim/stream/session0.sim #./test.sh -f tsim/stream/session0.sim
./test.sh -f tsim/stream/session1.sim #./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/state0.sim ./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim ./test.sh -f tsim/stream/triggerInterval0.sim
./test.sh -f tsim/stream/triggerSession0.sim ./test.sh -f tsim/stream/triggerSession0.sim
@ -30,7 +30,7 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__) tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor()) tdSql.init(conn.cursor())
def diff_query_form(self, col="c1", alias="", table_expr="t1", condition=""): def diff_query_form(self, col="c1", alias="", table_expr="db.t1", condition=""):
''' '''
diff function: diff function:
@ -44,7 +44,7 @@ class TDTestCase:
return f"select diff({col}) {alias} from {table_expr} {condition}" return f"select diff({col}) {alias} from {table_expr} {condition}"
def checkdiff(self,col="c1", alias="", table_expr="t1", condition="" ): def checkdiff(self,col="c1", alias="", table_expr="db.t1", condition="" ):
line = sys._getframe().f_back.f_lineno line = sys._getframe().f_back.f_lineno
pre_sql = self.diff_query_form( pre_sql = self.diff_query_form(
col=col, table_expr=table_expr, condition=condition col=col, table_expr=table_expr, condition=condition
@ -60,7 +60,7 @@ class TDTestCase:
return return
if "order by tbname" in condition: if "order by tbname" in condition:
tdSql.error(self.diff_query_form( tdSql.query(self.diff_query_form(
col=col, alias=alias, table_expr=table_expr, condition=condition col=col, alias=alias, table_expr=table_expr, condition=condition
)) ))
return return
@ -164,9 +164,9 @@ class TDTestCase:
self.checkdiff(**case6) self.checkdiff(**case6)
# case7~8: nested query # case7~8: nested query
# case7 = {"table_expr": "(select c1 from stb1)"} # case7 = {"table_expr": "(select c1 from db.stb1)"}
# self.checkdiff(**case7) # self.checkdiff(**case7)
# case8 = {"table_expr": "(select diff(c1) c1 from stb1 group by tbname)"} # case8 = {"table_expr": "(select diff(c1) c1 from db.stb1 group by tbname)"}
# self.checkdiff(**case8) # self.checkdiff(**case8)
# case9~10: mix with tbname/ts/tag/col # case9~10: mix with tbname/ts/tag/col
@ -200,15 +200,15 @@ class TDTestCase:
self.checkdiff(**case17) self.checkdiff(**case17)
# case18~19: with group by # case18~19: with group by
# case18 = { # case18 = {
# "table_expr": "t1", # "table_expr": "db.t1",
# "condition": "group by c6" # "condition": "group by c6"
# } # }
# self.checkdiff(**case18) # self.checkdiff(**case18)
# case19 = { case19 = {
# "table_expr": "stb1", "table_expr": "db.stb1",
# "condition": "partition by tbname" # partition by tbname "condition": "partition by tbname order by tbname" # partition by tbname
# } }
# self.checkdiff(**case19) self.checkdiff(**case19)
# # case20~21: with order by # # case20~21: with order by
# case20 = {"condition": "order by ts"} # case20 = {"condition": "order by ts"}
@ -226,7 +226,7 @@ class TDTestCase:
} }
self.checkdiff(**case23) self.checkdiff(**case23)
# case24 = { # case24 = {
# "table_expr": "stb1", # "table_expr": "db.stb1",
# "condition": "group by tbname slimit 1 soffset 1" # "condition": "group by tbname slimit 1 soffset 1"
# } # }
# self.checkdiff(**case24) # self.checkdiff(**case24)
@ -241,13 +241,13 @@ class TDTestCase:
# #
# form test # form test
tdSql.error(self.diff_query_form(col="")) # no col tdSql.error(self.diff_query_form(col="")) # no col
tdSql.error("diff(c1) from stb1") # no select tdSql.error("diff(c1) from db.stb1") # no select
tdSql.error("select diff from t1") # no diff condition tdSql.error("select diff from db.t1") # no diff condition
tdSql.error("select diff c1 from t1") # no brackets tdSql.error("select diff c1 from db.t1") # no brackets
tdSql.error("select diff(c1) t1") # no from tdSql.error("select diff(c1) db.t1") # no from
tdSql.error("select diff( c1 ) from ") # no table_expr tdSql.error("select diff( c1 ) from ") # no table_expr
# tdSql.error(self.diff_query_form(col="st1")) # tag col # tdSql.error(self.diff_query_form(col="st1")) # tag col
tdSql.query("select diff(st1) from t1 ") tdSql.query("select diff(st1) from db.t1")
# tdSql.error(self.diff_query_form(col=1)) # col is a value # tdSql.error(self.diff_query_form(col=1)) # col is a value
tdSql.error(self.diff_query_form(col="'c1'")) # col is a string tdSql.error(self.diff_query_form(col="'c1'")) # col is a string
tdSql.error(self.diff_query_form(col=None)) # col is NULL 1 tdSql.error(self.diff_query_form(col=None)) # col is NULL 1
@ -258,18 +258,18 @@ class TDTestCase:
tdSql.error(self.diff_query_form(col='c.')) # col is spercial char 3 tdSql.error(self.diff_query_form(col='c.')) # col is spercial char 3
tdSql.error(self.diff_query_form(col='avg(c1)')) # expr col tdSql.error(self.diff_query_form(col='avg(c1)')) # expr col
# tdSql.error(self.diff_query_form(col='c6')) # bool col # tdSql.error(self.diff_query_form(col='c6')) # bool col
tdSql.query("select diff(c6) from t1") tdSql.query("select diff(c6) from db.t1")
tdSql.error(self.diff_query_form(col='c4')) # binary col tdSql.error(self.diff_query_form(col='c4')) # binary col
tdSql.error(self.diff_query_form(col='c10')) # nachr col tdSql.error(self.diff_query_form(col='c10')) # nachr col
tdSql.error(self.diff_query_form(col='c10')) # not table_expr col tdSql.error(self.diff_query_form(col='c10')) # not table_expr col
tdSql.error(self.diff_query_form(col='t1')) # tbname tdSql.error(self.diff_query_form(col='db.t1')) # tbname
tdSql.error(self.diff_query_form(col='stb1')) # stbname tdSql.error(self.diff_query_form(col='db.stb1')) # stbname
tdSql.error(self.diff_query_form(col='db')) # datbasename tdSql.error(self.diff_query_form(col='db')) # datbasename
# tdSql.error(self.diff_query_form(col=True)) # col is BOOL 1 # tdSql.error(self.diff_query_form(col=True)) # col is BOOL 1
# tdSql.error(self.diff_query_form(col='True')) # col is BOOL 2 # tdSql.error(self.diff_query_form(col='True')) # col is BOOL 2
tdSql.error(self.diff_query_form(col='*')) # col is all col tdSql.error(self.diff_query_form(col='*')) # col is all col
tdSql.error("select diff[c1] from t1") # sql form error 1 tdSql.error("select diff[c1] from db.t1") # sql form error 1
tdSql.error("select diff{c1} from t1") # sql form error 2 tdSql.error("select diff{c1} from db.t1") # sql form error 2
tdSql.error(self.diff_query_form(col="[c1]")) # sql form error 3 tdSql.error(self.diff_query_form(col="[c1]")) # sql form error 3
# tdSql.error(self.diff_query_form(col="c1, c2")) # sql form error 3 # tdSql.error(self.diff_query_form(col="c1, c2")) # sql form error 3
# tdSql.error(self.diff_query_form(col="c1, 2")) # sql form error 3 # tdSql.error(self.diff_query_form(col="c1, 2")) # sql form error 3
@ -282,7 +282,7 @@ class TDTestCase:
# tdSql.error(self.diff_query_form(alias=" + 2")) # mix with arithmetic 1 # tdSql.error(self.diff_query_form(alias=" + 2")) # mix with arithmetic 1
tdSql.error(self.diff_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 tdSql.error(self.diff_query_form(alias=" + avg(c1)")) # mix with arithmetic 2
tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1 tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1
# tdSql.error(self.diff_query_form(table_expr="stb1")) # select stb directly # tdSql.error(self.diff_query_form(table_expr="db.stb1")) # select stb directly
stb_join = { stb_join = {
"col": "stb1.c1", "col": "stb1.c1",
"table_expr": "stb1, stb2", "table_expr": "stb1, stb2",
@ -294,17 +294,17 @@ class TDTestCase:
} }
tdSql.error(self.diff_query_form(**interval_sql)) # interval tdSql.error(self.diff_query_form(**interval_sql)) # interval
group_normal_col = { group_normal_col = {
"table_expr": "t1", "table_expr": "db.t1",
"condition": "group by c6" "condition": "group by c6"
} }
tdSql.error(self.diff_query_form(**group_normal_col)) # group by normal col tdSql.error(self.diff_query_form(**group_normal_col)) # group by normal col
slimit_soffset_sql = { slimit_soffset_sql = {
"table_expr": "stb1", "table_expr": "db.stb1",
"condition": "group by tbname slimit 1 soffset 1" "condition": "group by tbname slimit 1 soffset 1"
} }
# tdSql.error(self.diff_query_form(**slimit_soffset_sql)) # tdSql.error(self.diff_query_form(**slimit_soffset_sql))
order_by_tbname_sql = { order_by_tbname_sql = {
"table_expr": "stb1", "table_expr": "db.stb1",
"condition": "group by tbname order by tbname" "condition": "group by tbname order by tbname"
} }
tdSql.error(self.diff_query_form(**order_by_tbname_sql)) tdSql.error(self.diff_query_form(**order_by_tbname_sql))
@ -349,63 +349,40 @@ class TDTestCase:
"create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
) )
for i in range(tbnum): for i in range(tbnum):
tdSql.execute(f"create table t{i} using stb1 tags({i})") tdSql.execute(f"create table t{i} using db.stb1 tags({i})")
tdSql.execute(f"create table tt{i} using stb2 tags({i})") tdSql.execute(f"create table tt{i} using db.stb2 tags({i})")
pass pass
def diff_support_stable(self): def diff_support_stable(self):
tdSql.query(" select diff(1) from stb1 ") tdSql.query(" select diff(1) from db.stb1 ")
tdSql.checkRows(229) tdSql.checkRows(229)
tdSql.checkData(0,0,0) tdSql.checkData(0,0,0)
tdSql.query("select diff(c1) from stb1 partition by tbname ") tdSql.query("select diff(c1) from db.stb1 partition by tbname ")
tdSql.checkRows(190)
# tdSql.query("select diff(st1) from stb1 partition by tbname")
# tdSql.checkRows(229)
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(190)
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(190)
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(190) tdSql.checkRows(190)
# # bug need fix tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
# tdSql.query("select diff(st1+c1) from stb1 partition by tbname slimit 1 ") tdSql.checkRows(190)
# tdSql.checkRows(19) tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
# tdSql.error("select diff(st1+c1) from stb1 partition by tbname limit 1 ") tdSql.checkRows(190)
tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
# bug need fix
tdSql.query("select diff(st1+c1) from stb1 partition by tbname")
tdSql.checkRows(190) tdSql.checkRows(190)
# bug need fix # bug need fix
# tdSql.query("select tbname , diff(c1) from stb1 partition by tbname") tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
# tdSql.checkRows(199) tdSql.checkRows(190)
# tdSql.query("select tbname , diff(st1) from stb1 partition by tbname")
# tdSql.checkRows(199) # bug need fix
# tdSql.query("select tbname , diff(st1) from stb1 partition by tbname slimit 1") tdSql.query("select tbname , diff(c1) from db.stb1 partition by tbname")
# tdSql.checkRows(19) tdSql.checkRows(190)
tdSql.query("select tbname , diff(st1) from db.stb1 partition by tbname")
tdSql.checkRows(220)
# partition by tags # partition by tags
# tdSql.query("select st1 , diff(c1) from stb1 partition by st1") tdSql.query("select st1 , diff(c1) from db.stb1 partition by st1")
# tdSql.checkRows(199) tdSql.checkRows(190)
# tdSql.query("select diff(c1) from stb1 partition by st1") tdSql.query("select diff(c1) from db.stb1 partition by st1")
# tdSql.checkRows(199) tdSql.checkRows(190)
# tdSql.query("select st1 , diff(c1) from stb1 partition by st1 slimit 1")
# tdSql.checkRows(19)
# tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1")
# tdSql.checkRows(19)
# partition by col
# tdSql.query("select c1 , diff(c1) from stb1 partition by c1")
# tdSql.checkRows(199)
# tdSql.query("select diff(c1) from stb1 partition by c1")
# tdSql.checkRows(41)
# tdSql.query("select c1 , diff(c1) from stb1 partition by st1 slimit 1")
# tdSql.checkRows(19)
# tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1")
# tdSql.checkRows(19)
def diff_test_run(self) : def diff_test_run(self) :
@ -428,18 +405,18 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
self.diff_test_table(tbnum) self.diff_test_table(tbnum)
tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
self.diff_current_query() self.diff_current_query()
self.diff_error_query() self.diff_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):") tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
self.diff_test_table(tbnum) self.diff_test_table(tbnum)
tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})") f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values " tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values "
f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})") f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
self.diff_current_query() self.diff_current_query()
self.diff_error_query() self.diff_error_query()
@ -0,0 +1,559 @@
import taos
import sys
from util.log import *
from util.sql import *
from util.cases import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def run(self):
dbname = "db"
tbname = "tb"
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
f'''create table if not exists {dbname}.{tbname}
(ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10))
'''
)
tdLog.printNoPrefix("==========step2:insert data")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
tdLog.printNoPrefix("==========step3:fill null")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
tdSql.checkRows(13)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
tdSql.checkData(5, 0, None)
tdSql.checkData(6, 0, 10)
tdSql.checkData(7, 0, None)
tdSql.checkData(8, 0, None)
tdSql.checkData(9, 0, None)
tdSql.checkData(10, 0, None)
tdSql.checkData(11, 0, 15)
tdSql.checkData(12, 0, None)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(null)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
## {.}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:03', '2020-02-01 00:00:07') every(1s) fill(null)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
## .{}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:06', '2020-02-01 00:00:09') every(1s) fill(null)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
## .{.}.
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(1s) fill(null)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(null)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(null)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdLog.printNoPrefix("==========step4:fill value")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(value, 1)")
tdSql.checkRows(13)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
tdSql.checkData(5, 0, 1)
tdSql.checkData(6, 0, 10)
tdSql.checkData(7, 0, 1)
tdSql.checkData(8, 0, 1)
tdSql.checkData(9, 0, 1)
tdSql.checkData(10, 0, 1)
tdSql.checkData(11, 0, 15)
tdSql.checkData(12, 0, 1)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(value, 1)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
## {.}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:03', '2020-02-01 00:00:07') every(1s) fill(value, 1)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
## .{}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:06', '2020-02-01 00:00:09') every(1s) fill(value, 1)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
## .{.}.
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(1s) fill(value, 1)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(value, 1)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
tdLog.printNoPrefix("==========step5:fill prev")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(prev)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 5)
tdSql.checkData(4, 0, 5)
tdSql.checkData(5, 0, 10)
tdSql.checkData(6, 0, 10)
tdSql.checkData(7, 0, 10)
tdSql.checkData(8, 0, 10)
tdSql.checkData(9, 0, 10)
tdSql.checkData(10, 0, 15)
tdSql.checkData(11, 0, 15)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(prev)")
tdSql.checkRows(0)
## {.}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:03', '2020-02-01 00:00:07') every(1s) fill(prev)")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
## .{}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:06', '2020-02-01 00:00:09') every(1s) fill(prev)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 5)
## .{.}.
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(1s) fill(prev)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, 10)
tdSql.checkData(4, 0, 10)
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(prev)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 10)
tdSql.checkData(1, 0, 10)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 15)
tdSql.checkData(4, 0, 15)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(prev)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 15)
tdLog.printNoPrefix("==========step6:fill next")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, 10)
tdSql.checkData(4, 0, 10)
tdSql.checkData(5, 0, 10)
tdSql.checkData(6, 0, 10)
tdSql.checkData(7, 0, 15)
tdSql.checkData(8, 0, 15)
tdSql.checkData(9, 0, 15)
tdSql.checkData(10, 0, 15)
tdSql.checkData(11, 0, 15)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 5)
## {.}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:03', '2020-02-01 00:00:07') every(1s) fill(next)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 10)
tdSql.checkData(4, 0, 10)
## .{}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:06', '2020-02-01 00:00:09') every(1s) fill(next)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 10)
tdSql.checkData(1, 0, 10)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, 10)
## .{.}.
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(1s) fill(next)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 10)
tdSql.checkData(1, 0, 10)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, 15)
tdSql.checkData(4, 0, 15)
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)")
tdSql.checkRows(0)
tdLog.printNoPrefix("==========step7:fill linear")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
tdSql.checkRows(11)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
tdSql.checkData(3, 0, 8)
tdSql.checkData(4, 0, 9)
tdSql.checkData(5, 0, 10)
tdSql.checkData(6, 0, 11)
tdSql.checkData(7, 0, 12)
tdSql.checkData(8, 0, 13)
tdSql.checkData(9, 0, 14)
tdSql.checkData(10, 0, 15)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(linear)")
tdSql.checkRows(0)
## {.}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:03', '2020-02-01 00:00:07') every(1s) fill(linear)")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
## .{}..
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:06', '2020-02-01 00:00:09') every(1s) fill(linear)")
tdSql.checkRows(4)
tdSql.checkData(0, 0, 6)
tdSql.checkData(1, 0, 7)
tdSql.checkData(2, 0, 8)
tdSql.checkData(3, 0, 9)
## .{.}.
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:08', '2020-02-01 00:00:12') every(1s) fill(linear)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 8)
tdSql.checkData(1, 0, 9)
tdSql.checkData(2, 0, 10)
tdSql.checkData(3, 0, 11)
tdSql.checkData(4, 0, 12)
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(linear)")
tdSql.checkRows(3)
tdSql.checkData(0, 0, 13)
tdSql.checkData(1, 0, 14)
tdSql.checkData(2, 0, 15)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(linear)")
tdSql.checkRows(0)
tdLog.printNoPrefix("==========step8:test intra block interpolation")
tdSql.execute(f"drop database {dbname}");
tdSql.prepare()
tdSql.execute(f"create table if not exists {dbname}.{tbname} (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10))")
# set two data point has 10 days interval will be stored in different datablocks
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:05', 5, 5, 5, 5, 5.0, 5.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-11 00:00:05', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
tdSql.execute(f"flush database {dbname}");
# test fill null
## | {. | | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-11 00:00:05') every(1d) fill(null)")
tdSql.checkRows(11)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
tdSql.checkData(5, 0, None)
tdSql.checkData(6, 0, None)
tdSql.checkData(7, 0, None)
tdSql.checkData(8, 0, None)
tdSql.checkData(9, 0, None)
tdSql.checkData(10, 0, 15)
## | . | {} | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-03 00:00:05', '2020-02-07 00:00:05') every(1d) fill(null)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
## | {. | } | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-01-31 00:00:05', '2020-02-05 00:00:05') every(1d) fill(null)")
tdSql.checkRows(6)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
tdSql.checkData(5, 0, None)
## | . | { | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.checkRows(6)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
tdSql.checkData(5, 0, None)
# test fill value
## | {. | | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-11 00:00:05') every(1d) fill(value, 1)")
tdSql.checkRows(11)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
tdSql.checkData(5, 0, 1)
tdSql.checkData(6, 0, 1)
tdSql.checkData(7, 0, 1)
tdSql.checkData(8, 0, 1)
tdSql.checkData(9, 0, 1)
tdSql.checkData(10, 0, 15)
## | . | {} | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-03 00:00:05', '2020-02-07 00:00:05') every(1d) fill(value, 1)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
## | {. | } | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-01-31 00:00:05', '2020-02-05 00:00:05') every(1d) fill(value, 1)")
tdSql.checkRows(6)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
tdSql.checkData(5, 0, 1)
## | . | { | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1)")
tdSql.checkRows(6)
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 1)
tdSql.checkData(3, 0, 1)
tdSql.checkData(4, 0, 1)
tdSql.checkData(5, 0, 1)
# test fill prev
## | {. | | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-11 00:00:05') every(1d) fill(prev)")
tdSql.checkRows(11)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 5)
tdSql.checkData(4, 0, 5)
tdSql.checkData(5, 0, 5)
tdSql.checkData(6, 0, 5)
tdSql.checkData(7, 0, 5)
tdSql.checkData(8, 0, 5)
tdSql.checkData(9, 0, 5)
tdSql.checkData(10, 0, 15)
## | . | {} | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-03 00:00:05', '2020-02-07 00:00:05') every(1d) fill(prev)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 5)
tdSql.checkData(4, 0, 5)
## | {. | } | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-01-31 00:00:05', '2020-02-05 00:00:05') every(1d) fill(prev)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 5)
tdSql.checkData(3, 0, 5)
tdSql.checkData(4, 0, 5)
## | . | { | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(prev)")
tdSql.checkRows(6)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 15)
tdSql.checkData(4, 0, 15)
tdSql.checkData(5, 0, 15)
# test fill next
## | {. | | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-11 00:00:05') every(1d) fill(next)")
tdSql.checkRows(11)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 15)
tdSql.checkData(4, 0, 15)
tdSql.checkData(5, 0, 15)
tdSql.checkData(6, 0, 15)
tdSql.checkData(7, 0, 15)
tdSql.checkData(8, 0, 15)
tdSql.checkData(9, 0, 15)
tdSql.checkData(10, 0, 15)
## | . | {} | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-03 00:00:05', '2020-02-07 00:00:05') every(1d) fill(next)")
tdSql.checkRows(5)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 15)
tdSql.checkData(4, 0, 15)
## | {. | } | . |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-01-31 00:00:05', '2020-02-05 00:00:05') every(1d) fill(next)")
tdSql.checkRows(6)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, 15)
tdSql.checkData(4, 0, 15)
tdSql.checkData(5, 0, 15)
## | . | { | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -270,7 +270,7 @@ class TDTestCase:
if not vote_act: if not vote_act:
print("=======before_revote_leader_infos ======\n" , before_leader_infos) print("=======before_revote_leader_infos ======\n" , before_leader_infos)
print("=======after_revote_leader_infos ======\n" , after_leader_infos) print("=======after_revote_leader_infos ======\n" , after_leader_infos)
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else: else:
for vgroup_info in vote_act: for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info): for ind , role in enumerate(vgroup_info):
@ -96,6 +96,8 @@ python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py -R python3 ./test.py -f 2-query/distribute_agg_stddev.py -R
python3 ./test.py -f 2-query/distribute_agg_sum.py python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_sum.py -R python3 ./test.py -f 2-query/distribute_agg_sum.py -R
python3 ./test.py -f 2-query/interp.py
python3 ./test.py -f 2-query/interp.py -R
@ -213,7 +215,7 @@ python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_query
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
@ -338,6 +340,7 @@ python3 ./test.py -f 2-query/arcsin.py -Q 2
python3 ./test.py -f 2-query/arccos.py -Q 2 python3 ./test.py -f 2-query/arccos.py -Q 2
python3 ./test.py -f 2-query/arctan.py -Q 2 python3 ./test.py -f 2-query/arctan.py -Q 2
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2 python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
python3 ./test.py -f 2-query/interp.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery.py -Q 2 # python3 ./test.py -f 2-query/nestedQuery.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2 # python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
@ -457,3 +460,4 @@ python3 ./test.py -f 2-query/max_partition.py -Q 3
python3 ./test.py -f 2-query/last_row.py -Q 3 python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
python3 ./test.py -f 2-query/sml.py -Q 3 python3 ./test.py -f 2-query/sml.py -Q 3
python3 ./test.py -f 2-query/interp.py -Q 3
@ -280,7 +280,7 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) {
static void shellInitArgs(int argc, char *argv[]) { static void shellInitArgs(int argc, char *argv[]) {
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
if (strncmp(argv[i], "-p", 2) == 0) { if (strncmp(argv[i], "-p", 2) == 0) {
// printf(shell.info.clientVersion, tsOsName, taos_get_client_info()); // printf(shell.info.clientVersion, taos_get_client_info());
if (strlen(argv[i]) == 2) { if (strlen(argv[i]) == 2) {
printf("Enter password: "); printf("Enter password: ");
taosSetConsoleEcho(false); taosSetConsoleEcho(false);
@ -389,8 +389,8 @@ static int32_t shellCheckArgs() {
int32_t shellParseArgs(int32_t argc, char *argv[]) { int32_t shellParseArgs(int32_t argc, char *argv[]) {
shellInitArgs(argc, argv); shellInitArgs(argc, argv);
shell.info.clientVersion = shell.info.clientVersion =
"Welcome to the TDengine shell from %s, Client Version:%s\r\n" "Welcome to the TDengine Command Line Interface, Client Version:%s\r\n"
"Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.\r\n\r\n"; "Copyright (c) 2022 by TDengine, all rights reserved.\r\n\r\n";
shell.info.promptHeader = TAOS_CONSOLE_PROMPT_HEADER; shell.info.promptHeader = TAOS_CONSOLE_PROMPT_HEADER;
shell.info.promptContinue = TAOS_CONSOLE_PROMPT_CONTINUE; shell.info.promptContinue = TAOS_CONSOLE_PROMPT_CONTINUE;
shell.info.promptSize = 6; shell.info.promptSize = 6;
@ -1001,7 +1001,7 @@ void *shellThreadLoop(void *arg) {
} }
int32_t shellExecute() { int32_t shellExecute() {
printf(shell.info.clientVersion, shell.info.osname, taos_get_client_info()); printf(shell.info.clientVersion, taos_get_client_info());
fflush(stdout); fflush(stdout);
SShellArgs *pArgs = &shell.args; SShellArgs *pArgs = &shell.args;