Merge branch 'main' of https://github.com/taosdata/TDengine into test/main/TD-26192
This commit is contained in: commit 5d1eac3c2e
@@ -0,0 +1,5 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+Please submit CVE reports to https://github.com/taosdata/TDengine/security/advisories.
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.0)
-set(CMAKE_VERBOSE_MAKEFILE ON)
+set(CMAKE_VERBOSE_MAKEFILE FALSE)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)

 #set output directory
@@ -4,11 +4,11 @@ description: This document introduces the major features, competitive advantages
 toc_max_heading_level: 2
 ---

-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is a big data platform designed and optimized for IoT (Internet of Things) and Industrial Internet. It can safely and effectively converge, store, process, and distribute the high volume of data (TB or even PB scale) generated every day by large numbers of devices and data acquisition units, monitor and alert on business operation status in real time, and provide real-time business insight. The core component of TDengine is TDengine OSS, a high-performance, open-source, cloud-native, and simplified time-series database.

 This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high-level overview of TDengine.

-## Major Features
+## Major Features of TDengine OSS

 The major features are listed below:
@@ -132,3 +132,9 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 - [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
 - [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)

+
+## Products
+
+There are two products offered by TDengine: TDengine Enterprise and TDengine Cloud. For details, please refer to
+- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
+- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
@@ -74,27 +74,27 @@ Provides information about the cluster.

 Provides information about user-created databases. Similar to SHOW DATABASES.

 | #   |      **Column**      |  **Data Type**   | **Description**                                  |
 | --- | :------------------: | ---------------- | ------------------------------------------------ |
-| 1   | name                 | BINARY(32)       | Database name |
+| 1   | name                 | VARCHAR(64)      | Database name |
 | 2   | create_time          | TIMESTAMP        | Creation time |
 | 3   | ntables              | INT              | Number of standard tables and subtables (not including supertables) |
 | 4   | vgroups              | INT              | Number of vgroups. Note that `vgroups` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 6   | replica              | INT              | Number of replicas. Note that `replica` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 7   | strict               | BINARY(4)        | Obsolete |
-| 8   | duration             | INT              | Duration for storage of single files. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 9   | keep                 | INT              | Data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 7   | strict               | VARCHAR(4)       | Obsolete |
+| 8   | duration             | VARCHAR(10)      | Duration for storage of single files. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 9   | keep                 | VARCHAR(32)      | Data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 10  | buffer               | INT              | Write cache size per vnode, in MB. Note that `buffer` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 11  | pagesize             | INT              | Page size of the vnode metadata storage engine, in KB. Note that `pagesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 12  | pages                | INT              | Number of pages in the metadata storage engine per vnode. Note that `pages` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 13  | minrows              | INT              | Minimum number of records per file block. Note that `minrows` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 14  | maxrows              | INT              | Maximum number of records per file block. Note that `maxrows` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 15  | comp                 | INT              | Compression method. Note that `comp` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 16  | precision            | BINARY(2)        | Time precision. Note that `precision` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 17  | status               | BINARY(10)       | Current database status |
-| 18  | retentions           | BINARY(60)       | Aggregation interval and retention period. Note that `retentions` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 16  | precision            | VARCHAR(2)       | Time precision. Note that `precision` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 17  | status               | VARCHAR(10)      | Current database status |
+| 18  | retentions           | VARCHAR(60)      | Aggregation interval and retention period. Note that `retentions` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 19  | single_stable        | BOOL             | Whether the database can contain multiple supertables. Note that `single_stable` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 20  | cachemodel           | BINARY(60)       | Caching method for the newest data. Note that `cachemodel` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 20  | cachemodel           | VARCHAR(60)      | Caching method for the newest data. Note that `cachemodel` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 21  | cachesize            | INT              | Memory per vnode used for caching the newest data. Note that `cachesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 22  | wal_level            | INT              | WAL level. Note that `wal_level` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 23  | wal_fsync_period     | INT              | Interval at which WAL is written to disk. Note that `wal_fsync_period` is a TDengine keyword and must be escaped with ` when used as a column name. |
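As a hedged illustration of the keyword-escaping rule noted in the table above, here is a minimal sketch using the TDengine C client (taos.h). The host, credentials, and port are assumptions for a default local deployment and are not part of this patch.

```
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Assumed defaults for a local server; adjust for a real deployment.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  // Keyword columns such as `vgroups`, `replica`, and `keep` must be
  // backtick-escaped, as the table above notes.
  TAOS_RES *res = taos_query(conn,
      "SELECT name, `vgroups`, `replica`, `keep` FROM information_schema.ins_databases");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      // VARCHAR values are not guaranteed to be null-terminated; use lengths.
      int32_t *lens = taos_fetch_lengths(res);
      printf("database: %.*s\n", lens[0], (const char *)row[0]);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```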
@@ -4,20 +4,14 @@ description: A brief introduction to the major features of TDengine
 toc_max_heading_level: 2
 ---

-TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, Industrial Internet, finance, IT operations, and other scenarios. TDengine's code, including its cluster feature, is open source under GNU AGPL v3.0. Beyond the core time-series database features, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other functions to reduce system complexity and the cost of development and operations.
+TDengine is a big data platform designed and optimized for IoT, Industrial Internet, and similar scenarios. It can safely and efficiently aggregate, store, analyze, and distribute the data (up to TB or even PB scale) generated every day by large numbers of devices and data collectors, monitor and alert on business operation status in real time, and provide real-time business insight. Its core module is TDengine OSS, a high-performance, cloud-native, and simplified time-series database whose cluster feature is open source.

-This chapter introduces TDengine's main products and features, competitive advantages, applicable scenarios, and comparison tests against other databases, so that you can get an overall picture of TDengine.
-
-## Main Products
-
-TDengine has three main products: TDengine Enterprise, TDengine Cloud, and TDengine OSS. For their definitions, please refer to
-- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
-- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
-- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
+This section introduces TDengine OSS's main features, competitive advantages, applicable scenarios, and comparison tests against other databases, so that you can get an overall picture of TDengine OSS.

 ## Major Features

-The major features of TDengine are as follows:
+The major features of TDengine OSS are as follows:

 1. Data ingestion, with support for
 - [SQL writing](../develop/insert-data/sql-writing)
@@ -150,3 +144,10 @@ The major features of TDengine are as follows:
 - [TDengine vs. InfluxDB: write performance showdown](https://www.taosdata.com/2021/11/05/3248.html)
 - [TDengine vs. InfluxDB query performance comparison report](https://www.taosdata.com/2022/02/22/5969.html)
 - [Comparison test report of TDengine vs. InfluxDB, OpenTSDB, Cassandra, MySQL, ClickHouse, and other databases](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
+
+
+## Main Products
+
+TDengine has two main products: TDengine Enterprise and TDengine Cloud. For their definitions, please refer to
+- [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
+- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
@@ -76,25 +76,25 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides access to

 | #   |      **Column**      |  **Data Type**   | **Description**                                  |
 | --- | :------------------: | ---------------- | ------------------------------------------------ |
-| 1   | name                 | BINARY(32)       | Database name |
+| 1   | name                 | VARCHAR(64)      | Database name |
 | 2   | create_time          | TIMESTAMP        | Creation time |
 | 3   | ntables              | INT              | Number of tables in the database, including subtables and normal tables but not supertables |
 | 4   | vgroups              | INT              | Number of vgroups in the database. Note that `vgroups` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 6   | replica              | INT              | Number of replicas. Note that `replica` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 7   | strict               | BINARY(4)        | Obsolete parameter |
-| 8   | duration             | INT              | Time span of data stored in a single file. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 9   | keep                 | INT              | Data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 7   | strict               | VARCHAR(4)       | Obsolete parameter |
+| 8   | duration             | VARCHAR(10)      | Time span of data stored in a single file. Note that `duration` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 9   | keep                 | VARCHAR(32)      | Data retention period. Note that `keep` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 10  | buffer               | INT              | Size of the write cache block per vnode, in MB. Note that `buffer` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 11  | pagesize             | INT              | Page size of the metadata storage engine in each vnode, in KB. Note that `pagesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 12  | pages                | INT              | Number of cached pages in the metadata storage engine per vnode. Note that `pages` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 13  | minrows              | INT              | Minimum number of records in a file block. Note that `minrows` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 14  | maxrows              | INT              | Maximum number of records in a file block. Note that `maxrows` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 15  | comp                 | INT              | Data compression method. Note that `comp` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 16  | precision            | BINARY(2)        | Time precision. Note that `precision` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 17  | status               | BINARY(10)       | Database status |
-| 18  | retentions           | BINARY(60)       | Aggregation interval and retention period of the data. Note that `retentions` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 16  | precision            | VARCHAR(2)       | Time precision. Note that `precision` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 17  | status               | VARCHAR(10)      | Database status |
+| 18  | retentions           | VARCHAR(60)      | Aggregation interval and retention period of the data. Note that `retentions` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 19  | single_stable        | BOOL             | Whether only one supertable can be created in this database. Note that `single_stable` is a TDengine keyword and must be escaped with ` when used as a column name. |
-| 20  | cachemodel           | BINARY(60)       | Whether the latest data of subtables is cached in memory. Note that `cachemodel` is a TDengine keyword and must be escaped with ` when used as a column name. |
+| 20  | cachemodel           | VARCHAR(60)      | Whether the latest data of subtables is cached in memory. Note that `cachemodel` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 21  | cachesize            | INT              | Memory size per vnode used to cache the latest data of subtables. Note that `cachesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 22  | wal_level            | INT              | WAL level. Note that `wal_level` is a TDengine keyword and must be escaped with ` when used as a column name. |
 | 23  | wal_fsync_period     | INT              | Interval at which WAL data is flushed to disk. Note that `wal_fsync_period` is a TDengine keyword and must be escaped with ` when used as a column name. |
@@ -44,17 +44,17 @@ OS name: "windows 10", version: "10.0", arch: "amd64", family: "windows"
 <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
   <!-- Path of the local Maven repository -->
   <localRepository>D:\apache-maven-localRepository</localRepository>

   <mirrors>
     <!-- Alibaba Cloud Maven mirror repository -->
     <mirror>
       <id>alimaven</id>
       <name>aliyun maven</name>
       <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
       <mirrorOf>central</mirrorOf>
     </mirror>
   </mirrors>

 <profiles>
@@ -126,7 +126,7 @@ https://www.taosdata.com/cn/all-downloads/
 Edit the client's hosts file (C:\Windows\System32\drivers\etc\hosts) and add the server's hostname and IP address to it:

 ```
 192.168.236.136 td01
 ```

 Once configured, use the TDengine CLI from the command line to connect to the server.
@@ -260,7 +260,7 @@ typedef struct SStreamTaskId {
 typedef struct SCheckpointInfo {
   int64_t checkpointId;
   int64_t checkpointVer;    // latest checkpointId version
-  int64_t currentVer;       // current offset in WAL, not serialized
+  int64_t nextProcessVer;   // current offset in WAL, not serialized
 } SCheckpointInfo;

 typedef struct SStreamStatus {
@@ -312,12 +312,27 @@ typedef struct STaskSchedInfo {
   void* pTimer;
 } STaskSchedInfo;

+typedef struct SSinkTaskRecorder {
+  int64_t numOfSubmit;
+  int64_t numOfBlocks;
+  int64_t numOfRows;
+} SSinkTaskRecorder;
+
 typedef struct {
+  int64_t created;
   int64_t init;
   int64_t step1Start;
   int64_t step2Start;
+  int64_t sinkStart;
 } STaskTimestamp;

+typedef struct STokenBucket {
+  int32_t capacity;      // total capacity
+  int64_t fillTimestamp; // fill timestamp
+  int32_t numOfToken;    // total available tokens
+  int32_t rate;          // number of tokens per second
+} STokenBucket;
+
 struct SStreamTask {
   int64_t       ver;
   SStreamTaskId id;
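The new STokenBucket fields read like a standard token-bucket rate limiter for the sink path. Below is a hedged sketch of how such a bucket might be refilled and consumed; the helper name tokenBucketTryConsume and the once-per-elapsed-second refill policy are illustrative assumptions, not taken from this patch.

```
// Illustrative only: refill based on elapsed seconds, then try to take one
// token. Field names match STokenBucket above; the policy is an assumption.
static bool tokenBucketTryConsume(STokenBucket* pBucket, int64_t nowMs) {
  int64_t elapsedMs = nowMs - pBucket->fillTimestamp;
  if (elapsedMs >= 1000) {
    int32_t refill = (int32_t)(elapsedMs / 1000) * pBucket->rate;
    int32_t total  = pBucket->numOfToken + refill;
    pBucket->numOfToken = (total > pBucket->capacity) ? pBucket->capacity : total;
    pBucket->fillTimestamp = nowMs;
  }
  if (pBucket->numOfToken > 0) {
    pBucket->numOfToken -= 1;  // one token per admitted unit of work
    return true;
  }
  return false;  // throttled; caller retries later
}
```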
@@ -345,6 +360,8 @@ struct SStreamTask {
     STaskSinkSma   smaSink;
     STaskSinkFetch fetchSink;
   };
+  SSinkTaskRecorder sinkRecorder;
+  STokenBucket      tokenBucket;

   void*   launchTaskTimer;
   SMsgCb* pMsgCb; // msg handle
@@ -683,7 +700,6 @@ int32_t streamSourceScanHistoryData(SStreamTask* pTask);
 int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);

 // agg level
-int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
 int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq,
                                           SRpcHandleInfo* pRpcInfo);
 int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);
@@ -192,7 +192,7 @@ int32_t walApplyVer(SWal *, int64_t ver);
 // int32_t walDataCorrupted(SWal*);

 // wal reader
-SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond);
+SWalReader *walOpenReader(SWal *, SWalFilterCond *pCond, int64_t id);
 void        walCloseReader(SWalReader *pRead);
 void        walReadReset(SWalReader *pReader);
 int32_t     walReadVer(SWalReader *pRead, int64_t ver);
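The new third parameter is a caller-supplied identifier that tags the reader in log output; the call sites changed later in this commit pass pTask->id.taskId, or 0 when no correlation is needed. A minimal usage sketch under that reading (variable names pWal, taskId, and ver are illustrative):

```
SWalFilterCond cond = {.deleteMsg = 1};  // also surface delete records
SWalReader *pReader = walOpenReader(pWal, &cond, taskId);  // id appears in wal log lines
if (pReader != NULL) {
  if (walReadVer(pReader, ver) == 0) {
    // consume the entry fetched for version `ver` ...
  }
  walCloseReader(pReader);
}
```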
@@ -33,17 +33,14 @@ adapterName="taosadapter"
 benchmarkName="taosBenchmark"
 dumpName="taosdump"
 demoName="taosdemo"
-xname="taosx"

 clientName2="taos"
 serverName2="${clientName2}d"
 configFile2="${clientName2}.cfg"
 productName2="TDengine"
 emailName2="taosdata.com"
-xname2="${clientName2}x"
 adapterName2="${clientName2}adapter"

-explorerName="${clientName2}-explorer"
 benchmarkName2="${clientName2}Benchmark"
 demoName2="${clientName2}demo"
 dumpName2="${clientName2}dump"
@@ -218,8 +215,6 @@ function install_bin() {
   ${csudo}rm -f ${bin_link_dir}/${demoName2} || :
   ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
   ${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
-  ${csudo}rm -f ${bin_link_dir}/${xname2} || :
-  ${csudo}rm -f ${bin_link_dir}/${explorerName} || :
   ${csudo}rm -f ${bin_link_dir}/set_core || :
   ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :

@@ -233,8 +228,6 @@ function install_bin() {
   [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${demoName2} || :
   [ -x ${install_main_dir}/bin/${benchmarkName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName2} ${bin_link_dir}/${benchmarkName2} || :
   [ -x ${install_main_dir}/bin/${dumpName2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${dumpName2} ${bin_link_dir}/${dumpName2} || :
-  [ -x ${install_main_dir}/bin/${xname2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${xname2} ${bin_link_dir}/${xname2} || :
-  [ -x ${install_main_dir}/bin/${explorerName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${explorerName} ${bin_link_dir}/${explorerName} || :
   [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -sf ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
   if [ "$clientName2" == "${clientName}" ]; then
     [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
@@ -703,26 +696,6 @@ function clean_service_on_systemd() {
   # if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
   #   ${csudo}rm -f ${service_config_dir}/${serverName2}.service
   # fi
-  x_service_config="${service_config_dir}/${xName2}.service"
-  if [ -e "$x_service_config" ]; then
-    if systemctl is-active --quiet ${xName2}; then
-      echo "${productName2} ${xName2} is running, stopping it..."
-      ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
-    fi
-    ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
-    ${csudo}rm -f ${x_service_config}
-  fi
-
-  explorer_service_config="${service_config_dir}/${explorerName2}.service"
-  if [ -e "$explorer_service_config" ]; then
-    if systemctl is-active --quiet ${explorerName2}; then
-      echo "${productName2} ${explorerName2} is running, stopping it..."
-      ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
-    fi
-    ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
-    ${csudo}rm -f ${explorer_service_config}
-    ${csudo}rm -f /etc/${clientName2}/explorer.toml
-  fi
 }

 function install_service_on_systemd() {
@@ -756,7 +756,8 @@ int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interva
 }

 int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {
-  if (pInterval->sliding == 0 && pInterval->interval == 0) {
+  if (pInterval->sliding == 0) {
+    ASSERT(pInterval->interval == 0);
     return ts;
   }

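For context, the invariant this change encodes is that a zero sliding width implies a zero interval, so the early return covers only the degenerate no-window case. The essence of truncation for plain fixed-width windows is aligning a timestamp down to a boundary; a minimal sketch of that idea follows (illustration only, under the assumption of a simple numeric sliding width; the real taosTimeTruncate also handles calendar units, offsets, and time precision):

```
// Illustrative helper, not part of this patch: align ts down to the nearest
// multiple of a fixed numeric sliding width expressed in the same unit as ts.
static int64_t truncateToSliding(int64_t ts, int64_t sliding) {
  if (sliding == 0) return ts;       // mirrors the early return above
  int64_t delta = ts % sliding;
  if (delta < 0) delta += sliding;   // keep alignment correct for negative ts
  return ts - delta;
}
```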
@@ -667,6 +667,7 @@ typedef struct {
   char name[TSDB_STREAM_FNAME_LEN];
   // ctl
   SRWLatch lock;
+
   // create info
   int64_t createTime;
   int64_t updateTime;
@@ -439,7 +439,7 @@ static int32_t mndProcessCreateIdxReq(SRpcMsg *pReq) {

   pDb = mndAcquireDbByStb(pMnode, createReq.stbName);
   if (pDb == NULL) {
-    terrno = TSDB_CODE_MND_INVALID_DB;
+    terrno = TSDB_CODE_MND_DB_NOT_EXIST;
     goto _OVER;
   }

@@ -259,7 +259,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
   if (pDb == NULL) {
     if (0 != strcmp(connReq.db, TSDB_INFORMATION_SCHEMA_DB) &&
         (0 != strcmp(connReq.db, TSDB_PERFORMANCE_SCHEMA_DB))) {
-      terrno = TSDB_CODE_MND_INVALID_DB;
+      terrno = TSDB_CODE_MND_DB_NOT_EXIST;
       mGError("user:%s, failed to login from %s while use db:%s since %s", pReq->info.conn.user, ip, connReq.db,
               terrstr());
       goto _OVER;
@@ -313,7 +313,7 @@ _CONNECT:
   sprintf(obj, "%s:%d", ip, pConn->port);

   char detail[1000] = {0};
   sprintf(detail, "connType:%d, db:%s, pid:%d, startTime:%" PRId64 ", sVer:%s, app:%s",
           connReq.connType, connReq.db, connReq.pid, connReq.startTime, connReq.sVer, connReq.app);

   auditRecord(pReq, pMnode->clusterId, "login", connReq.user, obj, detail);
@@ -2320,7 +2320,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {

   pDb = mndAcquireDbByStb(pMnode, alterReq.name);
   if (pDb == NULL) {
-    terrno = TSDB_CODE_MND_INVALID_DB;
+    terrno = TSDB_CODE_MND_DB_NOT_EXIST;
     goto _OVER;
   }

@@ -3616,7 +3616,7 @@ static int32_t mndProcessCreateIndexReq(SRpcMsg *pReq) {

   pDb = mndAcquireDbByStb(pMnode, tagIdxReq.dbFName);
   if (pDb == NULL) {
-    terrno = TSDB_CODE_MND_INVALID_DB;
+    terrno = TSDB_CODE_MND_DB_NOT_EXIST;
     goto _OVER;
   }

@@ -1111,7 +1111,7 @@ static int32_t mndAddStreamCheckpointToTrans(STrans *pTrans, SStreamObj *pStream

   pStream->checkpointId = checkpointId;
   pStream->checkpointFreq = taosGetTimestampMs();
-  atomic_store_64(&pStream->currentTick, 0);
+  pStream->currentTick = 0;
   // 3. commit log: stream checkpoint info
   pStream->version = pStream->version + 1;

@@ -771,6 +771,29 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
   return 0;
 }

+static int32_t sendDeleteSubToVnode(SMqSubscribeObj *pSub, STrans *pTrans){
+  // iterate over all vnodes to delete the handle
+  int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
+  for (int32_t i = 0; i < sz; i++) {
+    SMqVgEp       *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
+    SMqVDeleteReq *pReq = taosMemoryCalloc(1, sizeof(SMqVDeleteReq));
+    pReq->head.vgId = htonl(pVgEp->vgId);
+    pReq->vgId = pVgEp->vgId;
+    pReq->consumerId = -1;
+    memcpy(pReq->subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
+    STransAction action = {0};
+    action.epSet = pVgEp->epSet;
+    action.pCont = pReq;
+    action.contLen = sizeof(SMqVDeleteReq);
+    action.msgType = TDMT_VND_TMQ_DELETE_SUB;
+    if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+      taosMemoryFree(pReq);
+      return -1;
+    }
+  }
+  return 0;
+}
+
 static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) {
   SMnode         *pMnode = pMsg->info.node;
   SMDropCgroupReq dropReq = {0};
@@ -831,6 +854,11 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) {

   mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic);

+  code = sendDeleteSubToVnode(pSub, pTrans);
+  if (code != 0) {
+    goto end;
+  }
+
   if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) {
     mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr());
     code = -1;
@@ -1113,25 +1141,11 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName)
       sdbCancelFetch(pSdb, pIter);
       return -1;
     }
-    int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
-    for (int32_t i = 0; i < sz; i++) {
-      SMqVgEp       *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
-      SMqVDeleteReq *pReq = taosMemoryCalloc(1, sizeof(SMqVDeleteReq));
-      pReq->head.vgId = htonl(pVgEp->vgId);
-      pReq->vgId = pVgEp->vgId;
-      pReq->consumerId = -1;
-      memcpy(pReq->subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
-      STransAction action = {0};
-      action.epSet = pVgEp->epSet;
-      action.pCont = pReq;
-      action.contLen = sizeof(SMqVDeleteReq);
-      action.msgType = TDMT_VND_TMQ_DELETE_SUB;
-      if (mndTransAppendRedoAction(pTrans, &action) != 0) {
-        taosMemoryFree(pReq);
-        sdbRelease(pSdb, pSub);
-        sdbCancelFetch(pSdb, pIter);
-        return -1;
-      }
+    if (sendDeleteSubToVnode(pSub, pTrans) != 0) {
+      sdbRelease(pSdb, pSub);
+      sdbCancelFetch(pSdb, pIter);
+      return -1;
     }

     if (mndSetDropSubRedoLogs(pMnode, pTrans, pSub) < 0) {
@@ -65,7 +65,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) {
   connId = connectRsp.connId;
 }

-TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) {
+TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) {
   char passwd[] = "taosdata";
   char secretEncrypt[TSDB_PASSWORD_LEN + 1] = {0};
   taosEncryptPass_c((uint8_t*)passwd, strlen(passwd), secretEncrypt);
@@ -73,7 +73,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) {
   SConnectReq connectReq = {0};
   connectReq.pid = 1234;
   strcpy(connectReq.app, "mnode_test_profile");
-  strcpy(connectReq.db, "invalid_db");
+  strcpy(connectReq.db, "not_exist_db");
   strcpy(connectReq.user, "root");
   strcpy(connectReq.passwd, secretEncrypt);
   strcpy(connectReq.sVer, version);
@@ -84,7 +84,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_InvalidDB) {

   SRpcMsg* pRsp = test.SendReq(TDMT_MND_CONNECT, pReq, contLen);
   ASSERT_NE(pRsp, nullptr);
-  ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB);
+  ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
   ASSERT_EQ(pRsp->contLen, 0);
 }

@@ -448,7 +448,7 @@ TEST_F(MndTestStb, 02_Alter_Stb_AddTag) {
   {
     void*    pReq = BuildAlterStbAddTagReq("1.d3.stb", "tag4", &contLen);
     SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
-    ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB);
+    ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
   }

   {
@@ -665,7 +665,7 @@ TEST_F(MndTestStb, 06_Alter_Stb_AddColumn) {
   {
     void*    pReq = BuildAlterStbAddColumnReq("1.d7.stb", "tag4", &contLen);
    SRpcMsg* pRsp = test.SendReq(TDMT_MND_ALTER_STB, pReq, contLen);
-    ASSERT_EQ(pRsp->code, TSDB_CODE_MND_INVALID_DB);
+    ASSERT_EQ(pRsp->code, TSDB_CODE_MND_DB_NOT_EXIST);
   }

   {
@@ -86,18 +86,18 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
   SCheckpointInfo* pChkInfo = &pTask->chkInfo;
   // checkpoint ver is the kept version, handled data should be the next version.
   if (pTask->chkInfo.checkpointId != 0) {
-    pTask->chkInfo.currentVer = pTask->chkInfo.checkpointVer + 1;
-    qInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " currentVer:%" PRId64, pTask->id.idStr,
-          pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer);
+    pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
+    qInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " nextProcessVer:%" PRId64, pTask->id.idStr,
+          pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
   } else {
-    if (pTask->chkInfo.currentVer == -1) {
-      pTask->chkInfo.currentVer = 0;
+    if (pTask->chkInfo.nextProcessVer == -1) {
+      pTask->chkInfo.nextProcessVer = 0;
     }
   }

-  qInfo("snode:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " currentVer:%" PRId64
+  qInfo("snode:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64
        " child id:%d, level:%d, status:%s fill-history:%d, trigger:%" PRId64 " ms",
-        SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer,
+        SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
         pTask->info.selfChildId, pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus),
         pTask->info.fillHistory, pTask->info.triggerParam);

@@ -250,7 +250,7 @@ int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
 int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
 int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
 int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
-int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, SRpcMsg* pMsg);
+int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
 int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
@@ -52,7 +52,9 @@ int metaFinishCommit(SMeta *pMeta, TXN *txn) { return tdbPostCommit(pMeta->pEnv
 int metaPrepareAsyncCommit(SMeta *pMeta) {
   // return tdbPrepareAsyncCommit(pMeta->pEnv, pMeta->txn);
   int code = 0;
+  metaWLock(pMeta);
   code = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn);
+  metaULock(pMeta);
   code = tdbCommit(pMeta->pEnv, pMeta->txn);

   return code;
@@ -931,21 +931,16 @@ end:
 }

 int metaTtlFindExpired(SMeta *pMeta, int64_t timePointMs, SArray *tbUids, int32_t ttlDropMaxCount) {
-  metaWLock(pMeta);
-  int ret = ttlMgrFlush(pMeta->pTtlMgr, pMeta->txn);
-  if (ret != 0) {
-    metaError("ttl failed to flush, ret:%d", ret);
-    goto _err;
-  }
+  metaRLock(pMeta);
+  int ret = ttlMgrFindExpired(pMeta->pTtlMgr, timePointMs, tbUids, ttlDropMaxCount);
+  metaULock(pMeta);

-  ret = ttlMgrFindExpired(pMeta->pTtlMgr, timePointMs, tbUids, ttlDropMaxCount);
   if (ret != 0) {
     metaError("ttl failed to find expired table, ret:%d", ret);
-    goto _err;
   }

-_err:
-  metaULock(pMeta);
   return ret;
 }

@@ -299,7 +299,7 @@ int ttlMgrInsertTtl(STtlManger *pTtlMgr, const STtlUpdTtlCtx *updCtx) {
   ret = 0;

 _out:
-  metaDebug("%s, ttl mgr insert ttl, uid: %" PRId64 ", ctime: %" PRId64 ", ttlDays: %" PRId64, pTtlMgr->logPrefix,
+  metaTrace("%s, ttl mgr insert ttl, uid: %" PRId64 ", ctime: %" PRId64 ", ttlDays: %" PRId64, pTtlMgr->logPrefix,
            updCtx->uid, updCtx->changeTimeMs, updCtx->ttlDays);

   return ret;
@@ -323,7 +323,7 @@ int ttlMgrDeleteTtl(STtlManger *pTtlMgr, const STtlDelTtlCtx *delCtx) {
   ret = 0;

 _out:
-  metaDebug("%s, ttl mgr delete ttl, uid: %" PRId64, pTtlMgr->logPrefix, delCtx->uid);
+  metaTrace("%s, ttl mgr delete ttl, uid: %" PRId64, pTtlMgr->logPrefix, delCtx->uid);

   return ret;
 }
@@ -363,17 +363,37 @@ int ttlMgrUpdateChangeTime(STtlManger *pTtlMgr, const STtlUpdCtimeCtx *pUpdCtime
   ret = 0;

 _out:
-  metaDebug("%s, ttl mgr update ctime, uid: %" PRId64 ", ctime: %" PRId64, pTtlMgr->logPrefix, pUpdCtimeCtx->uid,
+  metaTrace("%s, ttl mgr update ctime, uid: %" PRId64 ", ctime: %" PRId64, pTtlMgr->logPrefix, pUpdCtimeCtx->uid,
            pUpdCtimeCtx->changeTimeMs);

   return ret;
 }

 int ttlMgrFindExpired(STtlManger *pTtlMgr, int64_t timePointMs, SArray *pTbUids, int32_t ttlDropMaxCount) {
+  int ret = -1;
+
   STtlIdxKeyV1 ttlKey = {.deleteTimeMs = timePointMs, .uid = INT64_MAX};
   STtlExpiredCtx expiredCtx = {
       .ttlDropMaxCount = ttlDropMaxCount, .count = 0, .expiredKey = ttlKey, .pTbUids = pTbUids};
-  return tdbTbTraversal(pTtlMgr->pTtlIdx, &expiredCtx, ttlMgrFindExpiredOneEntry);
+  ret = tdbTbTraversal(pTtlMgr->pTtlIdx, &expiredCtx, ttlMgrFindExpiredOneEntry);
+  if (ret) {
+    goto _out;
+  }
+
+  size_t vIdx = 0;
+  for (size_t i = 0; i < pTbUids->size; i++) {
+    tb_uid_t *pUid = taosArrayGet(pTbUids, i);
+    if (taosHashGet(pTtlMgr->pDirtyUids, pUid, sizeof(tb_uid_t)) == NULL) {
+      // not in dirty && expired in tdb => must be expired
+      taosArraySet(pTbUids, vIdx, pUid);
+      vIdx++;
+    }
+  }
+
+  taosArrayPopTailBatch(pTbUids, pTbUids->size - vIdx);
+
+_out:
+  return ret;
 }

 static bool ttlMgrNeedFlush(STtlManger *pTtlMgr) {
@@ -819,7 +819,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {

   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     SWalFilterCond cond = {.deleteMsg = 1};  // delete msgs are also extracted from wal files
-    pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond);
+    pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
   }

   // reset the task status from unfinished transaction
@@ -834,14 +834,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {

   // checkpoint ver is the kept version, handled data should be the next version.
   if (pTask->chkInfo.checkpointId != 0) {
-    pTask->chkInfo.currentVer = pTask->chkInfo.checkpointVer + 1;
+    pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
     tqInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " currentVer:%" PRId64, pTask->id.idStr,
-           pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer);
+           pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
   }

   tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 " currentVer:%" PRId64
          " child id:%d, level:%d, status:%s fill-history:%d, trigger:%" PRId64 " ms",
-         vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->currentVer,
+         vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
          pTask->info.selfChildId, pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus),
          pTask->info.fillHistory, pTask->info.triggerParam);

@@ -890,9 +890,10 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
   return streamSendCheckRsp(pTq->pStreamMeta, &req, &rsp, &pMsg->info, taskId);
 }

-int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, SRpcMsg* pMsg) {
+int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
   char*   pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t len = pMsg->contLen - sizeof(SMsgHead);
+  int32_t vgId = pTq->pStreamMeta->vgId;

   int32_t code;
   SStreamTaskCheckRsp rsp;
@@ -901,7 +902,9 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, SRpcMsg* pMsg) {
   tDecoderInit(&decoder, (uint8_t*)pReq, len);
   code = tDecodeStreamTaskCheckRsp(&decoder, &rsp);
   if (code < 0) {
+    terrno = TSDB_CODE_INVALID_MSG;
     tDecoderClear(&decoder);
+    tqError("vgId:%d failed to parse check rsp msg, code:%s", vgId, tstrerror(terrno));
     return -1;
   }

@@ -1087,14 +1090,17 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
     }

     // now we can stop the stream task execution
-    streamTaskHalt(pStreamTask);

+    int64_t latestVer = 0;
+    taosThreadMutexLock(&pStreamTask->lock);
+    streamTaskHalt(pStreamTask);
     tqDebug("s-task:%s level:%d sched-status:%d is halt by fill-history task:%s", pStreamTask->id.idStr,
             pStreamTask->info.taskLevel, pStreamTask->status.schedStatus, id);
+    latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
+    taosThreadMutexUnlock(&pStreamTask->lock);

     // if it's a source task, extract the last version in wal.
     pRange = &pTask->dataRange.range;
-    int64_t latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
     done = streamHistoryTaskSetVerRangeStep2(pTask, latestVer);

     if (done) {
|
||||||
|
|
||||||
int64_t dstVer = pTask->dataRange.range.minVer - 1;
|
int64_t dstVer = pTask->dataRange.range.minVer - 1;
|
||||||
|
|
||||||
pTask->chkInfo.currentVer = dstVer;
|
pTask->chkInfo.nextProcessVer = dstVer;
|
||||||
walReaderSetSkipToVersion(pTask->exec.pWalReader, dstVer);
|
walReaderSetSkipToVersion(pTask->exec.pWalReader, dstVer);
|
||||||
tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer,
|
tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer,
|
||||||
pTask->dataRange.range.maxVer, TASK_SCHED_STATUS__INACTIVE);
|
pTask->dataRange.range.maxVer, TASK_SCHED_STATUS__INACTIVE);
|
||||||
|
@ -1123,7 +1129,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||||
|
|
||||||
// set the fill-history task to be normal
|
// set the fill-history task to be normal
|
||||||
if (pTask->info.fillHistory == 1) {
|
if (pTask->info.fillHistory == 1 && !streamTaskShouldStop(&pTask->status)) {
|
||||||
streamSetStatusNormal(pTask);
|
streamSetStatusNormal(pTask);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1148,7 +1154,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
tqDebug(
|
tqDebug(
|
||||||
"s-task:%s scan-history in stream time window completed, now start to handle data from WAL, start "
|
"s-task:%s scan-history in stream time window completed, now start to handle data from WAL, start "
|
||||||
"ver:%" PRId64 ", window:%" PRId64 " - %" PRId64,
|
"ver:%" PRId64 ", window:%" PRId64 " - %" PRId64,
|
||||||
id, pTask->chkInfo.currentVer, pWindow->skey, pWindow->ekey);
|
id, pTask->chkInfo.nextProcessVer, pWindow->skey, pWindow->ekey);
|
||||||
}
|
}
|
||||||
|
|
||||||
code = streamTaskScanHistoryDataComplete(pTask);
|
code = streamTaskScanHistoryDataComplete(pTask);
|
||||||
|
@ -1283,8 +1289,8 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
// even in halt status, the data in inputQ must be processed
|
// even in halt status, the data in inputQ must be processed
|
||||||
int8_t st = pTask->status.taskStatus;
|
int8_t st = pTask->status.taskStatus;
|
||||||
if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK) {
|
if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK) {
|
||||||
tqDebug("vgId:%d s-task:%s start to process block from inputQ, last chk point:%" PRId64, vgId, pTask->id.idStr,
|
tqDebug("vgId:%d s-task:%s start to process block from inputQ, next checked ver:%" PRId64, vgId, pTask->id.idStr,
|
||||||
pTask->chkInfo.currentVer);
|
pTask->chkInfo.nextProcessVer);
|
||||||
streamProcessRunReq(pTask);
|
streamProcessRunReq(pTask);
|
||||||
} else {
|
} else {
|
||||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||||
|
@ -1423,10 +1429,10 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
|
||||||
walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
|
walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
|
||||||
tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
|
tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
|
||||||
", schedStatus:%d",
|
", schedStatus:%d",
|
||||||
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
|
vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
|
||||||
} else { // from the previous paused version and go on
|
} else { // from the previous paused version and go on
|
||||||
tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
|
tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
|
||||||
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
|
vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory &&
|
if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory &&
|
||||||
|
|
|
@@ -312,14 +312,14 @@ static int buildHandle(STQ* pTq, STqHandle* handle){
       return -1;
     }
   } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
-    handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
+    handle->pWalReader = walOpenReader(pVnode->pWal, NULL, 0);
     handle->execHandle.pTqReader = tqReaderOpen(pVnode);

     buildSnapContext(reader.vnode, reader.version, 0, handle->execHandle.subType, handle->fetchMeta,
                      (SSnapContext**)(&reader.sContext));
     handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
   } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
-    handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
+    handle->pWalReader = walOpenReader(pVnode->pWal, NULL, 0);

     if(handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) {
       if (nodesStringToNode(handle->execHandle.execTb.qmsg, &handle->execHandle.execTb.node) != 0) {
@@ -187,25 +187,26 @@ end:
 int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) {
   int32_t code = -1;
   int32_t vgId = TD_VID(pTq->pVnode);
+  int64_t id = pHandle->pWalReader->readerId;

   int64_t offset = *fetchOffset;
   int64_t lastVer = walGetLastVer(pHandle->pWalReader->pWal);
   int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal);
   int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal);

-  wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64,
-         vgId, offset, lastVer, committedVer, appliedVer);
+  wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64", 0x%"PRIx64,
+         vgId, offset, lastVer, committedVer, appliedVer, id);

   while (offset <= appliedVer) {
     if (walFetchHead(pHandle->pWalReader, offset) < 0) {
       tqDebug("tmq poll: consumer:0x%" PRIx64 ", (epoch %d) vgId:%d offset %" PRId64
-              ", no more log to return, reqId:0x%" PRIx64,
-              pHandle->consumerId, pHandle->epoch, vgId, offset, reqId);
+              ", no more log to return, reqId:0x%" PRIx64 " 0x%" PRIx64,
+              pHandle->consumerId, pHandle->epoch, vgId, offset, reqId, id);
       goto END;
     }

-    tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64, vgId,
-            pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId);
+    tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64" 0x%"PRIx64, vgId,
+            pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);

     if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) {
       code = walFetchBody(pHandle->pWalReader);
@@ -250,7 +251,7 @@ STqReader* tqReaderOpen(SVnode* pVnode) {
     return NULL;
   }

-  pReader->pWalReader = walOpenReader(pVnode->pWal, NULL);
+  pReader->pWalReader = walOpenReader(pVnode->pWal, NULL, 0);
   if (pReader->pWalReader == NULL) {
     taosMemoryFree(pReader);
     return NULL;
@@ -491,11 +492,11 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {

     void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
     if (ret != NULL) {
-      tqDebug("block found, ver:%" PRId64 ", uid:%" PRId64", %s", pReader->msg.ver, pSubmitTbData->uid, idstr);
+      tqDebug("block found, ver:%" PRId64 ", uid:%" PRId64 ", %s", pReader->msg.ver, pSubmitTbData->uid, idstr);
       return true;
     } else {
-      tqInfo("discard submit block, uid:%" PRId64 ", total queried tables:%d continue %s", pSubmitTbData->uid,
+      tqDebug("discard submit block, uid:%" PRId64 ", total queried tables:%d continue %s", pSubmitTbData->uid,
              taosHashGetSize(pReader->tbIdHash), idstr);
     }

     pReader->nextBlk++;
@@ -24,10 +24,13 @@ typedef struct STableSinkInfo {
   tstr    name;
 } STableSinkInfo;

-static int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName, int64_t suid,
-                                 SSDataBlock* pDataBlock, SStreamTask* pTask);
-static int32_t doSinkDeleteBlock(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
+static int32_t doBuildSubmitFromResBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName, int64_t suid,
+                                         SSDataBlock* pDataBlock, SStreamTask* pTask, SSubmitTbData* pTableData);
+static int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
                                        int64_t suid);
+static int32_t tqBuildSubmitReq(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen);
+static void    fillBucket(STokenBucket* pBucket);
+static bool    hasAvailableToken(STokenBucket* pBucket);

 int32_t tqBuildDeleteReq(const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
                          const char* pIdStr) {
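Note: the new fillBucket/hasAvailableToken forward declarations suggest the sink path is gaining token-bucket throttling; their bodies are not part of this hunk. For readers unfamiliar with the pattern, a generic sketch follows — STokenBucketSketch is a hypothetical reduced struct, not the TDengine definition:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical reduced token bucket; the real STokenBucket lives elsewhere in the tree. */
    typedef struct {
      int32_t capacity;       /* maximum number of tokens the bucket can hold */
      int32_t numOfToken;     /* tokens currently available */
      int64_t fillTimestamp;  /* last refill time, in ms */
    } STokenBucketSketch;

    /* Refill the bucket to capacity once at least one second has elapsed. */
    static void fillBucketSketch(STokenBucketSketch* pBucket, int64_t nowMs) {
      if (nowMs - pBucket->fillTimestamp >= 1000) {
        pBucket->numOfToken = pBucket->capacity;
        pBucket->fillTimestamp = nowMs;
      }
    }

    /* Consume one token if available; callers skip work when this returns false. */
    static bool hasAvailableTokenSketch(STokenBucketSketch* pBucket) {
      if (pBucket->numOfToken > 0) {
        pBucket->numOfToken -= 1;
        return true;
      }
      return false;
    }

    int main(void) {
      STokenBucketSketch b = {.capacity = 10, .numOfToken = 0, .fillTimestamp = 0};
      fillBucketSketch(&b, 1000);
      return hasAvailableTokenSketch(&b) ? 0 : 1;
    }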
@@ -133,6 +136,148 @@ static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
   return TSDB_CODE_SUCCESS;
 }

+static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
+                                            int64_t suid) {
+  tqDebug("s-task:%s build create table msg", pTask->id.idStr);
+
+  STSchema* pTSchema = pTask->tbSink.pTSchema;
+  int32_t   rows = pDataBlock->info.rows;
+  SArray*   tagArray = NULL;
+  int32_t   code = 0;
+
+  SVCreateTbBatchReq reqs = {0};
+
+  SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
+  if (NULL == reqs.pArray) {
+    goto _end;
+  }
+
+  for (int32_t rowId = 0; rowId < rows; rowId++) {
+    SVCreateTbReq* pCreateTbReq = &((SVCreateTbReq){0});
+
+    // set const
+    pCreateTbReq->flags = 0;
+    pCreateTbReq->type = TSDB_CHILD_TABLE;
+    pCreateTbReq->ctb.suid = suid;
+
+    // set super table name
+    SName name = {0};
+    tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+    pCreateTbReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name));  // taosStrdup(stbFullName);
+
+    // set tag content
+    int32_t size = taosArrayGetSize(pDataBlock->pDataBlock);
+    if (size == 2) {
+      tagArray = taosArrayInit(1, sizeof(STagVal));
+      if (!tagArray) {
+        tdDestroySVCreateTbReq(pCreateTbReq);
+        goto _end;
+      }
+
+      STagVal tagVal = {
+          .cid = pTSchema->numOfCols + 1, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = pDataBlock->info.id.groupId};
+
+      taosArrayPush(tagArray, &tagVal);
+
+      // set tag name
+      SArray* tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
+      char    tagNameStr[TSDB_COL_NAME_LEN] = "group_id";
+      taosArrayPush(tagName, tagNameStr);
+      pCreateTbReq->ctb.tagName = tagName;
+    } else {
+      tagArray = taosArrayInit(size - 1, sizeof(STagVal));
+      if (!tagArray) {
+        tdDestroySVCreateTbReq(pCreateTbReq);
+        goto _end;
+      }
+
+      for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
+        SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);
+
+        STagVal tagVal = {.cid = pTSchema->numOfCols + step, .type = pTagData->info.type};
+        void*   pData = colDataGetData(pTagData, rowId);
+        if (colDataIsNull_s(pTagData, rowId)) {
+          continue;
+        } else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
+          tagVal.nData = varDataLen(pData);
+          tagVal.pData = (uint8_t*)varDataVal(pData);
+        } else {
+          memcpy(&tagVal.i64, pData, pTagData->info.bytes);
+        }
+        taosArrayPush(tagArray, &tagVal);
+      }
+    }
+    pCreateTbReq->ctb.tagNum = TMAX(size - UD_TAG_COLUMN_INDEX, 1);
+
+    STag* pTag = NULL;
+    tTagNew(tagArray, 1, false, &pTag);
+    tagArray = taosArrayDestroy(tagArray);
+    if (pTag == NULL) {
+      tdDestroySVCreateTbReq(pCreateTbReq);
+      code = TSDB_CODE_OUT_OF_MEMORY;
+      goto _end;
+    }
+
+    pCreateTbReq->ctb.pTag = (uint8_t*)pTag;
+
+    // set table name
+    if (!pDataBlock->info.parTbName[0]) {
+      SColumnInfoData* pGpIdColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX);
+
+      void* pGpIdData = colDataGetData(pGpIdColInfo, rowId);
+      pCreateTbReq->name = buildCtbNameByGroupId(stbFullName, *(uint64_t*)pGpIdData);
+    } else {
+      pCreateTbReq->name = taosStrdup(pDataBlock->info.parTbName);
+    }
+
+    taosArrayPush(reqs.pArray, pCreateTbReq);
+    tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name);
+  }
+
+  reqs.nReqs = taosArrayGetSize(reqs.pArray);
+  code = tqPutReqToQueue(pVnode, &reqs);
+  if (code != TSDB_CODE_SUCCESS) {
+    tqError("s-task:%s failed to send create table msg", pTask->id.idStr);
+  }
+
+_end:
+  taosArrayDestroy(tagArray);
+  taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
+  return code;
+}
+
+static int32_t doBuildSubmitAndSendMsg(SVnode* pVnode, SStreamTask* pTask, int32_t numOfBlocks, SSubmitReq2* pReq) {
+  const char* id = pTask->id.idStr;
+  int32_t     vgId = TD_VID(pVnode);
+  int32_t     len = 0;
+  void*       pBuf = NULL;
+
+  int32_t code = tqBuildSubmitReq(pReq, vgId, &pBuf, &len);
+  if (code != TSDB_CODE_SUCCESS) {
+    tqError("s-task:%s build submit msg failed, vgId:%d, code:%s", id, vgId, tstrerror(code));
+    return code;
+  }
+
+  SRpcMsg msg = {.msgType = TDMT_VND_SUBMIT, .pCont = pBuf, .contLen = len};
+  code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg);
+  if (code == TSDB_CODE_SUCCESS) {
+    tqDebug("s-task:%s vgId:%d send submit %d blocks into dstTables completed", id, vgId, numOfBlocks);
+  } else {
+    tqError("s-task:%s failed to put into write-queue since %s", id, terrstr());
+  }
+
+  pTask->sinkRecorder.numOfSubmit += 1;
+
+  if ((pTask->sinkRecorder.numOfSubmit % 5000) == 0) {
+    SSinkTaskRecorder* pRec = &pTask->sinkRecorder;
+    double             el = (taosGetTimestampMs() - pTask->tsInfo.sinkStart) / 1000.0;
+    tqInfo("s-task:%s vgId:%d write %" PRId64 " blocks (%" PRId64 " rows) in %" PRId64
+           " submit into dst table, duration:%.2f Sec.",
+           pTask->id.idStr, vgId, pRec->numOfBlocks, pRec->numOfRows, pRec->numOfSubmit, el);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, void* data) {
   const SArray* pBlocks = (const SArray*)data;
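Note: the two new helpers split what tqSinkToTablePipeline used to do inline: doBuildAndSendCreateTableMsg builds one SVCreateTbReq per result row and batches them, while doBuildSubmitAndSendMsg encodes and enqueues a submit and logs aggregate throughput only once every 5000 submits. A standalone sketch of that sampled-logging pattern, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    #define REPORT_INTERVAL 5000  /* assumption: mirrors the % 5000 sampling above */

    /* Log aggregate statistics once every REPORT_INTERVAL submits so that
       per-submit logging stays cheap on the hot path. */
    static void recordSubmitSketch(int64_t* pNumOfSubmit, int64_t totalRows, int64_t elapsedMs) {
      *pNumOfSubmit += 1;
      if ((*pNumOfSubmit % REPORT_INTERVAL) == 0) {
        printf("wrote %lld rows in %.2f sec\n", (long long)totalRows, elapsedMs / 1000.0);
      }
    }

    int main(void) {
      int64_t numOfSubmit = 0;
      for (int i = 0; i < 10000; i++) {
        recordSubmitSketch(&numOfSubmit, 123456, 42000);  /* prints twice */
      }
      return 0;
    }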
@@ -143,136 +288,147 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, void* data) {
   int32_t vgId = TD_VID(pVnode);
   int32_t numOfBlocks = taosArrayGetSize(pBlocks);
   int32_t code = TSDB_CODE_SUCCESS;

-  tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table", vgId, pTask->id.idStr, numOfBlocks);
-
-  SArray* tagArray = NULL;
-  SArray* pVals = NULL;
-  SArray* crTblArray = NULL;
-
-  for (int32_t i = 0; i < numOfBlocks; i++) {
-    SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
-    int32_t      rows = pDataBlock->info.rows;
-
-    if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
-      code = doSinkDeleteBlock(pVnode, stbFullName, pDataBlock, pTask, suid);
-    } else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
-      tqDebug("s-task:%s build create table msg", pTask->id.idStr);
-
-      SVCreateTbBatchReq reqs = {0};
-      crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
-      if (NULL == reqs.pArray) {
-        goto _end;
-      }
-
-      for (int32_t rowId = 0; rowId < rows; rowId++) {
-        SVCreateTbReq* pCreateTbReq = &((SVCreateTbReq){0});
-
-        // set const
-        pCreateTbReq->flags = 0;
-        pCreateTbReq->type = TSDB_CHILD_TABLE;
-        pCreateTbReq->ctb.suid = suid;
-
-        // set super table name
-        SName name = {0};
-        tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
-        pCreateTbReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name));  // taosStrdup(stbFullName);
-
-        // set tag content
-        int32_t size = taosArrayGetSize(pDataBlock->pDataBlock);
-        if (size == 2) {
-          tagArray = taosArrayInit(1, sizeof(STagVal));
-
-          if (!tagArray) {
-            tdDestroySVCreateTbReq(pCreateTbReq);
-            goto _end;
-          }
-
-          STagVal tagVal = {
-              .cid = pTSchema->numOfCols + 1, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = pDataBlock->info.id.groupId};
-
-          taosArrayPush(tagArray, &tagVal);
-
-          // set tag name
-          SArray* tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
-          char    tagNameStr[TSDB_COL_NAME_LEN] = "group_id";
-          taosArrayPush(tagName, tagNameStr);
-          pCreateTbReq->ctb.tagName = tagName;
-        } else {
-          tagArray = taosArrayInit(size - 1, sizeof(STagVal));
-          if (!tagArray) {
-            tdDestroySVCreateTbReq(pCreateTbReq);
-            goto _end;
-          }
-
-          for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
-            SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);
-
-            STagVal tagVal = {.cid = pTSchema->numOfCols + step, .type = pTagData->info.type};
-            void* pData = colDataGetData(pTagData, rowId);
-            if (colDataIsNull_s(pTagData, rowId)) {
-              continue;
-            } else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
-              tagVal.nData = varDataLen(pData);
-              tagVal.pData = (uint8_t*) varDataVal(pData);
-            } else {
-              memcpy(&tagVal.i64, pData, pTagData->info.bytes);
-            }
-            taosArrayPush(tagArray, &tagVal);
-          }
-
-        }
-        pCreateTbReq->ctb.tagNum = TMAX(size - UD_TAG_COLUMN_INDEX, 1);
-
-        STag* pTag = NULL;
-        tTagNew(tagArray, 1, false, &pTag);
-        tagArray = taosArrayDestroy(tagArray);
-        if (pTag == NULL) {
-          tdDestroySVCreateTbReq(pCreateTbReq);
-          terrno = TSDB_CODE_OUT_OF_MEMORY;
-          goto _end;
-        }
-
-        pCreateTbReq->ctb.pTag = (uint8_t*)pTag;
-
-        // set table name
-        if (!pDataBlock->info.parTbName[0]) {
-          SColumnInfoData* pGpIdColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX);
-          void* pGpIdData = colDataGetData(pGpIdColInfo, rowId);
-          pCreateTbReq->name = buildCtbNameByGroupId(stbFullName, *(uint64_t*)pGpIdData);
-        } else {
-          pCreateTbReq->name = taosStrdup(pDataBlock->info.parTbName);
-        }
-
-        taosArrayPush(reqs.pArray, pCreateTbReq);
-        tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name);
-      }
-
-      reqs.nReqs = taosArrayGetSize(reqs.pArray);
-      if (tqPutReqToQueue(pVnode, &reqs) != TSDB_CODE_SUCCESS) {
-        goto _end;
-      }
-
-      tagArray = taosArrayDestroy(tagArray);
-      taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
-      crTblArray = NULL;
-    } else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
-      continue;
-    } else {
-      code = doSinkResultBlock(pVnode, i, stbFullName, suid, pDataBlock, pTask);
-    }
-  }
-
-  tqDebug("vgId:%d, s-task:%s write results completed", vgId, pTask->id.idStr);
-
-_end:
-  taosArrayDestroy(tagArray);
-  taosArrayDestroy(pVals);
-  taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
+  const char* id = pTask->id.idStr;
+
+  if (pTask->tsInfo.sinkStart == 0) {
+    pTask->tsInfo.sinkStart = taosGetTimestampMs();
+  }
+
+  bool isMixBlocks = true;
+  for(int32_t i = 0; i < numOfBlocks; ++i) {
+    SSDataBlock* p = taosArrayGet(pBlocks, i);
+    if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
+      isMixBlocks = true;
+      break;
+    }
+  }
+
+  if (isMixBlocks) {
+    tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
+            numOfBlocks);
+
+    for(int32_t i = 0; i < numOfBlocks; ++i) {
+      SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
+      if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
+        code = doBuildAndSendDeleteMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
+      } else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
+        code = doBuildAndSendCreateTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
+      } else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
+        continue;
+      } else {
+        pTask->sinkRecorder.numOfBlocks += 1;
+
+        SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
+        if (submitReq.aSubmitTbData == NULL) {
+          code = TSDB_CODE_OUT_OF_MEMORY;
+          tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
+          return;
+        }
+
+        SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
+        code = doBuildSubmitFromResBlock(pVnode, i, stbFullName, suid, pDataBlock, pTask, &tbData);
+        taosArrayPush(submitReq.aSubmitTbData, &tbData);
+
+        code = doBuildSubmitAndSendMsg(pVnode, pTask, 1, &submitReq);
+      }
+    }
+  } else {
+    tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks);
+    SHashObj* pTableIndexMap = taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+
+    SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
+    if (submitReq.aSubmitTbData == NULL) {
+      code = TSDB_CODE_OUT_OF_MEMORY;
+      tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
+      taosHashCleanup(pTableIndexMap);
+      return;
+    }
+
+    bool hasSubmit = false;
+
+    for (int32_t i = 0; i < numOfBlocks; i++) {
+      SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
+      if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
+        code = doBuildAndSendDeleteMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
+      } else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
+        code = doBuildAndSendCreateTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
+      } else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
+        continue;
+      } else {
+        hasSubmit = true;
+        pTask->sinkRecorder.numOfBlocks += 1;
+
+        SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
+        code = doBuildSubmitFromResBlock(pVnode, i, stbFullName, suid, pDataBlock, pTask, &tbData);
+
+        int32_t* index = taosHashGet(pTableIndexMap, &tbData.uid, sizeof(tbData.uid));
+        if (index == NULL) {  // no data yet, append it
+          taosArrayPush(submitReq.aSubmitTbData, &tbData);
+
+          int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1;
+          taosHashPut(pTableIndexMap, &tbData.uid, sizeof(tbData.uid), &size, sizeof(size));
+        } else {
+          SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index);
+          // merge the new submit table block with the existed blocks
+          // if ts in the new data block overlap with existed one, replace it
+          int32_t oldLen = taosArrayGetSize(pExisted->aRowP);
+          int32_t newLen = taosArrayGetSize(tbData.aRowP);
+
+          int32_t j = 0, k = 0;
+          SArray* pFinal = taosArrayInit(oldLen + newLen, POINTER_BYTES);
+          while (j < newLen && k < oldLen) {
+            SRow* pNewRow = taosArrayGetP(tbData.aRowP, j);
+            SRow* pOldRow = taosArrayGetP(pExisted->aRowP, k);
+            if (pNewRow->ts <= pOldRow->ts) {
+              taosArrayPush(pFinal, &pNewRow);
+              if (pNewRow->ts < pOldRow->ts) {
+                j += 1;
+              } else {
+                j += 1;
+                k += 1;
+              }
+            } else {
+              taosArrayPush(pFinal, &pOldRow);
+              k += 1;
+            }
+          }
+
+          while (j < newLen) {
+            SRow* pRow = taosArrayGetP(tbData.aRowP, j++);
+            taosArrayPush(pFinal, &pRow);
+          }
+
+          while (k < oldLen) {
+            SRow* pRow = taosArrayGetP(pExisted->aRowP, k++);
+            taosArrayPush(pFinal, &pRow);
+          }
+
+          taosArrayDestroy(tbData.aRowP);
+          taosArrayDestroy(pExisted->aRowP);
+          pExisted->aRowP = pFinal;
+
+          tqDebug("s-task:%s rows merged, final rows:%d, uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
+                  (int32_t)taosArrayGetSize(pFinal), pExisted->uid, (pExisted->pCreateTbReq != NULL), (tbData.pCreateTbReq != NULL));
+        }
+
+        pTask->sinkRecorder.numOfRows += pDataBlock->info.rows;
+      }
+    }
+
+    taosHashCleanup(pTableIndexMap);
+
+    if (hasSubmit) {
+      doBuildSubmitAndSendMsg(pVnode, pTask, numOfBlocks, &submitReq);
+    } else {
+      tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
+      tqDebug("vgId:%d, s-task:%s write results completed", vgId, id);
+    }
+  }
+
   // TODO: change
 }

-int32_t doSinkDeleteBlock(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
+int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
                           int64_t suid) {
   SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};

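Note: the merge branch above interleaves two timestamp-sorted row arrays and, when a timestamp appears in both, keeps the newer row — both indices advance but only the new row is pushed. A standalone sketch of the same merge over plain int64 keys (SRow here is reduced to a bare timestamp; the real rows carry encoded column data):

    #include <inttypes.h>
    #include <stdio.h>

    /* Merge two ascending ts arrays; on equal ts the "new" value wins and the old
       row is dropped, mirroring the replace-on-overlap rule in tqSinkToTablePipeline. */
    static int mergeByTs(const int64_t* newTs, int n, const int64_t* oldTs, int m, int64_t* out) {
      int j = 0, k = 0, w = 0;
      while (j < n && k < m) {
        if (newTs[j] <= oldTs[k]) {
          out[w++] = newTs[j];
          if (newTs[j] == oldTs[k]) k++;  /* equal ts: drop the old row */
          j++;
        } else {
          out[w++] = oldTs[k++];
        }
      }
      while (j < n) out[w++] = newTs[j++];
      while (k < m) out[w++] = oldTs[k++];
      return w;  /* number of merged rows */
    }

    int main(void) {
      int64_t newRows[] = {1, 3, 5}, oldRows[] = {2, 3, 4}, out[6];
      int w = mergeByTs(newRows, 3, oldRows, 3, out);
      for (int i = 0; i < w; i++) printf("%" PRId64 " ", out[i]);  /* 1 2 3 4 5 */
      printf("\n");
      return 0;
    }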
@@ -390,7 +546,6 @@ static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSi
   int32_t code = tqPutTableInfo(pSinkTableMap, groupId, pTableSinkInfo);
   if (code != TSDB_CODE_SUCCESS) {
     taosMemoryFreeClear(pTableSinkInfo);
-    tqError("s-task:%s failed to put tableSinkInfo in to cache, code:%s", id, tstrerror(code));
   } else {
     tqDebug("s-task:%s new dst table:%s(uid:%" PRIu64 ") added into cache, total:%d", id, pTableSinkInfo->name.data,
             pTableSinkInfo->uid, tSimpleHashGetSize(pSinkTableMap));
@@ -399,26 +554,75 @@ static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSi
   return code;
 }

-int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName, int64_t suid, SSDataBlock* pDataBlock,
-                          SStreamTask* pTask) {
+int32_t tqBuildSubmitReq(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen) {
+  int32_t code = 0;
+  void*   pBuf = NULL;
+  *msgLen = 0;
+
+  // encode
+  int32_t len = 0;
+  tEncodeSize(tEncodeSubmitReq, pSubmitReq, len, code);
+
+  SEncoder encoder;
+  len += sizeof(SSubmitReq2Msg);
+
+  pBuf = rpcMallocCont(len);
+  if (NULL == pBuf) {
+    tDestroySubmitReq(pSubmitReq, TSDB_MSG_FLG_ENCODE);
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  ((SSubmitReq2Msg*)pBuf)->header.vgId = vgId;
+  ((SSubmitReq2Msg*)pBuf)->header.contLen = htonl(len);
+  ((SSubmitReq2Msg*)pBuf)->version = htobe64(1);
+
+  tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SSubmitReq2Msg)), len - sizeof(SSubmitReq2Msg));
+  if (tEncodeSubmitReq(&encoder, pSubmitReq) < 0) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tqError("failed to encode submit req, code:%s, ignore and continue", terrstr());
+    tEncoderClear(&encoder);
+    rpcFreeCont(pBuf);
+    tDestroySubmitReq(pSubmitReq, TSDB_MSG_FLG_ENCODE);
+    return code;
+  }
+
+  tEncoderClear(&encoder);
+  tDestroySubmitReq(pSubmitReq, TSDB_MSG_FLG_ENCODE);
+
+  *msgLen = len;
+  *pMsg = pBuf;
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tsAscendingSortFn(const void* p1, const void* p2) {
+  SRow* pRow1 = *(SRow**) p1;
+  SRow* pRow2 = *(SRow**) p2;
+
+  if (pRow1->ts == pRow2->ts) {
+    return 0;
+  } else {
+    return pRow1->ts > pRow2->ts? 1:-1;
+  }
+}
+
+int32_t doBuildSubmitFromResBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName, int64_t suid,
+                                  SSDataBlock* pDataBlock, SStreamTask* pTask, SSubmitTbData* pTableData) {
   int32_t numOfRows = pDataBlock->info.rows;
   int32_t vgId = TD_VID(pVnode);
   uint64_t groupId = pDataBlock->info.id.groupId;
   STSchema* pTSchema = pTask->tbSink.pTSchema;
   int32_t code = TSDB_CODE_SUCCESS;
-  void* pBuf = NULL;
   SArray* pVals = NULL;
   const char* id = pTask->id.idStr;

-  SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
-  tqDebug("s-task:%s sink data pipeline, build submit msg from %d-th resBlock, including %d rows, dst suid:%" PRId64,
+  tqDebug("s-task:%s sink data pipeline, build submit msg from %dth resBlock, including %d rows, dst suid:%" PRId64,
           id, blockIndex + 1, numOfRows, suid);

-  tbData.aRowP = taosArrayInit(numOfRows, sizeof(SRow*));
+  pTableData->aRowP = taosArrayInit(numOfRows, sizeof(SRow*));
   pVals = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));

-  if (tbData.aRowP == NULL || pVals == NULL) {
-    taosArrayDestroy(tbData.aRowP);
+  if (pTableData->aRowP == NULL || pVals == NULL) {
+    pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
     taosArrayDestroy(pVals);

     code = TSDB_CODE_OUT_OF_MEMORY;
@@ -459,13 +663,21 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
   }

   if (exist) {
-    tbData.uid = pTableSinkInfo->uid;
+    pTableData->uid = pTableSinkInfo->uid;

-    if (tbData.uid == 0) {
+    if (pTableData->uid == 0) {
       tqDebug("s-task:%s cached tableInfo uid is invalid, acquire it from meta", id);
     }

     while (pTableSinkInfo->uid == 0) {
+      if (streamTaskShouldStop(&pTask->status)) {
+        tqDebug("s-task:%s task will stop, quit from waiting for table:%s create", id, dstTableName);
+        pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
+        taosArrayDestroy(pVals);
+
+        return TSDB_CODE_SUCCESS;
+      }
+
       // wait for the table to be created
       SMetaReader mr = {0};
       metaReaderDoInit(&mr, pVnode->pMeta, 0);
@@ -476,21 +688,21 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
       if (!isValid) {  // not valid table, ignore it
         metaReaderClear(&mr);

-        taosArrayDestroy(tbData.aRowP);
+        pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
         taosArrayDestroy(pVals);

         return TSDB_CODE_SUCCESS;
       } else {
         tqDebug("s-task:%s set uid:%"PRIu64" for dstTable:%s from meta", id, mr.me.uid, pTableSinkInfo->name.data);

-        tbData.uid = mr.me.uid;
+        pTableData->uid = mr.me.uid;
         pTableSinkInfo->uid = mr.me.uid;
         metaReaderClear(&mr);
       }
     } else {  // not exist, wait and retry
       metaReaderClear(&mr);
       taosMsleep(100);
-      tqDebug("s-task:%s wait for the table:%s ready before insert data", id, dstTableName);
+      tqDebug("s-task:%s wait 100ms for the table:%s ready before insert data", id, dstTableName);
     }
   }

@@ -510,12 +722,12 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,

       tqDebug("s-task:%s stream write into table:%s, table auto created", id, dstTableName);

-      tbData.flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
-      tbData.pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock);
-      if (tbData.pCreateTbReq == NULL) {
+      pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
+      pTableData->pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock);
+      if (pTableData->pCreateTbReq == NULL) {
         tqError("s-task:%s failed to build auto create table req, code:%s", id, tstrerror(terrno));

-        taosArrayDestroy(tbData.aRowP);
+        pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
         taosArrayDestroy(pVals);

         return terrno;
@@ -527,14 +739,14 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
       if (!isValid) {
         metaReaderClear(&mr);
         taosMemoryFree(pTableSinkInfo);
-        taosArrayDestroy(tbData.aRowP);
+        pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
         taosArrayDestroy(pVals);
         return TSDB_CODE_SUCCESS;
       } else {
-        tbData.uid = mr.me.uid;
+        pTableData->uid = mr.me.uid;
         metaReaderClear(&mr);

-        doPutIntoCache(pTask->tbSink.pTblInfo, pTableSinkInfo, groupId, tbData.uid, id);
+        doPutIntoCache(pTask->tbSink.pTblInfo, pTableSinkInfo, groupId, pTableData->uid, id);
       }
     }
   }
@@ -565,7 +777,7 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
       void* colData = colDataGetData(pColData, j);
       if (IS_STR_DATA_TYPE(pCol->type)) {
         // address copy, no value
-        SValue sv = (SValue){.nData = varDataLen(colData), .pData = varDataVal(colData)};
+        SValue sv = (SValue){.nData = varDataLen(colData), .pData = (uint8_t*) varDataVal(colData)};
         SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv);
         taosArrayPush(pVals, &cv);
       } else {
@@ -582,69 +794,20 @@ int32_t doSinkResultBlock(SVnode* pVnode, int32_t blockIndex, char* stbFullName,
     SRow* pRow = NULL;
     code = tRowBuild(pVals, (STSchema*)pTSchema, &pRow);
     if (code != TSDB_CODE_SUCCESS) {
-      tDestroySubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE);
+      tDestroySubmitTbData(pTableData, TSDB_MSG_FLG_ENCODE);

-      taosArrayDestroy(tbData.aRowP);
+      pTableData->aRowP = taosArrayDestroy(pTableData->aRowP);
       taosArrayDestroy(pVals);
       return code;
     }

     ASSERT(pRow);
-    taosArrayPush(tbData.aRowP, &pRow);
+    taosArrayPush(pTableData->aRowP, &pRow);
   }

-  SSubmitReq2 submitReq = {0};
-  if (!(submitReq.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData)))) {
-    tDestroySubmitTbData(&tbData, TSDB_MSG_FLG_ENCODE);
-
-    taosArrayDestroy(tbData.aRowP);
-    taosArrayDestroy(pVals);
-    return TSDB_CODE_OUT_OF_MEMORY;
-  }
-
-  taosArrayPush(submitReq.aSubmitTbData, &tbData);
-
-  // encode
-  int32_t len = 0;
-  tEncodeSize(tEncodeSubmitReq, &submitReq, len, code);
-
-  SEncoder encoder;
-  len += sizeof(SSubmitReq2Msg);
-
-  pBuf = rpcMallocCont(len);
-  if (NULL == pBuf) {
-    tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
-    taosArrayDestroy(tbData.aRowP);
-    taosArrayDestroy(pVals);
-  }
-
-  ((SSubmitReq2Msg*)pBuf)->header.vgId = vgId;
-  ((SSubmitReq2Msg*)pBuf)->header.contLen = htonl(len);
-  ((SSubmitReq2Msg*)pBuf)->version = htobe64(1);
-
-  tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SSubmitReq2Msg)), len - sizeof(SSubmitReq2Msg));
-  if (tEncodeSubmitReq(&encoder, &submitReq) < 0) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    tqError("failed to encode submit req, code:%s, ignore and continue", terrstr());
-    tEncoderClear(&encoder);
-    rpcFreeCont(pBuf);
-    tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
-
-    return code;
-  }
-
-  tEncoderClear(&encoder);
-  tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
-
-  SRpcMsg msg = { .msgType = TDMT_VND_SUBMIT, .pCont = pBuf, .contLen = len };
-  code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg);
-
-  if(code == TSDB_CODE_SUCCESS) {
-    tqDebug("s-task:%s send submit msg to dstTable:%s, numOfRows:%d", id, pTableSinkInfo->name.data, numOfRows);
-  } else {
-    tqError("s-task:%s failed to put into write-queue since %s", id, terrstr());
-  }
-
+  taosArraySort(pTableData->aRowP, tsAscendingSortFn);
+  tqDebug("s-task:%s build submit msg for dstTable:%s, numOfRows:%d", id, dstTableName, numOfRows);
   taosArrayDestroy(pVals);
   return code;
 }
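Note: tqBuildSubmitReq uses a two-pass encode — tEncodeSize computes the payload length first, then the RPC buffer is allocated with room for the SSubmitReq2Msg header and the payload is encoded into the region after it, so the message can be handed to the RPC layer without a copy. A self-contained sketch of the shape, with hypothetical stand-in types and memcpy standing in for the real encoder:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical fixed-size payload; the real path sizes SSubmitReq2 via tEncodeSize. */
    typedef struct { int64_t suid; int32_t rows; } PayloadSketch;
    typedef struct { int32_t vgId; int32_t contLen; } MsgHeaderSketch;

    /* Pass 1 computes the size; pass 2 writes header + payload into one buffer. */
    static void* buildMsgSketch(const PayloadSketch* req, int32_t vgId, int32_t* pTotalLen) {
      int32_t payloadLen = (int32_t)sizeof(PayloadSketch);       /* "tEncodeSize" stand-in */
      int32_t total = (int32_t)sizeof(MsgHeaderSketch) + payloadLen;
      uint8_t* buf = malloc(total);                              /* "rpcMallocCont" stand-in */
      if (buf == NULL) return NULL;
      ((MsgHeaderSketch*)buf)->vgId = vgId;
      ((MsgHeaderSketch*)buf)->contLen = total;
      memcpy(buf + sizeof(MsgHeaderSketch), req, payloadLen);    /* "tEncodeSubmitReq" stand-in */
      *pTotalLen = total;
      return buf;
    }

    int main(void) {
      PayloadSketch req = {.suid = 42, .rows = 10};
      int32_t total = 0;
      void* msg = buildMsgSketch(&req, 2, &total);
      free(msg);
      return 0;
    }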
@@ -112,7 +112,7 @@ int32_t tqCheckAndRunStreamTaskAsync(STQ* pTq) {

   int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
   if (numOfTasks == 0) {
-    tqInfo("vgId:%d no stream tasks exist", vgId);
+    tqDebug("vgId:%d no stream tasks existed to run", vgId);
     taosWUnLockLatch(&pMeta->lock);
     return 0;
   }
@@ -150,7 +150,7 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {

   int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
   if (numOfTasks == 0) {
-    tqInfo("vgId:%d no stream tasks exist", vgId);
+    tqDebug("vgId:%d no stream tasks existed to run", vgId);
     taosWUnLockLatch(&pMeta->lock);
     return 0;
   }
@@ -256,36 +256,36 @@ int32_t tqStartStreamTasks(STQ* pTq) {
 int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
   // seek the stored version and extract data from WAL
   int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
-  if (pTask->chkInfo.currentVer < firstVer) {
+  if (pTask->chkInfo.nextProcessVer < firstVer) {
     tqWarn("vgId:%d s-task:%s ver:%" PRId64 " earlier than the first ver of wal range %" PRId64 ", forward to %" PRId64,
-           vgId, pTask->id.idStr, pTask->chkInfo.currentVer, firstVer, firstVer);
+           vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, firstVer, firstVer);

-    pTask->chkInfo.currentVer = firstVer;
+    pTask->chkInfo.nextProcessVer = firstVer;

     // todo need retry if failed
-    int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
+    int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.nextProcessVer);
     if (code != TSDB_CODE_SUCCESS) {
       return code;
     }

     // append the data for the stream
-    tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
+    tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer);
   } else {
     int64_t currentVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
     if (currentVer == -1) {  // we only seek the read for the first time
-      int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
+      int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.nextProcessVer);
       if (code != TSDB_CODE_SUCCESS) {  // no data in wal, quit
         return code;
       }

       // append the data for the stream
       tqDebug("vgId:%d s-task:%s wal reader initial seek to ver:%" PRId64, vgId, pTask->id.idStr,
-              pTask->chkInfo.currentVer);
+              pTask->chkInfo.nextProcessVer);
     }
   }

   int64_t skipToVer = walReaderGetSkipToVersion(pTask->exec.pWalReader);
-  if (skipToVer != 0 && skipToVer > pTask->chkInfo.currentVer) {
+  if (skipToVer != 0 && skipToVer > pTask->chkInfo.nextProcessVer) {
     int32_t code = walReaderSeekVer(pTask->exec.pWalReader, skipToVer);
     if (code != TSDB_CODE_SUCCESS) {  // no data in wal, quit
       return code;
@@ -304,7 +304,7 @@ void handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) {

   if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) {
     if (!pTask->status.appendTranstateBlock) {
-      qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64
+      qWarn("s-task:%s fill-history scan WAL, nextProcessVer:%" PRId64 " out of the maximum ver:%" PRId64
             ", not scan wal anymore, add transfer-state block into inputQ",
             id, ver, maxVer);

@@ -313,7 +313,7 @@ void handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) {
       /*int32_t code = */streamTaskPutTranstateIntoInputQ(pTask);
       /*int32_t code = */ streamSchedExec(pTask);
     } else {
-      qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal",
+      qWarn("s-task:%s fill-history scan WAL, nextProcessVer:%" PRId64 " out of the maximum ver:%" PRId64 ", not scan wal",
            id, ver, maxVer);
     }
   }
@@ -396,25 +396,23 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
     int32_t numOfItems = streamTaskGetInputQItems(pTask);
     int64_t maxVer = (pTask->info.fillHistory == 1) ? pTask->dataRange.range.maxVer : INT64_MAX;

+    taosThreadMutexLock(&pTask->lock);
+
+    pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
+    if (pTask->status.taskStatus != TASK_STATUS__NORMAL) {
+      tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
+      taosThreadMutexUnlock(&pTask->lock);
+      streamMetaReleaseTask(pStreamMeta, pTask);
+      continue;
+    }
+
     SStreamQueueItem* pItem = NULL;
     code = extractMsgFromWal(pTask->exec.pWalReader, (void**)&pItem, maxVer, pTask->id.idStr);

     if ((code != TSDB_CODE_SUCCESS || pItem == NULL) && (numOfItems == 0)) {  // failed, continue
       handleFillhistoryScanComplete(pTask, walReaderGetCurrentVer(pTask->exec.pWalReader));
       streamMetaReleaseTask(pStreamMeta, pTask);
-      continue;
-    }
-
-    taosThreadMutexLock(&pTask->lock);
-    pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
-
-    if (pTask->status.taskStatus != TASK_STATUS__NORMAL) {
-      tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pStatus);
       taosThreadMutexUnlock(&pTask->lock);
-      streamMetaReleaseTask(pStreamMeta, pTask);
-
-      if (pItem != NULL) {
-        streamFreeQitem(pItem);
-      }
       continue;
     }

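Note: the reordering above moves the status check (taken under pTask->lock) ahead of extractMsgFromWal, so a non-NORMAL task is skipped before any queue item is allocated; the old code had to remember to streamFreeQitem on the late bail-out. A compact sketch of the check-then-work shape, with stand-in types:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct { pthread_mutex_t lock; int status; } TaskSketch;
    enum { STATUS_NORMAL = 0 };

    /* Check status while holding the lock BEFORE allocating anything, so the
       skip path has nothing to free — the shape the patched loop moves to. */
    static bool tryExtract(TaskSketch* t, void** ppItem) {
      pthread_mutex_lock(&t->lock);
      if (t->status != STATUS_NORMAL) {
        pthread_mutex_unlock(&t->lock);
        return false;                  /* nothing allocated yet, nothing to free */
      }
      *ppItem = malloc(64);            /* stands in for extractMsgFromWal() */
      pthread_mutex_unlock(&t->lock);
      return *ppItem != NULL;
    }

    int main(void) {
      TaskSketch t = {.lock = PTHREAD_MUTEX_INITIALIZER, .status = STATUS_NORMAL};
      void* item = NULL;
      if (tryExtract(&t, &item)) free(item);
      return 0;
    }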
@@ -423,12 +421,12 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
     code = streamTaskPutDataIntoInputQ(pTask, pItem);
     if (code == TSDB_CODE_SUCCESS) {
       int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader);
-      pTask->chkInfo.currentVer = ver;
+      pTask->chkInfo.nextProcessVer = ver;
       handleFillhistoryScanComplete(pTask, ver);
       tqDebug("s-task:%s set the ver:%" PRId64 " from WALReader after extract block from WAL", pTask->id.idStr, ver);
     } else {
       tqError("s-task:%s append input queue failed, too many in inputQ, ver:%" PRId64, pTask->id.idStr,
-              pTask->chkInfo.currentVer);
+              pTask->chkInfo.nextProcessVer);
     }
   }

@@ -857,6 +857,9 @@ static int32_t tsdbSnapWriteTombRecord(STsdbSnapWriter* writer, const STombRecor
     } else {
       break;
     }

+    code = tsdbIterMergerNext(writer->ctx->tombIterMerger);
+    TSDB_CHECK_CODE(code, lino, _exit);
   }

   if (record->suid == INT64_MAX) {
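Note: the added tsdbIterMergerNext call advances the tomb-record iterator on every pass of the loop; without it the while loop re-reads the same record forever. A tiny sketch of the iterate-then-advance discipline, with hypothetical types:

    #include <stdbool.h>

    /* A merger-style loop must advance its iterator every pass, or it spins on
       the same record — the bug the added tsdbIterMergerNext call fixes. */
    typedef struct { int pos, len; } IterSketch;

    static bool iterNext(IterSketch* it) { return ++it->pos < it->len; }

    static int drain(IterSketch* it) {
      int handled = 0;
      while (it->pos < it->len) {
        handled++;                 /* process the current record */
        if (!iterNext(it)) break;  /* advance — the step that was missing */
      }
      return handled;
    }

    int main(void) {
      IterSketch it = {.pos = 0, .len = 3};
      return drain(&it) == 3 ? 0 : 1;
    }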
@@ -160,7 +160,7 @@ static int32_t vnodePreProcessDropTtlMsg(SVnode *pVnode, SRpcMsg *pMsg) {
   }

   {  // find expired uids
-    tbUids = taosArrayInit(8, sizeof(int64_t));
+    tbUids = taosArrayInit(8, sizeof(tb_uid_t));
     if (tbUids == NULL) {
       code = TSDB_CODE_OUT_OF_MEMORY;
       TSDB_CHECK_CODE(code, lino, _exit);
@@ -756,7 +756,7 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
     case TDMT_VND_STREAM_TASK_CHECK:
       return tqProcessStreamTaskCheckReq(pVnode->pTq, pMsg);
     case TDMT_VND_STREAM_TASK_CHECK_RSP:
-      return tqProcessStreamTaskCheckRsp(pVnode->pTq, 0, pMsg);
+      return tqProcessStreamTaskCheckRsp(pVnode->pTq, pMsg);
     case TDMT_STREAM_RETRIEVE:
       return tqProcessTaskRetrieveReq(pVnode->pTq, pMsg);
     case TDMT_STREAM_RETRIEVE_RSP:
@@ -945,7 +945,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
   int32_t clusterId = pVnode->config.syncCfg.nodeInfo[0].clusterId;

   char detail[1000] = {0};
   sprintf(detail, "btime:%" PRId64 ", flags:%d, ttl:%d, type:%d",
           pCreateReq->btime, pCreateReq->flags, pCreateReq->ttl, pCreateReq->type);

   SName name = {0};
@@ -1443,8 +1443,11 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in

         SColData *pColData = (SColData *)taosArrayGet(pSubmitTbData->aCol, 0);
         TSKEY    *aKey = (TSKEY *)(pColData->pData);
+        vDebug("vgId:%d submit %d rows data, uid:%"PRId64, TD_VID(pVnode), pColData->nVal, pSubmitTbData->uid);

         for (int32_t iRow = 0; iRow < pColData->nVal; iRow++) {
+          vDebug("vgId:%d uid:%"PRId64" ts:%"PRId64, TD_VID(pVnode), pSubmitTbData->uid, aKey[iRow]);
+
           if (aKey[iRow] < minKey || aKey[iRow] > maxKey || (iRow > 0 && aKey[iRow] <= aKey[iRow - 1])) {
             code = TSDB_CODE_INVALID_MSG;
             vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
@@ -1456,10 +1459,13 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
         int32_t nRow = TARRAY_SIZE(pSubmitTbData->aRowP);
         SRow  **aRow = (SRow **)TARRAY_DATA(pSubmitTbData->aRowP);

+        vDebug("vgId:%d submit %d rows data, uid:%"PRId64, TD_VID(pVnode), nRow, pSubmitTbData->uid);
         for (int32_t iRow = 0; iRow < nRow; ++iRow) {
+          vDebug("vgId:%d uid:%"PRId64" ts:%"PRId64, TD_VID(pVnode), pSubmitTbData->uid, aRow[iRow]->ts);
+
           if (aRow[iRow]->ts < minKey || aRow[iRow]->ts > maxKey || (iRow > 0 && aRow[iRow]->ts <= aRow[iRow - 1]->ts)) {
             code = TSDB_CODE_INVALID_MSG;
-            vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
+            vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(code), ver);
             goto _exit;
           }
         }
|
||||||
} else { // create table failed
|
} else { // create table failed
|
||||||
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
|
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
|
||||||
code = terrno;
|
code = terrno;
|
||||||
|
vError("vgId:%d failed to create table:%s, code:%s", TD_VID(pVnode), pSubmitTbData->pCreateTbReq->name,
|
||||||
|
tstrerror(terrno));
|
||||||
goto _exit;
|
goto _exit;
|
||||||
}
|
}
|
||||||
terrno = 0;
|
terrno = 0;
|
||||||
|
|
|
@@ -760,12 +760,14 @@ int32_t ctgGetCachedStbNameFromSuid(SCatalog* pCtg, char* dbFName, uint64_t suid
   char *stb = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
   if (NULL == stb) {
     ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
+    ctgReleaseDBCache(pCtg, dbCache);
     return TSDB_CODE_SUCCESS;
   }

   *stbName = taosStrdup(stb);

   taosHashRelease(dbCache->stbCache, stb);
+  ctgReleaseDBCache(pCtg, dbCache);

   return TSDB_CODE_SUCCESS;
 }
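Note: the two ctgReleaseDBCache calls plug a reference leak — the db cache entry is acquired earlier in the function, so every return path after the acquire must release it, including the early "not in cache" return. A generic acquire/release-balance sketch, with stand-in names:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { int refCount; } CacheSketch;

    static void* acquire(CacheSketch* c, bool hit) { c->refCount++; return hit ? c : NULL; }
    static void  release(CacheSketch* c)           { c->refCount--; }

    /* Every exit after a successful acquire releases the cache — including the
       early "not found" return that the original code leaked on. */
    static int getName(CacheSketch* c, bool hit) {
      void* entry = acquire(c, hit);
      if (entry == NULL) {
        release(c);  /* early return still balances the acquire */
        return -1;
      }
      /* ... use entry ... */
      release(c);
      return 0;
    }

    int main(void) {
      CacheSketch c = {0};
      getName(&c, false);
      getName(&c, true);
      return c.refCount;  /* 0 when every acquire is balanced */
    }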
@@ -2257,7 +2257,7 @@ FETCH_NEXT_BLOCK:
       int32_t      current = pInfo->validBlockIndex++;
       SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current);

-      qDebug("set %d/%d as the input submit block, %s", current, totalBlocks, id);
+      qDebug("set %d/%d as the input submit block, %s", current + 1, totalBlocks, id);
       if (pAPI->tqReaderFn.tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
         qError("submit msg messed up when initializing stream submit block %p, current %d/%d, %s", pSubmit, current, totalBlocks, id);
         continue;
@@ -1786,7 +1786,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   }

   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
-  if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
+  if (!IS_INTEGER_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
       !IS_TIMESTAMP_TYPE(colType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }
|
||||||
uint8_t resType;
|
uint8_t resType;
|
||||||
if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
|
if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
|
||||||
resType = TSDB_DATA_TYPE_BIGINT;
|
resType = TSDB_DATA_TYPE_BIGINT;
|
||||||
|
} else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) {
|
||||||
|
resType = TSDB_DATA_TYPE_UBIGINT;
|
||||||
} else {
|
} else {
|
||||||
resType = TSDB_DATA_TYPE_DOUBLE;
|
resType = TSDB_DATA_TYPE_DOUBLE;
|
||||||
}
|
}
|
||||||
|
|
|
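Together with the IS_INTEGER_TYPE relaxation above, DIFF now accepts unsigned integer columns and yields an unsigned result for them. A standalone sketch of the promotion rule (illustrative type codes, not TDengine's real enum values):

```c
#include <stdio.h>

// Sketch of the result-type promotion encoded above: signed ints, timestamp and
// bool diff into BIGINT; the newly accepted unsigned ints into UBIGINT; floats
// into DOUBLE.
typedef enum { T_BOOL, T_I8, T_I16, T_I32, T_I64, T_U8, T_U16, T_U32, T_U64,
               T_FLOAT, T_DOUBLE, T_TS } ColType;

static const char *diffResultType(ColType t) {
  switch (t) {
    case T_BOOL: case T_I8: case T_I16: case T_I32: case T_I64: case T_TS:
      return "BIGINT";
    case T_U8: case T_U16: case T_U32: case T_U64:
      return "UBIGINT";   // new branch: unsigned inputs keep an unsigned result
    default:
      return "DOUBLE";
  }
}

int main(void) {
  printf("diff(UINT)  -> %s\n", diffResultType(T_U32));   // UBIGINT
  printf("diff(FLOAT) -> %s\n", diffResultType(T_FLOAT)); // DOUBLE
  return 0;
}
```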
@@ -2714,16 +2714,20 @@ static int32_t doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv,
     case TSDB_DATA_TYPE_BOOL:
       pDiffInfo->prev.i64 = *(bool*)pv ? 1 : 0;
       break;
+    case TSDB_DATA_TYPE_UTINYINT:
     case TSDB_DATA_TYPE_TINYINT:
      pDiffInfo->prev.i64 = *(int8_t*)pv;
       break;
+    case TSDB_DATA_TYPE_UINT:
     case TSDB_DATA_TYPE_INT:
       pDiffInfo->prev.i64 = *(int32_t*)pv;
       break;
+    case TSDB_DATA_TYPE_USMALLINT:
     case TSDB_DATA_TYPE_SMALLINT:
       pDiffInfo->prev.i64 = *(int16_t*)pv;
       break;
     case TSDB_DATA_TYPE_TIMESTAMP:
+    case TSDB_DATA_TYPE_UBIGINT:
     case TSDB_DATA_TYPE_BIGINT:
       pDiffInfo->prev.i64 = *(int64_t*)pv;
       break;
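Reading an unsigned column through the matching signed type and widening to int64_t, as the new case labels do, keeps deltas exact for in-range values because the reinterpretation shifts both operands by the same constant. A small standalone demonstration (assuming two's-complement targets, as in practice):

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // An unsigned 8-bit column value is read through the matching signed type and
  // widened to int64_t, mirroring the UTINYINT case above.
  uint8_t prev = 250, cur = 253;
  int64_t prev64 = *(int8_t *)&prev;   // -6 after reinterpretation (250 - 256)
  int64_t cur64  = *(int8_t *)&cur;    // -3 after reinterpretation (253 - 256)
  // Both operands shift by the same 256, so the delta is unchanged:
  // (-3) - (-6) == 253 - 250.
  printf("delta = %lld\n", (long long)(cur64 - prev64));  // prints 3
  return 0;
}
```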
@@ -2745,6 +2749,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv,
                             int64_t ts) {
   pDiffInfo->prevTs = ts;
   switch (type) {
+    case TSDB_DATA_TYPE_UINT:
     case TSDB_DATA_TYPE_INT: {
       int32_t v = *(int32_t*)pv;
       int64_t delta = v - pDiffInfo->prev.i64;  // direct previous may be null
@@ -2758,6 +2763,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv,
       break;
     }
     case TSDB_DATA_TYPE_BOOL:
+    case TSDB_DATA_TYPE_UTINYINT:
     case TSDB_DATA_TYPE_TINYINT: {
       int8_t  v = *(int8_t*)pv;
       int64_t delta = v - pDiffInfo->prev.i64;  // direct previous may be null
@@ -2769,6 +2775,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv,
       pDiffInfo->prev.i64 = v;
       break;
     }
+    case TSDB_DATA_TYPE_USMALLINT:
     case TSDB_DATA_TYPE_SMALLINT: {
       int16_t v = *(int16_t*)pv;
       int64_t delta = v - pDiffInfo->prev.i64;  // direct previous may be null
@@ -2781,6 +2788,7 @@ static int32_t doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv,
       break;
     }
     case TSDB_DATA_TYPE_TIMESTAMP:
+    case TSDB_DATA_TYPE_UBIGINT:
     case TSDB_DATA_TYPE_BIGINT: {
       int64_t v = *(int64_t*)pv;
       int64_t delta = v - pDiffInfo->prev.i64;  // direct previous may be null
@@ -3221,8 +3221,11 @@ int32_t stbJoinOptAddFuncToScanNode(char* funcName, SScanLogicNode* pScan) {
   SFunctionNode* pUidFunc = createFunction(funcName, NULL);
   snprintf(pUidFunc->node.aliasName, sizeof(pUidFunc->node.aliasName), "%s.%p",
            pUidFunc->functionName, pUidFunc);
-  nodesListStrictAppend(pScan->pScanPseudoCols, (SNode *)pUidFunc);
-  return createColumnByRewriteExpr((SNode*)pUidFunc, &pScan->node.pTargets);
+  int32_t code = nodesListStrictAppend(pScan->pScanPseudoCols, (SNode *)pUidFunc);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = createColumnByRewriteExpr((SNode*)pUidFunc, &pScan->node.pTargets);
+  }
+  return code;
 }
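This and the following planner hunks share one pattern: the return code of each fallible append is captured, and later steps run only while it is still success, so a failed allocation is reported instead of silently ignored. A self-contained sketch of the pattern (hypothetical helpers, not the planner API):

```c
#include <stdio.h>

#define CODE_SUCCESS 0

// Stand-ins for fallible planner steps, for illustration only.
static int listAppend(int fail) { return fail ? -1 : CODE_SUCCESS; }
static int rewriteExpr(void)    { return CODE_SUCCESS; }

static int addFuncToScan(int simulateFailure) {
  int code = listAppend(simulateFailure);   // was fire-and-forget before the fix
  if (CODE_SUCCESS == code) {
    code = rewriteExpr();                   // only reached when the append worked
  }
  return code;                              // caller sees the first failure
}

int main(void) {
  printf("ok path:   %d\n", addFuncToScan(0));  // 0
  printf("fail path: %d\n", addFuncToScan(1));  // -1 propagated
  return 0;
}
```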
@@ -3369,12 +3372,7 @@ static int32_t stbJoinOptCreateTableScanNodes(SLogicNode* pJoin, SNodeList** ppL
     pScan->scanType = SCAN_TYPE_TABLE;
   }

-  if (TSDB_CODE_SUCCESS == code) {
-    *ppList = pList;
-  } else {
-    nodesDestroyList(pList);
-    *ppList = NULL;
-  }
+  *ppList = pList;

   return code;
 }
@@ -3478,12 +3476,15 @@ static int32_t stbJoinOptCreateMergeJoinNode(SLogicNode* pOrig, SLogicNode* pChi
   FOREACH(pNode, pJoin->node.pChildren) {
     ERASE_NODE(pJoin->node.pChildren);
   }
-  nodesListStrictAppend(pJoin->node.pChildren, (SNode *)pChild);
-  pChild->pParent = (SLogicNode*)pJoin;
+  int32_t code = nodesListStrictAppend(pJoin->node.pChildren, (SNode *)pChild);
+  if (TSDB_CODE_SUCCESS == code) {
+    pChild->pParent = (SLogicNode*)pJoin;
+    *ppLogic = (SLogicNode*)pJoin;
+  } else {
+    nodesDestroyNode((SNode*)pJoin);
+  }

-  *ppLogic = (SLogicNode*)pJoin;
-
-  return TSDB_CODE_SUCCESS;
+  return code;
 }

 static int32_t stbJoinOptCreateDynQueryCtrlNode(SLogicNode* pRoot, SLogicNode* pPrev, SLogicNode* pPost, bool* srcScan, SLogicNode** ppDynNode) {
@@ -3523,11 +3524,18 @@ static int32_t stbJoinOptCreateDynQueryCtrlNode(SLogicNode* pRoot, SLogicNode* p
   nodesListStrictAppend(pDynCtrl->stbJoin.pUidList, nodesListGetNode(pHJoin->node.pTargets, 2));
   nodesListStrictAppend(pDynCtrl->stbJoin.pVgList, nodesListGetNode(pHJoin->node.pTargets, 1));
   nodesListStrictAppend(pDynCtrl->stbJoin.pVgList, nodesListGetNode(pHJoin->node.pTargets, 3));

   if (TSDB_CODE_SUCCESS == code) {
-    nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPrev);
-    nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPost);
-    pDynCtrl->node.pTargets = nodesCloneList(pPost->pTargets);
+    code = nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPrev);
+    if (TSDB_CODE_SUCCESS == code) {
+      code = nodesListStrictAppend(pDynCtrl->node.pChildren, (SNode*)pPost);
+    }
+    if (TSDB_CODE_SUCCESS == code) {
+      pDynCtrl->node.pTargets = nodesCloneList(pPost->pTargets);
+      if (!pDynCtrl->node.pTargets) {
+        code = TSDB_CODE_OUT_OF_MEMORY;
+      }
+    }
   }

   if (TSDB_CODE_SUCCESS == code) {
@@ -1025,11 +1025,7 @@ static int32_t createGroupCachePhysiNode(SPhysiPlanContext* pCxt, SNodeList* pCh
   }
   */

-  if (TSDB_CODE_SUCCESS == code) {
-    *pPhyNode = (SPhysiNode*)pGrpCache;
-  } else {
-    nodesDestroyNode((SNode*)pGrpCache);
-  }
+  *pPhyNode = (SPhysiNode*)pGrpCache;

   return code;
 }
@@ -1059,6 +1055,8 @@ static int32_t updateDynQueryCtrlStbJoinInfo(SPhysiPlanContext* pCxt, SNodeList*
     }
     pDynCtrl->stbJoin.batchFetch = pLogicNode->stbJoin.batchFetch;
   }
+  nodesDestroyList(pVgList);
+  nodesDestroyList(pUidList);

   return code;
 }
@@ -1671,6 +1671,9 @@ static int32_t sclGetJsonOperatorResType(SOperatorNode *pOp) {
 }

 static int32_t sclGetBitwiseOperatorResType(SOperatorNode *pOp) {
+  if (!pOp->pLeft || !pOp->pRight) {
+    return TSDB_CODE_TSC_INVALID_OPERATION;
+  }
   SDataType ldt = ((SExprNode *)(pOp->pLeft))->resType;
   SDataType rdt = ((SExprNode *)(pOp->pRight))->resType;
   if(TSDB_DATA_TYPE_VARBINARY == ldt.type || TSDB_DATA_TYPE_VARBINARY == rdt.type){
@@ -329,6 +329,7 @@ static FORCE_INLINE void varToVarbinary(char *buf, SScalarParam *pOut, int32_t r
   if (t == NULL) {
     sclError("Out of memory");
     terrno = TSDB_CODE_OUT_OF_MEMORY;
+    taosMemoryFree(data);
     return;
   }
   varDataSetLen(t, size);
@@ -77,6 +77,8 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask);
 int32_t streamTaskFillHistoryFinished(SStreamTask* pTask);
 int32_t streamTransferStateToStreamTask(SStreamTask* pTask);

+int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t cap, int32_t rate);
+
 #ifdef __cplusplus
 }
 #endif
@@ -1591,7 +1591,6 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
   return 0;
 }
 int streamStateOpenBackend(void* backend, SStreamState* pState) {
-  // qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId);
   taosAcquireRef(streamBackendId, pState->streamBackendRid);
   SBackendWrapper* handle = backend;
   SBackendCfWrapper* pBackendCfWrapper = taosMemoryCalloc(1, sizeof(SBackendCfWrapper));
@@ -287,7 +287,7 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
     streamTaskOpenAllUpstreamInput(p);  // open inputQ for all upstream tasks
     qDebug("vgId:%d s-task:%s level:%d commit task status after checkpoint completed, checkpointId:%" PRId64
            ", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status to be normal, prev:%s",
-           pMeta->vgId, p->id.idStr, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.currentVer,
+           pMeta->vgId, p->id.idStr, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.nextProcessVer,
           streamGetTaskStatusStr(prev));
   }
@@ -1001,10 +1001,17 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
     // so the TASK_INPUT_STATUS_BLOCKED is rsp
     if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
       pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED;  // block the input of current task, to push pressure to upstream
-      pTask->msgInfo.blockingTs = taosGetTimestampMs();  // record the blocking start time
+      double el = 0;
+      if (pTask->msgInfo.blockingTs == 0) {
+        pTask->msgInfo.blockingTs = taosGetTimestampMs();  // record the blocking start time
+      } else {
+        el = (taosGetTimestampMs() - pTask->msgInfo.blockingTs) / 1000.0;
+      }

       int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
-      qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data, ref:%d",
-             id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, ref);
+      qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64
+             " wait for %dms and retry dispatch data, total wait:%.2fSec ref:%d",
+             id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, el, ref);
       streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
     } else {  // pipeline send data in output queue
       // this message has been sent successfully, let's try next one.
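A minimal model of the blocking-time bookkeeping above (hypothetical struct, not the real SStreamTask): the first blocked dispatch stamps the start time, and later retries report the accumulated wait instead of re-stamping the clock:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t blockingTs; } MsgInfoSketch;

static double blockedSeconds(MsgInfoSketch *info, int64_t nowMs) {
  if (info->blockingTs == 0) {
    info->blockingTs = nowMs;                    // first retry: remember the start
    return 0.0;
  }
  return (nowMs - info->blockingTs) / 1000.0;    // later retries: accumulated wait
}

int main(void) {
  MsgInfoSketch m = {0};
  printf("%.2f\n", blockedSeconds(&m, 1000));    // 0.00: blocking just started
  printf("%.2f\n", blockedSeconds(&m, 4500));    // 3.50: total time spent blocked
  return 0;
}
```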
@@ -84,7 +84,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
     }

     if (pTask->inputInfo.status == TASK_INPUT_STATUS__BLOCKED) {
-      qWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry", pTask->id.idStr);
+      qWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry exec task", pTask->id.idStr);
       taosMsleep(1000);
       continue;
     }
@@ -563,11 +563,11 @@ int32_t streamExecForAll(SStreamTask* pTask) {
             SIZE_IN_MB(resSize), totalBlocks);

     // update the currentVer if processing the submit blocks.
-    ASSERT(pTask->chkInfo.checkpointVer <= pTask->chkInfo.currentVer && ver >= pTask->chkInfo.checkpointVer);
+    ASSERT(pTask->chkInfo.checkpointVer <= pTask->chkInfo.nextProcessVer && ver >= pTask->chkInfo.checkpointVer);

     if (ver != pTask->chkInfo.checkpointVer) {
-      qDebug("s-task:%s update checkpointVer(unsaved) from %" PRId64 " to %" PRId64, pTask->id.idStr,
-             pTask->chkInfo.checkpointVer, ver);
+      qDebug("s-task:%s update checkpointVer(unsaved) from %" PRId64 " to %" PRId64 " , currentVer:%" PRId64,
+             pTask->id.idStr, pTask->chkInfo.checkpointVer, ver, pTask->chkInfo.nextProcessVer);
       pTask->chkInfo.checkpointVer = ver;
     }
@@ -29,6 +29,8 @@ typedef struct SQueueReader {
   int32_t waitDuration;  // maximum wait time to format several block into a batch to process, unit: ms
 } SQueueReader;

+static bool streamTaskHasAvailableToken(STokenBucket* pBucket);
+
 static void streamQueueCleanup(SStreamQueue* pQueue) {
   void* qItem = NULL;
   while ((qItem = streamQueueNextItem(pQueue)) != NULL) {
@@ -175,6 +177,14 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu
     return TSDB_CODE_SUCCESS;
   }

+  STokenBucket* pBucket = &pTask->tokenBucket;
+  bool has = streamTaskHasAvailableToken(pBucket);
+  if (!has) {  // no available token in the bucket, ignore this execution
+    // qInfo("s-task:%s no available token for sink, capacity:%d, rate:%d token/sec, quit", pTask->id.idStr,
+    //       pBucket->capacity, pBucket->rate);
+    return TSDB_CODE_SUCCESS;
+  }
+
   SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputInfo.queue);
   if (qItem == NULL) {
     qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id);
@@ -264,9 +274,9 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
   if (type == STREAM_INPUT__DATA_SUBMIT) {
     SStreamDataSubmit* px = (SStreamDataSubmit*)pItem;
     if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && streamQueueIsFull(pQueue)) {
-      qError(
-          "s-task:%s inputQ is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) stop to push data",
-          pTask->id.idStr, STREAM_TASK_INPUT_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
+      // qError(
+      //     "s-task:%s inputQ is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) stop to push data",
+      //     pTask->id.idStr, STREAM_TASK_INPUT_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
       streamDataSubmitDestroy(px);
       taosFreeQitem(pItem);
       return -1;
@@ -288,8 +298,8 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
   } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
              type == STREAM_INPUT__REF_DATA_BLOCK) {
     if (streamQueueIsFull(pQueue)) {
-      qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
-             pTask->id.idStr, STREAM_TASK_INPUT_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
+      // qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
+      //        pTask->id.idStr, STREAM_TASK_INPUT_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
       destroyStreamDataBlock((SStreamDataBlock*)pItem);
       return -1;
     }
@@ -320,3 +330,45 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)

   return 0;
 }
+
+int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t cap, int32_t rate) {
+  if (cap < 100 || rate < 50 || pBucket == NULL) {
+    qError("failed to init sink task bucket, cap:%d, rate:%d", cap, rate);
+    return TSDB_CODE_INVALID_PARA;
+  }
+
+  pBucket->capacity = cap;
+  pBucket->rate = rate;
+  pBucket->numOfToken = cap;
+  pBucket->fillTimestamp = taosGetTimestampMs();
+  return TSDB_CODE_SUCCESS;
+}
+
+static void fillBucket(STokenBucket* pBucket) {
+  int64_t now = taosGetTimestampMs();
+  int64_t delta = now - pBucket->fillTimestamp;
+  ASSERT(pBucket->numOfToken >= 0);
+
+  int32_t inc = (delta / 1000.0) * pBucket->rate;
+  if (inc > 0) {
+    if ((pBucket->numOfToken + inc) < pBucket->capacity) {
+      pBucket->numOfToken += inc;
+    } else {
+      pBucket->numOfToken = pBucket->capacity;
+    }
+
+    pBucket->fillTimestamp = now;
+    qDebug("new token available, current:%d, inc:%d ts:%"PRId64, pBucket->numOfToken, inc, now);
+  }
+}
+
+bool streamTaskHasAvailableToken(STokenBucket* pBucket) {
+  fillBucket(pBucket);
+  if (pBucket->numOfToken > 0) {
+    // qDebug("current token:%d", pBucket->numOfToken);
+    --pBucket->numOfToken;
+    return true;
+  } else {
+    return false;
+  }
+}
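A standalone model of this token bucket (simplified names, not the real STokenBucket): tokens refill at `rate` per second up to `capacity`, and each sink execution consumes one, which bounds how often the sink can run. streamTaskInit() further below seeds the real bucket with capacity 150 and rate 100:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t capacity, rate, tokens;
  int64_t lastFillMs;
} Bucket;

static void fill(Bucket *b, int64_t nowMs) {
  // Refill proportionally to elapsed time, clamped at capacity.
  int32_t inc = (int32_t)((nowMs - b->lastFillMs) / 1000.0 * b->rate);
  if (inc > 0) {
    b->tokens = (b->tokens + inc < b->capacity) ? b->tokens + inc : b->capacity;
    b->lastFillMs = nowMs;
  }
}

static int takeToken(Bucket *b, int64_t nowMs) {
  fill(b, nowMs);
  if (b->tokens > 0) { b->tokens--; return 1; }
  return 0;   // caller skips this round, mirroring the early return in the queue code
}

int main(void) {
  Bucket b = {.capacity = 150, .rate = 100, .tokens = 2, .lastFillMs = 0};
  for (int i = 0; i < 3; i++) {
    printf("take %d -> %d\n", i, takeToken(&b, 0));   // 1, 1, 0: bucket drained
  }
  printf("after 1s -> %d\n", takeToken(&b, 1000));    // 1: refilled at 100 tokens/sec
  return 0;
}
```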
@@ -30,6 +30,13 @@ static void streamTaskSetRangeStreamCalc(SStreamTask* pTask);
 static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);

 static void streamTaskSetReady(SStreamTask* pTask, int32_t numOfReqs) {
+  if (pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY && pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
+    pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamInfoList);
+    qDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
+           pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream,
+           streamGetTaskStatusStr(pTask->status.taskStatus));
+  }
+
   ASSERT(pTask->status.downstreamReady == 0);
   pTask->status.downstreamReady = 1;
@@ -97,11 +104,8 @@ int32_t streamTaskLaunchScanHistory(SStreamTask* pTask) {
       streamSetParamForScanHistory(pTask);
       streamTaskEnablePause(pTask);
     }
-
-    streamTaskScanHistoryPrepare(pTask);
   } else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
     qDebug("s-task:%s sink task do nothing to handle scan-history", pTask->id.idStr);
-    streamTaskScanHistoryPrepare(pTask);
   }
   return 0;
 }
@@ -402,15 +406,6 @@ int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask) {
   return TSDB_CODE_SUCCESS;
 }

-// agg
-int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask) {
-  pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamInfoList);
-  qDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
-         pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream,
-         streamGetTaskStatusStr(pTask->status.taskStatus));
-  return 0;
-}
-
 int32_t streamAggUpstreamScanHistoryFinish(SStreamTask* pTask) {
   void* exec = pTask->exec.pExecutor;
   if (pTask->info.fillHistory && qRestoreStreamOperatorOption(exec) < 0) {
@@ -509,7 +504,8 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) {

 static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask) {
   pHTask->dataRange.range.minVer = 0;
-  pHTask->dataRange.range.maxVer = pTask->chkInfo.currentVer;
+  // the query version range should be limited to the already processed data
+  pHTask->dataRange.range.maxVer = pTask->chkInfo.nextProcessVer - 1;

   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     qDebug("s-task:%s set the launch condition for fill-history s-task:%s, window:%" PRId64 " - %" PRId64
@@ -375,16 +375,17 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
     return -1;
   }

-  pTask->tsInfo.init = taosGetTimestampMs();
+  pTask->tsInfo.created = taosGetTimestampMs();
   pTask->inputInfo.status = TASK_INPUT_STATUS__NORMAL;
   pTask->outputInfo.status = TASK_OUTPUT_STATUS__NORMAL;
   pTask->pMeta = pMeta;

-  pTask->chkInfo.currentVer = ver;
+  pTask->chkInfo.nextProcessVer = ver;
   pTask->dataRange.range.maxVer = ver;
   pTask->dataRange.range.minVer = ver;
   pTask->pMsgCb = pMsgCb;

+  streamTaskInitTokenBucket(&pTask->tokenBucket, 150, 100);
   taosThreadMutexInit(&pTask->lock, NULL);
   streamTaskOpenAllUpstreamInput(pTask);
@@ -2562,7 +2562,11 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum
              ths->logReplMgrs[i]->matchIndex, ths->logReplMgrs[i]->endIndex);
   }

-  SSyncLogReplMgr oldLogReplMgrs[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA] = {0};
+  SSyncLogReplMgr* oldLogReplMgrs = NULL;
+  int64_t length = sizeof(SSyncLogReplMgr) * (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA);
+  oldLogReplMgrs = taosMemoryMalloc(length);
+  if (NULL == oldLogReplMgrs) return -1;
+  memset(oldLogReplMgrs, 0, length);

   for(int i = 0; i < oldtotalReplicaNum; i++){
     oldLogReplMgrs[i] = *(ths->logReplMgrs[i]);
@@ -2643,6 +2647,8 @@ int32_t syncNodeRebuildAndCopyIfExist(SSyncNode* ths, int32_t oldtotalReplicaNum
     }
   }

+  taosMemoryFree(oldLogReplMgrs);
+
   return 0;
 }
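Moving the oldLogReplMgrs array off the stack avoids a large fixed-size frame in a deep call path; the second hunk frees it on the function's exit. A compact, self-contained sketch of the transformation (illustrative sizes, not the real sync types):

```c
#include <stdlib.h>
#include <string.h>

#define SLOTS 20
typedef struct { char payload[4096]; } BigEntry;

// Sketch: was `BigEntry old[SLOTS] = {0};` on the stack (~80 KB); now the buffer
// is heap-allocated, zeroed, and freed on the way out, and allocation failure
// is reported to the caller instead of risking stack exhaustion.
static int rebuildCopy(void) {
  BigEntry *old = malloc(sizeof(BigEntry) * SLOTS);
  if (old == NULL) return -1;
  memset(old, 0, sizeof(BigEntry) * SLOTS);
  // ... copy entries out, rebuild, copy back ...
  free(old);
  return 0;
}

int main(void) { return rebuildCopy(); }
```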
@@ -59,7 +59,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
   ASSERT(pData->pWal != NULL);

   taosThreadMutexInit(&(pData->mutex), NULL);
-  pData->pWalHandle = walOpenReader(pData->pWal, NULL);
+  pData->pWalHandle = walOpenReader(pData->pWal, NULL, 0);
   ASSERT(pData->pWalHandle != NULL);

   pLogStore->syncLogUpdateCommitIndex = raftLogUpdateCommitIndex;
@@ -395,7 +395,7 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
   if (status == 0) {
     tTrace("success to dispatch conn to work thread");
   } else {
-    tError("fail to dispatch conn to work thread");
+    tError("fail to dispatch conn to work thread, code:%s", uv_strerror(status));
   }
   if (!uv_is_closing((uv_handle_t*)req->data)) {
     uv_close((uv_handle_t*)req->data, uvFreeCb);
@@ -16,7 +16,7 @@
 #include "taoserror.h"
 #include "walInt.h"

-SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) {
+SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond, int64_t id) {
   SWalReader *pReader = taosMemoryCalloc(1, sizeof(SWalReader));
   if (pReader == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -24,7 +24,7 @@ SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) {
   }

   pReader->pWal = pWal;
-  pReader->readerId = tGenIdPI64();
+  pReader->readerId = (id != 0) ? id : tGenIdPI64();
   pReader->pIdxFile = NULL;
   pReader->pLogFile = NULL;
   pReader->curVersion = -1;
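Callers that don't care keep passing 0 and get a generated reader id as before; callers that pass a non-zero id can correlate the WAL log lines that now print it. A tiny standalone sketch of the selection rule (genId() is a hypothetical stand-in for tGenIdPI64()):

```c
#include <stdint.h>
#include <stdio.h>

// Stand-in for the internal id generator, for illustration only.
static int64_t genId(void) {
  static int64_t next = 1000;
  return ++next;
}

// Mirrors the assignment above: a caller-supplied non-zero id wins,
// otherwise a fresh id is generated.
static int64_t pickReaderId(int64_t requested) {
  return (requested != 0) ? requested : genId();
}

int main(void) {
  printf("explicit: 0x%llx\n", (unsigned long long)pickReaderId(0x1234));  // caller's id
  printf("default:  %lld\n", (long long)pickReaderId(0));                  // generated
  return 0;
}
```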
@@ -75,6 +75,7 @@ int32_t walNextValidMsg(SWalReader *pReader) {
     terrno = TSDB_CODE_WAL_LOG_NOT_EXIST;
     return -1;
   }
+
   while (fetchVer <= appliedVer) {
     if (walFetchHead(pReader, fetchVer) < 0) {
       return -1;
@@ -257,9 +258,9 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
   bool seeked = false;

   wDebug("vgId:%d, try to fetch ver %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
-         ", applied ver:%" PRId64,
+         ", applied ver:%" PRId64 ", 0x%" PRIx64,
          pRead->pWal->cfg.vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
-         pRead->pWal->vers.appliedVer);
+         pRead->pWal->vers.appliedVer, pRead->readerId);

   // TODO: valid ver
   if (ver > pRead->pWal->vers.commitVer) {
@@ -297,7 +298,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
   code = walValidHeadCksum(pRead->pHead);

   if (code != 0) {
-    wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver);
+    wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed, 0x%" PRIx64, pRead->pWal->cfg.vgId, ver,
+           pRead->readerId);
     terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
     return -1;
   }
@@ -307,9 +309,9 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {

 int32_t walSkipFetchBody(SWalReader *pRead) {
   wDebug("vgId:%d, skip fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
-        ", applied ver:%" PRId64,
+        ", applied ver:%" PRId64 ", 0x%" PRIx64,
         pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer,
-        pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer);
+        pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer, pRead->readerId);

   int64_t code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR);
   if (code < 0) {
@@ -324,11 +326,13 @@ int32_t walSkipFetchBody(SWalReader *pRead) {
 int32_t walFetchBody(SWalReader *pRead) {
   SWalCont *pReadHead = &pRead->pHead->head;
   int64_t   ver = pReadHead->version;
+  int32_t   vgId = pRead->pWal->cfg.vgId;
+  int64_t   id = pRead->readerId;

   wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
-        ", applied ver:%" PRId64,
-        pRead->pWal->cfg.vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
-        pRead->pWal->vers.appliedVer);
+        ", applied ver:%" PRId64 ", 0x%" PRIx64,
+        vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
+        pRead->pWal->vers.appliedVer, id);

   if (pRead->capacity < pReadHead->bodyLen) {
     SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen);
@@ -344,26 +348,25 @@ int32_t walFetchBody(SWalReader *pRead) {
   if (pReadHead->bodyLen != taosReadFile(pRead->pLogFile, pReadHead->body, pReadHead->bodyLen)) {
     if (pReadHead->bodyLen < 0) {
       terrno = TAOS_SYSTEM_ERROR(errno);
-      wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s",
-             pRead->pWal->cfg.vgId, pReadHead->version, ver, tstrerror(terrno));
+      wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s, 0x%" PRIx64,
+             vgId, pReadHead->version, ver, tstrerror(terrno), id);
     } else {
-      wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted",
-             pRead->pWal->cfg.vgId, pReadHead->version, ver);
+      wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted, 0x%" PRIx64,
+             vgId, pReadHead->version, ver, id);
       terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
     }
     return -1;
   }

   if (pReadHead->version != ver) {
-    wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId,
-           pReadHead->version, ver);
+    wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64 ", 0x%" PRIx64, vgId,
+           pReadHead->version, ver, id);
     terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
     return -1;
   }

   if (walValidBodyCksum(pRead->pHead) != 0) {
-    wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId,
-           ver);
+    wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed, 0x%" PRIx64, vgId, ver, id);
     terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
     return -1;
   }
@@ -326,7 +326,7 @@ TEST_F(WalCleanDeleteEnv, roll) {
 TEST_F(WalKeepEnv, readHandleRead) {
   walResetEnv();
   int code;
-  SWalReader* pRead = walOpenReader(pWal, NULL);
+  SWalReader* pRead = walOpenReader(pWal, NULL, 0);
   ASSERT(pRead != NULL);

   int i;
@@ -387,7 +387,7 @@ TEST_F(WalRetentionEnv, repairMeta1) {

   ASSERT_EQ(pWal->vers.lastVer, 99);

-  SWalReader* pRead = walOpenReader(pWal, NULL);
+  SWalReader* pRead = walOpenReader(pWal, NULL, 0);
   ASSERT(pRead != NULL);

   for (int i = 0; i < 1000; i++) {
@@ -86,7 +86,7 @@ pip3 install kafka-python
 python3 kafka_example_consumer.py

 # 21
-pip3 install taos-ws-py
+pip3 install taos-ws-py==0.2.6
 python3 conn_websocket_pandas.py

 # 22
@@ -0,0 +1,37 @@
+import time
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 1, "ttlChangeOnWrite": 0}
+
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+        self.ttl = 5
+        self.dbname = "test"
+
+    def check_ttl_result(self):
+        tdSql.execute(f'create database {self.dbname}')
+        tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)')
+        tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}')
+        tdSql.query(f'show {self.dbname}.tables')
+        tdSql.checkRows(2)
+        tdSql.execute(f'flush database {self.dbname}')
+        time.sleep(self.ttl + 2)
+        tdSql.query(f'show {self.dbname}.tables')
+        tdSql.checkRows(1)
+
+    def run(self):
+        self.check_ttl_result()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,63 @@
+import time
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    updatecfgDict = {'ttlUnit': 1, "ttlPushInterval": 3, "ttlChangeOnWrite": 1, "trimVDbIntervalSec": 360,
+                     "ttlFlushThreshold": 100, "ttlBatchDropNum": 10}
+
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+        self.ttl = 5
+        self.tables = 100
+        self.dbname = "test"
+
+    def check_batch_drop_num(self):
+        tdSql.execute(f'create database {self.dbname} vgroups 1')
+        tdSql.execute(f'use {self.dbname}')
+        tdSql.execute(f'create table stb(ts timestamp, c1 int) tags(t1 int)')
+        for i in range(self.tables):
+            tdSql.execute(f'create table t{i} using stb tags({i}) ttl {self.ttl}')
+
+        tdSql.execute(f'flush database {self.dbname}')
+        time.sleep(self.ttl + self.updatecfgDict['ttlPushInterval'] + 1)
+        tdSql.query('show tables')
+        tdSql.checkRows(90)
+
+    def check_ttl_result(self):
+        tdSql.execute(f'drop database if exists {self.dbname}')
+        tdSql.execute(f'create database {self.dbname}')
+        tdSql.execute(f'create table {self.dbname}.t1(ts timestamp, c1 int)')
+        tdSql.execute(f'create table {self.dbname}.t2(ts timestamp, c1 int) ttl {self.ttl}')
+        tdSql.query(f'show {self.dbname}.tables')
+        tdSql.checkRows(2)
+
+        tdSql.execute(f'flush database {self.dbname}')
+        time.sleep(self.ttl - 1)
+        tdSql.execute(f'insert into {self.dbname}.t2 values(now, 1)')
+
+        tdSql.execute(f'flush database {self.dbname}')
+        time.sleep(self.ttl - 1)
+        tdSql.query(f'show {self.dbname}.tables')
+        tdSql.checkRows(2)
+
+        tdSql.execute(f'flush database {self.dbname}')
+        time.sleep(self.ttl * 2)
+        tdSql.query(f'show {self.dbname}.tables')
+        tdSql.checkRows(1)
+
+    def run(self):
+        self.check_batch_drop_num()
+        self.check_ttl_result()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
@@ -35,6 +35,7 @@ class TDTestCase:
             tdSql.execute(f'create table db.{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.ttl_param}')
         tdSql.query(f'show db.tables')
         tdSql.checkRows(self.tbnum)
+        tdSql.execute(f'flush database db')
         sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1)
         tdSql.query(f'show db.tables')
         tdSql.checkRows(0)
@@ -42,6 +43,7 @@ class TDTestCase:
             tdSql.execute(f'create table db.{self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.default_ttl}')
         for i in range(int(self.tbnum/2)):
            tdSql.execute(f'alter table db.{self.ntbname}_{i} ttl {self.modify_ttl}')
+        tdSql.execute(f'flush database db')
         sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'] + 1)
         tdSql.query(f'show db.tables')
         tdSql.checkRows(self.tbnum - int(self.tbnum/2))
@@ -54,6 +56,7 @@ class TDTestCase:
             tdSql.execute(f'create table db.{self.stbname}_{i} using db.{self.stbname} tags({i}) ttl {self.ttl_param}')
         tdSql.query(f'show db.tables')
         tdSql.checkRows(self.tbnum)
+        tdSql.execute(f'flush database db')
         sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1)
         tdSql.query(f'show db.tables')
         tdSql.checkRows(0)
@@ -63,6 +66,7 @@ class TDTestCase:
         tdSql.checkRows(self.tbnum)
         for i in range(int(self.tbnum/2)):
             tdSql.execute(f'alter table db.{self.stbname}_{i} ttl {self.modify_ttl}')
+        tdSql.execute(f'flush database db')
         sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'] + 1)
         tdSql.query(f'show db.tables')
         tdSql.checkRows(self.tbnum - int(self.tbnum/2))
@@ -75,6 +79,7 @@ class TDTestCase:
             tdSql.execute(f'insert into db.{self.stbname}_{i} using db.{self.stbname} tags({i}) ttl {self.ttl_param} values(now,1)')
         tdSql.query(f'show db.tables')
         tdSql.checkRows(self.tbnum)
+        tdSql.execute(f'flush database db')
         sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'] + 1)
         tdSql.query(f'show db.tables')
         tdSql.checkRows(0)
@@ -16,6 +16,9 @@ class TDTestCase:
         self.perfix = 'dev'
         self.tables = 10

+    def check_result(self):
+        for i in range(self.rowNum):
+            tdSql.checkData(i, 0, 1)
+
     def run(self):
         tdSql.prepare()
@@ -179,11 +182,6 @@ class TDTestCase:
         tdSql.error(f"select diff(col8) from {dbname}.stb_1")
         tdSql.error(f"select diff(col9) from {dbname}.stb")
         tdSql.error(f"select diff(col9) from {dbname}.stb_1")
-        tdSql.error(f"select diff(col11) from {dbname}.stb_1")
-        tdSql.error(f"select diff(col12) from {dbname}.stb_1")
-        tdSql.error(f"select diff(col13) from {dbname}.stb_1")
-        tdSql.error(f"select diff(col14) from {dbname}.stb_1")
-        tdSql.error(f"select diff(col14) from {dbname}.stb_1")
         tdSql.error(f"select diff(col1,col1,col1) from {dbname}.stb_1")
         tdSql.error(f"select diff(col1,1,col1) from {dbname}.stb_1")
         tdSql.error(f"select diff(col1,col1,col) from {dbname}.stb_1")
@@ -217,6 +215,22 @@ class TDTestCase:
         tdSql.query(f"select diff(col6) from {dbname}.stb_1")
         tdSql.checkRows(10)

+        tdSql.query(f"select diff(col11) from {dbname}.stb_1")
+        tdSql.checkRows(10)
+        self.check_result()
+
+        tdSql.query(f"select diff(col12) from {dbname}.stb_1")
+        tdSql.checkRows(10)
+        self.check_result()
+
+        tdSql.query(f"select diff(col13) from {dbname}.stb_1")
+        tdSql.checkRows(10)
+        self.check_result()
+
+        tdSql.query(f"select diff(col14) from {dbname}.stb_1")
+        tdSql.checkRows(10)
+        self.check_result()
+
         tdSql.execute(f'''create table {dbname}.stb1(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                     col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
         tdSql.execute(f"create table {dbname}.stb1_1 using {dbname}.stb tags('shanghai')")
@@ -6,7 +6,8 @@ from util.cases import *
 from util.common import *

 class TDTestCase:
-    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
+    updatecfgDict = {'vdebugFlag': 143, 'qdebugflag': 135, 'tqdebugflag': 135, 'udebugflag': 135, 'rpcdebugflag': 135,
+                     'asynclog': 0}
     def init(self, conn, logSql, replicaVar=1):
         self.replicaVar = int(replicaVar)
         tdLog.debug("start to execute %s" % __file__)