Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TS-4994-3.0
commit 496d9bb405
@@ -0,0 +1,5 @@
+# Pull Request Checklist
+
+- [ ] Is the user manual updated?
+- [ ] Are the test cases passed and automated?
+- [ ] Is there no significant decrease in test coverage?
@@ -355,7 +355,7 @@ def pre_test_build_win() {
     bat '''
     cd %WIN_COMMUNITY_ROOT%/tests/ci
     pip3 install taospy==2.7.16
-    pip3 install taos-ws-py==0.3.3
+    pip3 install taos-ws-py==0.3.5
     xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
     '''
     return 1
@@ -451,8 +451,8 @@ pipeline {
         stage('run test') {
             when {
-                allOf {
-                    not { expression { file_no_doc_changed == '' }}
+                expression {
+                    file_no_doc_changed != '' && env.CHANGE_TARGET != 'docs-cloud'
                 }
             }
             parallel {
@@ -97,10 +97,13 @@ ELSE()
     SET(TD_TAOS_TOOLS TRUE)
 ENDIF()
 
+SET(TAOS_LIB taos)
+SET(TAOS_LIB_STATIC taos_static)
+
 IF(${TD_WINDOWS})
-    SET(TAOS_LIB taos_static)
+    SET(TAOS_LIB_PLATFORM_SPEC taos_static)
 ELSE()
-    SET(TAOS_LIB taos)
+    SET(TAOS_LIB_PLATFORM_SPEC taos)
 ENDIF()
 
 # build TSZ by default
@@ -422,7 +422,7 @@ CAST(expr AS type_name)
 TO_ISO8601(expr [, timezone])
 ```
 
-**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
+**Description**: The ISO8601 date/time format converted from a timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
 
 **Return value type**: VARCHAR
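For reference, a minimal sketch of the TO_ISO8601 usage described above; the NOW() expression and the '+08:00' timezone string are arbitrary illustrative values, not part of this change:

```sql
-- Convert a timestamp expression to an ISO8601 string in a chosen time zone
SELECT TO_ISO8601(NOW(), '+08:00');
-- Omitting the second argument falls back to the client's time zone
SELECT TO_ISO8601(NOW());
```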
@@ -466,7 +466,7 @@ return_timestamp: {
 }
 ```
 
-**Description**: UNIX timestamp converted from a string of date/time format
+**Description**: timestamp converted from a string of date/time format
 
 **Return value type**: BIGINT, TIMESTAMP
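A hedged example of TO_UNIXTIMESTAMP with the return_timestamp switch shown in the syntax above; the date string is an arbitrary illustration:

```sql
-- return_timestamp = 0 (default): result is a BIGINT epoch value
SELECT TO_UNIXTIMESTAMP('2024-01-01T08:00:00+08:00');
-- return_timestamp = 1: result is a TIMESTAMP value
SELECT TO_UNIXTIMESTAMP('2024-01-01T08:00:00+08:00', 1);
```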
@@ -41,12 +41,18 @@ We recommend using the latest version of `taospy`, regardless of the version of
 
 |Python Client Library Version|major changes|
 |:-------------------:|:----:|
+|2.7.16|add subscription configuration (session.timeout.ms, max.poll.interval.ms)|
+|2.7.15|added support for VARBINARY and GEOMETRY types|
+|2.7.14|fix known issues|
+|2.7.13|add TMQ synchronous submission offset interface|
 |2.7.12|1. added support for `varbinary` type (STMT does not yet support)<br/> 2. improved query performance (thanks to contributor [hadrianl](https://github.com/taosdata/taos-connector-python/pull/209))|
 |2.7.9|support for getting assignment and seek function on subscription|
 |2.7.8|add `execute_many` method|
 
 |Python Websocket Connection Version|major changes|
 |:----------------------------:|:-----:|
+|0.3.5|1. added support for VARBINARY and GEOMETRY types <br/> 2. Fix known issues|
+|0.3.2|1. optimize WebSocket SQL query and insertion performance <br/> 2. Fix known issues <br/> 3. Modify the readme and document|
 |0.2.9|bugs fixes|
 |0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
 |0.2.4|support `unsubscribe` on subscription|
@@ -27,6 +27,8 @@ Node.js client library needs to be run with Node.js 14 or higher version.
 
 | Node.js connector version | major changes | TDengine 版本 |
 | :-----------------------: | :------------------: | :----------------:|
+| 3.1.2 | Optimized the data protocol and parsing, resulting in a significant improvement in performance | 3.2.0.0 or later |
+| 3.1.1 | Optimized data transmission performance | 3.2.0.0 or later |
 | 3.1.0 | new version, supports websocket | 3.2.0.0 or later |
 
 ## Supported features
@@ -773,7 +773,7 @@ lossyColumns float|double
 02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
 ```
 
 ### ifAdtFse
 
 | Attribute | Description |
 | -------- | -------------------------------- |
@@ -898,4 +898,4 @@ lossyColumns float|double
 | 53 | udf | Yes | Yes | |
 | 54 | enableCoreFile | Yes | Yes | |
 | 55 | ttlChangeOnWrite | No | Yes | |
-| 56 | keepTimeOffset | Yes | Yes(discarded since 3.2.0.0) | |
+| 56 | keepTimeOffset | Yes | Yes(discarded since 3.2.0.0) | see "KEEP_TIME_OFFSET" |
@@ -1,68 +1,44 @@
 ---
-sidebar_label: 数据缓存
-title: 数据缓存
+sidebar_label: 读缓存
+title: 读缓存
 toc_max_heading_level: 4
 ---
 
-在工业互联网和物联网大数据应用场景中,时序数据库的性能表现尤为关键。这类应用程序不仅要求数据的实时写入能力,还需求能够迅速获取设备的最新状态或对最新数据进行实时计算。通常,大数据平台会通过部署 Redis 或类似的缓存技术来满足这些需求。然而,这种做法会增加系统的复杂性和运营成本。
-
-为了解决这一问题,TDengine 采用了针对性的缓存优化策略。通过精心设计的缓存机制,TDengine 实现了数据的实时高效写入和快速查询,从而有效降低整个集群的复杂性和运营成本。这种优化不仅提升了性能,还为用户带来了更简洁、易用的解决方案,使他们能够更专注于核心业务的发展。
-
-## 写缓存
-
-TDengine 采用了一种创新的时间驱动缓存管理策略,亦称为写驱动的缓存管理机制。这一策略与传统的读驱动的缓存模式有所不同,其核心思想是将最新写入的数据优先保存在缓存中。当缓存容量达到预设的临界值时,系统会将最早存储的数据批量写入硬盘,从而实现缓存与硬盘之间的动态平衡。
-
-在物联网数据应用中,用户往往最关注最近产生的数据,即设备的当前状态。TDengine 充分利用了这一业务特性,将最近到达的当前状态数据优先存储在缓存中,以便用户能够快速获取所需信息。
-
-为了实现数据的分布式存储和高可用性,TDengine 引入了虚拟节点(vnode)的概念。每个 vnode 可以拥有多达 3 个副本,这些副本共同组成一个 vnode group,简称 vgroup。在创建数据库时,用户需要确定每个 vnode 的写入缓存大小,以确保数据的合理分配和高效存储。
-
-创建数据库时的两个关键参数 `vgroups` 和 `buffer` 分别决定了数据库中的数据由多少个 vgroup 进行处理,以及为每个 vnode 分配多少写入缓存。通过合理配置这两个
-参数,用户可以根据实际需求调整数据库的性能和存储容量,从而实现最佳的性能和成本效益。
-
-例 如, 下面的 SQL 创建了包含 10 个 vgroup,每个 vnode 占 用 256MB 内存的数据库。
-```sql
-CREATE DATABASE POWER VGROUPS 10 BUFFER 256 CACHEMODEL 'NONE' PAGES 128 PAGESIZE 16;
-```
-
-缓存越大越好,但超过一定阈值后再增加缓存对写入性能提升并无帮助。
-
-## 读缓存
-
-在创建数据库时,用户可以选择是否启用缓存机制以存储该数据库中每张子表的最新数据。这一缓存机制由数据库创建参数 cachemodel 进行控制。参数 cachemodel 具有如
-下 4 种情况:
-- none: 不缓存
-- last_row: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能
-- last_value: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能
-- both: 同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效
+在物联网(IoT)和工业互联网(IIoT)大数据应用场景中,实时数据的价值往往远超历史数据。企业不仅需要数据处理系统具备高效的实时写入能力,更需要能快速获取设备的最新状态,或者对最新数据进行实时计算和分析。无论是工业设备的状态监控、车联网中的车辆位置追踪,还是智能仪表的实时读数,当前值都是业务运行中不可或缺的核心数据。这些数据直接关系到生产安全、运营效率以及用户体验。
+
+例如,在工业生产中,生产线设备的当前运行状态至关重要。操作员需要实时监控温度、压力、转速等关键指标,一旦设备出现异常,这些数据必须即时呈现,以便迅速调整工艺参数,避免停产或更大的损失。在车联网领域,以滴滴为例,车辆的实时位置数据是滴滴平台优化派单策略、提升运营效率的关键,确保每位乘客快速上车并享受更高质量的出行体验。
+
+同时,看板系统和智能仪表作为现场操作和用户端的窗口,也需要实时数据支撑。无论是工厂管理者通过看板获取的实时生产指标,还是家庭用户随时查询智能水表、电表的用量,实时性不仅影响到运营和决策效率,更直接关系到用户对服务的满意程度。
+
+## 传统缓存方案的局限性
+
+为了满足这些高频实时查询需求,许多企业选择将 Redis 等缓存技术集成到大数据平台中,通过在数据库和应用之间添加一层缓存来提升查询性能。然而,这种方法也带来了不少问题:
+- 系统复杂性增加:需要额外部署和维护缓存集群,对系统架构提出了更高的要求。
+- 运营成本上升:需要额外的硬件资源来支撑缓存,增加了维护和管理的开销。
+- 一致性问题:缓存和数据库之间的数据同步需要额外的机制来保障,否则可能出现数据不一致的情况。
+
+## TDengine 的解决方案:内置读缓存
+
+为了解决这些问题,TDengine 针对物联网和工业互联网的高频实时查询场景,设计并实现了读缓存机制。这一机制能够自动将每张表的最后一条记录缓存到内存中,从而在不引入第三方缓存技术的情况下,直接满足用户对当前值的实时查询需求。
+
+TDengine 采用时间驱动的缓存管理策略,将最新数据优先存储在缓存中,查询时无需访问硬盘即可快速返回结果。当缓存容量达到设定上限时,系统会批量将最早的数据写入硬盘,既提升了查询效率,也有效减少了硬盘的写入负担,延长硬件使用寿命。
+
+用户可通过设置 cachemodel 参数,自定义缓存模式,包括缓存最新一行数据、每列最近的非 NULL 值,或同时缓存行和列的数据。这种灵活设计在物联网场景中尤为重要,使设备状态的实时查询更加高效精准。
+
+这种读缓存机制的内置化设计显著降低了查询延迟,避免了引入 Redis 等外部系统的复杂性和运维成本。同时,减少了频繁查询对存储系统的压力,大幅提升系统的整体吞吐能力,确保在高并发场景下依然稳定高效运行。通过读缓存,TDengine 为用户提供了一种更轻量化的实时数据处理方案,不仅优化了查询性能,还降低了整体运维成本,为物联网和工业互联网用户提供强有力的技术支持。
+
+## TDengine 的读缓存配置
+
+在创建数据库时,用户可以选择是否启用缓存机制以存储该数据库中每张子表的最新数据。这一缓存机制由数据库创建参数 cachemodel 进行控制。参数 cachemodel 具有如 下 4 种情况:
+- none:不缓存
+- last_row:缓存子表最近一行数据,这将显著改善 last_row 函数的性能
+- last_value:缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE,ORDER BY,GROUP BY, INTERVAL)时的 last 函数的性能
+- both:同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效
 
 当使用数据库读缓存时,可以使用参数 cachesize 来配置每个 vnode 的内存大小。
-- cachesize:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1, 65536],单位是 MB。需要根据机器内存合理配置。
+- cachesize:表示每个 vnode 中用于缓存子表最近数据的内存大小。默认为 1 ,范围是[1,65536],单位是 MB。需要根据机器内存合理配置。
 
+关于数据库的具体创建,相关参数和操作说明请参考[创建数据库](../../reference/taos-sql/database/)
-## 元数据缓存
-
-为了提升查询和写入操作的效率,每个 vnode 都配备了缓存机制,用于存储其曾经获取过的元数据。这一元数据缓存的大小由创建数据库时的两个参数 pages 和 pagesize 共同决定。其中,pagesize 参数的单位是 KB,用于指定每个缓存页的大小。如下 SQL 会为数据库 power 的每个 vnode 创建 128 个 page、每个 page 16KB 的元数据缓存
-
-```sql
-CREATE DATABASE POWER PAGES 128 PAGESIZE 16;
-```
-
-## 文件系统缓存
-
-TDengine 采用 WAL 技术作为基本的数据可靠性保障手段。WAL 是一种先进的数据保护机制,旨在确保在发生故障时能够迅速恢复数据。其核心原理在于,在数据实际写入数据存储层之前,先将其变更记录到一个日志文件中。这样一来,即便集群遭遇崩溃或其他故障,也能确保数据安全无损。
-
-TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 的过程中,数据是以顺序追加的方式写入硬盘文件的。因此,文件系统缓存在此过程中发挥着关键作用,对写入性能产生显著影响。为了确保数据真正落盘,系统会调用 fsync 函数,该函数负责将文件系统缓存中的数据强制写入硬盘。
-
-数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。。
-- wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。
-- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。
-
-```sql
-CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
-```
-
-在创建数据库时可以选择不同的参数类型,来选择性能优先或者可靠性优先。
-- 1: 写 WAL 但不执行 fsync ,新写入 WAL 的数据保存在文件系统缓存中但并未写入磁盘,这种方式性能优先
-- 2: 写 WAL 且执行 fsync,新写入 WAL 的数据被立即同步到磁盘上,可靠性更高
 
 ## 实时数据查询的缓存实践
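A sketch of the cachemodel / cachesize options documented in the hunk above; the database and table names are illustrative assumptions, not part of this change:

```sql
-- Cache both the latest row and the latest non-NULL column values,
-- with 16 MB of read cache per vnode
CREATE DATABASE power CACHEMODEL 'both' CACHESIZE 16;
-- Queries like this one can then be answered from the read cache
SELECT LAST_ROW(ts, current) FROM power.meters;
```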
@@ -124,7 +124,7 @@ create stream if not exists count_history_s fill_history 1 into count_history as
 
 窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。
 
-因此,流计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 模式在窗口关闭时会立即触发计算。此外,当数据写入后,计算触发的时间超过 max delay 指定的时间,则立即触发计算。
+因此,流计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式:MAX_DELAY 模式在窗口关闭时会立即触发计算,它的单位可以自行指定,具体单位:a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。此外,当数据写入后,计算触发的时间超过 MAX_DELAY 指定的时间,则立即触发计算。
 
 ### 流计算的窗口关闭
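A minimal sketch of a stream using the MAX_DELAY trigger discussed above; the stream name, source table, and the 3s delay are assumed example values:

```sql
-- Emit a partial result at most 3 seconds after data arrives,
-- even if the 1-minute window has not closed yet
CREATE STREAM IF NOT EXISTS avg_vol_s
TRIGGER MAX_DELAY 3s
INTO avg_vol AS
SELECT _wstart, AVG(voltage) AS avg_voltage
FROM meters
INTERVAL(1m);
```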
@@ -259,4 +259,4 @@ flush database test1;
 
 5.修改 taos.cfg,去掉 disableStream 1,或将 disableStream 改为 0
 
 6.启动 taosd
@@ -8,10 +8,10 @@ description: "创建、删除数据库,查看、修改数据库参数"
 
 ```sql
 CREATE DATABASE [IF NOT EXISTS] db_name [database_options]
 
 database_options:
     database_option ...
 
 database_option: {
     VGROUPS value
   | PRECISION {'ms' | 'us' | 'ns'}
@@ -26,6 +26,7 @@ database_option: {
   | MAXROWS value
   | MINROWS value
   | KEEP value
+  | KEEP_TIME_OFFSET value
   | STT_TRIGGER value
   | SINGLE_STABLE {0 | 1}
   | TABLE_PREFIX value
@@ -63,8 +64,8 @@ database_option: {
 - DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。
 - MAXROWS:文件块中记录的最大条数,默认为 4096 条。
 - MINROWS:文件块中记录的最小条数,默认为 100 条。
-- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于3倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/#%E5%85%B3%E4%BA%8E%E4%B8%BB%E9%94%AE%E6%97%B6%E9%97%B4%E6%88%B3)。
+- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/)
+- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。
 - STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
 - SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
 - 0:表示可以创建多张超级表。
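An illustrative CREATE DATABASE statement combining the KEEP and KEEP_TIME_OFFSET options described above; the database name and the concrete values are assumptions for the example:

```sql
-- Keep data for 3650 days; once data expires, delay the delete/migrate job
-- by 12 hours so it runs outside business peak hours
CREATE DATABASE power DURATION 10d KEEP 3650 KEEP_TIME_OFFSET 12;
```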
@@ -1065,7 +1065,7 @@ CAST(expr AS type_name)
 TO_ISO8601(expr [, timezone])
 ```
 
-**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。
+**功能说明**:将时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。
 
 **返回结果数据类型**:VARCHAR 类型。
@@ -1109,7 +1109,7 @@ return_timestamp: {
 }
 ```
 
-**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
+**功能说明**:将日期时间格式的字符串转换成为时间戳。
 
 **返回结果数据类型**:BIGINT, TIMESTAMP。
@@ -1257,8 +1257,8 @@ TIMEDIFF(expr1, expr2 [, time_unit])
 **返回结果类型**:BIGINT。
 
 **适用数据类型**:
-- `expr1`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
-- `expr2`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+- `expr1`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
+- `expr2`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
 - `time_unit`:见使用说明。
 
 **嵌套子查询支持**:适用于内层查询和外层查询。
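A small example of the TIMEDIFF arguments covered above, using literal date strings for the two expressions and an hour unit; the values are illustrative only:

```sql
-- Difference between two timestamps expressed in hours (time_unit = 1h)
SELECT TIMEDIFF('2024-01-02 00:00:00', '2024-01-01 12:00:00', 1h);
```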
@@ -1301,7 +1301,7 @@ use_current_timezone: {
 
 **返回结果数据类型**:TIMESTAMP。
 
-**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**应用字段**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
 
 **适用于**:表和超级表。
@@ -1364,7 +1364,7 @@ WEEK(expr [, mode])
 **返回结果类型**:BIGINT。
 
 **适用数据类型**:
-- `expr`:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+- `expr`:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
 - `mode`:0 - 7 之间的整数。
 
 **嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1424,7 +1424,7 @@ WEEKOFYEAR(expr)
 
 **返回结果类型**:BIGINT。
 
-**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
 
 **嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1451,7 +1451,7 @@ WEEKDAY(expr)
 
 **返回结果类型**:BIGINT。
 
-**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**适用数据类型**:表示 表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
 
 **嵌套子查询支持**:适用于内层查询和外层查询。
@@ -1478,7 +1478,7 @@ DAYOFWEEK(expr)
 
 **返回结果类型**:BIGINT。
 
-**适用数据类型**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
+**适用数据类型**:表示时间戳的 BIGINT, TIMESTAMP 类型,或符合 ISO8601/RFC3339 标准的日期时间格式的 VARCHAR, NCHAR 类型。
 
 **嵌套子查询支持**:适用于内层查询和外层查询。
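One combined query illustrating the week-related functions documented in the hunks above; the literal date and the exact return conventions should be checked against the full function reference, so treat this purely as an illustrative sketch:

```sql
SELECT
  WEEK('2024-03-01', 0)    AS week_no,
  WEEKOFYEAR('2024-03-01') AS week_of_year,
  WEEKDAY('2024-03-01')    AS weekday_idx,
  DAYOFWEEK('2024-03-01')  AS dayofweek_idx;
```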
@@ -41,6 +41,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
 
 |Python Connector 版本|主要变化|
 |:-------------------:|:----:|
+|2.7.16|新增订阅配置 (session.timeout.ms, max.poll.interval.ms)|
 |2.7.15|新增 VARBINARY 和 GEOMETRY 类型支持|
 |2.7.14|修复已知问题|
 |2.7.13|新增 tmq 同步提交 offset 接口|
@@ -50,6 +51,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
 
 |Python WebSocket Connector 版本|主要变化|
 |:----------------------------:|:-----:|
+|0.3.5|新增 VARBINARY 和 GEOMETRY 类型支持,修复已知问题|
 |0.3.2|优化 WebSocket sql 查询和插入性能,修改 readme 和 文档,修复已知问题|
 |0.2.9|已知问题修复|
 |0.2.5|1. 数据订阅支持获取消费进度和重置消费进度 <br/> 2. 支持 schemaless <br/> 3. 支持 STMT|
@@ -26,6 +26,7 @@ Node.js 连接器目前仅支持 WebSocket 连接器, 其通过 taosAdapter
 
 | Node.js 连接器 版本 | 主要变化 | TDengine 版本 |
 | :------------------: | :----------------------: | :----------------: |
+| 3.1.2 | 对数据协议和解析进行了优化,性能得到大幅提升| 3.3.2.0 及更高版本 |
 | 3.1.1 | 优化了数据传输性能 | 3.3.2.0 及更高版本 |
 | 3.1.0 | 新版本发布,支持 WebSocket 连接 | 3.2.0.0 及更高版本 |
 
@@ -0,0 +1,62 @@
+---
+sidebar_label: 数据缓存
+title: 数据缓存
+toc_max_heading_level: 4
+---
+
+在现代物联网(IoT)和工业互联网(IIoT)应用中,数据的高效管理对系统性能和用户体验至关重要。为了应对高并发环境下的实时读写需求,TDengine 设计了一套完整的缓存机制,包括写缓存、读缓存、元数据缓存和文件系统缓存。这些缓存机制紧密结合,既能优化数据查询的响应速度,又能提高数据写入的效率,同时保障数据的可靠性和系统的高可用性。通过灵活配置缓存参数,TDengine 为用户提供了性能与成本之间的最佳平衡。
+
+## 写缓存
+
+TDengine 采用了一种创新的时间驱动缓存管理策略,亦称为写驱动的缓存管理机制。这一策略与传统的读驱动的缓存模式有所不同,其核心思想是将最新写入的数据优先保存在缓存中。当缓存容量达到预设的临界值时,系统会将最早存储的数据批量写入硬盘,从而实现缓存与硬盘之间的动态平衡。
+
+在物联网数据应用中,用户往往最关注最近产生的数据,即设备的当前状态。TDengine 充分利用了这一业务特性,将最近到达的当前状态数据优先存储在缓存中,以便用户能够快速获取所需信息。
+
+为了实现数据的分布式存储和高可用性,TDengine 引入了虚拟节点(vnode)的概念。每个 vnode 可以拥有多达 3 个副本,这些副本共同组成一个 vnode group,简称 vgroup。在创建数据库时,用户需要确定每个 vnode 的写入缓存大小,以确保数据的合理分配和高效存储。
+
+创建数据库时的两个关键参数 `vgroups` 和 `buffer` 分别决定了数据库中的数据由多少个 vgroup 进行处理,以及为每个 vnode 分配多少写入缓存。通过合理配置这两个
+参数,用户可以根据实际需求调整数据库的性能和存储容量,从而实现最佳的性能和成本效益。
+
+例 如, 下面的 SQL 创建了包含 10 个 vgroup,每个 vnode 占 用 256MB 内存的数据库。
+```sql
+CREATE DATABASE POWER VGROUPS 10 BUFFER 256 CACHEMODEL 'NONE' PAGES 128 PAGESIZE 16;
+```
+
+缓存越大越好,但超过一定阈值后再增加缓存对写入性能提升并无帮助。
+
+## 读缓存
+
+TDengine 的读缓存机制专为高频实时查询场景设计,尤其适用于物联网和工业互联网等需要实时掌握设备状态的业务场景。在这些场景中,用户往往最关心最新的数据,如设备的当前读数或状态。
+
+通过设置 cachemodel 参数,TDengine 用户可以灵活选择适合的缓存模式,包括缓存最新一行数据、每列最近的非 NULL 值,或同时缓存行和列的数据。这种灵活性使 TDengine 能根据具体业务需求提供精准优化,在物联网场景下尤为突出,助力用户快速访问设备的最新状态。
+
+这种设计不仅降低了查询的响应延迟,还能有效缓解存储系统的 I/O 压力。在高并发场景下,读缓存能够帮助系统维持更高的吞吐量,确保查询性能的稳定性。借助 TDengine 读缓存,用户无需再集成如 Redis 一类的外部缓存系统,避免了系统架构的复杂化,显著降低运维和部署成本。
+
+此外,TDengine 的读缓存机制还能够根据实际业务场景灵活调整。在数据访问热点集中在最新记录的场景中,这种内置缓存能够显著提高用户体验,让关键数据的获取更加快速高效。相比传统缓存方案,这种无缝集成的缓存策略不仅简化了开发流程,还为用户提供了更高的性能保障。
+
+关于 TDengine 读缓存的更多详细内容请看[读缓存](../../advanced/cache/)
+
+## 元数据缓存
+
+为了提升查询和写入操作的效率,每个 vnode 都配备了缓存机制,用于存储其曾经获取过的元数据。这一元数据缓存的大小由创建数据库时的两个参数 pages 和 pagesize 共同决定。其中,pagesize 参数的单位是 KB,用于指定每个缓存页的大小。如下 SQL 会为数据库 power 的每个 vnode 创建 128 个 page、每个 page 16KB 的元数据缓存
+
+```sql
+CREATE DATABASE POWER PAGES 128 PAGESIZE 16;
+```
+
+## 文件系统缓存
+
+TDengine 采用 WAL 技术作为基本的数据可靠性保障手段。WAL 是一种先进的数据保护机制,旨在确保在发生故障时能够迅速恢复数据。其核心原理在于,在数据实际写入数据存储层之前,先将其变更记录到一个日志文件中。这样一来,即便集群遭遇崩溃或其他故障,也能确保数据安全无损。
+
+TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL 的过程中,数据是以顺序追加的方式写入硬盘文件的。因此,文件系统缓存在此过程中发挥着关键作用,对写入性能产生显著影响。为了确保数据真正落盘,系统会调用 fsync 函数,该函数负责将文件系统缓存中的数据强制写入硬盘。
+
+数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。。
+- wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。
+- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 则表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,则表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。
+
+```sql
+CREATE DATABASE POWER WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
+```
+
+在创建数据库时,用户可以根据需求选择不同的参数设置,以在性能和可靠性之间找到最佳平衡:
+- 性能优先:将数据写入 WAL,但不立即执行 fsync 操作,此时新写入的数据仅保存在文件系统缓存中,尚未同步到磁盘。这种配置能够显著提高写入性能。
+- 可靠性优先:将数据写入 WAL 的同时执行 fsync 操作,将数据立即同步到磁盘,确保数据持久化,可靠性更高。
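Putting the parameters from the new page above together, a hedged example of a single CREATE DATABASE statement that tunes the write cache (VGROUPS/BUFFER), read cache (CACHEMODEL/CACHESIZE), metadata cache (PAGES/PAGESIZE), and WAL behavior; combining them in one statement and every concrete value here are assumptions for illustration:

```sql
CREATE DATABASE power VGROUPS 10 BUFFER 256 CACHEMODEL 'last_row' CACHESIZE 8 PAGES 128 PAGESIZE 16 WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
```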
@@ -1,7 +1,7 @@
 PROJECT(TDengine)
 
-IF (TD_LINUX)
+IF(TD_LINUX)
     INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
     AUX_SOURCE_DIRECTORY(. SRC)
 
     add_executable(tmq "")
@@ -12,58 +12,58 @@ IF (TD_LINUX)
     add_executable(asyncdemo "")
 
     target_sources(tmq
        PRIVATE
        "tmq.c"
        )
 
     target_sources(stream_demo
        PRIVATE
        "stream_demo.c"
        )
 
     target_sources(schemaless
        PRIVATE
        "schemaless.c"
        )
 
     target_sources(prepare
        PRIVATE
        "prepare.c"
        )
 
     target_sources(demo
        PRIVATE
        "demo.c"
        )
 
     target_sources(asyncdemo
        PRIVATE
        "asyncdemo.c"
        )
 
     target_link_libraries(tmq
-        taos
+        ${TAOS_LIB}
        )
 
     target_link_libraries(stream_demo
-        taos
+        ${TAOS_LIB}
        )
 
     target_link_libraries(schemaless
-        taos
+        ${TAOS_LIB}
        )
 
     target_link_libraries(prepare
-        taos
+        ${TAOS_LIB}
        )
 
     target_link_libraries(demo
-        taos
+        ${TAOS_LIB}
        )
 
     target_link_libraries(asyncdemo
-        taos
+        ${TAOS_LIB}
        )
 
     SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
     SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
@@ -71,8 +71,9 @@ IF (TD_LINUX)
     SET_TARGET_PROPERTIES(prepare PROPERTIES OUTPUT_NAME prepare)
     SET_TARGET_PROPERTIES(demo PROPERTIES OUTPUT_NAME demo)
     SET_TARGET_PROPERTIES(asyncdemo PROPERTIES OUTPUT_NAME asyncdemo)
-ENDIF ()
-IF (TD_DARWIN)
-    INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
+ENDIF()
+
+IF(TD_DARWIN)
+    INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
     AUX_SOURCE_DIRECTORY(. SRC)
-ENDIF ()
+ENDIF()
@@ -39,14 +39,14 @@ typedef struct {
 } SAnalyticsUrl;
 
 typedef enum {
-  ANAL_BUF_TYPE_JSON = 0,
-  ANAL_BUF_TYPE_JSON_COL = 1,
-  ANAL_BUF_TYPE_OTHERS,
+  ANALYTICS_BUF_TYPE_JSON = 0,
+  ANALYTICS_BUF_TYPE_JSON_COL = 1,
+  ANALYTICS_BUF_TYPE_OTHERS,
 } EAnalBufType;
 
 typedef enum {
-  ANAL_HTTP_TYPE_GET = 0,
-  ANAL_HTTP_TYPE_POST,
+  ANALYTICS_HTTP_TYPE_GET = 0,
+  ANALYTICS_HTTP_TYPE_POST,
 } EAnalHttpType;
 
 typedef struct {
@@ -61,11 +61,11 @@ typedef struct {
   char fileName[TSDB_FILENAME_LEN];
   int32_t numOfCols;
   SAnalyticsColBuf *pCols;
-} SAnalBuf;
+} SAnalyticBuf;
 
 int32_t taosAnalyticsInit();
 void taosAnalyticsCleanup();
-SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf);
+SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf);
 
 int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen);
 bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen);
@@ -73,18 +73,18 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optV
 int64_t taosAnalGetVersion();
 void taosAnalUpdate(int64_t newVer, SHashObj *pHash);
 
-int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols);
-int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal);
-int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal);
-int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal);
-int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName);
-int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf);
-int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex);
-int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue);
-int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex);
-int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf);
-int32_t taosAnalBufClose(SAnalBuf *pBuf);
-void taosAnalBufDestroy(SAnalBuf *pBuf);
+int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols);
+int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal);
+int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal);
+int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal);
+int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName);
+int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf);
+int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex);
+int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue);
+int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex);
+int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf);
+int32_t taosAnalBufClose(SAnalyticBuf *pBuf);
+void taosAnalBufDestroy(SAnalyticBuf *pBuf);
 
 const char *taosAnalAlgoStr(EAnalAlgoType algoType);
 EAnalAlgoType taosAnalAlgoInt(const char *algoName);
@@ -251,6 +251,7 @@ typedef struct SQueryTableDataCond {
   int32_t type;  // data block load type:
   bool skipRollup;
   STimeWindow twindows;
+  STimeWindow extTwindows[2];
   int64_t startVersion;
   int64_t endVersion;
   bool notLoadData;  // response the actual data, not only the rows in the attribute of info.row of ssdatablock
@@ -102,6 +102,7 @@ typedef struct SCatalogReq {
   bool svrVerRequired;
   bool forceUpdate;
   bool cloned;
+  bool forceFetchViewMeta;
 } SCatalogReq;
 
 typedef struct SMetaRes {
@@ -330,7 +330,7 @@ typedef struct SAlterDnodeStmt {
 
 typedef struct {
   ENodeType type;
-  char url[TSDB_ANAL_ANODE_URL_LEN + 3];
+  char url[TSDB_ANALYTIC_ANODE_URL_LEN + 3];
 } SCreateAnodeStmt;
 
 typedef struct {
@@ -174,6 +174,7 @@ char* nodesGetNameFromColumnNode(SNode* pNode);
 int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
 void nodesSortList(SNodeList** pList, int32_t (*)(SNode* pNode1, SNode* pNode2));
 void destroyFuncParam(void* pFuncStruct);
+int32_t nodesListDeduplicate(SNodeList** pList);
 
 #ifdef __cplusplus
 }
@@ -334,7 +334,7 @@ typedef struct SWindowLogicNode {
   int64_t windowSliding;
   SNodeList* pTsmaSubplans;
   SNode* pAnomalyExpr;
-  char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+  char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
 } SWindowLogicNode;
 
 typedef struct SFillLogicNode {
@@ -740,7 +740,7 @@ typedef SCountWinodwPhysiNode SStreamCountWinodwPhysiNode;
 typedef struct SAnomalyWindowPhysiNode {
   SWindowPhysiNode window;
   SNode* pAnomalyKey;
-  char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+  char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
 } SAnomalyWindowPhysiNode;
 
 typedef struct SSortPhysiNode {
@@ -351,7 +351,7 @@ typedef struct SAnomalyWindowNode {
   ENodeType type;  // QUERY_NODE_ANOMALY_WINDOW
   SNode* pCol;  // timestamp primary key
   SNode* pExpr;
-  char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
+  char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
 } SAnomalyWindowNode;
 
 typedef enum EFillMode {
@@ -492,13 +492,14 @@ int32_t taosGetErrSize();
 #define TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE TAOS_DEF_ERROR_CODE(0, 0x0438)
 
 // analysis
-#define TSDB_CODE_ANAL_URL_RSP_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x0440)
-#define TSDB_CODE_ANAL_URL_CANT_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0441)
-#define TSDB_CODE_ANAL_ALGO_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0442)
-#define TSDB_CODE_ANAL_ALGO_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x0443)
-#define TSDB_CODE_ANAL_BUF_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0444)
-#define TSDB_CODE_ANAL_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445)
-#define TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446)
+#define TSDB_CODE_ANA_URL_RSP_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x0440)
+#define TSDB_CODE_ANA_URL_CANT_ACCESS TAOS_DEF_ERROR_CODE(0, 0x0441)
+#define TSDB_CODE_ANA_ALGO_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0442)
+#define TSDB_CODE_ANA_ALGO_NOT_LOAD TAOS_DEF_ERROR_CODE(0, 0x0443)
+#define TSDB_CODE_ANA_BUF_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0444)
+#define TSDB_CODE_ANA_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445)
+#define TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446)
+#define TSDB_CODE_ANA_WN_DATA TAOS_DEF_ERROR_CODE(0, 0x0447)
 
 // mnode-sma
 #define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
@@ -186,11 +186,25 @@ static int32_t tBufferGetI16(SBufferReader *reader, int16_t *value) {
 }
 
 static int32_t tBufferGetI32(SBufferReader *reader, int32_t *value) {
-  return tBufferGet(reader, sizeof(*value), value);
+  if (reader->offset + sizeof(int32_t) > reader->buffer->size) {
+    return TSDB_CODE_OUT_OF_RANGE;
+  }
+  if (value) {
+    *value = *(int32_t*)BR_PTR(reader);
+  }
+  reader->offset += sizeof(int32_t);
+  return 0;
 }
 
 static int32_t tBufferGetI64(SBufferReader *reader, int64_t *value) {
-  return tBufferGet(reader, sizeof(*value), value);
+  if (reader->offset + sizeof(int64_t) > reader->buffer->size) {
+    return TSDB_CODE_OUT_OF_RANGE;
+  }
+  if (value) {
+    *value = *(int64_t*)BR_PTR(reader);
+  }
+  reader->offset += sizeof(int64_t);
+  return 0;
 }
 
 static int32_t tBufferGetU8(SBufferReader *reader, uint8_t *value) { return tBufferGet(reader, sizeof(*value), value); }
@@ -335,12 +335,13 @@ typedef enum ELogicConditionType {
 #define TSDB_SLOW_QUERY_SQL_LEN 512
 #define TSDB_SHOW_SUBQUERY_LEN 1000
 #define TSDB_LOG_VAR_LEN 32
-#define TSDB_ANAL_ANODE_URL_LEN 128
-#define TSDB_ANAL_ALGO_NAME_LEN 64
-#define TSDB_ANAL_ALGO_TYPE_LEN 24
-#define TSDB_ANAL_ALGO_KEY_LEN (TSDB_ANAL_ALGO_NAME_LEN + 9)
-#define TSDB_ANAL_ALGO_URL_LEN (TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN + 1)
-#define TSDB_ANAL_ALGO_OPTION_LEN 256
+#define TSDB_ANALYTIC_ANODE_URL_LEN 128
+#define TSDB_ANALYTIC_ALGO_NAME_LEN 64
+#define TSDB_ANALYTIC_ALGO_TYPE_LEN 24
+#define TSDB_ANALYTIC_ALGO_KEY_LEN (TSDB_ANALYTIC_ALGO_NAME_LEN + 9)
+#define TSDB_ANALYTIC_ALGO_URL_LEN (TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN + 1)
+#define TSDB_ANALYTIC_ALGO_OPTION_LEN 256
 
 #define TSDB_MAX_EP_NUM 10
 
@@ -5,24 +5,24 @@ if(TD_ENTERPRISE)
 endif()
 
 if(TD_WINDOWS)
-  add_library(taos SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in)
+  add_library(${TAOS_LIB} SHARED ${CLIENT_SRC} ${CMAKE_CURRENT_SOURCE_DIR}/src/taos.rc.in)
 else()
-  add_library(taos SHARED ${CLIENT_SRC})
+  add_library(${TAOS_LIB} SHARED ${CLIENT_SRC})
 endif()
 
 if(${TD_DARWIN})
-  target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype)
+  target_compile_options(${TAOS_LIB} PRIVATE -Wno-error=deprecated-non-prototype)
 endif()
 
 INCLUDE_DIRECTORIES(jni)
 
 target_include_directories(
-  taos
+  ${TAOS_LIB}
   PUBLIC "${TD_SOURCE_DIR}/include/client"
   PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
 )
 target_link_libraries(
-  taos
+  ${TAOS_LIB}
   INTERFACE api
   PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry
 )
@@ -36,32 +36,32 @@ else()
 endif()
 
 set_target_properties(
-  taos
+  ${TAOS_LIB}
   PROPERTIES
   CLEAN_DIRECT_OUTPUT
   1
 )
 
 set_target_properties(
-  taos
+  ${TAOS_LIB}
   PROPERTIES
   VERSION ${TD_VER_NUMBER}
   SOVERSION 1
 )
 
-add_library(taos_static STATIC ${CLIENT_SRC})
+add_library(${TAOS_LIB_STATIC} STATIC ${CLIENT_SRC})
 
 if(${TD_DARWIN})
-  target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype)
+  target_compile_options(${TAOS_LIB_STATIC} PRIVATE -Wno-error=deprecated-non-prototype)
 endif()
 
 target_include_directories(
-  taos_static
+  ${TAOS_LIB_STATIC}
   PUBLIC "${TD_SOURCE_DIR}/include/client"
   PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
 )
 target_link_libraries(
-  taos_static
+  ${TAOS_LIB_STATIC}
   INTERFACE api
   PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry
 )
@@ -3032,13 +3032,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
   if (code != TSDB_CODE_SUCCESS) {
     pRequest->code = code;
     taosMemoryFreeClear(pResultInfo->pData);
-    pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0);
+    pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code);
     return;
   }
 
   if (pRequest->code != TSDB_CODE_SUCCESS) {
     taosMemoryFreeClear(pResultInfo->pData);
-    pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0);
+    pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, pRequest->code);
     return;
   }
 
@@ -8,31 +8,31 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
 ADD_EXECUTABLE(clientTest clientTests.cpp)
 TARGET_LINK_LIBRARIES(
     clientTest
-    os util common transport parser catalog scheduler gtest taos_static qcom executor function
+    os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function
 )
 
 ADD_EXECUTABLE(tmqTest tmqTest.cpp)
 TARGET_LINK_LIBRARIES(
     tmqTest
-    PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom
+    PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom
 )
 
 ADD_EXECUTABLE(smlTest smlTest.cpp)
 TARGET_LINK_LIBRARIES(
     smlTest
-    PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry
+    PUBLIC os util common transport parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom geometry
 )
 
 #ADD_EXECUTABLE(clientMonitorTest clientMonitorTests.cpp)
 #TARGET_LINK_LIBRARIES(
 #    clientMonitorTest
-#    PUBLIC os util common transport monitor parser catalog scheduler function gtest taos_static qcom executor
+#    PUBLIC os util common transport monitor parser catalog scheduler function gtest ${TAOS_LIB_STATIC} qcom executor
 #)
 
 ADD_EXECUTABLE(userOperTest ../../../tests/script/api/passwdTest.c)
 TARGET_LINK_LIBRARIES(
     userOperTest
-    PUBLIC taos
+    PUBLIC ${TAOS_LIB}
 )
 
 TARGET_INCLUDE_DIRECTORIES(
@@ -2169,7 +2169,7 @@ int32_t tSerializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnalAl
     SAnalyticsUrl *pUrl = pIter;
     size_t nameLen = 0;
     const char *name = taosHashGetKey(pIter, &nameLen);
-    if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_KEY_LEN && pUrl->urlLen > 0) {
+    if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_KEY_LEN && pUrl->urlLen > 0) {
       numOfAlgos++;
     }
     pIter = taosHashIterate(pRsp->hash, pIter);
@@ -2224,7 +2224,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal
   int32_t numOfAlgos = 0;
   int32_t nameLen;
   int32_t type;
-  char name[TSDB_ANAL_ALGO_KEY_LEN];
+  char name[TSDB_ANALYTIC_ALGO_KEY_LEN];
   SAnalyticsUrl url = {0};
 
   TAOS_CHECK_EXIT(tStartDecode(&decoder));
@@ -2233,7 +2233,7 @@ int32_t tDeserializeRetrieveAnalAlgoRsp(void *buf, int32_t bufLen, SRetrieveAnal
 
   for (int32_t f = 0; f < numOfAlgos; ++f) {
     TAOS_CHECK_EXIT(tDecodeI32(&decoder, &nameLen));
-    if (nameLen > 0 && nameLen <= TSDB_ANAL_ALGO_NAME_LEN) {
+    if (nameLen > 0 && nameLen <= TSDB_ANALYTIC_ALGO_NAME_LEN) {
       TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, name));
     }
 
@@ -404,7 +404,7 @@ static const SSysDbTableSchema userCompactsDetailSchema[] = {
 
 static const SSysDbTableSchema anodesSchema[] = {
     {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
-    {.name = "url", .bytes = TSDB_ANAL_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+    {.name = "url", .bytes = TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
     {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
     {.name = "update_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
|
||||||
|
|
||||||
static const SSysDbTableSchema anodesFullSchema[] = {
|
static const SSysDbTableSchema anodesFullSchema[] = {
|
||||||
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
|
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
|
||||||
{.name = "type", .bytes = TSDB_ANAL_ALGO_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "type", .bytes = TSDB_ANALYTIC_ALGO_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
{.name = "algo", .bytes = TSDB_ANAL_ALGO_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
{.name = "algo", .bytes = TSDB_ANALYTIC_ALGO_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
|
||||||
};
|
};
|
||||||
|
|
||||||
static const SSysDbTableSchema filesetsFullSchema[] = {
|
static const SSysDbTableSchema filesetsFullSchema[] = {
|
||||||
|
|
|
@@ -36,14 +36,15 @@ static void smProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
 
     dTrace("msg:%p, get from snode-write queue", pMsg);
     int32_t code = sndProcessWriteMsg(pMgmt->pSnode, pMsg, NULL);
-    if (code < 0) {
-      dGError("snd, msg:%p failed to process write since %s", pMsg, tstrerror(code));
-      if (pMsg->info.handle != NULL) {
-        tmsgSendRsp(pMsg);
-      }
-    } else {
-      smSendRsp(pMsg, 0);
-    }
+    // if (code < 0) {
+    //   dGError("snd, msg:%p failed to process write since %s", pMsg, tstrerror(code));
+    //   if (pMsg->info.handle != NULL) {
+    //     tmsgSendRsp(pMsg);
+    //   }
+    // } else {
+    //   smSendRsp(pMsg, 0);
+    // }
+    smSendRsp(pMsg, code);
 
     dTrace("msg:%p, is freed", pMsg);
     rpcFreeCont(pMsg->pCont);
@@ -37,7 +37,9 @@ typedef struct SVnodeMgmt {
   SSingleWorker mgmtMultiWorker;
   SHashObj *hash;
   SHashObj *closedHash;
+  SHashObj *creatingHash;
   TdThreadRwlock lock;
+  TdThreadMutex mutex;
   SVnodesStat state;
   STfs *pTfs;
   TdThread thread;
@@ -96,6 +98,7 @@ SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict);
 void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
 int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl);
 void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed);
+void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId);
 
 // vmHandle.c
 SArray *vmGetMsgHandles();
@@ -113,6 +116,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
 int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt);
 int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
 int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
+int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
 
 // vmWorker.c
 int32_t vmStartWorker(SVnodeMgmt *pMgmt);
@@ -67,6 +67,54 @@ int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnod
   return 0;
 }
 
+int32_t vmGetAllVnodeListFromHashWithCreating(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
+  (void)taosThreadRwlockRdlock(&pMgmt->lock);
+
+  int32_t num = 0;
+  int32_t size = taosHashGetSize(pMgmt->hash);
+  int32_t creatingSize = taosHashGetSize(pMgmt->creatingHash);
+  size += creatingSize;
+  SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *));
+  if (pVnodes == NULL) {
+    (void)taosThreadRwlockUnlock(&pMgmt->lock);
+    return terrno;
+  }
+
+  void *pIter = taosHashIterate(pMgmt->hash, NULL);
+  while (pIter) {
+    SVnodeObj **ppVnode = pIter;
+    SVnodeObj *pVnode = *ppVnode;
+    if (pVnode && num < size) {
+      int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+      dTrace("vgId:%d,acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount);
+      pVnodes[num++] = (*ppVnode);
+      pIter = taosHashIterate(pMgmt->hash, pIter);
+    } else {
+      taosHashCancelIterate(pMgmt->hash, pIter);
+    }
+  }
+
+  pIter = taosHashIterate(pMgmt->creatingHash, NULL);
+  while (pIter) {
+    SVnodeObj **ppVnode = pIter;
+    SVnodeObj *pVnode = *ppVnode;
+    if (pVnode && num < size) {
+      int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+      dTrace("vgId:%d, acquire vnode, vnode:%p, ref:%d", pVnode->vgId, pVnode, refCount);
+      pVnodes[num++] = (*ppVnode);
+      pIter = taosHashIterate(pMgmt->creatingHash, pIter);
+    } else {
+      taosHashCancelIterate(pMgmt->creatingHash, pIter);
+    }
+  }
+  (void)taosThreadRwlockUnlock(&pMgmt->lock);
+
+  *numOfVnodes = num;
+  *ppVnodes = pVnodes;
+
+  return 0;
+}
+
 int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
   (void)taosThreadRwlockRdlock(&pMgmt->lock);
 
@@ -381,6 +381,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   if (vnodeCreate(path, &vnodeCfg, diskPrimary, pMgmt->pTfs) < 0) {
     dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
     vmReleaseVnode(pMgmt, pVnode);
+    vmRemoveFromCreatingHash(pMgmt, req.vgId);
     (void)tFreeSCreateVnodeReq(&req);
     code = terrno != 0 ? terrno : -1;
     return code;
@@ -422,6 +423,8 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   }
 
 _OVER:
+  vmRemoveFromCreatingHash(pMgmt, req.vgId);
+
   if (code != 0) {
     int32_t r = 0;
     r = taosThreadRwlockWrlock(&pMgmt->lock);
@@ -16,6 +16,7 @@
 #define _DEFAULT_SOURCE
 #include "vmInt.h"
 #include "libs/function/tudf.h"
+#include "osMemory.h"
 #include "tfs.h"
 #include "vnd.h"
 
@@ -62,10 +63,20 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
   int32_t numOfVnodes = 0;
   SVnodeObj **ppVnodes = NULL;
 
-  code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes);
+  code = taosThreadMutexLock(&pMgmt->mutex);
   if (code != 0) {
     return code;
   }
 
+  code = vmGetAllVnodeListFromHashWithCreating(pMgmt, &numOfVnodes, &ppVnodes);
+  if (code != 0) {
+    int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
+    if (r != 0) {
+      dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
+    }
+    return code;
+  }
+
   for (int32_t v = 0; v < numOfVnodes; v++) {
     SVnodeObj *pVnode = ppVnodes[v];
     disks[pVnode->diskPrimary] += 1;
@ -81,6 +92,51 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
    }
  }

  SVnodeObj *pCreatingVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
  if (pCreatingVnode == NULL) {
    code = -1;
    if (terrno != 0) code = terrno;
    dError("failed to alloc vnode since %s", tstrerror(code));
    int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
    if (r != 0) {
      dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
    }
    goto _OVER;
  }
  (void)memset(pCreatingVnode, 0, sizeof(SVnodeObj));

  pCreatingVnode->vgId = vgId;
  pCreatingVnode->diskPrimary = diskId;

  code = taosThreadRwlockWrlock(&pMgmt->lock);
  if (code != 0) {
    int32_t r = taosThreadMutexUnlock(&pMgmt->mutex);
    if (r != 0) {
      dError("vgId:%d, failed to unlock mutex since %s", vgId, tstrerror(r));
    }
    taosMemoryFree(pCreatingVnode);
    goto _OVER;
  }

  dTrace("vgId:%d, put vnode into creating hash, pCreatingVnode:%p", vgId, pCreatingVnode);
  code = taosHashPut(pMgmt->creatingHash, &vgId, sizeof(int32_t), &pCreatingVnode, sizeof(SVnodeObj *));
  if (code != 0) {
    dError("vgId:%d, failed to put vnode to creatingHash", vgId);
    taosMemoryFree(pCreatingVnode);
  }

  int32_t r = taosThreadRwlockUnlock(&pMgmt->lock);
  if (r != 0) {
    dError("vgId:%d, failed to unlock since %s", vgId, tstrerror(r));
  }

  code = taosThreadMutexUnlock(&pMgmt->mutex);
  if (code != 0) {
    goto _OVER;
  }

_OVER:

  for (int32_t i = 0; i < numOfVnodes; ++i) {
    if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
    vmReleaseVnode(pMgmt, ppVnodes[i]);
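Reading aid, not part of the commit: the hunk above holds `pMgmt->mutex` for the whole allocation and only takes the rwlock around the short hash insert, releasing in the reverse order of acquisition. A minimal sketch of that lock ordering under the same assumptions; `registerCreatingVnode` is a hypothetical name:

```c
// Hypothetical helper illustrating the mutex-then-rwlock ordering used above.
static int32_t registerCreatingVnode(SVnodeMgmt *pMgmt, int32_t vgId, SVnodeObj *pObj) {
  int32_t code = taosThreadMutexLock(&pMgmt->mutex);  // 1. serialize concurrent allocations
  if (code != 0) return code;

  code = taosThreadRwlockWrlock(&pMgmt->lock);         // 2. protect creatingHash for the insert only
  if (code == 0) {
    code = taosHashPut(pMgmt->creatingHash, &vgId, sizeof(int32_t), &pObj, sizeof(SVnodeObj *));
    (void)taosThreadRwlockUnlock(&pMgmt->lock);        // 3. release the rwlock first
  }

  (void)taosThreadMutexUnlock(&pMgmt->mutex);          // 4. then the outer mutex
  return code;
}
```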
@ -89,8 +145,13 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
    taosMemoryFree(ppVnodes);
  }

  dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
  if (code != 0) {
  return diskId;
    dError("vgId:%d, failed to alloc disk since %s", vgId, tstrerror(code));
    return code;
  } else {
    dInfo("vgId:%d, alloc disk:%d of level 0. ndisk:%d, vnodes: %d", vgId, diskId, ndisk, numOfVnodes);
    return diskId;
  }
}

SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) {

@ -216,12 +277,12 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,
  }
  if (keepClosed) {
    SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
    (void)memset(pClosedVnode, 0, sizeof(SVnodeObj));
    if (pClosedVnode == NULL) {
    if (pVnode == NULL) {
      dError("failed to alloc vnode since %s", terrstr());
      dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr());
      (void)taosThreadRwlockUnlock(&pMgmt->lock);
      return;
    }
    (void)memset(pClosedVnode, 0, sizeof(SVnodeObj));

    pClosedVnode->vgId = pVnode->vgId;
    pClosedVnode->dropped = pVnode->dropped;

@ -427,11 +488,18 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {

  pMgmt->closedHash =
      taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
  if (pMgmt->hash == NULL) {
  if (pMgmt->closedHash == NULL) {
    dError("failed to init vnode closed hash since %s", terrstr());
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  pMgmt->creatingHash =
      taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
  if (pMgmt->creatingHash == NULL) {
    dError("failed to init vnode creatingHash hash since %s", terrstr());
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  SWrapperCfg *pCfgs = NULL;
  int32_t numOfVnodes = 0;
  if (vmGetVnodeListFromFile(pMgmt, &pCfgs, &numOfVnodes) != 0) {

@ -509,6 +577,32 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
  return 0;
}

void vmRemoveFromCreatingHash(SVnodeMgmt *pMgmt, int32_t vgId) {
  SVnodeObj *pOld = NULL;

  (void)taosThreadRwlockWrlock(&pMgmt->lock);
  int32_t r = taosHashGetDup(pMgmt->creatingHash, &vgId, sizeof(int32_t), (void *)&pOld);
  if (r != 0) {
    dError("vgId:%d, failed to get vnode from creating Hash", vgId);
  }
  dTrace("vgId:%d, remove from creating Hash", vgId);
  r = taosHashRemove(pMgmt->creatingHash, &vgId, sizeof(int32_t));
  if (r != 0) {
    dError("vgId:%d, failed to remove vnode from hash", vgId);
  }
  (void)taosThreadRwlockUnlock(&pMgmt->lock);

  if (pOld) {
    dTrace("vgId:%d, free vnode pOld:%p", vgId, &pOld);
    vmFreeVnodeObj(&pOld);
  }

_OVER:
  if (r != 0) {
    dError("vgId:%d, failed to remove vnode from creatingHash since %s", vgId, tstrerror(r));
  }
}

static void *vmCloseVnodeInThread(void *param) {
  SVnodeThread *pThread = param;
  SVnodeMgmt *pMgmt = pThread->pMgmt;
@ -614,6 +708,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
    pMgmt->closedHash = NULL;
  }

  pIter = taosHashIterate(pMgmt->creatingHash, NULL);
  while (pIter) {
    SVnodeObj **ppVnode = pIter;
    vmFreeVnodeObj(ppVnode);
    pIter = taosHashIterate(pMgmt->creatingHash, pIter);
  }

  if (pMgmt->creatingHash != NULL) {
    taosHashCleanup(pMgmt->creatingHash);
    pMgmt->creatingHash = NULL;
  }

  dInfo("total vnodes:%d are all closed", numOfVnodes);
}

@ -622,6 +728,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) {
  vmStopWorker(pMgmt);
  vnodeCleanup();
  (void)taosThreadRwlockDestroy(&pMgmt->lock);
  (void)taosThreadMutexDestroy(&pMgmt->mutex);
  (void)taosThreadMutexDestroy(&pMgmt->fileLock);
  taosMemoryFree(pMgmt);
}

@ -714,6 +821,12 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
    goto _OVER;
  }

  code = taosThreadMutexInit(&pMgmt->mutex, NULL);
  if (code != 0) {
    code = TAOS_SYSTEM_ERROR(errno);
    goto _OVER;
  }

  code = taosThreadMutexInit(&pMgmt->fileLock, NULL);
  if (code != 0) {
    code = TAOS_SYSTEM_ERROR(errno);

@ -6,5 +6,5 @@ target_include_directories(
  PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
target_link_libraries(
  node_util cjson mnode vnode qnode snode wal sync taos_static tfs monitor monitorfw
  node_util cjson mnode vnode qnode snode wal sync ${TAOS_LIB_STATIC} tfs monitor monitorfw
)
@ -309,7 +309,7 @@ static int32_t mndCreateAnode(SMnode *pMnode, SRpcMsg *pReq, SMCreateAnodeReq *p
  anodeObj.updateTime = anodeObj.createdTime;
  anodeObj.version = 0;
  anodeObj.urlLen = pCreate->urlLen;
  if (anodeObj.urlLen > TSDB_ANAL_ANODE_URL_LEN) {
  if (anodeObj.urlLen > TSDB_ANALYTIC_ANODE_URL_LEN) {
    code = TSDB_CODE_MND_ANODE_TOO_LONG_URL;
    goto _OVER;
  }

@ -491,23 +491,24 @@ static int32_t mndSetDropAnodeRedoLogs(STrans *pTrans, SAnodeObj *pObj) {
  int32_t code = 0;
  SSdbRaw *pRedoRaw = mndAnodeActionEncode(pObj);
  if (pRedoRaw == NULL) {
    code = TSDB_CODE_MND_RETURN_VALUE_NULL;
    code = terrno;
    if (terrno != 0) code = terrno;
    return code;
    TAOS_RETURN(code);
  }

  TAOS_CHECK_RETURN(mndTransAppendRedolog(pTrans, pRedoRaw));
  TAOS_CHECK_RETURN(sdbSetRawStatus(pRedoRaw, SDB_STATUS_DROPPING));
  TAOS_RETURN(code);
  return code;
}

static int32_t mndSetDropAnodeCommitLogs(STrans *pTrans, SAnodeObj *pObj) {
  int32_t code = 0;
  SSdbRaw *pCommitRaw = mndAnodeActionEncode(pObj);
  if (pCommitRaw == NULL) {
    code = TSDB_CODE_MND_RETURN_VALUE_NULL;
    code = terrno;
    if (terrno != 0) code = terrno;
    return code;
    TAOS_RETURN(code);
  }

  TAOS_CHECK_RETURN(mndTransAppendCommitlog(pTrans, pCommitRaw));
  TAOS_CHECK_RETURN(sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED));
  TAOS_RETURN(code);

@ -521,25 +522,25 @@ static int32_t mndSetDropAnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SAnode
}

static int32_t mndDropAnode(SMnode *pMnode, SRpcMsg *pReq, SAnodeObj *pObj) {
  int32_t code = -1;
  int32_t code = 0;
  int32_t lino = 0;

  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-anode");
  if (pTrans == NULL) {
  TSDB_CHECK_NULL(pTrans, code, lino, _OVER, terrno);
    code = TSDB_CODE_MND_RETURN_VALUE_NULL;
    if (terrno != 0) code = terrno;
    goto _OVER;
  }
  mndTransSetSerial(pTrans);
  mInfo("trans:%d, to drop anode:%d", pTrans->id, pObj->id);

  mInfo("trans:%d, used to drop anode:%d", pTrans->id, pObj->id);
  code = mndSetDropAnodeInfoToTrans(pMnode, pTrans, pObj, false);
  TAOS_CHECK_GOTO(mndSetDropAnodeInfoToTrans(pMnode, pTrans, pObj, false), NULL, _OVER);
  mndReleaseAnode(pMnode, pObj);
  TAOS_CHECK_GOTO(mndTransPrepare(pMnode, pTrans), NULL, _OVER);

  code = 0;
  TSDB_CHECK_CODE(code, lino, _OVER);

  code = mndTransPrepare(pMnode, pTrans);

_OVER:
  mndTransDrop(pTrans);
  TAOS_RETURN(code);
  return code;
}

static int32_t mndProcessDropAnodeReq(SRpcMsg *pReq) {

@ -560,20 +561,20 @@ static int32_t mndProcessDropAnodeReq(SRpcMsg *pReq) {

  pObj = mndAcquireAnode(pMnode, dropReq.anodeId);
  if (pObj == NULL) {
    code = TSDB_CODE_MND_RETURN_VALUE_NULL;
    code = terrno;
    if (terrno != 0) code = terrno;
    goto _OVER;
  }

  code = mndDropAnode(pMnode, pReq, pObj);
  if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
  if (code == 0) {
    code = TSDB_CODE_ACTION_IN_PROGRESS;
  }

_OVER:
  if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
    mError("anode:%d, failed to drop since %s", dropReq.anodeId, tstrerror(code));
  }

  mndReleaseAnode(pMnode, pObj);
  tFreeSMDropAnodeReq(&dropReq);
  TAOS_RETURN(code);
}
@ -584,7 +585,7 @@ static int32_t mndRetrieveAnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
  int32_t numOfRows = 0;
  int32_t cols = 0;
  SAnodeObj *pObj = NULL;
  char buf[TSDB_ANAL_ANODE_URL_LEN + VARSTR_HEADER_SIZE];
  char buf[TSDB_ANALYTIC_ANODE_URL_LEN + VARSTR_HEADER_SIZE];
  char status[64];
  int32_t code = 0;

@ -642,7 +643,7 @@ static int32_t mndRetrieveAnodesFull(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
  int32_t numOfRows = 0;
  int32_t cols = 0;
  SAnodeObj *pObj = NULL;
  char buf[TSDB_ANAL_ALGO_NAME_LEN + VARSTR_HEADER_SIZE];
  char buf[TSDB_ANALYTIC_ALGO_NAME_LEN + VARSTR_HEADER_SIZE];
  int32_t code = 0;

  while (numOfRows < rows) {

@ -693,7 +694,7 @@ static int32_t mndDecodeAlgoList(SJson *pJson, SAnodeObj *pObj) {
  int32_t code = 0;
  int32_t protocol = 0;
  double tmp = 0;
  char buf[TSDB_ANAL_ALGO_NAME_LEN + 1] = {0};
  char buf[TSDB_ANALYTIC_ALGO_NAME_LEN + 1] = {0};

  code = tjsonGetDoubleValue(pJson, "protocol", &tmp);
  if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT;

@ -753,10 +754,10 @@ static int32_t mndDecodeAlgoList(SJson *pJson, SAnodeObj *pObj) {
}

static int32_t mndGetAnodeAlgoList(const char *url, SAnodeObj *pObj) {
  char anodeUrl[TSDB_ANAL_ANODE_URL_LEN + 1] = {0};
  char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0};
  snprintf(anodeUrl, TSDB_ANAL_ANODE_URL_LEN, "%s/%s", url, "list");
  snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", url, "list");

  SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANAL_HTTP_TYPE_GET, NULL);
  SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL);
  if (pJson == NULL) return terrno;

  int32_t code = mndDecodeAlgoList(pJson, pObj);

@ -769,10 +770,10 @@ static int32_t mndGetAnodeStatus(SAnodeObj *pObj, char *status, int32_t statusLe
  int32_t code = 0;
  int32_t protocol = 0;
  double tmp = 0;
  char anodeUrl[TSDB_ANAL_ANODE_URL_LEN + 1] = {0};
  char anodeUrl[TSDB_ANALYTIC_ANODE_URL_LEN + 1] = {0};
  snprintf(anodeUrl, TSDB_ANAL_ANODE_URL_LEN, "%s/%s", pObj->url, "status");
  snprintf(anodeUrl, TSDB_ANALYTIC_ANODE_URL_LEN, "%s/%s", pObj->url, "status");

  SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANAL_HTTP_TYPE_GET, NULL);
  SJson *pJson = taosAnalSendReqRetJson(anodeUrl, ANALYTICS_HTTP_TYPE_GET, NULL);
  if (pJson == NULL) return terrno;

  code = tjsonGetDoubleValue(pJson, "protocol", &tmp);

@ -808,7 +809,7 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) {
  SAnodeObj *pObj = NULL;
  SAnalyticsUrl url;
  int32_t nameLen;
  char name[TSDB_ANAL_ALGO_KEY_LEN];
  char name[TSDB_ANALYTIC_ALGO_KEY_LEN];
  SRetrieveAnalAlgoReq req = {0};
  SRetrieveAnalAlgoRsp rsp = {0};
@ -847,13 +848,13 @@ static int32_t mndProcessAnalAlgoReq(SRpcMsg *pReq) {
        goto _OVER;
      }
    }
    url.url = taosMemoryMalloc(TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN + 1);
    url.url = taosMemoryMalloc(TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN + 1);
    if (url.url == NULL) {
      sdbRelease(pSdb, pAnode);
      goto _OVER;
    }

    url.urlLen = 1 + tsnprintf(url.url, TSDB_ANAL_ANODE_URL_LEN + TSDB_ANAL_ALGO_TYPE_LEN, "%s/%s", pAnode->url,
    url.urlLen = 1 + tsnprintf(url.url, TSDB_ANALYTIC_ANODE_URL_LEN + TSDB_ANALYTIC_ALGO_TYPE_LEN, "%s/%s", pAnode->url,
                               taosAnalAlgoUrlStr(url.type));
    if (taosHashPut(rsp.hash, name, nameLen, &url, sizeof(SAnalyticsUrl)) != 0) {
      taosMemoryFree(url.url);

@ -53,7 +53,7 @@ static inline int32_t mndAcquireRpc(SMnode *pMnode) {
  if (pMnode->stopped) {
    code = TSDB_CODE_APP_IS_STOPPING;
  } else if (!mndIsLeader(pMnode)) {
    code = -1;
    code = 1;
  } else {
#if 1
    (void)atomic_add_fetch_32(&pMnode->rpcRef, 1);

@ -1002,8 +1002,12 @@ int64_t mndGenerateUid(const char *name, int32_t len) {

int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo,
                          SMonStbInfo *pStbInfo, SMonGrantInfo *pGrantInfo) {
  int32_t code = 0;
  int32_t code = mndAcquireRpc(pMnode);
  TAOS_CHECK_RETURN(mndAcquireRpc(pMnode));
  if (code < 0) {
    TAOS_RETURN(code);
  } else if (code == 1) {
    TAOS_RETURN(TSDB_CODE_SUCCESS);
  }

  SSdb *pSdb = pMnode->pSdb;
  int64_t ms = taosGetTimestampMs();
@ -324,7 +324,11 @@ static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
      SMetaEntry me = {0};
      tDecoderInit(&dc, value, valueSize);
      if (metaDecodeEntry(&dc, &me) == 0) {
        if (metaHandleEntry(pNewMeta, &me) != 0) {
        if (me.type == TSDB_CHILD_TABLE &&
            tdbTbGet(pMeta->pUidIdx, &me.ctbEntry.suid, sizeof(me.ctbEntry.suid), NULL, NULL) != 0) {
          metaError("vgId:%d failed to get super table uid:%" PRId64 " for child table uid:%" PRId64,
                    TD_VID(pVnode), me.ctbEntry.suid, uid);
        } else if (metaHandleEntry(pNewMeta, &me) != 0) {
          metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
        }
      }

@ -972,7 +972,7 @@ static int32_t tsdbDataFileWriteBrinRecord(SDataFileWriter *writer, const SBrinR
      break;
    }

    if ((writer->brinBlock->numOfRecords) >= writer->config->maxRow) {
    if ((writer->brinBlock->numOfRecords) >= 256) {
      TAOS_CHECK_GOTO(tsdbDataFileWriteBrinBlock(writer), &lino, _exit);
    }

@ -836,6 +836,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead
  pList = &pReader->status.uidList;

  int32_t i = 0;
  int32_t j = 0;
  while (i < TARRAY2_SIZE(pBlkArray)) {
    pBrinBlk = &pBlkArray->data[i];
    if (pBrinBlk->maxTbid.suid < pReader->info.suid) {
@ -851,7 +852,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead
        (pBrinBlk->minTbid.suid <= pReader->info.suid) && (pBrinBlk->maxTbid.suid >= pReader->info.suid), code, lino,
        _end, TSDB_CODE_INTERNAL_ERROR);

    if (pBrinBlk->maxTbid.suid == pReader->info.suid && pBrinBlk->maxTbid.uid < pList->tableUidList[0]) {
    if (pBrinBlk->maxTbid.suid == pReader->info.suid && pBrinBlk->maxTbid.uid < pList->tableUidList[j]) {
      i += 1;
      continue;
    }

@ -864,6 +865,14 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead
    TSDB_CHECK_NULL(p1, code, lino, _end, terrno);

    i += 1;
    if (pBrinBlk->maxTbid.suid == pReader->info.suid) {
      while (j < numOfTables && pList->tableUidList[j] < pBrinBlk->maxTbid.uid) {
        j++;
      }
      if (j >= numOfTables) {
        break;
      }
    }
  }

  et2 = taosGetTimestampUs();
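Reading aid, not part of the commit: the new index `j` above turns the per-block check into a merge-style scan over two sorted sequences (the reader's table uid list and the brin blocks), so tables already passed are never re-examined. A minimal, self-contained sketch of the same two-pointer idea with hypothetical names:

```c
#include <stdint.h>

// Count how many sorted blocks can still overlap the sorted table uid list,
// advancing one shared cursor j instead of rescanning from the start each time.
static int32_t countOverlappingBlocks(const uint64_t *tableUids, int32_t numOfTables,
                                      const uint64_t *blockMaxUids, int32_t numOfBlocks) {
  int32_t hits = 0;
  int32_t j = 0;
  for (int32_t i = 0; i < numOfBlocks; ++i) {
    while (j < numOfTables && tableUids[j] < blockMaxUids[i]) {
      j++;  // every uid below this block's max has already been fully handled
    }
    if (j >= numOfTables) break;  // no remaining table can match later blocks
    hits++;
  }
  return hits;
}
```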
@ -1134,7 +1143,12 @@ static int32_t getCurrentBlockInfo(SDataBlockIter* pBlockIter, SFileDataBlockInf
  *pInfo = NULL;

  size_t num = TARRAY_SIZE(pBlockIter->blockList);
  TSDB_CHECK_CONDITION(num != 0, code, lino, _end, TSDB_CODE_INVALID_PARA);
  if (num == 0) {
    // Some callers would attempt to call this function. Filter out certain normal cases and return directly to avoid
    // generating excessive unnecessary error logs.
    TSDB_CHECK_CONDITION(num == pBlockIter->numOfBlocks, code, lino, _end, TSDB_CODE_INVALID_PARA);
    return TSDB_CODE_INVALID_PARA;
  }

  *pInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index);
  TSDB_CHECK_NULL(*pInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);

@ -4807,7 +4821,7 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
  pBlockData = &pReader->status.fileBlockData;
  asc = ASCENDING_TRAVERSE(pReader->info.order);
  pVerRange = &pReader->info.verRange;
  ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1;
  step = ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1;

  *state = CHECK_FILEBLOCK_QUIT;
  code = loadNeighborIfOverlap(pFBlock, pScanInfo, pReader, &loadNeighbor);

@ -5530,12 +5544,10 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi
    // update the SQueryTableDataCond to create inner reader
    int32_t order = pCond->order;
    if (order == TSDB_ORDER_ASC) {
      pCond->twindows.ekey = window.skey - 1;
      pCond->twindows = pCond->extTwindows[0];
      pCond->twindows.skey = INT64_MIN;
      pCond->order = TSDB_ORDER_DESC;
    } else {
      pCond->twindows.skey = window.ekey + 1;
      pCond->twindows = pCond->extTwindows[1];
      pCond->twindows.ekey = INT64_MAX;
      pCond->order = TSDB_ORDER_ASC;
    }

@ -5544,11 +5556,9 @@ int32_t tsdbReaderOpen2(void* pVnode, SQueryTableDataCond* pCond, void* pTableLi
    TSDB_CHECK_CODE(code, lino, _end);

    if (order == TSDB_ORDER_ASC) {
      pCond->twindows.skey = window.ekey + 1;
      pCond->twindows = pCond->extTwindows[1];
      pCond->twindows.ekey = INT64_MAX;
    } else {
      pCond->twindows.skey = INT64_MIN;
      pCond->twindows = pCond->extTwindows[0];
      pCond->twindows.ekey = window.ekey - 1;
    }
    pCond->order = order;

@ -6115,7 +6125,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
        TSDB_CHECK_CODE(code, lino, _end);
      }

      goto _end;
      return code;
    }
  }

@ -6142,7 +6152,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
        acquired = false;
        TSDB_CHECK_CODE(code, lino, _end);
      }
      goto _end;
      return code;
    }

    if (pReader->step == EXTERNAL_ROWS_MAIN && pReader->innerReader[1] != NULL) {

@ -6168,7 +6178,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
        TSDB_CHECK_CODE(code, lino, _end);
      }

      goto _end;
      return code;
    }
  }
@ -271,6 +271,7 @@ typedef struct SCtgViewsCtx {
  SArray* pNames;
  SArray* pResList;
  SArray* pFetchs;
  bool    forceFetch;
} SCtgViewsCtx;

typedef enum {

@ -831,12 +832,12 @@ typedef struct SCtgCacheItemInfo {
#define ctgDebug(param, ...) qDebug("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgTrace(param, ...) qTrace("CTG:%p " param, pCtg, __VA_ARGS__)

#define ctgTaskFatal(param, ...) qFatal("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskFatal(param, ...) qFatal("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskError(param, ...) qError("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskError(param, ...) qError("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskWarn(param, ...) qWarn("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskWarn(param, ...) qWarn("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskInfo(param, ...) qInfo("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskInfo(param, ...) qInfo("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskDebug(param, ...) qDebug("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskDebug(param, ...) qDebug("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskTrace(param, ...) qTrace("qid:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskTrace(param, ...) qTrace("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)

#define CTG_LOCK_DEBUG(...) \
  do { \

@ -20,6 +20,11 @@
#include "tref.h"
#include "trpc.h"

typedef struct SCtgViewTaskParam {
  bool    forceFetch;
  SArray* pTableReqs;
} SCtgViewTaskParam;

void ctgIsTaskDone(SCtgJob* pJob, CTG_TASK_TYPE type, bool* done) {
  SCtgTask* pTask = NULL;

@ -500,7 +505,7 @@ int32_t ctgInitGetTbTagTask(SCtgJob* pJob, int32_t taskIdx, void* param) {

int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
  SCtgTask task = {0};
  SCtgViewTaskParam* p = param;
  task.type = CTG_TASK_GET_VIEW;
  task.taskId = taskIdx;
  task.pJob = pJob;

@ -511,7 +516,8 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
  }

  SCtgViewsCtx* ctx = task.taskCtx;
  ctx->pNames = param;
  ctx->pNames = p->pTableReqs;
  ctx->forceFetch = p->forceFetch;
  ctx->pResList = taosArrayInit(pJob->viewNum, sizeof(SMetaRes));
  if (NULL == ctx->pResList) {
    qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum,

@ -849,13 +855,12 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
  int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg);
  int32_t tbTagNum = (int32_t)taosArrayGetSize(pReq->pTableTag);
  int32_t viewNum = (int32_t)ctgGetTablesReqNum(pReq->pView);
  int32_t tbTsmaNum = (int32_t)taosArrayGetSize(pReq->pTableTSMAs);
  int32_t tbTsmaNum = tsQuerySmaOptimize ? (int32_t)taosArrayGetSize(pReq->pTableTSMAs) : 0;
  int32_t tsmaNum = (int32_t)taosArrayGetSize(pReq->pTSMAs);
  int32_t tbNameNum = (int32_t)ctgGetTablesReqNum(pReq->pTableName);

  int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum +
                    userNum + dbInfoNum + tbIndexNum + tbCfgNum + tbTagNum + viewNum + tbTsmaNum + tbNameNum;

  *job = taosMemoryCalloc(1, sizeof(SCtgJob));
  if (NULL == *job) {
    ctgError("failed to calloc, size:%d,QID:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId);

@ -1014,7 +1019,8 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const
  }

  if (viewNum > 0) {
    CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_VIEW, pReq->pView, NULL));
    SCtgViewTaskParam param = {.forceFetch = pReq->forceFetchViewMeta, .pTableReqs = pReq->pView};
    CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_VIEW, &param, NULL));
  }
  if (tbTsmaNum > 0) {
    CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_TB_TSMA, pReq->pTableTSMAs, NULL));

@ -3712,16 +3718,14 @@ int32_t ctgLaunchGetViewsTask(SCtgTask* pTask) {
  bool   tbMetaDone = false;
  SName* pName = NULL;

  /*
  ctgIsTaskDone(pJob, CTG_TASK_GET_TB_META_BATCH, &tbMetaDone);
  if (tbMetaDone && !pCtx->forceFetch) {
  if (tbMetaDone) {
    CTG_ERR_RET(ctgBuildViewNullRes(pTask, pCtx));
    TSWAP(pTask->res, pCtx->pResList);

    CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
    return TSDB_CODE_SUCCESS;
  }
  */

  int32_t dbNum = taosArrayGetSize(pCtx->pNames);
  int32_t fetchIdx = 0;
@ -9,7 +9,7 @@ IF(NOT TD_DARWIN)
  ADD_EXECUTABLE(catalogTest ${SOURCE_LIST})
  TARGET_LINK_LIBRARIES(
      catalogTest
      PUBLIC os util common nodes catalog transport gtest qcom taos_static
      PUBLIC os util common nodes catalog transport gtest qcom ${TAOS_LIB_STATIC}
  )

  TARGET_INCLUDE_DIRECTORIES(

@ -44,9 +44,9 @@ typedef struct {
  SExprSupp scalarSup;
  int32_t tsSlotId;
  STimeWindowAggSupp twAggSup;
  char algoName[TSDB_ANAL_ALGO_NAME_LEN];
  char algoName[TSDB_ANALYTIC_ALGO_NAME_LEN];
  char algoUrl[TSDB_ANAL_ALGO_URL_LEN];
  char algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN];
  char anomalyOpt[TSDB_ANAL_ALGO_OPTION_LEN];
  char anomalyOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
  SAnomalyWindowSupp anomalySup;
  SWindowRowsSup anomalyWinRowSup;
  SColumn anomalyCol;

@ -75,13 +75,13 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p

  if (!taosAnalGetOptStr(pAnomalyNode->anomalyOpt, "algo", pInfo->algoName, sizeof(pInfo->algoName))) {
    qError("failed to get anomaly_window algorithm name from %s", pAnomalyNode->anomalyOpt);
    code = TSDB_CODE_ANAL_ALGO_NOT_FOUND;
    code = TSDB_CODE_ANA_ALGO_NOT_FOUND;
    goto _error;
  }

  if (taosAnalGetAlgoUrl(pInfo->algoName, ANAL_ALGO_TYPE_ANOMALY_DETECT, pInfo->algoUrl, sizeof(pInfo->algoUrl)) != 0) {
    qError("failed to get anomaly_window algorithm url from %s", pInfo->algoName);
    code = TSDB_CODE_ANAL_ALGO_NOT_LOAD;
    code = TSDB_CODE_ANA_ALGO_NOT_LOAD;
    goto _error;
  }

@ -262,7 +262,7 @@ static void anomalyDestroyOperatorInfo(void* param) {

static int32_t anomalyCacheBlock(SAnomalyWindowOperatorInfo* pInfo, SSDataBlock* pSrc) {
  if (pInfo->anomalySup.cachedRows > ANAL_ANOMALY_WINDOW_MAX_ROWS) {
    return TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS;
    return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS;
  }

  SSDataBlock* pDst = NULL;

@ -287,7 +287,7 @@ static int32_t anomalyFindWindow(SAnomalyWindowSupp* pSupp, TSKEY key) {
  return -1;
}

static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows, const char* pId) {
  int32_t     code = 0;
  int32_t     rows = 0;
  STimeWindow win = {0};
@ -295,8 +295,23 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
  taosArrayClear(pWindows);

  tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code);
  if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT;
  if (code < 0) {
  if (rows <= 0) return 0;
    return TSDB_CODE_INVALID_JSON_FORMAT;
  }

  if (rows < 0) {
    char pMsg[1024] = {0};
    code = tjsonGetStringValue(pJson, "msg", pMsg);
    if (code) {
      qError("%s failed to get error msg from rsp, unknown error", pId);
    } else {
      qError("%s failed to exec forecast, msg:%s", pId, pMsg);
    }

    return TSDB_CODE_ANA_WN_DATA;
  } else if (rows == 0) {
    return TSDB_CODE_SUCCESS;
  }

  SJson* res = tjsonGetObjectItem(pJson, "res");
  if (res == NULL) return TSDB_CODE_INVALID_JSON_FORMAT;
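Reading aid, not part of the commit: the parser above relies on a simple response contract from the analytics node: a negative `rows` carries an error whose text is in `msg`, zero means no anomaly windows were detected, and a positive value means `res` holds `[start, end]` timestamp pairs. A condensed sketch of that decision, reusing the same tjson helpers as the code above; `classifyAnalyticsRsp` is a hypothetical name:

```c
// Hypothetical condensed version of the rows/msg handling shown above.
static int32_t classifyAnalyticsRsp(SJson *pJson, const char *pId) {
  int32_t rows = 0;
  int32_t code = 0;

  tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code);
  if (code < 0) return TSDB_CODE_INVALID_JSON_FORMAT;

  if (rows < 0) {
    char msg[1024] = {0};
    if (tjsonGetStringValue(pJson, "msg", msg) == 0) {
      qError("%s analytics node reported an error, msg:%s", pId, msg);
    }
    return TSDB_CODE_ANA_WN_DATA;  // error reported by the analytics node
  }

  return TSDB_CODE_SUCCESS;  // rows == 0: nothing to parse; rows > 0: caller walks "res"
}
```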
@ -313,7 +328,10 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {

    SJson* start = tjsonGetArrayItem(row, 0);
    SJson* end = tjsonGetArrayItem(row, 1);
    if (start == NULL || end == NULL) return TSDB_CODE_INVALID_JSON_FORMAT;
    if (start == NULL || end == NULL) {
      qError("%s invalid res from analytic sys, code:%s", pId, tstrerror(TSDB_CODE_INVALID_JSON_FORMAT));
      return TSDB_CODE_INVALID_JSON_FORMAT;
    }

    tjsonGetObjectValueBigInt(start, &win.skey);
    tjsonGetObjectValueBigInt(end, &win.ekey);

@ -322,52 +340,57 @@ static int32_t anomalyParseJson(SJson* pJson, SArray* pWindows) {
      win.ekey = win.skey + 1;
    }

    if (taosArrayPush(pWindows, &win) == NULL) return TSDB_CODE_OUT_OF_BUFFER;
    if (taosArrayPush(pWindows, &win) == NULL) {
      qError("%s out of memory in generating anomaly_window", pId);
      return TSDB_CODE_OUT_OF_BUFFER;
    }
  }

  int32_t numOfWins = taosArrayGetSize(pWindows);
  qDebug("anomaly window recevied, total:%d", numOfWins);
  qDebug("%s anomaly window recevied, total:%d", pId, numOfWins);
  for (int32_t i = 0; i < numOfWins; ++i) {
    STimeWindow* pWindow = taosArrayGet(pWindows, i);
    qDebug("anomaly win:%d [%" PRId64 ", %" PRId64 ")", i, pWindow->skey, pWindow->ekey);
    qDebug("%s anomaly win:%d [%" PRId64 ", %" PRId64 ")", pId, i, pWindow->skey, pWindow->ekey);
  }

  return 0;
  return code;
}

static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {
  SAnomalyWindowOperatorInfo* pInfo = pOperator->info;
  SAnomalyWindowSupp*         pSupp = &pInfo->anomalySup;
  SJson*                      pJson = NULL;
  SAnalBuf analBuf = {.bufType = ANAL_BUF_TYPE_JSON};
  SAnalyticBuf analBuf = {.bufType = ANALYTICS_BUF_TYPE_JSON};
  char     dataBuf[64] = {0};
  int32_t  code = 0;
  int64_t  ts = 0;
  int32_t  lino = 0;
  const char* pId = GET_TASKID(pOperator->pTaskInfo);

  // int64_t ts = taosGetTimestampMs();
  snprintf(analBuf.fileName, sizeof(analBuf.fileName), "%s/tdengine-anomaly-%" PRId64 "-%" PRId64, tsTempDir, ts,
           pSupp->groupId);
  code = tsosAnalBufOpen(&analBuf, 2);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  const char* prec = TSDB_TIME_PRECISION_MILLI_STR;
  if (pInfo->anomalyCol.precision == TSDB_TIME_PRECISION_MICRO) prec = TSDB_TIME_PRECISION_MICRO_STR;
  if (pInfo->anomalyCol.precision == TSDB_TIME_PRECISION_NANO) prec = TSDB_TIME_PRECISION_NANO_STR;

  code = taosAnalBufWriteColMeta(&analBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, "ts");
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufWriteColMeta(&analBuf, 1, pInfo->anomalyCol.type, "val");
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufWriteDataBegin(&analBuf);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  int32_t numOfBlocks = (int32_t)taosArrayGetSize(pSupp->blocks);

  // timestamp
  code = taosAnalBufWriteColBegin(&analBuf, 0);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  for (int32_t i = 0; i < numOfBlocks; ++i) {
    SSDataBlock* pBlock = taosArrayGetP(pSupp->blocks, i);
    if (pBlock == NULL) break;

@ -375,15 +398,17 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {
    if (pTsCol == NULL) break;
    for (int32_t j = 0; j < pBlock->info.rows; ++j) {
      code = taosAnalBufWriteColData(&analBuf, 0, TSDB_DATA_TYPE_TIMESTAMP, &((TSKEY*)pTsCol->pData)[j]);
      if (code != 0) goto _OVER;
      QUERY_CHECK_CODE(code, lino, _OVER);
    }
  }

  code = taosAnalBufWriteColEnd(&analBuf, 0);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  // data
  code = taosAnalBufWriteColBegin(&analBuf, 1);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  for (int32_t i = 0; i < numOfBlocks; ++i) {
    SSDataBlock* pBlock = taosArrayGetP(pSupp->blocks, i);
    if (pBlock == NULL) break;

@ -392,48 +417,47 @@ static int32_t anomalyAnalysisWindow(SOperatorInfo* pOperator) {

    for (int32_t j = 0; j < pBlock->info.rows; ++j) {
      code = taosAnalBufWriteColData(&analBuf, 1, pValCol->info.type, colDataGetData(pValCol, j));
      if (code != 0) goto _OVER;
      QUERY_CHECK_CODE(code, lino, _OVER);
      if (code != 0) goto _OVER;
    }
  }
  code = taosAnalBufWriteColEnd(&analBuf, 1);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufWriteDataEnd(&analBuf);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufWriteOptStr(&analBuf, "option", pInfo->anomalyOpt);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufWriteOptStr(&analBuf, "algo", pInfo->algoName);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufWriteOptStr(&analBuf, "prec", prec);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  int64_t wncheck = ANAL_FORECAST_DEFAULT_WNCHECK;
  bool    hasWncheck = taosAnalGetOptInt(pInfo->anomalyOpt, "wncheck", &wncheck);
  if (!hasWncheck) {
    qDebug("anomaly_window wncheck not found from %s, use default:%" PRId64, pInfo->anomalyOpt, wncheck);
  }

  code = taosAnalBufWriteOptInt(&analBuf, "wncheck", wncheck);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  code = taosAnalBufClose(&analBuf);
  if (code != 0) goto _OVER;
  QUERY_CHECK_CODE(code, lino, _OVER);

  pJson = taosAnalSendReqRetJson(pInfo->algoUrl, ANAL_HTTP_TYPE_POST, &analBuf);
  pJson = taosAnalSendReqRetJson(pInfo->algoUrl, ANALYTICS_HTTP_TYPE_POST, &analBuf);
  if (pJson == NULL) {
    code = terrno;
    goto _OVER;
  }

  code = anomalyParseJson(pJson, pSupp->windows);
  code = anomalyParseJson(pJson, pSupp->windows, pId);
  if (code != 0) goto _OVER;

_OVER:
  if (code != 0) {
    qError("failed to analysis window since %s", tstrerror(code));
    qError("%s failed to analysis window since %s, lino:%d", pId, tstrerror(code), lino);
  }

  taosAnalBufDestroy(&analBuf);
|
||||||
#ifdef USE_ANALYTICS
|
#ifdef USE_ANALYTICS
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char algoName[TSDB_ANAL_ALGO_NAME_LEN];
|
char algoName[TSDB_ANALYTIC_ALGO_NAME_LEN];
|
||||||
char algoUrl[TSDB_ANAL_ALGO_URL_LEN];
|
char algoUrl[TSDB_ANALYTIC_ALGO_URL_LEN];
|
||||||
char algoOpt[TSDB_ANAL_ALGO_OPTION_LEN];
|
char algoOpt[TSDB_ANALYTIC_ALGO_OPTION_LEN];
|
||||||
int64_t maxTs;
|
int64_t maxTs;
|
||||||
int64_t minTs;
|
int64_t minTs;
|
||||||
int64_t numOfRows;
|
int64_t numOfRows;
|
||||||
|
@ -47,7 +47,7 @@ typedef struct {
|
||||||
int16_t inputValSlot;
|
int16_t inputValSlot;
|
||||||
int8_t inputValType;
|
int8_t inputValType;
|
||||||
int8_t inputPrecision;
|
int8_t inputPrecision;
|
||||||
SAnalBuf analBuf;
|
SAnalyticBuf analBuf;
|
||||||
} SForecastSupp;
|
} SForecastSupp;
|
||||||
|
|
||||||
typedef struct SForecastOperatorInfo {
|
typedef struct SForecastOperatorInfo {
|
||||||
|
@ -74,12 +74,12 @@ static FORCE_INLINE int32_t forecastEnsureBlockCapacity(SSDataBlock* pBlock, int
|
||||||
|
|
||||||
static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) {
|
static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) {
|
||||||
if (pSupp->cachedRows > ANAL_FORECAST_MAX_ROWS) {
|
if (pSupp->cachedRows > ANAL_FORECAST_MAX_ROWS) {
|
||||||
return TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS;
|
return TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
int32_t lino = 0;
|
int32_t lino = 0;
|
||||||
SAnalBuf* pBuf = &pSupp->analBuf;
|
SAnalyticBuf* pBuf = &pSupp->analBuf;
|
||||||
|
|
||||||
qDebug("block:%d, %p rows:%" PRId64, pSupp->numOfBlocks, pBlock, pBlock->info.rows);
|
qDebug("block:%d, %p rows:%" PRId64, pSupp->numOfBlocks, pBlock, pBlock->info.rows);
|
||||||
pSupp->numOfBlocks++;
|
pSupp->numOfBlocks++;
|
||||||
|
@ -108,7 +108,7 @@ static int32_t forecastCacheBlock(SForecastSupp* pSupp, SSDataBlock* pBlock) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t forecastCloseBuf(SForecastSupp* pSupp) {
|
static int32_t forecastCloseBuf(SForecastSupp* pSupp) {
|
||||||
SAnalBuf* pBuf = &pSupp->analBuf;
|
SAnalyticBuf* pBuf = &pSupp->analBuf;
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
|
||||||
for (int32_t i = 0; i < 2; ++i) {
|
for (int32_t i = 0; i < 2; ++i) {
|
||||||
|
@ -180,8 +180,8 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) {
|
static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock, const char* pId) {
|
||||||
SAnalBuf* pBuf = &pSupp->analBuf;
|
SAnalyticBuf* pBuf = &pSupp->analBuf;
|
||||||
int32_t resCurRow = pBlock->info.rows;
|
int32_t resCurRow = pBlock->info.rows;
|
||||||
int8_t tmpI8;
|
int8_t tmpI8;
|
||||||
int16_t tmpI16;
|
int16_t tmpI16;
|
||||||
|
@ -192,28 +192,45 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
|
||||||
SColumnInfoData* pResValCol = taosArrayGet(pBlock->pDataBlock, pSupp->resValSlot);
|
SColumnInfoData* pResValCol = taosArrayGet(pBlock->pDataBlock, pSupp->resValSlot);
|
||||||
if (NULL == pResValCol) return TSDB_CODE_OUT_OF_RANGE;
|
if (NULL == pResValCol) {
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
|
||||||
SColumnInfoData* pResTsCol = (pSupp->resTsSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resTsSlot) : NULL);
|
SColumnInfoData* pResTsCol = (pSupp->resTsSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resTsSlot) : NULL);
|
||||||
SColumnInfoData* pResLowCol = (pSupp->resLowSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resLowSlot) : NULL);
|
SColumnInfoData* pResLowCol = (pSupp->resLowSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resLowSlot) : NULL);
|
||||||
SColumnInfoData* pResHighCol =
|
SColumnInfoData* pResHighCol =
|
||||||
(pSupp->resHighSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resHighSlot) : NULL);
|
(pSupp->resHighSlot != -1 ? taosArrayGet(pBlock->pDataBlock, pSupp->resHighSlot) : NULL);
|
||||||
|
|
||||||
SJson* pJson = taosAnalSendReqRetJson(pSupp->algoUrl, ANAL_HTTP_TYPE_POST, pBuf);
|
SJson* pJson = taosAnalSendReqRetJson(pSupp->algoUrl, ANALYTICS_HTTP_TYPE_POST, pBuf);
|
||||||
if (pJson == NULL) return terrno;
|
if (pJson == NULL) {
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t rows = 0;
|
int32_t rows = 0;
|
||||||
tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code);
|
tjsonGetInt32ValueFromDouble(pJson, "rows", rows, code);
|
||||||
if (code < 0) goto _OVER;
|
if (rows < 0 && code == 0) {
|
||||||
if (rows <= 0) goto _OVER;
|
char pMsg[1024] = {0};
|
||||||
|
code = tjsonGetStringValue(pJson, "msg", pMsg);
|
||||||
|
if (code != 0) {
|
||||||
|
qError("%s failed to get msg from rsp, unknown error", pId);
|
||||||
|
} else {
|
||||||
|
qError("%s failed to exec forecast, msg:%s", pId, pMsg);
|
||||||
|
}
|
||||||
|
|
||||||
|
tjsonDelete(pJson);
|
||||||
|
return TSDB_CODE_ANA_WN_DATA;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (code < 0) {
|
||||||
|
goto _OVER;
|
||||||
|
}
|
||||||
|
|
||||||
SJson* res = tjsonGetObjectItem(pJson, "res");
|
SJson* res = tjsonGetObjectItem(pJson, "res");
|
||||||
if (res == NULL) goto _OVER;
|
if (res == NULL) goto _OVER;
|
||||||
int32_t ressize = tjsonGetArraySize(res);
|
int32_t ressize = tjsonGetArraySize(res);
|
||||||
bool returnConf = (pSupp->resHighSlot != -1 || pSupp->resLowSlot != -1);
|
bool returnConf = (pSupp->resHighSlot != -1 || pSupp->resLowSlot != -1);
|
||||||
if (returnConf) {
|
|
||||||
if (ressize != 4) goto _OVER;
|
if ((returnConf && (ressize != 4)) || ((!returnConf) && (ressize != 2))) {
|
||||||
} else if (ressize != 2) {
|
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -313,41 +330,25 @@ static int32_t forecastAnalysis(SForecastSupp* pSupp, SSDataBlock* pBlock) {
     resCurRow++;
   }

-  // for (int32_t i = rows; i < pSupp->optRows; ++i) {
-  //   colDataSetNNULL(pResValCol, rows, (pSupp->optRows - rows));
-  //   if (pResTsCol != NULL) {
-  //     colDataSetNNULL(pResTsCol, rows, (pSupp->optRows - rows));
-  //   }
-  //   if (pResLowCol != NULL) {
-  //     colDataSetNNULL(pResLowCol, rows, (pSupp->optRows - rows));
-  //   }
-  //   if (pResHighCol != NULL) {
-  //     colDataSetNNULL(pResHighCol, rows, (pSupp->optRows - rows));
-  //   }
-  // }
-
-  // if (rows == pSupp->optRows) {
-  //   pResValCol->hasNull = false;
-  // }

   pBlock->info.rows += rows;

   if (pJson != NULL) tjsonDelete(pJson);
   return 0;

 _OVER:
-  if (pJson != NULL) tjsonDelete(pJson);
+  tjsonDelete(pJson);
   if (code == 0) {
     code = TSDB_CODE_INVALID_JSON_FORMAT;
   }
-  qError("failed to perform forecast finalize since %s", tstrerror(code));
-  return TSDB_CODE_INVALID_JSON_FORMAT;
+  qError("%s failed to perform forecast finalize since %s", pId, tstrerror(code));
+  return code;
 }

-static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBlock) {
+static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBlock, const char* pId) {
   int32_t   code = TSDB_CODE_SUCCESS;
   int32_t   lino = 0;
-  SAnalBuf* pBuf = &pSupp->analBuf;
+  SAnalyticBuf* pBuf = &pSupp->analBuf;

   code = forecastCloseBuf(pSupp);
   QUERY_CHECK_CODE(code, lino, _end);
@@ -355,10 +356,10 @@ static int32_t forecastAggregateBlocks(SForecastSupp* pSupp, SSDataBlock* pResBl
   code = forecastEnsureBlockCapacity(pResBlock, 1);
   QUERY_CHECK_CODE(code, lino, _end);

-  code = forecastAnalysis(pSupp, pResBlock);
+  code = forecastAnalysis(pSupp, pResBlock, pId);
   QUERY_CHECK_CODE(code, lino, _end);

-  uInfo("block:%d, forecast finalize", pSupp->numOfBlocks);
+  uInfo("%s block:%d, forecast finalize", pId, pSupp->numOfBlocks);

 _end:
   pSupp->numOfBlocks = 0;
@@ -373,9 +374,10 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
   SForecastOperatorInfo* pInfo = pOperator->info;
   SSDataBlock*           pResBlock = pInfo->pRes;
   SForecastSupp*         pSupp = &pInfo->forecastSupp;
-  SAnalBuf*              pBuf = &pSupp->analBuf;
+  SAnalyticBuf*          pBuf = &pSupp->analBuf;
   int64_t                st = taosGetTimestampUs();
   int32_t                numOfBlocks = pSupp->numOfBlocks;
+  const char*            pId = GET_TASKID(pOperator->pTaskInfo);

   blockDataCleanup(pResBlock);
@@ -389,45 +391,46 @@ static int32_t forecastNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
       pSupp->groupId = pBlock->info.id.groupId;
       numOfBlocks++;
       pSupp->cachedRows += pBlock->info.rows;
-      qDebug("group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pSupp->groupId, numOfBlocks,
+      qDebug("%s group:%" PRId64 ", blocks:%d, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId, numOfBlocks,
             pBlock->info.rows, pSupp->cachedRows);
       code = forecastCacheBlock(pSupp, pBlock);
       QUERY_CHECK_CODE(code, lino, _end);
     } else {
-      qDebug("group:%" PRId64 ", read finish for new group coming, blocks:%d", pSupp->groupId, numOfBlocks);
-      code = forecastAggregateBlocks(pSupp, pResBlock);
+      qDebug("%s group:%" PRId64 ", read finish for new group coming, blocks:%d", pId, pSupp->groupId, numOfBlocks);
+      code = forecastAggregateBlocks(pSupp, pResBlock, pId);
       QUERY_CHECK_CODE(code, lino, _end);
       pSupp->groupId = pBlock->info.id.groupId;
       numOfBlocks = 1;
       pSupp->cachedRows = pBlock->info.rows;
-      qDebug("group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pSupp->groupId, pBlock->info.rows,
-             pSupp->cachedRows);
+      qDebug("%s group:%" PRId64 ", new group, rows:%" PRId64 ", total rows:%" PRId64, pId, pSupp->groupId,
+             pBlock->info.rows, pSupp->cachedRows);
       code = forecastCacheBlock(pSupp, pBlock);
       QUERY_CHECK_CODE(code, lino, _end);
     }

     if (pResBlock->info.rows > 0) {
       (*ppRes) = pResBlock;
-      qDebug("group:%" PRId64 ", return to upstream, blocks:%d", pResBlock->info.id.groupId, numOfBlocks);
+      qDebug("%s group:%" PRId64 ", return to upstream, blocks:%d", pId, pResBlock->info.id.groupId, numOfBlocks);
       return code;
     }
   }

   if (numOfBlocks > 0) {
-    qDebug("group:%" PRId64 ", read finish, blocks:%d", pSupp->groupId, numOfBlocks);
-    code = forecastAggregateBlocks(pSupp, pResBlock);
+    qDebug("%s group:%" PRId64 ", read finish, blocks:%d", pId, pSupp->groupId, numOfBlocks);
+    code = forecastAggregateBlocks(pSupp, pResBlock, pId);
     QUERY_CHECK_CODE(code, lino, _end);
   }

   int64_t cost = taosGetTimestampUs() - st;
-  qDebug("all groups finished, cost:%" PRId64 "us", cost);
+  qDebug("%s all groups finished, cost:%" PRId64 "us", pId, cost);

 _end:
   if (code != TSDB_CODE_SUCCESS) {
-    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+    qError("%s %s failed at line %d since %s", pId, __func__, lino, tstrerror(code));
     pTaskInfo->code = code;
     T_LONG_JMP(pTaskInfo->env, code);
   }

   (*ppRes) = (pResBlock->info.rows == 0) ? NULL : pResBlock;
   return code;
 }
@@ -498,7 +501,7 @@ static int32_t forecastParseInput(SForecastSupp* pSupp, SNodeList* pFuncs) {
     pSupp->inputPrecision = pTsNode->node.resType.precision;
     pSupp->inputValSlot = pValNode->slotId;
     pSupp->inputValType = pValNode->node.resType.type;
-    tstrncpy(pSupp->algoOpt, "algo=arima", TSDB_ANAL_ALGO_OPTION_LEN);
+    tstrncpy(pSupp->algoOpt, "algo=arima", TSDB_ANALYTIC_ALGO_OPTION_LEN);
   } else {
     return TSDB_CODE_PLAN_INTERNAL_ERROR;
   }
@@ -516,22 +519,22 @@ static int32_t forecastParseAlgo(SForecastSupp* pSupp) {

   if (!taosAnalGetOptStr(pSupp->algoOpt, "algo", pSupp->algoName, sizeof(pSupp->algoName))) {
     qError("failed to get forecast algorithm name from %s", pSupp->algoOpt);
-    return TSDB_CODE_ANAL_ALGO_NOT_FOUND;
+    return TSDB_CODE_ANA_ALGO_NOT_FOUND;
   }

   if (taosAnalGetAlgoUrl(pSupp->algoName, ANAL_ALGO_TYPE_FORECAST, pSupp->algoUrl, sizeof(pSupp->algoUrl)) != 0) {
     qError("failed to get forecast algorithm url from %s", pSupp->algoName);
-    return TSDB_CODE_ANAL_ALGO_NOT_LOAD;
+    return TSDB_CODE_ANA_ALGO_NOT_LOAD;
   }

   return 0;
 }

 static int32_t forecastCreateBuf(SForecastSupp* pSupp) {
-  SAnalBuf* pBuf = &pSupp->analBuf;
+  SAnalyticBuf* pBuf = &pSupp->analBuf;
   int64_t   ts = 0;  // taosGetTimestampMs();

-  pBuf->bufType = ANAL_BUF_TYPE_JSON_COL;
+  pBuf->bufType = ANALYTICS_BUF_TYPE_JSON_COL;
   snprintf(pBuf->fileName, sizeof(pBuf->fileName), "%s/tdengine-forecast-%" PRId64, tsTempDir, ts);
   int32_t code = tsosAnalBufOpen(pBuf, 2);
   if (code != 0) goto _OVER;
@@ -3040,7 +3040,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
                                   pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache);
     // ignore the table not exists error, since this table may have been dropped during the scan procedure.
     if (code) {
-      blockDataFreeRes((SSDataBlock*)pBlock);
       QUERY_CHECK_CODE(code, lino, _end);
     }
@@ -3408,6 +3407,8 @@ int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff, int32_t*
     QUERY_CHECK_CODE(code, lino, _end);
   }

+  qDebug("%s last scan range %d. %" PRId64 ",%" PRId64, __func__, __LINE__, pInfo->lastScanRange.skey, pInfo->lastScanRange.ekey);
+
   *pLen = len;

 _end:
@@ -3473,21 +3474,20 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
     goto _end;
   }

-  if (pInfo->pUpdateInfo != NULL) {
-    void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
-    if (!pUpInfo) {
-      lino = __LINE__;
-      goto _end;
-    }
-    code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
-    if (code == TSDB_CODE_SUCCESS) {
-      pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
-      pInfo->pUpdateInfo = pUpInfo;
-    } else {
-      taosMemoryFree(pUpInfo);
-      lino = __LINE__;
-      goto _end;
-    }
-  }
+  void* pUpInfo = taosMemoryCalloc(1, sizeof(SUpdateInfo));
+  if (!pUpInfo) {
+    lino = __LINE__;
+    goto _end;
+  }
+  code = pInfo->stateStore.updateInfoDeserialize(pDeCoder, pUpInfo);
+  if (code == TSDB_CODE_SUCCESS) {
+    pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
+    pInfo->pUpdateInfo = pUpInfo;
+    qDebug("%s line:%d. stream scan updateinfo deserialize success", __func__, __LINE__);
+  } else {
+    taosMemoryFree(pUpInfo);
+    code = TSDB_CODE_SUCCESS;
+    qDebug("%s line:%d. stream scan did not have updateinfo", __func__, __LINE__);
+  }

   if (tDecodeIsEnd(pDeCoder)) {
@@ -3507,6 +3507,7 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
     lino = __LINE__;
     goto _end;
   }
+  qDebug("%s last scan range %d. %" PRId64 ",%" PRId64, __func__, __LINE__, pInfo->lastScanRange.skey, pInfo->lastScanRange.ekey);

 _end:
   if (pDeCoder != NULL) {
@@ -1131,6 +1131,47 @@ static int32_t extractPkColumnFromFuncs(SNodeList* pFuncs, bool* pHasPk, SColumn
   return TSDB_CODE_SUCCESS;
 }

+/**
+ * @brief Determine the actual time range for reading data based on the RANGE clause and the WHERE conditions.
+ * @param[in] cond The range specified by WHERE condition.
+ * @param[in] range The range specified by RANGE clause.
+ * @param[out] twindow The range to be read in DESC order, and only one record is needed.
+ * @param[out] extTwindow The external range to read for only one record, which is used for FILL clause.
+ * @note `cond` and `twindow` may be the same address.
+ */
+static int32_t getQueryExtWindow(const STimeWindow* cond, const STimeWindow* range, STimeWindow* twindow,
+                                 STimeWindow* extTwindows) {
+  int32_t     code = TSDB_CODE_SUCCESS;
+  int32_t     lino = 0;
+  STimeWindow tempWindow;
+
+  if (cond->skey > cond->ekey || range->skey > range->ekey) {
+    *twindow = extTwindows[0] = extTwindows[1] = TSWINDOW_DESC_INITIALIZER;
+    return code;
+  }
+
+  if (range->ekey < cond->skey) {
+    extTwindows[1] = *cond;
+    *twindow = extTwindows[0] = TSWINDOW_DESC_INITIALIZER;
+    return code;
+  }
+
+  if (cond->ekey < range->skey) {
+    extTwindows[0] = *cond;
+    *twindow = extTwindows[1] = TSWINDOW_DESC_INITIALIZER;
+    return code;
+  }
+
+  // Only scan data in the time range intersecion.
+  extTwindows[0] = extTwindows[1] = *cond;
+  twindow->skey = TMAX(cond->skey, range->skey);
+  twindow->ekey = TMIN(cond->ekey, range->ekey);
+  extTwindows[0].ekey = twindow->skey - 1;
+  extTwindows[1].skey = twindow->ekey + 1;
+
+  return code;
+}
+
 int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) {
   QRY_PARAM_CHECK(pOptrInfo);
@@ -1206,8 +1247,10 @@ int32_t createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyN

   if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
     STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;
-    pScanInfo->base.cond.twindows = pInfo->win;
-    pScanInfo->base.cond.type = TIMEWINDOW_RANGE_EXTERNAL;
+    SQueryTableDataCond *cond = &pScanInfo->base.cond;
+    cond->type = TIMEWINDOW_RANGE_EXTERNAL;
+    code = getQueryExtWindow(&cond->twindows, &pInfo->win, &cond->twindows, cond->extTwindows);
+    QUERY_CHECK_CODE(code, lino, _error);
   }

   setOperatorInfo(pOperator, "TimeSliceOperator", QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC, false, OP_NOT_OPENED, pInfo,
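The doxygen note on getQueryExtWindow above is easier to follow with numbers. Below is a small self-contained sketch in plain C of the same intersection rule for the overlapping case; the struct and function names are hypothetical stand-ins, not the TDengine STimeWindow API, and the two non-overlapping branches of the real helper are not reproduced here.

#include <stdio.h>

typedef struct { long long skey, ekey; } TimeWin;  /* hypothetical stand-in for STimeWindow */

/* Split the WHERE range (cond) against the RANGE clause (range): scan only the
 * intersection, and keep one window on each side for the FILL lookups. */
static void splitScanWindow(TimeWin cond, TimeWin range, TimeWin *scan, TimeWin ext[2]) {
  ext[0] = ext[1] = cond;
  scan->skey = cond.skey > range.skey ? cond.skey : range.skey;  /* TMAX */
  scan->ekey = cond.ekey < range.ekey ? cond.ekey : range.ekey;  /* TMIN */
  ext[0].ekey = scan->skey - 1;  /* at most one record before the scan window */
  ext[1].skey = scan->ekey + 1;  /* at most one record after the scan window */
}

int main(void) {
  TimeWin cond = {100, 900}, range = {500, 2000}, scan, ext[2];
  splitScanWindow(cond, range, &scan, ext);
  printf("scan [%lld,%lld] prev [%lld,%lld] next [%lld,%lld]\n",
         scan.skey, scan.ekey, ext[0].skey, ext[0].ekey, ext[1].skey, ext[1].ekey);
  /* prints: scan [500,900] prev [100,499] next [901,900] */
  return 0;
}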
@@ -9,7 +9,7 @@ MESSAGE(STATUS "build parser unit test")
 # ADD_EXECUTABLE(executorTest ${SOURCE_LIST})
 # TARGET_LINK_LIBRARIES(
 #         executorTest
-#         PRIVATE os util common transport gtest taos_static qcom executor function planner scalar nodes vnode
+#         PRIVATE os util common transport gtest ${TAOS_LIB_STATIC} qcom executor function planner scalar nodes vnode
 # )
 #
 # TARGET_INCLUDE_DIRECTORIES(
@@ -3037,61 +3037,60 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
   TSKEY startKey = getRowPTs(pInput->pPTS, 0);
   TSKEY endKey = getRowPTs(pInput->pPTS, pInput->totalRows - 1);

-#if 0
-  int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
-
-  // the optimized version only valid if all tuples in one block are monotonious increasing or descreasing.
-  // this assumption is NOT always works if project operator exists in downstream.
-  if (blockDataOrder == TSDB_ORDER_ASC) {
+  if (pCtx->order == TSDB_ORDER_ASC && !pCtx->hasPrimaryKey) {
     for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
-      char* data = colDataGetData(pInputCol, i);
+      bool  isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
+      char* data = isNull ? NULL : colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
       numOfElems++;

       if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
-        doSaveLastrow(pCtx, data, i, cts, pInfo);
+        int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
+        if (code != TSDB_CODE_SUCCESS) return code;
       }

       break;
     }
-  } else { // descending order
+  } else if (!pCtx->hasPrimaryKey && pCtx->order == TSDB_ORDER_DESC) {
+    // the optimized version only valid if all tuples in one block are monotonious increasing or descreasing.
+    // this assumption is NOT always works if project operator exists in downstream.
     for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
-      char* data = colDataGetData(pInputCol, i);
+      bool  isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
+      char* data = isNull ? NULL : colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
       numOfElems++;

       if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
-        doSaveLastrow(pCtx, data, i, cts, pInfo);
+        int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
+        if (code != TSDB_CODE_SUCCESS) return code;
       }
       break;
     }
-  }
-#else
-
-  int64_t* pts = (int64_t*)pInput->pPTS->pData;
-  int from = -1;
-  int32_t i = -1;
-  while (funcInputGetNextRowIndex(pInput, from, false, &i, &from)) {
-    bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
-    char* data = isNull ? NULL : colDataGetData(pInputCol, i);
-    TSKEY cts = pts[i];
-
-    numOfElems++;
-    char* pkData = NULL;
-    if (pCtx->hasPrimaryKey) {
-      pkData = colDataGetData(pkCol, i);
-    }
-    if (pResInfo->numOfRes == 0 || pInfo->ts < cts ||
-        (pInfo->ts == pts[i] && pkCompareFn && pkCompareFn(pkData, pInfo->pkData) < 0)) {
-      int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
-      if (code != TSDB_CODE_SUCCESS) {
-        return code;
-      }
-      pResInfo->numOfRes = 1;
-    }
-  }
-
-#endif
+  } else {
+    int64_t* pts = (int64_t*)pInput->pPTS->pData;
+    int      from = -1;
+    int32_t  i = -1;
+    while (funcInputGetNextRowIndex(pInput, from, false, &i, &from)) {
+      bool  isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
+      char* data = isNull ? NULL : colDataGetData(pInputCol, i);
+      TSKEY cts = pts[i];
+
+      numOfElems++;
+      char* pkData = NULL;
+      if (pCtx->hasPrimaryKey) {
+        pkData = colDataGetData(pkCol, i);
+      }
+      if (pResInfo->numOfRes == 0 || pInfo->ts < cts ||
+          (pInfo->ts == pts[i] && pkCompareFn && pkCompareFn(pkData, pInfo->pkData) < 0)) {
+        int32_t code = doSaveLastrow(pCtx, data, i, cts, pInfo);
+        if (code != TSDB_CODE_SUCCESS) {
+          return code;
+        }
+        pResInfo->numOfRes = 1;
+      }
+    }
+  }

   SET_VAL(pResInfo, numOfElems, 1);
   return TSDB_CODE_SUCCESS;
@@ -153,6 +153,12 @@ static bool caseWhenNodeEqual(const SCaseWhenNode* a, const SCaseWhenNode* b) {
   return true;
 }

+static bool groupingSetNodeEqual(const SGroupingSetNode* a, const SGroupingSetNode* b) {
+  COMPARE_SCALAR_FIELD(groupingSetType);
+  COMPARE_NODE_LIST_FIELD(pParameterList);
+  return true;
+}
+
 bool nodesEqualNode(const SNode* a, const SNode* b) {
   if (a == b) {
     return true;
@@ -181,10 +187,11 @@ bool nodesEqualNode(const SNode* a, const SNode* b) {
       return whenThenNodeEqual((const SWhenThenNode*)a, (const SWhenThenNode*)b);
     case QUERY_NODE_CASE_WHEN:
       return caseWhenNodeEqual((const SCaseWhenNode*)a, (const SCaseWhenNode*)b);
+    case QUERY_NODE_GROUPING_SET:
+      return groupingSetNodeEqual((const SGroupingSetNode*)a, (const SGroupingSetNode*)b);
     case QUERY_NODE_REAL_TABLE:
     case QUERY_NODE_TEMP_TABLE:
     case QUERY_NODE_JOIN_TABLE:
-    case QUERY_NODE_GROUPING_SET:
     case QUERY_NODE_ORDER_BY_EXPR:
     case QUERY_NODE_LIMIT:
       return false;
@@ -2958,3 +2958,46 @@ void nodesSortList(SNodeList** pList, int32_t (*comp)(SNode* pNode1, SNode* pNod
     inSize *= 2;
   }
 }
+
+static SNode* nodesListFindNode(SNodeList* pList, SNode* pNode) {
+  SNode* pFound = NULL;
+  FOREACH(pFound, pList) {
+    if (nodesEqualNode(pFound, pNode)) {
+      break;
+    }
+  }
+  return pFound;
+}
+
+int32_t nodesListDeduplicate(SNodeList** ppList) {
+  if (!ppList || LIST_LENGTH(*ppList) <= 1) return TSDB_CODE_SUCCESS;
+  if (LIST_LENGTH(*ppList) == 2) {
+    SNode* pNode1 = nodesListGetNode(*ppList, 0);
+    SNode* pNode2 = nodesListGetNode(*ppList, 1);
+    if (nodesEqualNode(pNode1, pNode2)) {
+      SListCell* pCell = nodesListGetCell(*ppList, 1);
+      (void)nodesListErase(*ppList, pCell);
+    }
+    return TSDB_CODE_SUCCESS;
+  }
+  SNodeList* pTmp = NULL;
+  int32_t    code = nodesMakeList(&pTmp);
+  if (TSDB_CODE_SUCCESS == code) {
+    SNode* pNode = NULL;
+    FOREACH(pNode, *ppList) {
+      SNode* pFound = nodesListFindNode(pTmp, pNode);
+      if (NULL == pFound) {
+        code = nodesCloneNode(pNode, &pFound);
+        if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTmp, pFound);
+        if (TSDB_CODE_SUCCESS != code) break;
+      }
+    }
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    nodesDestroyList(*ppList);
+    *ppList = pTmp;
+  } else {
+    nodesDestroyList(pTmp);
+  }
+  return code;
+}
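The new nodesListDeduplicate keeps the first occurrence of each node by pairwise nodesEqualNode comparison against a temporary list. A minimal standalone sketch of the same first-occurrence-wins idea on a plain int array (hypothetical helper, not the SNodeList API; the real function additionally clones nodes and frees the original list):

#include <stdio.h>
#include <stdbool.h>

/* First-occurrence-wins deduplication by pairwise equality, O(n^2), mirroring
 * the approach taken by nodesListDeduplicate. */
static int dedup(int *a, int n) {
  int m = 0;
  for (int i = 0; i < n; ++i) {
    bool found = false;
    for (int j = 0; j < m; ++j) {
      if (a[j] == a[i]) { found = true; break; }
    }
    if (!found) a[m++] = a[i];
  }
  return m;  /* new length */
}

int main(void) {
  int keys[] = {3, 3, 7, 3, 9, 7};
  int n = dedup(keys, 6);
  for (int i = 0; i < n; ++i) printf("%d ", keys[i]);  /* prints: 3 7 9 */
  printf("\n");
  return 0;
}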
@@ -115,6 +115,7 @@ typedef struct SParseMetaCache {
   SHashObj* pTableName;  // key is tbFUid, elements is STableMeta*(append with tbName)
   SArray*   pDnodes;     // element is SEpSet
   bool      dnodeRequired;
+  bool      forceFetchViewMeta;
 } SParseMetaCache;

 int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
@@ -1377,7 +1377,7 @@ SNode* createAnomalyWindowNode(SAstCreateContext* pCxt, SNode* pExpr, const STok
   CHECK_MAKE_NODE(pAnomaly->pCol);
   pAnomaly->pExpr = pExpr;
   if (pFuncOpt == NULL) {
-    tstrncpy(pAnomaly->anomalyOpt, "algo=iqr", TSDB_ANAL_ALGO_OPTION_LEN);
+    tstrncpy(pAnomaly->anomalyOpt, "algo=iqr", TSDB_ANALYTIC_ALGO_OPTION_LEN);
   } else {
     (void)trimString(pFuncOpt->z, pFuncOpt->n, pAnomaly->anomalyOpt, sizeof(pAnomaly->anomalyOpt));
   }
@@ -824,7 +824,7 @@ static int32_t collectMetaKeyFromShowCreateView(SCollectMetaKeyCxt* pCxt, SShowC
   if (TSDB_CODE_SUCCESS == code) {
     code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->viewName, pCxt->pMetaCache);
   }
+  pCxt->pMetaCache->forceFetchViewMeta = true;
   return code;
 }

@@ -906,6 +906,7 @@ static int32_t collectMetaKeyFromCreateViewStmt(SCollectMetaKeyCxt* pCxt, SCreat
 static int32_t collectMetaKeyFromDropViewStmt(SCollectMetaKeyCxt* pCxt, SDropViewStmt* pStmt) {
   int32_t code = reserveViewUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName,
                                             pStmt->viewName, AUTH_TYPE_ALTER, pCxt->pMetaCache);
+  pCxt->pMetaCache->forceFetchViewMeta = true;
   return code;
 }
@@ -329,14 +329,23 @@ static int32_t calcConstGroupBy(SCalcConstContext* pCxt, SSelectStmt* pSelect) {
   if (TSDB_CODE_SUCCESS == code) {
     SNode* pNode = NULL;
     FOREACH(pNode, pSelect->pGroupByList) {
+      bool   hasNotValue = false;
       SNode* pGroupPara = NULL;
       FOREACH(pGroupPara, ((SGroupingSetNode*)pNode)->pParameterList) {
         if (QUERY_NODE_VALUE != nodeType(pGroupPara)) {
-          return code;
+          hasNotValue = true;
+          break;
+        }
+      }
+      if (!hasNotValue) {
+        if (pSelect->hasAggFuncs) {
+          ERASE_NODE(pSelect->pGroupByList);
+        } else {
+          if (!cell->pPrev && !cell->pNext) continue;
+          ERASE_NODE(pSelect->pGroupByList);
         }
       }
     }
-    NODES_DESTORY_LIST(pSelect->pGroupByList);
   }
   return code;
 }
@@ -681,11 +681,25 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
   int32_t  code = 0;
   int16_t  lastColId = -1;
   bool     colInOrder = true;
+  int      ncharColNums = 0;

   if (NULL == *pTSchema) {
     *pTSchema = tBuildTSchema(pSchema, pDataBlock->pMeta->tableInfo.numOfColumns, pDataBlock->pMeta->sversion);
   }

+  for (int c = 0; c < boundInfo->numOfBound; ++c) {
+    if (TSDB_DATA_TYPE_NCHAR == pSchema[boundInfo->pColIndex[c]].type) {
+      ncharColNums++;
+    }
+  }
+  if (ncharColNums > 0) {
+    ncharBinds = taosArrayInit(ncharColNums, sizeof(ncharBind));
+    if (!ncharBinds) {
+      code = terrno;
+      goto _return;
+    }
+  }
+
   for (int c = 0; c < boundInfo->numOfBound; ++c) {
     SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]];
     if (pColSchema->colId <= lastColId) {

@@ -710,13 +724,6 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
       if (code) {
         goto _return;
       }
-      if (!ncharBinds) {
-        ncharBinds = taosArrayInit(1, sizeof(ncharBind));
-        if (!ncharBinds) {
-          code = terrno;
-          goto _return;
-        }
-      }
       if (!taosArrayPush(ncharBinds, &ncharBind)) {
         code = terrno;
         goto _return;
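The hunk above replaces the lazy, inside-the-loop creation of ncharBinds with a count-then-allocate pass, so the array is sized once from ncharColNums. A small standalone sketch of that pattern (hypothetical data, not the taosArrayInit API):

#include <stdio.h>
#include <stdlib.h>

/* Count-then-allocate: size the buffer from a first pass so the second pass
 * never has to grow or lazily create it. */
int main(void) {
  const char types[] = {'i', 'n', 'i', 'n', 'n'};  /* 'n' marks an NCHAR-like column */
  int ncharCols = 0;
  for (size_t i = 0; i < sizeof(types); ++i) {
    if (types[i] == 'n') ncharCols++;
  }
  int *ncharSlots = ncharCols > 0 ? malloc(ncharCols * sizeof(int)) : NULL;
  if (ncharCols > 0 && ncharSlots == NULL) return 1;  /* allocation failed */
  int used = 0;
  for (size_t i = 0; i < sizeof(types); ++i) {
    if (types[i] == 'n') ncharSlots[used++] = (int)i;  /* record which columns need conversion */
  }
  printf("nchar-like columns: %d (first at index %d)\n", ncharCols, ncharSlots[0]);
  free(ncharSlots);
  return 0;
}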
@@ -9661,7 +9661,7 @@ static int32_t translateDropUser(STranslateContext* pCxt, SDropUserStmt* pStmt)
 static int32_t translateCreateAnode(STranslateContext* pCxt, SCreateAnodeStmt* pStmt) {
   SMCreateAnodeReq createReq = {0};
   createReq.urlLen = strlen(pStmt->url) + 1;
-  if (createReq.urlLen > TSDB_ANAL_ANODE_URL_LEN) {
+  if (createReq.urlLen > TSDB_ANALYTIC_ANODE_URL_LEN) {
     return TSDB_CODE_MND_ANODE_TOO_LONG_URL;
   }
@@ -817,6 +817,7 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
   }
 #endif
   pCatalogReq->dNodeRequired = pMetaCache->dnodeRequired;
+  pCatalogReq->forceFetchViewMeta = pMetaCache->forceFetchViewMeta;
   return code;
 }
@@ -838,8 +838,11 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
   }

   if (NULL != pSelect->pGroupByList) {
-    pAgg->pGroupKeys = NULL;
-    code = nodesCloneList(pSelect->pGroupByList, &pAgg->pGroupKeys);
+    code = nodesListDeduplicate(&pSelect->pGroupByList);
+    if (TSDB_CODE_SUCCESS == code) {
+      pAgg->pGroupKeys = NULL;
+      code = nodesCloneList(pSelect->pGroupByList, &pAgg->pGroupKeys);
+    }
   }

   // rewrite the expression in subsequent clauses
@@ -204,6 +204,7 @@ static void optSetParentOrder(SLogicNode* pNode, EOrder order, SLogicNode* pNode
     // case QUERY_NODE_LOGIC_PLAN_WINDOW:
     case QUERY_NODE_LOGIC_PLAN_AGG:
     case QUERY_NODE_LOGIC_PLAN_SORT:
+    case QUERY_NODE_LOGIC_PLAN_FILL:
       if (pNode == pNodeForcePropagate) {
         pNode->outputTsOrder = order;
         break;
@@ -313,29 +313,29 @@ typedef struct SQWorkerMgmt {
 #define QW_SCH_DLOG(param, ...) qDebug("QW:%p SID:%" PRIx64 " " param, mgmt, sId, __VA_ARGS__)

 #define QW_TASK_ELOG(param, ...) \
-  qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
+  qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
 #define QW_TASK_WLOG(param, ...) \
-  qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
+  qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
 #define QW_TASK_DLOG(param, ...) \
-  qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
+  qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
 #define QW_TASK_DLOGL(param, ...) \
-  qDebugL("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)
+  qDebugL("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId, __VA_ARGS__)

 #define QW_TASK_ELOG_E(param) \
-  qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
+  qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
 #define QW_TASK_WLOG_E(param) \
-  qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
+  qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
 #define QW_TASK_DLOG_E(param) \
-  qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)
+  qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, cId, tId, eId)

 #define QW_SCH_TASK_ELOG(param, ...) \
-  qError("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
+  qError("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
          qId, cId, tId, eId, __VA_ARGS__)
 #define QW_SCH_TASK_WLOG(param, ...) \
-  qWarn("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \
+  qWarn("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \
         cId, tId, eId, __VA_ARGS__)
 #define QW_SCH_TASK_DLOG(param, ...) \
-  qDebug("QW:%p SID:0x%" PRIx64 ",qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
+  qDebug("QW:%p SID:0x%" PRIx64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
          qId, cId, tId, eId, __VA_ARGS__)

 #define QW_LOCK_DEBUG(...) \
@@ -62,7 +62,7 @@ typedef enum {
 #define SCH_DEFAULT_MAX_RETRY_NUM 6
 #define SCH_MIN_AYSNC_EXEC_NUM 3
 #define SCH_DEFAULT_RETRY_TOTAL_ROUND 3
 #define SCH_DEFAULT_TASK_CAPACITY_NUM 1000

 typedef struct SSchDebug {
   bool lockEnable;

@@ -333,12 +333,13 @@ extern SSchedulerMgmt schMgmt;
 #define SCH_UNLOCK_TASK(_task) SCH_UNLOCK(SCH_WRITE, &(_task)->lock)

 #define SCH_CLIENT_ID(_task) ((_task) ? (_task)->clientId : -1)
 #define SCH_TASK_ID(_task) ((_task) ? (_task)->taskId : -1)
 #define SCH_TASK_EID(_task) ((_task) ? (_task)->execId : -1)

 #define SCH_IS_DATA_BIND_QRY_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN)
-#define SCH_IS_DATA_BIND_PLAN(_plan) (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY))
+#define SCH_IS_DATA_BIND_PLAN(_plan) \
+  (((_plan)->subplanType == SUBPLAN_TYPE_SCAN) || ((_plan)->subplanType == SUBPLAN_TYPE_MODIFY))
 #define SCH_IS_DATA_BIND_TASK(task) SCH_IS_DATA_BIND_PLAN((task)->plan)
 #define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum)
 #define SCH_IS_DATA_MERGE_TASK(task) (!SCH_IS_DATA_BIND_TASK(task))
 #define SCH_IS_LOCAL_EXEC_TASK(_job, _task) \

@@ -419,15 +420,15 @@ extern SSchedulerMgmt schMgmt;
 #define SCH_SWITCH_EPSET(_addr) ((_addr)->epSet.inUse = ((_addr)->epSet.inUse + 1) % (_addr)->epSet.numOfEps)
 #define SCH_TASK_NUM_OF_EPS(_addr) ((_addr)->epSet.numOfEps)

 #define SCH_LOG_TASK_START_TS(_task) \
   do { \
     int64_t us = taosGetTimestampUs(); \
     if (NULL == taosArrayPush((_task)->profile.execTime, &us)) { \
       qError("taosArrayPush task execTime failed, error:%s", tstrerror(terrno)); \
     } \
     if (0 == (_task)->execId) { \
       (_task)->profile.startTs = us; \
     } \
   } while (0)

 #define SCH_LOG_TASK_WAIT_TS(_task) \
@@ -450,23 +451,23 @@ extern SSchedulerMgmt schMgmt;
     (_task)->profile.endTs = us; \
   } while (0)

-#define SCH_JOB_ELOG(param, ...) qError("qid:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
-#define SCH_JOB_DLOG(param, ...) qDebug("qid:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
+#define SCH_JOB_ELOG(param, ...) qError("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)
+#define SCH_JOB_DLOG(param, ...) qDebug("QID:0x%" PRIx64 " " param, pJob->queryId, __VA_ARGS__)

 #define SCH_TASK_ELOG(param, ...) \
-  qError("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+  qError("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
          SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_DLOG(param, ...) \
-  qDebug("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+  qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
          SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_TLOG(param, ...) \
-  qTrace("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+  qTrace("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
          SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_DLOGL(param, ...) \
-  qDebugL("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+  qDebugL("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
          SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_WLOG(param, ...) \
-  qWarn("qid:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
+  qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_CLIENT_ID(pTask), \
         SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)

 #define SCH_SET_ERRNO(_err) \

@@ -580,7 +581,7 @@ int32_t schDelayLaunchTask(SSchJob *pJob, SSchTask *pTask);
 int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void *param);
 int32_t schAcquireJob(int64_t refId, SSchJob **ppJob);
 int32_t schReleaseJob(int64_t refId);
-int32_t schReleaseJobEx(int64_t refId, int32_t* released);
+int32_t schReleaseJobEx(int64_t refId, int32_t *released);
 void    schFreeFlowCtrl(SSchJob *pJob);
 int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel);
 int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask);

@@ -648,7 +649,7 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list);
 int32_t schNotifyTaskInHashList(SSchJob *pJob, SHashObj *list, ETaskNotifyType type, SSchTask *pTask);
 int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level);
 void    schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask);
-int32_t schValidateSubplan(SSchJob *pJob, SSubplan* pSubplan, int32_t level, int32_t idx, int32_t taskNum);
+int32_t schValidateSubplan(SSchJob *pJob, SSubplan *pSubplan, int32_t level, int32_t idx, int32_t taskNum);
 int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel);
 int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask);
 void    schDirectPostJobRes(SSchedulerReq *pReq, int32_t errCode);
@@ -11,12 +11,12 @@ IF(NOT TD_DARWIN)
   IF (TD_GRANT)
     TARGET_LINK_LIBRARIES(
       schedulerTest
-      PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler grant
+      PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler grant
     )
   ELSE ()
     TARGET_LINK_LIBRARIES(
       schedulerTest
-      PUBLIC os util common catalog transport gtest qcom taos_static planner scheduler
+      PUBLIC os util common catalog transport gtest qcom ${TAOS_LIB_STATIC} planner scheduler
     )
   ENDIF()
@@ -385,6 +385,10 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
     }
   }

+#if 0
+  taosMsleep(20*1000);
+#endif
+
   if (taskLevel == TASK_LEVEL__SOURCE) {
     int8_t type = pTask->outputInfo.type;
     pActiveInfo->allUpstreamTriggerRecv = 1;
@@ -1170,6 +1170,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
   if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) {
     stError("s-task:%s invalid number of sent readyMsg:%d to upstream:%d", id, num,
             (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList));
+    streamMutexUnlock(&pActiveInfo->lock);
     return TSDB_CODE_STREAM_INTERNAL_ERROR;
   }

@@ -1412,6 +1413,7 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
   if (size > 0) {
     STaskCheckpointReadyInfo* pReady = taosArrayGet(pActiveInfo->pReadyMsgList, 0);
     if (pReady == NULL) {
+      streamMutexUnlock(&pActiveInfo->lock);
       return terrno;
     }
@@ -433,6 +433,7 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
   // send hb msg to mnode before closing all tasks.
   int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList);
   if (code != TSDB_CODE_SUCCESS) {
+    streamMetaRUnLock(pMeta);
     return code;
   }
@@ -445,6 +445,11 @@ int32_t updateInfoSerialize(SEncoder* pEncoder, const SUpdateInfo* pInfo) {
   int32_t code = TSDB_CODE_SUCCESS;
   int32_t lino = 0;
   if (!pInfo) {
+    if (tEncodeI32(pEncoder, -1) < 0) {
+      code = TSDB_CODE_FAILED;
+      QUERY_CHECK_CODE(code, lino, _end);
+    }
+    uDebug("%s line:%d. it did not have updateinfo", __func__, __LINE__);
     return TSDB_CODE_SUCCESS;
   }

@@ -550,6 +555,10 @@ int32_t updateInfoDeserialize(SDecoder* pDeCoder, SUpdateInfo* pInfo) {

   int32_t size = 0;
   if (tDecodeI32(pDeCoder, &size) < 0) return -1;
+
+  if (size < 0) {
+    return -1;
+  }
   pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY));
   QUERY_CHECK_NULL(pInfo->pTsBuckets, code, lino, _error, terrno);
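updateInfoSerialize now writes a -1 length when there is no update info, and updateInfoDeserialize rejects negative sizes instead of handing them to the array allocator. A standalone sketch of that optional-object sentinel round trip (hypothetical wire layout, not the SEncoder/SDecoder API):

#include <stdio.h>

/* Encode an optional array as: count (or -1 when absent) followed by items. */
static int encodeOptional(int *buf, const int *items, int count) {
  int n = 0;
  buf[n++] = items ? count : -1;
  for (int i = 0; items && i < count; ++i) buf[n++] = items[i];
  return n;
}

static int decodeOptional(const int *buf, int *out, int maxOut) {
  int count = buf[0];
  if (count < 0) return 0;        /* absent: nothing to read, not an error */
  if (count > maxOut) return -1;  /* corrupt or too large */
  for (int i = 0; i < count; ++i) out[i] = buf[1 + i];
  return count;
}

int main(void) {
  int wire[8], out[8];
  int len = encodeOptional(wire, NULL, 0);  /* absent object -> single -1 */
  printf("wire len %d, decoded count %d\n", len, decodeOptional(wire, out, 8));
  /* prints: wire len 1, decoded count 0 */
  return 0;
}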
@@ -29,6 +29,7 @@ typedef struct SSyncIndexMgr {
   SyncTerm  privateTerm[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];  // for advanced function
   int64_t   startTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
   int64_t   recvTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
+  int64_t   sentTimeArr[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
   int32_t   replicaNum;
   int32_t   totalReplicaNum;
   SSyncNode *pNode;

@@ -45,7 +46,9 @@ void syncIndexMgrCopyIfExist(SSyncIndexMgr * pNewIndex, SSyncIndexMgr
 void    syncIndexMgrSetStartTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t startTime);
 int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
 void    syncIndexMgrSetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t recvTime);
+void    syncIndexMgrSetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t sentTime);
 int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
+int64_t syncIndexMgrGetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
 void    syncIndexMgrSetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, SyncTerm term);
 SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId);
@@ -46,12 +46,12 @@ extern "C" {
 #define sLDebug(...) if (sDebugFlag & DEBUG_DEBUG) { taosPrintLongString("SYN ", DEBUG_DEBUG, sDebugFlag, __VA_ARGS__); }
 #define sLTrace(...) if (sDebugFlag & DEBUG_TRACE) { taosPrintLongString("SYN ", DEBUG_TRACE, sDebugFlag, __VA_ARGS__); }

-#define sNFatal(pNode, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintNodeLog("SYN FATAL ", DEBUG_FATAL, 255, pNode, __VA_ARGS__); }
+#define sNFatal(pNode, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintNodeLog("SYN FATAL ", DEBUG_FATAL, 255, true, pNode, __VA_ARGS__); }
-#define sNError(pNode, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintNodeLog("SYN ERROR ", DEBUG_ERROR, 255, pNode, __VA_ARGS__); }
+#define sNError(pNode, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintNodeLog("SYN ERROR ", DEBUG_ERROR, 255, true, pNode, __VA_ARGS__); }
-#define sNWarn(pNode, ...) if (sDebugFlag & DEBUG_WARN) { syncPrintNodeLog("SYN WARN ", DEBUG_WARN, 255, pNode, __VA_ARGS__); }
+#define sNWarn(pNode, ...) if (sDebugFlag & DEBUG_WARN) { syncPrintNodeLog("SYN WARN ", DEBUG_WARN, 255, true, pNode, __VA_ARGS__); }
-#define sNInfo(pNode, ...) if (sDebugFlag & DEBUG_INFO) { syncPrintNodeLog("SYN ", DEBUG_INFO, 255, pNode, __VA_ARGS__); }
+#define sNInfo(pNode, ...) if (sDebugFlag & DEBUG_INFO) { syncPrintNodeLog("SYN ", DEBUG_INFO, 255, true, pNode, __VA_ARGS__); }
-#define sNDebug(pNode, ...) if (sDebugFlag & DEBUG_DEBUG) { syncPrintNodeLog("SYN ", DEBUG_DEBUG, sDebugFlag, pNode, __VA_ARGS__); }
+#define sNDebug(pNode, ...) if (sDebugFlag & DEBUG_DEBUG) { syncPrintNodeLog("SYN ", DEBUG_DEBUG, sDebugFlag, false, pNode, __VA_ARGS__); }
-#define sNTrace(pNode, ...) if (sDebugFlag & DEBUG_TRACE) { syncPrintNodeLog("SYN ", DEBUG_TRACE, sDebugFlag, pNode, __VA_ARGS__); }
+#define sNTrace(pNode, ...) if (sDebugFlag & DEBUG_TRACE) { syncPrintNodeLog("SYN ", DEBUG_TRACE, sDebugFlag, false, pNode, __VA_ARGS__); }

 #define sSFatal(pSender, ...) if (sDebugFlag & DEBUG_FATAL) { syncPrintSnapshotSenderLog("SYN FATAL ", DEBUG_FATAL, 255, pSender, __VA_ARGS__); }
 #define sSError(pSender, ...) if (sDebugFlag & DEBUG_ERROR) { syncPrintSnapshotSenderLog("SYN ERROR ", DEBUG_ERROR, 255, pSender, __VA_ARGS__); }

@@ -85,7 +85,8 @@ void syncUtilMsgHtoN(void* msg);

 void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf);

-void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...);
+void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, bool formatTime, SSyncNode* pNode,
+                      const char* format, ...);
 void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotSender* pSender,
                                 const char* format, ...);
 void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver,
@@ -155,6 +155,18 @@ void syncIndexMgrSetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, i
          DID(pRaftId), CID(pRaftId));
 }

+void syncIndexMgrSetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, int64_t sentTime) {
+  for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
+    if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
+      (pIndexMgr->sentTimeArr)[i] = sentTime;
+      return;
+    }
+  }
+
+  sError("vgId:%d, indexmgr set sent-time:%" PRId64 " for dnode:%d cluster:%d failed", pIndexMgr->pNode->vgId, sentTime,
+         DID(pRaftId), CID(pRaftId));
+}
+
 int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId) {
   for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
     if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {

@@ -168,6 +180,19 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId
   return TSDB_CODE_SYN_INVALID_ID;
 }

+int64_t syncIndexMgrGetSentTime(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId) {
+  for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
+    if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
+      int64_t recvTime = (pIndexMgr->sentTimeArr)[i];
+      return recvTime;
+    }
+  }
+
+  sError("vgId:%d, indexmgr get sent-time from dnode:%d cluster:%d failed", pIndexMgr->pNode->vgId, DID(pRaftId),
+         CID(pRaftId));
+  return TSDB_CODE_SYN_INVALID_ID;
+}
+
 void syncIndexMgrSetTerm(SSyncIndexMgr *pIndexMgr, const SRaftId *pRaftId, SyncTerm term) {
   for (int i = 0; i < pIndexMgr->totalReplicaNum; ++i) {
     if (syncUtilSameId(&((*(pIndexMgr->replicas))[i]), pRaftId)) {
@@ -108,7 +108,13 @@ int32_t syncNodeSendAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftI
 }

 int32_t syncNodeSendHeartbeat(SSyncNode* pSyncNode, const SRaftId* destId, SRpcMsg* pMsg) {
-  return syncNodeSendMsgById(destId, pSyncNode, pMsg);
+  SRaftId destIdTmp = *destId;
+  TAOS_CHECK_RETURN(syncNodeSendMsgById(destId, pSyncNode, pMsg));
+
+  int64_t tsMs = taosGetTimestampMs();
+  syncIndexMgrSetSentTime(pSyncNode->pMatchIndex, &destIdTmp, tsMs);
+
+  return TSDB_CODE_SUCCESS;
 }

 int32_t syncNodeHeartbeatPeers(SSyncNode* pSyncNode) {
@@ -22,6 +22,7 @@
 #include "syncRaftStore.h"
 #include "syncSnapshot.h"
 #include "tglobal.h"
+#include "ttime.h"

 static void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) {
   int32_t len = tsnprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex);
@@ -108,13 +109,40 @@ void syncUtilGenerateArbToken(int32_t nodeId, int32_t groupId, char* buf) {
   (void)snprintf(buf, TSDB_ARB_TOKEN_SIZE, "d%d#g%d#%" PRId64 "#%d", nodeId, groupId, currentMs, randVal);
 }

+static void syncPrintTime(bool formatTime, int32_t* len, int64_t tsMs, int32_t i, char* buf, int32_t bufLen) {
+  if (formatTime) {
+    char pBuf[TD_TIME_STR_LEN] = {0};
+    if (tsMs > 0) {
+      if (taosFormatUtcTime(pBuf, TD_TIME_STR_LEN, tsMs, TSDB_TIME_PRECISION_MILLI) != 0) {
+        pBuf[0] = '\0';
+      }
+    }
+    (*len) += tsnprintf(buf + (*len), bufLen - (*len), "%d:%s", i, pBuf);
+  } else {
+    (*len) += tsnprintf(buf + (*len), bufLen - (*len), "%d:%" PRId64, i, tsMs);
+  }
+}
+
 // for leader
-static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
+static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) {
   int32_t len = 0;
   len += tsnprintf(buf + len, bufLen - len, "%s", "{");
   for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
     int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i]));
-    len += tsnprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs);
+    syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen);
+    if (i < pSyncNode->replicaNum - 1) {
+      len += tsnprintf(buf + len, bufLen - len, "%s", ",");
+    }
+  }
+  len += tsnprintf(buf + len, bufLen - len, "%s", "}");
+}
+
+static void syncSentHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) {
+  int32_t len = 0;
+  len += tsnprintf(buf + len, bufLen - len, "%s", "{");
+  for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
+    int64_t tsMs = syncIndexMgrGetSentTime(pSyncNode->pMatchIndex, &(pSyncNode->replicasId[i]));
+    syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen);
     if (i < pSyncNode->replicaNum - 1) {
       len += tsnprintf(buf + len, bufLen - len, "%s", ",");
     }
@@ -123,12 +151,12 @@ static void syncHearbeatReplyTime2Str(SSyncNode* pSyncNode, char* buf, int32_t b
 }

 // for follower
-static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
+static void syncHearbeatTime2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen, bool formatTime) {
   int32_t len = 0;
   len += tsnprintf(buf + len, bufLen - len, "%s", "{");
   for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
     int64_t tsMs = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->replicasId[i]));
-    len += tsnprintf(buf + len, bufLen - len, "%d:%" PRId64, i, tsMs);
+    syncPrintTime(formatTime, &len, tsMs, i, buf, bufLen);
     if (i < pSyncNode->replicaNum - 1) {
       len += tsnprintf(buf + len, bufLen - len, "%s", ",");
     }
@@ -174,7 +202,8 @@ static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
   len += tsnprintf(buf + len, bufLen - len, "%s", "}");
 }

-void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) {
+void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, bool formatTime, SSyncNode* pNode,
+                      const char* format, ...) {
   if (pNode == NULL || pNode->pLogStore == NULL) return;
   int64_t currentTerm = raftStoreGetTerm(pNode);

@@ -206,10 +235,13 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
   syncLogBufferStates2Str(pNode, bufferStatesStr, sizeof(bufferStatesStr));

   char hbrTimeStr[256] = "";
-  syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr));
+  syncHearbeatReplyTime2Str(pNode, hbrTimeStr, sizeof(hbrTimeStr), formatTime);

   char hbTimeStr[256] = "";
-  syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr));
+  syncHearbeatTime2Str(pNode, hbTimeStr, sizeof(hbTimeStr), formatTime);
+
+  char sentHbTimeStr[512] = "";
+  syncSentHearbeatTime2Str(pNode, sentHbTimeStr, sizeof(sentHbTimeStr), formatTime);

   char    eventLog[512];  // {0};
   va_list argpointer;
@@ -235,14 +267,14 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
        ", elect-times:%d, as-leader-times:%d, as-assigned-leader-times:%d, cfg-ch-times:%d, hb-slow:%d, hbr-slow:%d, "
        "aq-items:%d, snaping:%" PRId64 ", replicas:%d, last-cfg:%" PRId64
        ", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64
-       ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s, arb-token:%s, msg[sent:%d, recv:%d, slow-recev:%d]",
+       ", buffer:%s, repl-mgrs:%s, members:%s, send hb:%s, recv hb:%s, recv hb-reply:%s, arb-token:%s, msg[sent:%d, recv:%d, slow-recev:%d]",
        pNode->vgId, eventLog, syncStr(pNode->state), currentTerm, pNode->commitIndex, pNode->assignedCommitIndex,
        appliedIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex,
        snapshot.lastApplyTerm, pNode->electNum, pNode->becomeLeaderNum, pNode->becomeAssignedLeaderNum,
        pNode->configChangeNum, pNode->hbSlowNum, pNode->hbrSlowNum, aqItems, pNode->snapshottingIndex,
        pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish,
        syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, bufferStatesStr,
-       replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr, pNode->arbToken, pNode->sendCount, pNode->recvCount,
+       replMgrStatesStr, cfgStr, sentHbTimeStr, hbTimeStr, hbrTimeStr, pNode->arbToken, pNode->sendCount, pNode->recvCount,
        pNode->slowCount);
   }
 }
@@ -96,7 +96,7 @@ typedef void* queue[2];

 // #define TRANS_RETRY_COUNT_LIMIT 100  // retry count limit
 // #define TRANS_RETRY_INTERVAL    15   // retry interval (ms)
-#define TRANS_CONN_TIMEOUT 3000  // connect timeout (ms)
+#define TRANS_CONN_TIMEOUT 5000  // connect timeout (ms)
 #define TRANS_READ_TIMEOUT 3000  // read timeout (ms)
 #define TRANS_PACKET_LIMIT 1024 * 1024 * 512

@@ -452,6 +452,7 @@ void transPrintEpSet(SEpSet* pEpSet);
 void    transFreeMsg(void* msg);
 int32_t transCompressMsg(char* msg, int32_t len);
 int32_t transDecompressMsg(char** msg, int32_t* len);
+int32_t transDecompressMsgExt(char const* msg, int32_t len, char** out, int32_t* outLen);

 int32_t transOpenRefMgt(int size, void (*func)(void*));
 void    transCloseRefMgt(int32_t refMgt);
@@ -725,7 +725,8 @@ void cliConnTimeout(uv_timer_t* handle) {
     return;
   }

-  tTrace("%s conn %p conn timeout", CONN_GET_INST_LABEL(conn), conn);
+  cliMayUpdateFqdnCache(pThrd->fqdn2ipCache, conn->dstAddr);
+  tTrace("%s conn %p failed to connect %s since conn timeout", CONN_GET_INST_LABEL(conn), conn, conn->dstAddr);
   TAOS_UNUSED(transUnrefCliHandle(conn));
 }

@@ -1334,13 +1335,31 @@ static void cliBatchSendCb(uv_write_t* req, int status) {
   }
 }
 bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msgLen) {
+  int32_t   code = 0;
   SCliThrd* pThrd = pConn->hostThrd;
   STrans*   pInst = pThrd->pInst;
   if (pConn->userInited == 1) {
     return false;
   }
   STransMsgHead* pHead = *ppHead;
-  STransMsgHead* tHead = taosMemoryCalloc(1, *msgLen + sizeof(pInst->user));
+  int32_t        len = *msgLen;
+  char*          oriMsg = NULL;
+  int32_t        oriLen = 0;
+
+  if (pHead->comp == 1) {
+    int32_t msgLen = htonl(pHead->msgLen);
+    code = transDecompressMsgExt((char*)(pHead), msgLen, &oriMsg, &oriLen);
+    if (code < 0) {
+      tError("failed to decompress since %s", tstrerror(code));
+      return false;
+    } else {
+      tDebug("decompress msg and resent, compress size %d, raw size %d", msgLen, oriLen);
+    }
+
+    pHead = (STransMsgHead*)oriMsg;
+    len = oriLen;
+  }
+  STransMsgHead* tHead = taosMemoryCalloc(1, len + sizeof(pInst->user));
   if (tHead == NULL) {
     return false;
   }
@@ -1348,14 +1367,17 @@ bool cliConnMayAddUserInfo(SCliConn* pConn, STransMsgHead** ppHead, int32_t* msg
   memcpy((char*)tHead + TRANS_MSG_OVERHEAD, pInst->user, sizeof(pInst->user));

   memcpy((char*)tHead + TRANS_MSG_OVERHEAD + sizeof(pInst->user), (char*)pHead + TRANS_MSG_OVERHEAD,
-         *msgLen - TRANS_MSG_OVERHEAD);
+         len - TRANS_MSG_OVERHEAD);

   tHead->withUserInfo = 1;
   *ppHead = tHead;
-  *msgLen += sizeof(pInst->user);
+  *msgLen = len + sizeof(pInst->user);

   pConn->pInitUserReq = tHead;
   pConn->userInited = 1;
+  if (oriMsg != NULL) {
+    taosMemoryFree(oriMsg);
+  }
   return true;
 }
 int32_t cliBatchSend(SCliConn* pConn, int8_t direct) {
@@ -1421,9 +1443,8 @@ int32_t cliBatchSend(SCliConn* pConn, int8_t direct) {
       pReq->contLen = 0;
     }

-    int32_t msgLen = transMsgLenFromCont(pReq->contLen);
-
     STransMsgHead* pHead = transHeadFromCont(pReq->pCont);
+    int32_t        msgLen = transMsgLenFromCont(pReq->contLen);

     char*   content = pReq->pCont;
     int32_t contLen = pReq->contLen;
@@ -77,6 +77,11 @@ int32_t transDecompressMsg(char** msg, int32_t* len) {
   STransMsgHead* pNewHead = (STransMsgHead*)buf;
   int32_t        decompLen = LZ4_decompress_safe(pCont + sizeof(STransCompMsg), (char*)pNewHead->content,
                                                  tlen - sizeof(STransMsgHead) - sizeof(STransCompMsg), oriLen);
+
+  if (decompLen != oriLen) {
+    taosMemoryFree(buf);
+    return TSDB_CODE_INVALID_MSG;
+  }
   memcpy((char*)pNewHead, (char*)pHead, sizeof(STransMsgHead));

   *len = oriLen + sizeof(STransMsgHead);
@@ -84,9 +89,36 @@ int32_t transDecompressMsg(char** msg, int32_t* len) {

   taosMemoryFree(pHead);
   *msg = buf;
+  return 0;
+}
+
+int32_t transDecompressMsgExt(char const* msg, int32_t len, char** out, int32_t* outLen) {
+  STransMsgHead* pHead = (STransMsgHead*)msg;
+  char*          pCont = transContFromHead(pHead);
+
+  STransCompMsg* pComp = (STransCompMsg*)pCont;
+  int32_t        oriLen = htonl(pComp->contLen);
+
+  int32_t tlen = len;
+  char*   buf = taosMemoryCalloc(1, oriLen + sizeof(STransMsgHead));
+  if (buf == NULL) {
+    return terrno;
+  }
+
+  STransMsgHead* pNewHead = (STransMsgHead*)buf;
+  int32_t        decompLen = LZ4_decompress_safe(pCont + sizeof(STransCompMsg), (char*)pNewHead->content,
+                                                 tlen - sizeof(STransMsgHead) - sizeof(STransCompMsg), oriLen);
   if (decompLen != oriLen) {
+    tError("msgLen:%d, originLen:%d, decompLen:%d", len, oriLen, decompLen);
+    taosMemoryFree(buf);
     return TSDB_CODE_INVALID_MSG;
   }
+  memcpy((char*)pNewHead, (char*)pHead, sizeof(STransMsgHead));
+
+  *out = buf;
+  *outLen = oriLen + sizeof(STransMsgHead);
+  pNewHead->msgLen = *outLen;
+  pNewHead->comp = 0;
+
   return 0;
 }
@@ -415,10 +415,10 @@ static void printFileSet(int32_t vgId, SArray* fileSet, const char* str) {
   int32_t sz = taosArrayGetSize(fileSet);
   for (int32_t i = 0; i < sz; i++) {
     SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i);
-    wInfo("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64
+    wTrace("vgId:%d, %s-%d, firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64
           ", createTs:%" PRId64 ", closeTs:%" PRId64,
           vgId, str, i, pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset,
           pFileInfo->createTs, pFileInfo->closeTs);
   }
 }
@@ -376,6 +376,10 @@ static FORCE_INLINE int32_t walCheckAndRoll(SWal *pWal) {
 int32_t walBeginSnapshot(SWal *pWal, int64_t ver, int64_t logRetention) {
   int32_t code = 0;

+  if (pWal->cfg.level == TAOS_WAL_SKIP) {
+    TAOS_RETURN(TSDB_CODE_SUCCESS);
+  }
+
   if (logRetention < 0) {
     TAOS_RETURN(TSDB_CODE_FAILED);
   }
@@ -404,6 +408,10 @@ _exit:
 int32_t walEndSnapshot(SWal *pWal) {
   int32_t code = 0, lino = 0;

+  if (pWal->cfg.level == TAOS_WAL_SKIP) {
+    TAOS_RETURN(TSDB_CODE_SUCCESS);
+  }
+
   TAOS_UNUSED(taosThreadRwlockWrlock(&pWal->mutex));
   int64_t ver = pWal->vers.verInSnapshotting;
@@ -510,4 +510,27 @@ TEST_F(WalSkipLevel, restart) {
   TearDown();

   SetUp();
+}
+
+TEST_F(WalSkipLevel, roll) {
+  int code;
+  int i;
+  for (i = 0; i < 100; i++) {
+    code = walAppendLog(pWal, i, 0, syncMeta, (void*)ranStr, ranStrLen);
+    ASSERT_EQ(code, 0);
+    code = walCommit(pWal, i);
+  }
+  walBeginSnapshot(pWal, i - 1, 0);
+  walEndSnapshot(pWal);
+  code = walAppendLog(pWal, 5, 0, syncMeta, (void*)ranStr, ranStrLen);
+  ASSERT_NE(code, 0);
+  for (; i < 200; i++) {
+    code = walAppendLog(pWal, i, 0, syncMeta, (void*)ranStr, ranStrLen);
+    ASSERT_EQ(code, 0);
+    code = walCommit(pWal, i);
+  }
+  code = walBeginSnapshot(pWal, i - 1, 0);
+  ASSERT_EQ(code, 0);
+  code = walEndSnapshot(pWal);
+  ASSERT_EQ(code, 0);
 }
@@ -34,7 +34,7 @@ typedef struct {
 } SCurlResp;

 static SAlgoMgmt tsAlgos = {0};
-static int32_t   taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContLen);
+static int32_t   taosAnalBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *pContLen);

 const char *taosAnalAlgoStr(EAnalAlgoType type) {
   switch (type) {
@@ -127,28 +127,44 @@ void taosAnalUpdate(int64_t newVer, SHashObj *pHash) {
 }

 bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) {
-  char    buf[TSDB_ANAL_ALGO_OPTION_LEN] = {0};
-  int32_t bufLen = tsnprintf(buf, sizeof(buf), "%s=", optName);
+  char  buf[TSDB_ANALYTIC_ALGO_OPTION_LEN] = {0};
+  char *pStart = NULL;
+  char *pEnd = NULL;

-  char *pos1 = strstr(option, buf);
-  char *pos2 = strstr(option, ANAL_ALGO_SPLIT);
-  if (pos1 != NULL) {
-    if (optMaxLen > 0) {
-      int32_t copyLen = optMaxLen;
-      if (pos2 != NULL) {
-        copyLen = (int32_t)(pos2 - pos1 - strlen(optName));
-        copyLen = MIN(copyLen, optMaxLen);
-      }
-      tstrncpy(optValue, pos1 + bufLen, copyLen);
-    }
-    return true;
-  } else {
+  pStart = strstr(option, optName);
+  if (pStart == NULL) {
     return false;
   }
+
+  pEnd = strstr(pStart, ANAL_ALGO_SPLIT);
+  if (optMaxLen > 0) {
+    if (pEnd > pStart) {
+      int32_t len = (int32_t)(pEnd - pStart);
+      len = MIN(len + 1, TSDB_ANALYTIC_ALGO_OPTION_LEN);
+      tstrncpy(buf, pStart, len);
+    } else {
+      int32_t len = MIN(tListLen(buf), strlen(pStart) + 1);
+      tstrncpy(buf, pStart, len);
+    }
+
+    char *pRight = strstr(buf, "=");
+    if (pRight == NULL) {
+      return false;
+    } else {
+      pRight += 1;
+    }
+
+    int32_t unused = strtrim(pRight);
+
+    int32_t vLen = MIN(optMaxLen, strlen(pRight) + 1);
+    tstrncpy(optValue, pRight, vLen);
+  }
+
+  return true;
 }

 bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optValue) {
-  char    buf[TSDB_ANAL_ALGO_OPTION_LEN] = {0};
+  char    buf[TSDB_ANALYTIC_ALGO_OPTION_LEN] = {0};
   int32_t bufLen = tsnprintf(buf, sizeof(buf), "%s=", optName);

   char *pos1 = strstr(option, buf);
@@ -163,7 +179,7 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optValu

 int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) {
   int32_t code = 0;
-  char    name[TSDB_ANAL_ALGO_KEY_LEN] = {0};
+  char    name[TSDB_ANALYTIC_ALGO_KEY_LEN] = {0};
   int32_t nameLen = 1 + tsnprintf(name, sizeof(name) - 1, "%d:%s", type, algoName);

   char *unused = strntolower(name, name, nameLen);
@@ -175,7 +191,7 @@ int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url,
     uDebug("algo:%s, type:%s, url:%s", algoName, taosAnalAlgoStr(type), url);
   } else {
     url[0] = 0;
-    terrno = TSDB_CODE_ANAL_ALGO_NOT_FOUND;
+    terrno = TSDB_CODE_ANA_ALGO_NOT_FOUND;
     code = terrno;
     uError("algo:%s, type:%s, url not found", algoName, taosAnalAlgoStr(type));
   }
@@ -276,16 +292,16 @@ _OVER:
   return code;
 }

-SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) {
+SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) {
   int32_t   code = -1;
   char     *pCont = NULL;
   int64_t   contentLen;
   SJson    *pJson = NULL;
   SCurlResp curlRsp = {0};

-  if (type == ANAL_HTTP_TYPE_GET) {
+  if (type == ANALYTICS_HTTP_TYPE_GET) {
     if (taosCurlGetRequest(url, &curlRsp) != 0) {
-      terrno = TSDB_CODE_ANAL_URL_CANT_ACCESS;
+      terrno = TSDB_CODE_ANA_URL_CANT_ACCESS;
       goto _OVER;
     }
   } else {
@@ -295,20 +311,20 @@ SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBu
       goto _OVER;
     }
     if (taosCurlPostRequest(url, &curlRsp, pCont, contentLen) != 0) {
-      terrno = TSDB_CODE_ANAL_URL_CANT_ACCESS;
+      terrno = TSDB_CODE_ANA_URL_CANT_ACCESS;
       goto _OVER;
     }
   }

   if (curlRsp.data == NULL || curlRsp.dataLen == 0) {
-    terrno = TSDB_CODE_ANAL_URL_RSP_IS_NULL;
+    terrno = TSDB_CODE_ANA_URL_RSP_IS_NULL;
     goto _OVER;
   }

   pJson = tjsonParse(curlRsp.data);
   if (pJson == NULL) {
     if (curlRsp.data[0] == '<') {
-      terrno = TSDB_CODE_ANAL_ANODE_RETURN_ERROR;
+      terrno = TSDB_CODE_ANA_ANODE_RETURN_ERROR;
     } else {
       terrno = TSDB_CODE_INVALID_JSON_FORMAT;
     }
static int32_t taosAnalJsonBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) {
|
static int32_t taosAnalJsonBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) {
|
||||||
char buf[64] = {0};
|
char buf[64] = {0};
|
||||||
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %" PRId64 ",\n", optName, optVal);
|
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %" PRId64 ",\n", optName, optVal);
|
||||||
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
||||||
|
@ -369,7 +385,7 @@ static int32_t taosAnalJsonBufWriteOptInt(SAnalBuf *pBuf, const char *optName, i
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) {
|
static int32_t taosAnalJsonBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) {
|
||||||
char buf[128] = {0};
|
char buf[128] = {0};
|
||||||
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": \"%s\",\n", optName, optVal);
|
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": \"%s\",\n", optName, optVal);
|
||||||
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
||||||
|
@ -378,7 +394,7 @@ static int32_t taosAnalJsonBufWriteOptStr(SAnalBuf *pBuf, const char *optName, c
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) {
|
static int32_t taosAnalJsonBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) {
|
||||||
char buf[128] = {0};
|
char buf[128] = {0};
|
||||||
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %f,\n", optName, optVal);
|
int32_t bufLen = tsnprintf(buf, sizeof(buf), "\"%s\": %f,\n", optName, optVal);
|
||||||
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
||||||
|
@ -387,7 +403,7 @@ static int32_t taosAnalJsonBufWriteOptFloat(SAnalBuf *pBuf, const char *optName,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteStr(SAnalBuf *pBuf, const char *buf, int32_t bufLen) {
|
static int32_t taosAnalJsonBufWriteStr(SAnalyticBuf *pBuf, const char *buf, int32_t bufLen) {
|
||||||
if (bufLen <= 0) {
|
if (bufLen <= 0) {
|
||||||
bufLen = strlen(buf);
|
bufLen = strlen(buf);
|
||||||
}
|
}
|
||||||
|
@ -397,9 +413,9 @@ static int32_t taosAnalJsonBufWriteStr(SAnalBuf *pBuf, const char *buf, int32_t
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteStart(SAnalBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "{\n", 0); }
|
static int32_t taosAnalJsonBufWriteStart(SAnalyticBuf *pBuf) { return taosAnalJsonBufWriteStr(pBuf, "{\n", 0); }
|
||||||
|
|
||||||
static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
|
static int32_t tsosAnalJsonBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) {
|
||||||
pBuf->filePtr = taosOpenFile(pBuf->fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH);
|
pBuf->filePtr = taosOpenFile(pBuf->fileName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH);
|
||||||
if (pBuf->filePtr == NULL) {
|
if (pBuf->filePtr == NULL) {
|
||||||
return terrno;
|
return terrno;
|
||||||
|
@ -409,7 +425,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
|
||||||
if (pBuf->pCols == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
if (pBuf->pCols == NULL) return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
pBuf->numOfCols = numOfCols;
|
pBuf->numOfCols = numOfCols;
|
||||||
|
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) {
|
||||||
return taosAnalJsonBufWriteStart(pBuf);
|
return taosAnalJsonBufWriteStart(pBuf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -426,7 +442,7 @@ static int32_t tsosAnalJsonBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
|
||||||
return taosAnalJsonBufWriteStart(pBuf);
|
return taosAnalJsonBufWriteStart(pBuf);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
|
static int32_t taosAnalJsonBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
|
||||||
char buf[128] = {0};
|
char buf[128] = {0};
|
||||||
bool first = (colIndex == 0);
|
bool first = (colIndex == 0);
|
||||||
bool last = (colIndex == pBuf->numOfCols - 1);
|
bool last = (colIndex == pBuf->numOfCols - 1);
|
||||||
|
@ -452,16 +468,16 @@ static int32_t taosAnalJsonBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteDataBegin(SAnalBuf *pBuf) {
|
static int32_t taosAnalJsonBufWriteDataBegin(SAnalyticBuf *pBuf) {
|
||||||
return taosAnalJsonBufWriteStr(pBuf, "\"data\": [\n", 0);
|
return taosAnalJsonBufWriteStr(pBuf, "\"data\": [\n", 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteStrUseCol(SAnalBuf *pBuf, const char *buf, int32_t bufLen, int32_t colIndex) {
|
static int32_t taosAnalJsonBufWriteStrUseCol(SAnalyticBuf *pBuf, const char *buf, int32_t bufLen, int32_t colIndex) {
|
||||||
if (bufLen <= 0) {
|
if (bufLen <= 0) {
|
||||||
bufLen = strlen(buf);
|
bufLen = strlen(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON) {
|
||||||
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
if (taosWriteFile(pBuf->filePtr, buf, bufLen) != bufLen) {
|
||||||
return terrno;
|
return terrno;
|
||||||
}
|
}
|
||||||
|
@ -474,11 +490,11 @@ static int32_t taosAnalJsonBufWriteStrUseCol(SAnalBuf *pBuf, const char *buf, in
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) {
|
static int32_t taosAnalJsonBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) {
|
||||||
return taosAnalJsonBufWriteStrUseCol(pBuf, "[\n", 0, colIndex);
|
return taosAnalJsonBufWriteStrUseCol(pBuf, "[\n", 0, colIndex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) {
|
static int32_t taosAnalJsonBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) {
|
||||||
if (colIndex == pBuf->numOfCols - 1) {
|
if (colIndex == pBuf->numOfCols - 1) {
|
||||||
return taosAnalJsonBufWriteStrUseCol(pBuf, "\n]\n", 0, colIndex);
|
return taosAnalJsonBufWriteStrUseCol(pBuf, "\n]\n", 0, colIndex);
|
||||||
|
|
||||||
|
@ -487,7 +503,7 @@ static int32_t taosAnalJsonBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
|
static int32_t taosAnalJsonBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
|
||||||
char buf[64];
|
char buf[64];
|
||||||
int32_t bufLen = 0;
|
int32_t bufLen = 0;
|
||||||
|
|
||||||
|
@ -541,12 +557,12 @@ static int32_t taosAnalJsonBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int
|
||||||
return taosAnalJsonBufWriteStrUseCol(pBuf, buf, bufLen, colIndex);
|
return taosAnalJsonBufWriteStrUseCol(pBuf, buf, bufLen, colIndex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) {
|
static int32_t taosAnalJsonBufWriteDataEnd(SAnalyticBuf *pBuf) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
char *pCont = NULL;
|
char *pCont = NULL;
|
||||||
int64_t contLen = 0;
|
int64_t contLen = 0;
|
||||||
|
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
|
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
|
||||||
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
|
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
|
||||||
|
|
||||||
|
@ -570,14 +586,14 @@ static int32_t taosAnalJsonBufWriteDataEnd(SAnalBuf *pBuf) {
|
||||||
return taosAnalJsonBufWriteStr(pBuf, "],\n", 0);
|
return taosAnalJsonBufWriteStr(pBuf, "],\n", 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalJsonBufWriteEnd(SAnalBuf *pBuf) {
|
static int32_t taosAnalJsonBufWriteEnd(SAnalyticBuf *pBuf) {
|
||||||
int32_t code = taosAnalJsonBufWriteOptInt(pBuf, "rows", pBuf->pCols[0].numOfRows);
|
int32_t code = taosAnalJsonBufWriteOptInt(pBuf, "rows", pBuf->pCols[0].numOfRows);
|
||||||
if (code != 0) return code;
|
if (code != 0) return code;
|
||||||
|
|
||||||
return taosAnalJsonBufWriteStr(pBuf, "\"protocol\": 1.0\n}", 0);
|
return taosAnalJsonBufWriteStr(pBuf, "\"protocol\": 1.0\n}", 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) {
|
int32_t taosAnalJsonBufClose(SAnalyticBuf *pBuf) {
|
||||||
int32_t code = taosAnalJsonBufWriteEnd(pBuf);
|
int32_t code = taosAnalJsonBufWriteEnd(pBuf);
|
||||||
if (code != 0) return code;
|
if (code != 0) return code;
|
||||||
|
|
||||||
|
@ -588,7 +604,7 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) {
|
||||||
if (code != 0) return code;
|
if (code != 0) return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
|
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
|
||||||
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
|
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
|
||||||
if (pCol->filePtr != NULL) {
|
if (pCol->filePtr != NULL) {
|
||||||
|
@ -603,14 +619,14 @@ int32_t taosAnalJsonBufClose(SAnalBuf *pBuf) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void taosAnalBufDestroy(SAnalBuf *pBuf) {
|
void taosAnalBufDestroy(SAnalyticBuf *pBuf) {
|
||||||
if (pBuf->fileName[0] != 0) {
|
if (pBuf->fileName[0] != 0) {
|
||||||
if (pBuf->filePtr != NULL) (void)taosCloseFile(&pBuf->filePtr);
|
if (pBuf->filePtr != NULL) (void)taosCloseFile(&pBuf->filePtr);
|
||||||
// taosRemoveFile(pBuf->fileName);
|
// taosRemoveFile(pBuf->fileName);
|
||||||
pBuf->fileName[0] = 0;
|
pBuf->fileName[0] = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
|
for (int32_t i = 0; i < pBuf->numOfCols; ++i) {
|
||||||
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
|
SAnalyticsColBuf *pCol = &pBuf->pCols[i];
|
||||||
if (pCol->fileName[0] != 0) {
|
if (pCol->fileName[0] != 0) {
|
||||||
|
@ -627,102 +643,102 @@ void taosAnalBufDestroy(SAnalBuf *pBuf) {
|
||||||
pBuf->numOfCols = 0;
|
pBuf->numOfCols = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols) {
|
int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return tsosAnalJsonBufOpen(pBuf, numOfCols);
|
return tsosAnalJsonBufOpen(pBuf, numOfCols);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) {
|
int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteOptStr(pBuf, optName, optVal);
|
return taosAnalJsonBufWriteOptStr(pBuf, optName, optVal);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) {
|
int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteOptInt(pBuf, optName, optVal);
|
return taosAnalJsonBufWriteOptInt(pBuf, optName, optVal);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) {
|
int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteOptFloat(pBuf, optName, optVal);
|
return taosAnalJsonBufWriteOptFloat(pBuf, optName, optVal);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
|
int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteColMeta(pBuf, colIndex, colType, colName);
|
return taosAnalJsonBufWriteColMeta(pBuf, colIndex, colType, colName);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf) {
|
int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteDataBegin(pBuf);
|
return taosAnalJsonBufWriteDataBegin(pBuf);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) {
|
int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteColBegin(pBuf, colIndex);
|
return taosAnalJsonBufWriteColBegin(pBuf, colIndex);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
|
int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteColData(pBuf, colIndex, colType, colValue);
|
return taosAnalJsonBufWriteColData(pBuf, colIndex, colType, colValue);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) {
|
int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteColEnd(pBuf, colIndex);
|
return taosAnalJsonBufWriteColEnd(pBuf, colIndex);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf) {
|
int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufWriteDataEnd(pBuf);
|
return taosAnalJsonBufWriteDataEnd(pBuf);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosAnalBufClose(SAnalBuf *pBuf) {
|
int32_t taosAnalBufClose(SAnalyticBuf *pBuf) {
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufClose(pBuf);
|
return taosAnalJsonBufClose(pBuf);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContLen) {
|
static int32_t taosAnalBufGetCont(SAnalyticBuf *pBuf, char **ppCont, int64_t *pContLen) {
|
||||||
*ppCont = NULL;
|
*ppCont = NULL;
|
||||||
*pContLen = 0;
|
*pContLen = 0;
|
||||||
|
|
||||||
if (pBuf->bufType == ANAL_BUF_TYPE_JSON || pBuf->bufType == ANAL_BUF_TYPE_JSON_COL) {
|
if (pBuf->bufType == ANALYTICS_BUF_TYPE_JSON || pBuf->bufType == ANALYTICS_BUF_TYPE_JSON_COL) {
|
||||||
return taosAnalJsonBufGetCont(pBuf->fileName, ppCont, pContLen);
|
return taosAnalJsonBufGetCont(pBuf->fileName, ppCont, pContLen);
|
||||||
} else {
|
} else {
|
||||||
return TSDB_CODE_ANAL_BUF_INVALID_TYPE;
|
return TSDB_CODE_ANA_BUF_INVALID_TYPE;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -730,7 +746,7 @@ static int32_t taosAnalBufGetCont(SAnalBuf *pBuf, char **ppCont, int64_t *pContL
|
||||||
|
|
||||||
int32_t taosAnalyticsInit() { return 0; }
|
int32_t taosAnalyticsInit() { return 0; }
|
||||||
void taosAnalyticsCleanup() {}
|
void taosAnalyticsCleanup() {}
|
||||||
SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalBuf *pBuf) { return NULL; }
|
SJson *taosAnalSendReqRetJson(const char *url, EAnalHttpType type, SAnalyticBuf *pBuf) { return NULL; }
|
||||||
|
|
||||||
int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { return 0; }
|
int32_t taosAnalGetAlgoUrl(const char *algoName, EAnalAlgoType type, char *url, int32_t urlLen) { return 0; }
|
||||||
bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) { return true; }
|
bool taosAnalGetOptStr(const char *option, const char *optName, char *optValue, int32_t optMaxLen) { return true; }
|
||||||
|
@ -738,18 +754,18 @@ bool taosAnalGetOptInt(const char *option, const char *optName, int64_t *optV
|
||||||
int64_t taosAnalGetVersion() { return 0; }
|
int64_t taosAnalGetVersion() { return 0; }
|
||||||
void taosAnalUpdate(int64_t newVer, SHashObj *pHash) {}
|
void taosAnalUpdate(int64_t newVer, SHashObj *pHash) {}
|
||||||
|
|
||||||
int32_t tsosAnalBufOpen(SAnalBuf *pBuf, int32_t numOfCols) { return 0; }
|
int32_t tsosAnalBufOpen(SAnalyticBuf *pBuf, int32_t numOfCols) { return 0; }
|
||||||
int32_t taosAnalBufWriteOptStr(SAnalBuf *pBuf, const char *optName, const char *optVal) { return 0; }
|
int32_t taosAnalBufWriteOptStr(SAnalyticBuf *pBuf, const char *optName, const char *optVal) { return 0; }
|
||||||
int32_t taosAnalBufWriteOptInt(SAnalBuf *pBuf, const char *optName, int64_t optVal) { return 0; }
|
int32_t taosAnalBufWriteOptInt(SAnalyticBuf *pBuf, const char *optName, int64_t optVal) { return 0; }
|
||||||
int32_t taosAnalBufWriteOptFloat(SAnalBuf *pBuf, const char *optName, float optVal) { return 0; }
|
int32_t taosAnalBufWriteOptFloat(SAnalyticBuf *pBuf, const char *optName, float optVal) { return 0; }
|
||||||
int32_t taosAnalBufWriteColMeta(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { return 0; }
|
int32_t taosAnalBufWriteColMeta(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, const char *colName) { return 0; }
|
||||||
int32_t taosAnalBufWriteDataBegin(SAnalBuf *pBuf) { return 0; }
|
int32_t taosAnalBufWriteDataBegin(SAnalyticBuf *pBuf) { return 0; }
|
||||||
int32_t taosAnalBufWriteColBegin(SAnalBuf *pBuf, int32_t colIndex) { return 0; }
|
int32_t taosAnalBufWriteColBegin(SAnalyticBuf *pBuf, int32_t colIndex) { return 0; }
|
||||||
int32_t taosAnalBufWriteColData(SAnalBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { return 0; }
|
int32_t taosAnalBufWriteColData(SAnalyticBuf *pBuf, int32_t colIndex, int32_t colType, void *colValue) { return 0; }
|
||||||
int32_t taosAnalBufWriteColEnd(SAnalBuf *pBuf, int32_t colIndex) { return 0; }
|
int32_t taosAnalBufWriteColEnd(SAnalyticBuf *pBuf, int32_t colIndex) { return 0; }
|
||||||
int32_t taosAnalBufWriteDataEnd(SAnalBuf *pBuf) { return 0; }
|
int32_t taosAnalBufWriteDataEnd(SAnalyticBuf *pBuf) { return 0; }
|
||||||
int32_t taosAnalBufClose(SAnalBuf *pBuf) { return 0; }
|
int32_t taosAnalBufClose(SAnalyticBuf *pBuf) { return 0; }
|
||||||
void taosAnalBufDestroy(SAnalBuf *pBuf) {}
|
void taosAnalBufDestroy(SAnalyticBuf *pBuf) {}
|
||||||
|
|
||||||
const char *taosAnalAlgoStr(EAnalAlgoType algoType) { return 0; }
|
const char *taosAnalAlgoStr(EAnalAlgoType algoType) { return 0; }
|
||||||
EAnalAlgoType taosAnalAlgoInt(const char *algoName) { return 0; }
|
EAnalAlgoType taosAnalAlgoInt(const char *algoName) { return 0; }
|
||||||
|
|
|
@@ -361,13 +361,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO, "Anode too many algori
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_LONG_ALGO_NAME, "Anode too long algorithm name")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_ANODE_TOO_MANY_ALGO_TYPE, "Anode too many algorithm type")

-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_RSP_IS_NULL,     "Analysis service response is NULL")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_URL_CANT_ACCESS,     "Analysis service can't access")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_FOUND,      "Analysis algorithm not found")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ALGO_NOT_LOAD,       "Analysis algorithm not loaded")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_BUF_INVALID_TYPE,    "Analysis invalid buffer type")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ANODE_RETURN_ERROR,  "Analysis failed since anode return error")
-TAOS_DEFINE_ERROR(TSDB_CODE_ANAL_ANODE_TOO_MANY_ROWS, "Analysis failed since too many input rows for anode")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_URL_RSP_IS_NULL,      "Analysis service response is NULL")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_URL_CANT_ACCESS,      "Analysis service can't access")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ALGO_NOT_FOUND,       "Analysis algorithm is missing")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ALGO_NOT_LOAD,        "Analysis algorithm not loaded")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_BUF_INVALID_TYPE,     "Analysis invalid buffer type")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_RETURN_ERROR,   "Analysis failed since anode return error")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS,  "Analysis failed since too many input rows for anode")
+TAOS_DEFINE_ERROR(TSDB_CODE_ANA_WN_DATA,              "white-noise data not processed")

 // mnode-sma
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST,        "SMA already exists")
@@ -113,6 +113,15 @@ class TDTestCase(TBase):
         if not result:
             raise Exception(f"key:{key} not found")

+    def checkRows(self, sql, nExpect, nRetry):
+        for i in range(nRetry):
+            res = tdSql.getResult(sql)
+            if len(res) == nExpect:
+                break
+            time.sleep(1)
+        if len(res) != nExpect:
+            raise Exception(f"rows:{len(res)} != {nExpect}")
+
     def alterBypassFlag(self):
         """Add test case for altering bypassFlag(TD-32907)
         """
@@ -151,8 +160,7 @@ class TDTestCase(TBase):
         tdSql.query("select * from stb0")
         tdSql.checkRows(2)
         tdSql.execute("flush database db")
-        tdSql.query("select * from stb0")
-        tdSql.checkRows(0)
+        self.checkRows("select * from stb0", 0, 10)
         tdSql.execute("alter all dnodes 'bypassFlag 0'")
         self.checkKeyValue(tdSql.getResult("show local variables"), "bypassFlag", "0")
         self.checkKeyValue(tdSql.getResult("show dnode 1 variables like 'bypassFlag'"), "bypassFlag", "0", 1, 2)
@@ -161,8 +169,9 @@ class TDTestCase(TBase):
         tdSql.query("select * from stb0")
         tdSql.checkRows(2)
         tdSql.execute("flush database db")
-        tdSql.query("select * from stb0")
-        tdSql.checkRows(2)
+        for i in range(5):
+            self.checkRows("select * from stb0", 2, 1)
+            time.sleep(1)

     # run
     def run(self):
@@ -366,3 +366,652 @@ taos> select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _i
  2020-02-01 00:00:16.000 | td32727 |          10 |          10 | true        |         1 |
  2020-02-01 00:00:16.000 | td32727 |          15 |          15 | true        |         1 |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(linear);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
+         _irowts         | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false     |         21 |
+ 2020-01-01 00:00:22.000 | true      |       NULL |
+ 2020-01-01 00:00:23.000 | true      |       NULL |
+ 2020-01-01 00:00:24.000 | true      |       NULL |
+ 2020-01-01 00:00:25.000 | true      |       NULL |
+ 2020-01-01 00:00:26.000 | true      |       NULL |
+ 2020-01-01 00:00:27.000 | true      |       NULL |
+ 2020-01-01 00:00:28.000 | true      |       NULL |
+ 2020-01-01 00:00:29.000 | true      |       NULL |
+ 2020-01-01 00:00:30.000 | true      |       NULL |
+
+taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
+         _irowts         | _isfilled | interp(c1) |
+====================================================
+ 2020-01-01 00:00:21.000 | false     |         21 |
+ 2020-01-01 00:00:22.000 | true      |          1 |
+ 2020-01-01 00:00:23.000 | true      |          1 |
+ 2020-01-01 00:00:24.000 | true      |          1 |
+ 2020-01-01 00:00:25.000 | true      |          1 |
+ 2020-01-01 00:00:26.000 | true      |          1 |
|
2020-01-01 00:00:26.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:27.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:28.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:29.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:30.000 | true | 1 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:23.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:24.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:25.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:26.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:27.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:28.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:29.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:30.000 | true | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:17.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:18.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:19.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:20.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:23.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:24.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:25.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:26.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:27.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:28.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:29.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:30.000 | true | NULL |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:23.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:24.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:25.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:26.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:27.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:28.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:29.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:30.000 | true | 1 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:23.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:24.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:25.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:26.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:27.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:28.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:29.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:30.000 | true | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 16 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 17 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 18 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 19 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 20 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:05.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:08.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:09.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:12.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:13.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:14.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:17.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:18.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:19.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:20.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:23.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:24.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:25.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:26.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:27.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:28.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:29.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:30.000 | true | NULL |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:23.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:24.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:25.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:26.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:27.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:28.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:29.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:30.000 | true | 1 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 3 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 3 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
2020-01-01 00:00:22.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:23.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:24.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:25.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:26.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:27.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:28.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:29.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:30.000 | true | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 3 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 2 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 4 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 5 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 7 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 8 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 9 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 11 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 12 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 13 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 14 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 16 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 17 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 18 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 19 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 20 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:05.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:08.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:09.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:12.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:13.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:14.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:17.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:18.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:19.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:20.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 3 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 3 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 3 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 6 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 10 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | false | 0 |
|
||||||
|
2020-01-01 00:00:01.000 | false | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 2 |
|
||||||
|
2020-01-01 00:00:03.000 | false | 3 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 4 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 5 |
|
||||||
|
2020-01-01 00:00:06.000 | false | 6 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 7 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 8 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 9 |
|
||||||
|
2020-01-01 00:00:10.000 | false | 10 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 11 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 12 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 13 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 14 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 16 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 17 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 18 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 19 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 20 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:01.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:02.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:03.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:04.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:05.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:06.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:07.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:08.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:09.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:10.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:11.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:12.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:13.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:14.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:17.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:18.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:19.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:20.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:01.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:03.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:06.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:10.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:01.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:03.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:06.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:10.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 15 |
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:15.000 | false | 15 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 16 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 17 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 18 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 19 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 20 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:01.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:02.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:03.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:04.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:05.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:06.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:07.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:08.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:09.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:10.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:11.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:12.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:13.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:14.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:15.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:16.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:17.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:18.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:19.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:20.000 | true | NULL |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:01.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:03.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:06.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:10.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:15.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 1 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:00.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:01.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:02.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:03.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:04.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:05.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:06.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:07.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:08.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:09.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:10.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:11.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:12.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:13.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:14.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:15.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:16.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:17.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:18.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:19.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:20.000 | true | 21 |
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
_irowts | _isfilled | interp(c1) |
|
||||||
|
====================================================
|
||||||
|
2020-01-01 00:00:21.000 | false | 21 |
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
|
||||||
|
taos> select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
|
||||||
|
|
|
|
@ -13,3 +13,53 @@ select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfille
select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (prev) order by irowts, c2, c3;
select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (linear) order by irowts, c2, c3;
select _irowts as irowts ,tbname as table_name, c2 as c_c2, c3 as c_c3, _isfilled as isfilled , interp(c1) as intp_c1 from test.td32727 partition by tbname,c2,c3 range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill (value, 1) order by irowts, c2, c3;
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-02 00:00:00' and '2020-01-01 00:00:00' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-02 00:00:00' range('2020-01-01 00:00:30', '2020-01-01 00:00:00') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:20' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:21', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:15', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:21' range('2020-01-01 00:00:00', '2020-01-01 00:00:30') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:00' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:15' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:21' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(null);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(value, 1);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(prev);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(next);
|
||||||
|
select _irowts, _isfilled, interp(c1) from test.td32861 where ts between '2020-01-01 00:00:22' and '2020-01-01 00:00:30' range('2020-01-01 00:00:00', '2020-01-01 00:00:21') every(1s) fill(linear);
|
||||||
|
|
|
@ -38,6 +38,7 @@ class TDTestCase(TBase):
            (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10), c9 tinyint unsigned, c10 smallint unsigned, c11 int unsigned, c12 bigint unsigned)
            '''
        )
        tdSql.execute("create table if not exists test.td32861(ts timestamp, c1 int);")

        tdLog.printNoPrefix("==========step2:insert data")

@ -45,6 +46,16 @@ class TDTestCase(TBase):
        tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar', 10, 10, 10, 10)")
        tdSql.execute(f"insert into test.td32727 values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar', 15, 15, 15, 15)")

        tdSql.execute(
            """insert into test.td32861 values
            ('2020-01-01 00:00:00', 0),
            ('2020-01-01 00:00:01', 1),
            ('2020-01-01 00:00:03', 3),
            ('2020-01-01 00:00:06', 6),
            ('2020-01-01 00:00:10', 10),
            ('2020-01-01 00:00:15', 15),
            ('2020-01-01 00:00:21', 21);"""
        )

    def test_normal_query_new(self, testCase):
        # read sql from .sql file and execute
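The td32861 rows above are deliberately sparse (gaps at seconds 2, 4-5, 7-9, 11-14 and 16-20), so the interp()/fill() queries in the .sql and .ans files exercise every fill mode across real gaps. A small standalone sketch of what the fill modes are expected to produce on that data, handy for eyeballing the .ans output earlier in this diff; this is plain Python, not part of the change, and the dictionary-based emulation is an assumption about fill behaviour between the first and last data point:

    data = {0: 0, 1: 1, 3: 3, 6: 6, 10: 10, 15: 15, 21: 21}  # second -> c1 in test.td32861

    def fill_at(sec, mode, value=1):
        # Emulate interp(c1) ... every(1s) fill(<mode>) for 0 <= sec <= 21.
        if sec in data:
            return data[sec]                      # _isfilled = false
        prev_s = max(s for s in data if s < sec)
        next_s = min(s for s in data if s > sec)
        if mode == "null":
            return None
        if mode == "value":
            return value
        if mode == "prev":
            return data[prev_s]
        if mode == "next":
            return data[next_s]
        if mode == "linear":
            frac = (sec - prev_s) / (next_s - prev_s)
            return data[prev_s] + frac * (data[next_s] - data[prev_s])

    # Second 18 sits between the points (15, 15) and (21, 21):
    # prev -> 15, next -> 21, linear -> 18.0, matching the expected output above.
    print(fill_at(18, "prev"), fill_at(18, "next"), fill_at(18, "linear"))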
|
|
|
@ -7,7 +7,7 @@ RUN apt-get install -y locales psmisc sudo tree libgeos-dev libgflags2.2 libgfl
RUN sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen && locale-gen
RUN pip3 config set global.index-url http://admin:123456@192.168.0.212:3141/admin/dev/+simple/
RUN pip3 config set global.trusted-host 192.168.0.212
RUN pip3 install taospy==2.7.16 taos-ws-py==0.3.3 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog
RUN pip3 install taospy==2.7.16 taos-ws-py==0.3.5 pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro decorator loguru hyperloglog
ENV LANG=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LC_ALL=en_US.UTF-8
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/'
|
|
|
@ -130,7 +130,7 @@ pip3 install kafka-python
python3 kafka_example_consumer.py

# 21
pip3 install taos-ws-py==0.3.3
pip3 install taos-ws-py==0.3.5
python3 conn_websocket_pandas.py

# 22
|
|
|
@ -232,6 +232,14 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/agg_group_NotReturnValue.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-32548.py

,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stddev_test.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/checkpoint_info.py -N 4
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/checkpoint_info2.py -N 4
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_multi_insert.py

,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False
||||||
|
@ -353,6 +361,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel_createdb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttl.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/ttlChangeOnWrite.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/compress_tsz1.py
||||||
|
@ -1294,6 +1303,7 @@
#,,y,script,./test.sh -f tsim/mnode/basic3.sim
,,y,script,./test.sh -f tsim/mnode/basic4.sim
,,y,script,./test.sh -f tsim/mnode/basic5.sim
,,y,script,./test.sh -f tsim/mnode/basic6.sim
,,y,script,./test.sh -f tsim/show/basic.sim
,,y,script,./test.sh -f tsim/table/autocreate.sim
,,y,script,./test.sh -f tsim/table/basic1.sim
|
|
|
@ -76,9 +76,9 @@ ulimit -c unlimited
md5sum /usr/lib/libtaos.so.1
md5sum /home/TDinternal/debug/build/lib/libtaos.so

#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.3
#get python connector and update: taospy 2.7.16 taos-ws-py 0.3.5
pip3 install taospy==2.7.16
pip3 install taos-ws-py==0.3.3
pip3 install taos-ws-py==0.3.5
$TIMEOUT_CMD $cmd
RET=$?
echo "cmd exit code: $RET"
|
|
|
@@ -16,7 +16,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
# formal
hostname = socket.gethostname()

-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+    'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)

def get_msg(text):
    return {

@@ -37,12 +48,12 @@ def get_msg(text):
    }


-def send_msg(json):
+def send_msg(url:str,json:dict):
    headers = {
        'Content-Type': 'application/json'
    }

-    req = requests.post(url=group_url, headers=headers, json=json)
+    req = requests.post(url=url, headers=headers, json=json)
    inf = req.json()
    if "StatusCode" in inf and inf["StatusCode"] == 0:
        pass

@@ -355,18 +366,27 @@ def main():
            core_dir = "none"

        text = f'''
-exit status: {msg_dict[status]}
-test scope: crash_gen
-owner: pxiao
-hostname: {hostname}
-start time: {starttime}
-end time: {endtime}
-git commit : {git_commit}
-log dir: {log_dir}
-core dir: {core_dir}
-cmd: {cmd}'''
-
-        send_msg(get_msg(text))
+Result: {msg_dict[status]}
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
+
+        text_result=text.split("Result: ")[1].split("Details")[0].strip()
+        print(text_result)
+
+        if text_result == "success":
+            send_msg(notification_robot_url, get_msg(text))
+        else:
+            send_msg(alert_robot_url, get_msg(text))
+
+        #send_msg(get_msg(text))
    except Exception as e:
        print("exception:", e)
    exit(status)

@@ -19,7 +19,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
# formal
hostname = socket.gethostname()

-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+    'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)

def get_msg(text):
    return {

@@ -40,13 +51,12 @@ def get_msg(text):
    }


-def send_msg(json):
+def send_msg(url:str,json:dict):
    headers = {
        'Content-Type': 'application/json'
    }

-    req = requests.post(url=group_url, headers=headers, json=json)
+    req = requests.post(url=url, headers=headers, json=json)
    inf = req.json()
    if "StatusCode" in inf and inf["StatusCode"] == 0:
        pass

@@ -389,18 +399,28 @@ def main():
            core_dir = "none"

        text = f'''
-exit status: {msg_dict[status]}
-test scope: crash_gen
-owner: pxiao
-hostname: {hostname}
-start time: {starttime}
-end time: {endtime}
-git commit : {git_commit}
-log dir: {log_dir}
-core dir: {core_dir}
-cmd: {cmd}'''
-
-        send_msg(get_msg(text))
+Result: {msg_dict[status]}
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
+
+        text_result=text.split("Result: ")[1].split("Details")[0].strip()
+        print(text_result)
+
+        if text_result == "success":
+            send_msg(notification_robot_url, get_msg(text))
+        else:
+            send_msg(alert_robot_url, get_msg(text))
+
+        #send_msg(get_msg(text))
    except Exception as e:
        print("exception:", e)
    exit(status)

@@ -16,7 +16,18 @@ msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4:
# formal
hostname = socket.gethostname()

-group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
+group_url_test = (
+    'https://open.feishu.cn/open-apis/bot/v2/hook/7e409a8e-4390-4043-80d0-4e0dd2cbae7d'
+)
+
+notification_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9"
+)
+
+alert_robot_url = (
+    "https://open.feishu.cn/open-apis/bot/v2/hook/02363732-91f1-49c4-879c-4e98cf31a5f3"
+)

def get_msg(text):
    return {

@@ -37,12 +48,12 @@ def get_msg(text):
    }


-def send_msg(json):
+def send_msg(url:str,json:dict):
    headers = {
        'Content-Type': 'application/json'
    }

-    req = requests.post(url=group_url, headers=headers, json=json)
+    req = requests.post(url=url, headers=headers, json=json)
    inf = req.json()
    if "StatusCode" in inf and inf["StatusCode"] == 0:
        pass

@@ -376,18 +387,28 @@ def main():
            core_dir = "none"

        text = f'''
-exit status: {msg_dict[status]}
-test scope: crash_gen
-owner: pxiao
-hostname: {hostname}
-start time: {starttime}
-end time: {endtime}
-git commit : {git_commit}
-log dir: {log_dir}
-core dir: {core_dir}
-cmd: {cmd}'''
-
-        send_msg(get_msg(text))
+Result: {msg_dict[status]}
+Details
+Owner: Jayden Jia
+Start time: {starttime}
+End time: {endtime}
+Hostname: {hostname}
+Commit: {git_commit}
+Cmd: {cmd}
+Log dir: {log_dir}
+Core dir: {core_dir}
+'''
+
+        text_result=text.split("Result: ")[1].split("Details")[0].strip()
+        print(text_result)
+
+        if text_result == "success":
+            send_msg(notification_robot_url, get_msg(text))
+        else:
+            send_msg(alert_robot_url, get_msg(text))
+
+        #send_msg(get_msg(text))
    except Exception as e:
        print("exception:", e)
    exit(status)

@@ -843,9 +843,10 @@ class TDSql:
        tdSql.query("select * from information_schema.ins_vnodes")
        #result: dnode_id|vgroup_id|db_name|status|role_time|start_time|restored|

+        results = list(tdSql.queryResult)
        for vnode_group_id in db_vgroups_list:
-            print(tdSql.queryResult)
-            for result in tdSql.queryResult:
+            for result in results:
+                print(f'result[2] is {result[2]}, db_name is {db_name}, result[1] is {result[1]}, vnode_group_id is {vnode_group_id}')
                if result[2] == db_name and result[1] == vnode_group_id:
                    tdLog.debug(f"dbname: {db_name}, vgroup :{vnode_group_id}, dnode is {result[0]}")
        print(useful_trans_dnodes_list)

@@ -0,0 +1,273 @@
// sample code to verify all TDengine API
// to compile: gcc -o apitest apitest.c -ltaos

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "taos.h"
static int64_t count = 10000;

int64_t genReqid() {
  count += 100;
  return count;
}

void stmtAsyncQueryCb(void* param, TAOS_RES* pRes, int code) {
  int affected_rows = taos_affected_rows(pRes);
  return;
  /*
  SSP_CB_PARAM* qParam = (SSP_CB_PARAM*)param;
  if (code == 0 && pRes) {
    if (qParam->fetch) {
      taos_fetch_rows_a(pRes, sqAsyncFetchCb, param);
    } else {
      if (qParam->free) {
        taos_free_result(pRes);
      }
      *qParam->end = 1;
    }
  } else {
    sqError("select", taos_errstr(pRes));
    *qParam->end = 1;
    taos_free_result(pRes);
  }
  */
}

void veriry_stmt(TAOS* taos) {
  TAOS_RES* result = taos_query(taos, "drop database if exists test;");
  taos_free_result(result);
  usleep(100000);
  result = taos_query(taos, "create database test;");

  int code = taos_errno(result);
  if (code != 0) {
    printf("\033[31mfailed to create database, reason:%s\033[0m\n", taos_errstr(result));
    taos_free_result(result);
    return;
  }
  taos_free_result(result);

  usleep(100000);
  taos_select_db(taos, "test");

  // create table
  /*
  const char* sql =
      "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin "
      "binary(40), blob nchar(10))";
  */
  const char* sql =
      "create table m1 (ts timestamp, blob2 nchar(10), blob nchar(10),blob3 nchar(10),blob4 nchar(10),blob5 "
      "nchar(10))";
  result = taos_query(taos, sql);
  code = taos_errno(result);
  if (code != 0) {
    printf("\033[31mfailed to create table, reason:%s\033[0m\n", taos_errstr(result));
    taos_free_result(result);
    return;
  }
  taos_free_result(result);

  // insert 10 records
  struct {
    int64_t ts[10];
    char    blob[10][1];
    char    blob2[10][1];
    char    blob3[10][1];
    char    blob4[10][1];
    char    blob5[10][1];

  } v;

  int32_t* t64_len = malloc(sizeof(int32_t) * 10);
  int32_t* blob_len = malloc(sizeof(int32_t) * 10);
  int32_t* blob_len2 = malloc(sizeof(int32_t) * 10);
  int32_t* blob_len3 = malloc(sizeof(int32_t) * 10);
  int32_t* blob_len4 = malloc(sizeof(int32_t) * 10);
  int32_t* blob_len5 = malloc(sizeof(int32_t) * 10);

#include "time.h"
  clock_t start, end;
  TAOS_STMT2_OPTION option = {0, true, true, stmtAsyncQueryCb, NULL};

  start = clock();
  TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
  end = clock();
  printf("init time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
  // TAOS_MULTI_BIND params[10];
  TAOS_STMT2_BIND params[10];
  char is_null[10] = {0};

  params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
  // params[0].buffer_length = sizeof(v.ts[0]);
  params[0].buffer = v.ts;
  params[0].length = t64_len;
  params[0].is_null = is_null;
  params[0].num = 10;

  params[1].buffer_type = TSDB_DATA_TYPE_NCHAR;
  // params[8].buffer_length = sizeof(v.blob2[0]);
  params[1].buffer = v.blob2;
  params[1].length = blob_len2;
  params[1].is_null = is_null;
  params[1].num = 10;

  params[2].buffer_type = TSDB_DATA_TYPE_NCHAR;
  // params[9].buffer_length = sizeof(v.blob[0]);
  params[2].buffer = v.blob3;
  params[2].length = blob_len;
  params[2].is_null = is_null;
  params[2].num = 10;

  params[3].buffer_type = TSDB_DATA_TYPE_NCHAR;
  // params[9].buffer_length = sizeof(v.blob[0]);
  params[3].buffer = v.blob4;
  params[3].length = blob_len;
  params[3].is_null = is_null;
  params[3].num = 10;

  params[4].buffer_type = TSDB_DATA_TYPE_NCHAR;
  // params[9].buffer_length = sizeof(v.blob[0]);
  params[4].buffer = v.blob;
  params[4].length = blob_len;
  params[4].is_null = is_null;
  params[4].num = 10;

  params[5].buffer_type = TSDB_DATA_TYPE_NCHAR;
  // params[9].buffer_length = sizeof(v.blob[0]);
  params[5].buffer = v.blob5;
  params[5].length = blob_len;
  params[5].is_null = is_null;
  params[5].num = 10;

  sql = "insert into ? (ts, blob2, blob, blob3, blob4, blob5) values(?,?,?,?,?,?)";
  start = clock();
  code = taos_stmt2_prepare(stmt, sql, 0);
  end = clock();
  printf("prepare time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
  if (code != 0) {
    printf("\033[31mfailed to execute taos_stmt_prepare. error:%s\033[0m\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return;
  }
  /*
  code = taos_stmt_set_tbname(stmt, "m1");
  if (code != 0) {
    printf("\033[31mfailed to execute taos_stmt_prepare. error:%s\033[0m\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return;
  }
  */

  int64_t ts = 1591060628000;
  for (int i = 0; i < 10; ++i) {
    is_null[i] = 0;

    v.ts[i] = ts++;

    v.blob[i][0] = 'a' + i;
    v.blob2[i][0] = 'f' + i;
    v.blob3[i][0] = 't' + i;
    v.blob4[i][0] = 'A' + i;
    v.blob5[i][0] = 'G' + i;

    // v.blob2[i] = malloc(strlen("一二三四五六七十九八"));
    // v.blob[i] = malloc(strlen("十九八七六五四三二一"));

    // strcpy(v.blob2[i], "一二三四五六七十九八");
    // strcpy(v.blob[i], "十九八七六五四三二一");

    blob_len[i] = sizeof(char);
    blob_len2[i] = sizeof(char);
    blob_len3[i] = sizeof(char);
    blob_len4[i] = sizeof(char);
    blob_len5[i] = sizeof(char);
  }

  char* tbname = "m1";
  TAOS_STMT2_BIND* bind_cols[1] = {&params[0]};
  TAOS_STMT2_BINDV bindv = {1, &tbname, NULL, &bind_cols[0]};
  start = clock();
  // taos_stmt2_bind_param(stmt, "m1", NULL, params, -1);
  taos_stmt2_bind_param(stmt, &bindv, -1);
  end = clock();
  printf("bind time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
  // taos_stmt_bind_param_batch(stmt, params);
  // taos_stmt_add_batch(stmt);
  /*
  int param_count = -1;
  code = taos_stmt2_param_count(stmt, &param_count);
  if (code != 0) {
    printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return;
  }
  printf("param_count: %d\n", param_count);
  */
  TAOS_FIELD_E* fields = NULL;
  int field_count = -1;
  start = clock();
  code = taos_stmt2_get_fields(stmt, TAOS_FIELD_COL, &field_count, NULL);
  end = clock();
  printf("get fields time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
  if (code != 0) {
    printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return;
  }
  printf("col field_count: %d\n", field_count);
  start = clock();
  taos_stmt2_free_fields(stmt, fields);
  end = clock();
  printf("free time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);
  /*
  code = taos_stmt2_get_fields(stmt, TAOS_FIELD_TAG, &field_count, &fields);
  if (code != 0) {
    printf("\033[31mfailed to execute taos_stmt_param_count. error:%s\033[0m\n", taos_stmt_errstr(stmt));
    taos_stmt_close(stmt);
    return;
  }
  printf("tag field_count: %d\n", field_count);
  taos_stmt2_free_fields(stmt, fields);
  */
  // if (taos_stmt_execute(stmt) != 0) {
  start = clock();
  // if (taos_stmt2_exec(stmt, NULL, stmtAsyncQueryCb, NULL) != 0) {
  if (taos_stmt2_exec(stmt, NULL) != 0) {
    printf("\033[31mfailed to execute insert statement.error:%s\033[0m\n", taos_stmt_errstr(stmt));
    taos_stmt2_close(stmt);
    return;
  }
  end = clock();
  printf("exec time:%f\n", (double)(end - start) / CLOCKS_PER_SEC);

  taos_stmt2_close(stmt);

  free(blob_len);
  free(blob_len2);
  free(blob_len5);
  free(blob_len3);
  free(blob_len4);
}

int main(int argc, char* argv[]) {
  const char* host = "127.0.0.1";
  const char* user = "root";
  const char* passwd = "taosdata";

  taos_options(TSDB_OPTION_TIMEZONE, "GMT-8");
  TAOS* taos = taos_connect(host, user, passwd, "", 0);
  if (taos == NULL) {
    printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos));
    exit(1);
  }

  printf("********* verify stmt query **********\n");
  veriry_stmt(taos);

  printf("done\n");
  taos_close(taos);
  taos_cleanup();
}

@@ -81,8 +81,8 @@ void veriry_stmt(TAOS* taos) {
    float  f4[10];
    double f8[10];
    char   bin[10][40];
-    char   blob[10][80];
-    char   blob2[10][80];
+    char   blob[10][1];
+    char   blob2[10][1];
  } v;

  int32_t* t8_len = malloc(sizeof(int32_t) * 10);

@@ -218,8 +218,14 @@ void veriry_stmt(TAOS* taos) {
    for (int j = 0; j < sizeof(v.bin[0]); ++j) {
      v.bin[i][j] = (char)(i + '0');
    }
-    strcpy(v.blob2[i], "一二三四五六七十九八");
-    strcpy(v.blob[i], "一二三四五六七八九十");
+    v.blob[i][0] = 'a' + i;
+    v.blob2[i][0] = 'A' + i;
+
+    // v.blob2[i] = malloc(strlen("一二三四五六七十九八"));
+    // v.blob[i] = malloc(strlen("十九八七六五四三二一"));
+
+    // strcpy(v.blob2[i], "一二三四五六七十九八");
+    // strcpy(v.blob[i], "十九八七六五四三二一");

    t8_len[i] = sizeof(int8_t);
    t16_len[i] = sizeof(int16_t);

@@ -228,10 +234,9 @@ void veriry_stmt(TAOS* taos) {
    float_len[i] = sizeof(float);
    double_len[i] = sizeof(double);
    bin_len[i] = sizeof(v.bin[0]);
-    blob_len[i] = (int32_t)strlen(v.blob[i]);
-    blob_len2[i] = (int32_t)strlen(v.blob2[i]);
+    blob_len[i] = sizeof(char);
+    blob_len2[i] = sizeof(char);
  }

  char* tbname = "m1";
  TAOS_STMT2_BIND* bind_cols[1] = {&params[0]};
  TAOS_STMT2_BINDV bindv = {1, &tbname, NULL, &bind_cols[0]};

@@ -0,0 +1,413 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c compressMsgSize -v 0
system sh/cfg.sh -n dnode2 -c compressMsgSize -v 0
system sh/cfg.sh -n dnode3 -c compressMsgSize -v 0
system sh/cfg.sh -n dnode4 -c compressMsgSize -v 0
system sh/exec.sh -n dnode1 -s start
sql connect

print =============== step1: create dnodes
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400

$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 5 then
return -1
endi
sql select * from information_schema.ins_dnodes
if $data(1)[4] != ready then
goto step1
endi

print =============== step2: create dnodes - with error
sql_error create mnode on dnode 1;
sql_error create mnode on dnode 2;
sql_error create mnode on dnode 3;
sql_error create mnode on dnode 4;
sql_error create mnode on dnode 5;
sql_error create mnode on dnode 6;

print =============== step3: create mnode 2 and 3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
$x = 0
step3:
$x = $x + 1
sleep 1000
if $x == 5 then
return -1
endi
sql select * from information_schema.ins_dnodes
if $data(2)[4] != ready then
goto step3
endi
if $data(3)[4] != ready then
goto step3
endi
if $data(4)[4] != ready then
goto step3
endi

sql create mnode on dnode 2
sql create mnode on dnode 3

$x = 0
step31:
$x = $x + 1
sleep 1000
if $x == 50 then
return -1
endi
sql select * from information_schema.ins_mnodes
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum == 0 then
goto step31
endi

print =============== step4: create dnodes - with error
sql_error create mnode on dnode 1
sql_error create mnode on dnode 2;
sql_error create mnode on dnode 3;
sql_error create mnode on dnode 4;
sql_error create mnode on dnode 5;
sql_error create mnode on dnode 6;

print =============== step5: drop mnodes - with error
sql_error drop mnode on dnode 1
sql_error drop mnode on dnode 4
sql_error drop mnode on dnode 5
sql_error drop mnode on dnode 6

system sh/exec.sh -n dnode2 -s stop
$x = 0
step5:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
goto step5
endi
if $data(2)[4] != offline then
goto step5
endi
if $data(3)[4] != ready then
goto step5
endi
if $data(4)[4] != ready then
goto step5
endi

sql_error drop mnode on dnode 2

system sh/exec.sh -n dnode2 -s start
$x = 0
step51:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
goto step51
endi
if $data(2)[4] != ready then
goto step51
endi
if $data(3)[4] != ready then
goto step51
endi
if $data(4)[4] != ready then
goto step51
endi

print =============== step6: stop mnode1
system sh/exec.sh -n dnode1 -s stop
# sql_error drop mnode on dnode 1

$x = 0
step61:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql select * from information_schema.ins_mnodes -x step61
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step61
endi

print =============== step7: start mnode1 and wait it online
system sh/exec.sh -n dnode1 -s start

$x = 0
step71:
$x = $x + 1
sleep 1000
if $x == 50 then
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
goto step71
endi
if $data(2)[4] != ready then
goto step71
endi
if $data(3)[4] != ready then
goto step71
endi
if $data(4)[4] != ready then
goto step71
endi

print =============== step8: stop mnode1 and drop it
system sh/exec.sh -n dnode1 -s stop

$x = 0
step81:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step81
endi

print =============== step9: start mnode1 and wait it dropped
print check mnode has leader step9a
$x = 0
step9a:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step9a
endi

print start dnode1 step9b
system sh/exec.sh -n dnode1 -s start
$x = 0
step9b:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
print check dnode1 ready
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
goto step9b
endi
if $data(2)[4] != ready then
goto step9b
endi
if $data(3)[4] != ready then
goto step9b
endi
if $data(4)[4] != ready then
goto step9b
endi

sleep 4000
print check mnode has leader step9c
$x = 0
step9c:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step9c
endi

print drop mnode step9d
sql drop mnode on dnode 1

$x = 0
step9d:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step9d
endi
if $rows != 2 then
goto step9d
endi

print =============== stepa: create mnode1 again
sql create mnode on dnode 1
$x = 0
stepa:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum == 0 then
goto stepa
endi
if $leaderNum != 1 then
return -1
endi

$x = 0
stepb:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $rows != 4 then
return -1
endi
if $data(1)[4] != ready then
goto stepb
endi
if $data(2)[4] != ready then
goto stepb
endi
if $data(3)[4] != ready then
goto stepb
endi
if $data(4)[4] != ready then
goto stepb
endi

system sh/exec.sh -n dnode1 -s stop
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode3 -s stop
system sh/exec.sh -n dnode4 -s stop

@@ -0,0 +1,67 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1

system sh/cfg.sh -n dnode1 -c checkpointInterval -v 60

system sh/exec.sh -n dnode1 -s start
sleep 50
sql connect

print step1
print =============== create database
sql create database test vgroups 4;
sql use test;

sql create stable st(ts timestamp, a int, b int , c int)tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);

sql create stream streams1 trigger force_window_close IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt1 as select _wstart, count(a) from st partition by tbname interval(2s);
sql create stream streams2 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt2 as select _wstart, count(a) from st interval(2s);

run tsim/stream/checkTaskStatus.sim

sleep 70000

print restart taosd 01 ......

system sh/stop_dnodes.sh

system sh/exec.sh -n dnode1 -s start

run tsim/stream/checkTaskStatus.sim

sql insert into t1 values(now + 3000a,1,1,1);

$loop_count = 0
loop0:

sleep 2000

$loop_count = $loop_count + 1
if $loop_count == 20 then
return -1
endi

print select * from streamt1;
sql select * from streamt1;

print $data00 $data01 $data02

if $rows == 0 then
goto loop0
endi

print select * from streamt2;
sql select * from streamt2;

print $data00 $data01 $data02

if $rows == 0 then
goto loop0
endi

print end

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@@ -0,0 +1,89 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-


from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
from util.sqlset import *
import glob

def scanFiles(pattern):
    res = []
    for f in glob.iglob(pattern):
        res += [f]
    return res

def checkFiles(pattern, state):
    res = scanFiles(pattern)
    tdLog.info(res)
    num = len(res)
    if num:
        if state:
            tdLog.info("%s: %d files exist. expect: files exist" % (pattern, num))
        else:
            tdLog.exit("%s: %d files exist. expect: files not exist." % (pattern, num))
    else:
        if state:
            tdLog.exit("%s: %d files exist. expect: files exist" % (pattern, num))
        else:
            tdLog.info("%s: %d files exist. expect: files not exist." % (pattern, num))

class TDTestCase:
    def init(self, conn, logSql, replicaVar=1):

        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.setsql = TDSetSql()

    def basic(self):
        tdLog.info("============== basic test ===============")
        cfg={
            '/mnt/data1 0 1 0' : 'dataDir',
            '/mnt/data2 0 0 0' : 'dataDir',
            '/mnt/data3 0 0 0' : 'dataDir',
            '/mnt/data4 0 0 0' : 'dataDir'
        }
        tdSql.createDir('/mnt/data1')
        tdSql.createDir('/mnt/data2')
        tdSql.createDir('/mnt/data3')
        tdSql.createDir('/mnt/data4')

        tdDnodes.stop(1)
        tdDnodes.deploy(1,cfg)
        tdDnodes.start(1)

        checkFiles(r'/mnt/data1/*/*',1)
        checkFiles(r'/mnt/data2/*/*',0)

        tdSql.execute('create database nws vgroups 20 stt_trigger 1 wal_level 1 wal_retention_period 0')

        checkFiles(r'/mnt/data1/vnode/*/wal',5)
        checkFiles(r'/mnt/data2/vnode/*/wal',5)
        checkFiles(r'/mnt/data3/vnode/*/wal',5)
        checkFiles(r'/mnt/data4/vnode/*/wal',5)

    def run(self):
        self.basic()


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -0,0 +1,32 @@
from util.sql import *
from util.common import *
import taos
taos.taos_connect
class TDTestCase:
    def init(self, conn, logSql, replicaVar = 1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to excute {__file__}")
        self.conn = conn
        tdSql.init(conn.cursor(), logSql)
    def initdb(self):
        tdSql.execute("drop database if exists d0")
        tdSql.execute("create database d0")
        tdSql.execute("use d0")
        tdSql.execute("create stable stb0 (ts timestamp, w_ts timestamp, opc nchar(100), quality int) tags(t0 int)")
        tdSql.execute("create table t0 using stb0 tags(1)")
        tdSql.execute("create table t1 using stb0 tags(2)")
    def multi_insert(self):
        for i in range(5):
            tdSql.execute(f"insert into t1 values(1721265436000, now() + {i + 1}s, '0', 12) t1(opc, quality, ts) values ('opc2', 192, now()+ {i + 2}s) t1(ts, opc, quality) values(now() + {i + 3}s, 'opc4', 10) t1 values(1721265436000, now() + {i + 4}s, '1', 191) t1(opc, quality, ts) values('opc5', 192, now() + {i + 5}s) t1 values(now(), now() + {i + 6}s, '2', 192)")
        tdSql.execute("insert into t0 values(1721265436000,now(),'0',192) t0(quality,w_ts,ts) values(192,now(),1721265326000) t0(quality,w_t\
s,ts) values(190,now()+1s,1721265326000) t0 values(1721265436000,now()+2s,'1',191) t0(quality,w_ts,ts) values(192,now()+3s,\
1721265326002) t0(ts,w_ts,opc,quality) values(1721265436003,now()+4s,'3',193) t0 values(now(), now() + 4s , '2', 192)")
    def run(self):
        self.initdb()
        self.multi_insert()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

Some files were not shown because too many files have changed in this diff.