Merge branch '3.0' of https://github.com/taosdata/TDengine into refact/tsdb_last
commit e7d177a479
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
GIT_TAG 9fa7e2f
GIT_TAG 648cc62
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -73,7 +73,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.38</version>
<version>3.0.0</version>
</dependency>
```
@@ -102,7 +102,7 @@ module goexample

go 1.17

require github.com/taosdata/driver-go/v2 develop
require github.com/taosdata/driver-go/v3 latest
```

:::note
@@ -137,7 +137,7 @@ Node.js connector provides different ways of establishing connections by providi
1. Install Node.js Native Connector

```
npm i td2.0-connector
npm install @tdengine/client
```

:::note
@@ -147,7 +147,7 @@ It's recommend to use Node whose version is between `node-v12.8.0` and `node-v13
2. Install Node.js REST Connector

```
npm i td2.0-rest-connector
npm install @tdengine/rest
```

</TabItem>
@@ -167,7 +167,7 @@ Just need to add the reference to [TDengine.Connector](https://www.nuget.org/pac
</PropertyGroup>

<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.6" />
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
</ItemGroup>

</Project>
@@ -187,7 +187,7 @@ The sample code below are based on dotnet6.0, they may need to be adjusted if yo
</TabItem>
<TabItem label="R" value="r">

1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/).
1. Download [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/).
2. Install the dependency package `RJDBC`:

```R
@@ -2,13 +2,18 @@ Execute TDengine CLI program `taos` directly from the Linux shell to connect to

```text
$ taos
Welcome to the TDengine shell from Linux, Client Version:2.0.5.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.

Server is Community Edition.

taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
=========================================================================================================================================================================================================================
test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
Query OK, 2 row(s) in set (0.001198s)
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
db | 2022-08-04 14:14:49.385 | 2 | 4 | 1 | off | 14400m | 5254560m,5254560m,5254560m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
Query OK, 3 rows in database (0.019154s)

taos>
```
@@ -1,14 +1,19 @@
Go to the `C:\TDengine` directory from `cmd` and execute TDengine CLI program `taos.exe` directly to connect to the TDengine service and enter the TDengine CLI interface, for example, as follows:

```text
C:\TDengine>taos
Welcome to the TDengine shell from Linux, Client Version:2.0.5.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
===================================================================================================================================================================================================================================================================
test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
Query OK, 2 row(s) in set (0.045000s)
taos>
Welcome to the TDengine shell from Windows, Client Version:3.0.0.0
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.

Server is Community Edition.

taos> show databases;
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
Query OK, 3 rows in database (0.123000s)

taos>
```
@@ -8,9 +8,9 @@ library("rJava")
library("RJDBC")

args<- commandArgs(trailingOnly = TRUE)
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-2.0.37-dist.jar"
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.0.0-dist.jar"
driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path)
conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")
dbGetQuery(conn, "SELECT server_version()")
dbDisconnect(conn)
# ANCHOR_END: demo
# ANCHOR_END: demo
@@ -74,7 +74,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.38</version>
<version>3.0.0</version>
</dependency>
```

@@ -103,7 +103,7 @@ module goexample

go 1.17

require github.com/taosdata/driver-go/v2 develop
require github.com/taosdata/driver-go/v3 latest
```

:::note

@@ -138,7 +138,7 @@ Node.js 连接器通过不同的包提供不同的连接方式。
1. 安装 Node.js 原生连接器

```
npm i td2.0-connector
npm install @tdengine/client
```

:::note

@@ -148,7 +148,7 @@ Node.js 连接器通过不同的包提供不同的连接方式。
2. 安装 Node.js REST 连接器

```
npm i td2.0-rest-connector
npm install @tdengine/rest
```

</TabItem>

@@ -168,7 +168,7 @@ Node.js 连接器通过不同的包提供不同的连接方式。
</PropertyGroup>

<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="1.0.6" />
<PackageReference Include="TDengine.Connector" Version="3.0.0" />
</ItemGroup>

</Project>

@@ -188,7 +188,7 @@ dotnet add package TDengine.Connector
</TabItem>
<TabItem label="R" value="r">

1. 下载 [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/2.0.38/)。
1. 下载 [taos-jdbcdriver-version-dist.jar](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.0.0/)。
2. 安装 R 的依赖包`RJDBC`:

```R
@@ -16,72 +16,96 @@ description: "支持用户编码的聚合函数和标量函数,在查询中嵌

用户可以按照下列函数模板定义自己的标量计算函数

`void udfNormalFunc(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, int* numOfOutput, short otype, short obytes, SUdfInit* buf)`
`int32_t udf(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`

其中 udfNormalFunc 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算,其参数项是固定的,用于按照约束完成与引擎之间的数据交换。
其中 udf 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算。

- udfNormalFunc 中各参数的具体含义是:
  - data:输入数据。
  - itype:输入数据的类型。这里采用的是短整型表示法,与各种数据类型对应的值可以参见 [column_meta 中的列类型说明](/reference/rest-api/)。例如 4 用于表示 INT 型。
  - iBytes:输入数据中每个值会占用的字节数。
  - numOfRows:输入数据的总行数。
  - ts:主键时间戳在输入中的列数据(只读)。
  - dataOutput:输出数据的缓冲区,缓冲区大小为用户指定的输出类型大小 \* numOfRows。
  - interBuf:中间计算结果的缓冲区,大小为用户在创建 UDF 时指定的 BUFSIZE 大小。通常用于计算中间结果与最终结果不一致时使用,由引擎负责分配与释放。
  - tsOutput:主键时间戳在输出时的列数据,如果非空可用于输出结果对应的时间戳。
  - numOfOutput:输出结果的个数(行数)。
  - oType:输出数据的类型。取值含义与 itype 参数一致。
  - oBytes:输出数据中每个值占用的字节数。
  - buf:用于在 UDF 与引擎间的状态控制信息传递块。

[add_one.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/add_one.c) 是结构最简单的 UDF 实现,也即上面定义的 udfNormalFunc 函数的一个具体实现。其功能为:对传入的一个数据列(可能因 WHERE 子句进行了筛选)中的每一项,都输出 +1 之后的值,并且要求输入的列数据类型为 INT。
- scalarFunction 中各参数的具体含义是:
  - inputDataBlock: 输入的数据块
  - resultColumn: 输出列
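下面给出一个示意性的标量 UDF 草图(仅用于说明接口,并非官方实现):把输入第一列(假定为 INT 类型)中的每个值加 1 后写入输出列。其中直接访问的 SUdfDataBlock / SUdfColumn 字段定义见下文 "UDF 数据结构" 一节;示例假设输出列缓冲区已由引擎按行数分配好,实际代码建议改用 taosudf.h 提供的便利函数。

```c
#include <string.h>
#include "taosudf.h"  // 假设:SUdfDataBlock、SUdfColumn 等结构在此头文件中定义

// 示意实现:对第一列(INT 类型)逐行加 1;udf 为函数名占位符
int32_t udf(SUdfDataBlock* inputDataBlock, SUdfColumn* resultColumn) {
  SUdfColumn* inCol = inputDataBlock->udfCols[0];
  int32_t     rows  = inputDataBlock->numOfRows;

  const int32_t* in  = (const int32_t*)inCol->colData.fixLenCol.data;
  int32_t*       out = (int32_t*)resultColumn->colData.fixLenCol.data;  // 假设缓冲区已由引擎分配

  for (int32_t i = 0; i < rows; ++i) {
    out[i] = in[i] + 1;  // 逐行标量计算
  }

  // 透传空值位图,使输入为 NULL 的行在输出中仍为 NULL
  if (inCol->hasNull && inCol->colData.fixLenCol.nullBitmap != NULL) {
    memcpy(resultColumn->colData.fixLenCol.nullBitmap, inCol->colData.fixLenCol.nullBitmap,
           inCol->colData.fixLenCol.nullBitmapLen);
    resultColumn->hasNull = true;
  }
  resultColumn->colData.numOfRows = rows;
  return 0;  // 返回 0 表示成功
}
```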
### 聚合函数

用户可以按照如下函数模板定义自己的聚合函数。

`void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf)`
`int32_t udf_start(SUdfInterBuf *interBuf)`

其中 udfMergeFunc 是函数名的占位符,以上述模板实现的函数用于对计算中间结果进行聚合,只有针对超级表的聚合查询才需要调用该函数。其中各参数的具体含义是:
`int32_t udf(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)`

- data:udfNormalFunc 的输出数据数组,如果使用了 interBuf 那么 data 就是 interBuf 的数组。
- numOfRows:data 中数据的行数。
- dataOutput:输出数据的缓冲区,大小等于一条最终结果的大小。如果此时输出还不是最终结果,可以选择输出到 interBuf 中即 data 中。
- numOfOutput:输出结果的个数(行数)。
- buf:用于在 UDF 与引擎间的状态控制信息传递块。
`int32_t udf_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
其中 udf 是函数名的占位符。其中各参数的具体含义是:

[abs_max.c](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/abs_max.c) 实现的是一个聚合函数,功能是对一组数据按绝对值取最大值。
- interBuf:中间结果 buffer。
- inputBlock:输入的数据块。
- newInterBuf:新的中间结果buffer。
- result:最终结果。

其计算过程为:与所在查询语句相关的数据会被分为多个行数据块,对每个行数据块调用 udfNormalFunc(在本例的实现代码中,实际函数名是 `abs_max`)来生成每个子表的中间结果,再将子表的中间结果调用 udfMergeFunc(本例中,其实际的函数名是 `abs_max_merge`)进行聚合,生成超级表的最终聚合结果或中间结果。聚合查询最后还会通过 udfFinalizeFunc(本例中,其实际的函数名是 `abs_max_finalize`)再把超级表的中间结果处理为最终结果,最终结果只能含 0 或 1 条结果数据。

其他典型场景,如协方差的计算,也可通过定义聚合 UDF 的方式实现。
其计算过程为:首先调用 udf_start 生成结果 buffer,然后相关的数据会被分为多个行数据块,对每个行数据块调用 udf 用数据块更新中间结果,最后再调用 udf_finish 从中间结果产生最终结果,最终结果只能含 0 或 1 条结果数据。
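下面是与上述三个模板对应的一个聚合 UDF 草图(示意,非官方实现),功能是统计输入的总行数(为简化不区分 NULL):中间结果在 SUdfInterBuf.buf 中保存一个 int64 计数值。这里假设 buf 已由引擎按创建函数时指定的 BUFSIZE 分配,并把 numOfResult 理解为缓冲区中是否已有有效结果(0 或 1);具体语义请以官方示例和 taosudf.h 为准。

```c
#include <stdint.h>
#include <string.h>
#include "taosudf.h"  // 假设:SUdfDataBlock、SUdfInterBuf 在此定义

// udf_start:初始化中间结果(计数清零,此时尚无结果)
int32_t udf_start(SUdfInterBuf* interBuf) {
  *(int64_t*)interBuf->buf = 0;
  interBuf->bufLen = sizeof(int64_t);
  interBuf->numOfResult = 0;
  return 0;
}

// udf:用一个行数据块更新中间结果
int32_t udf(SUdfDataBlock* inputBlock, SUdfInterBuf* interBuf, SUdfInterBuf* newInterBuf) {
  int64_t count = *(int64_t*)interBuf->buf;
  count += inputBlock->numOfRows;
  *(int64_t*)newInterBuf->buf = count;
  newInterBuf->bufLen = sizeof(int64_t);
  newInterBuf->numOfResult = 1;
  return 0;
}

// udf_finish:由中间结果产生最终结果,最终结果只含一条数据
int32_t udf_finish(SUdfInterBuf* interBuf, SUdfInterBuf* result) {
  memcpy(result->buf, interBuf->buf, sizeof(int64_t));
  result->bufLen = sizeof(int64_t);
  result->numOfResult = 1;
  return 0;
}
```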
### 最终计算
### UDF 初始化和销毁
`int32_t udf_init()`

用户可以按下面的函数模板实现自己的函数对计算结果进行最终计算,通常用于有 interBuf 使用的场景。
`int32_t udf_destroy()`

`void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf)`

其中 udfFinalizeFunc 是函数名的占位符,其中各参数的具体含义是:
- dataOutput:输出数据的缓冲区。
- interBuf:中间结算结果缓冲区,可作为输入。
- numOfOutput:输出数据的个数,对聚合函数来说只能是 0 或者 1。
- buf:用于在 UDF 与引擎间的状态控制信息传递块。

## UDF 实现方式的规则总结

三类 UDF 函数: udfNormalFunc、udfMergeFunc、udfFinalizeFunc ,其函数名约定使用相同的前缀,此前缀即 udfNormalFunc 的实际函数名,也即 udfNormalFunc 函数不需要在实际函数名后添加后缀;而udfMergeFunc 的函数名要加上后缀 `_merge`、udfFinalizeFunc 的函数名要加上后缀 `_finalize`,这是 UDF 实现规则的一部分,系统会按照这些函数名后缀来调用相应功能。

根据 UDF 函数类型的不同,用户所要实现的功能函数也不同:

- 标量函数:UDF 中需实现 udfNormalFunc。
- 聚合函数:UDF 中需实现 udfNormalFunc、udfMergeFunc(对超级表查询)、udfFinalizeFunc。
其中 udf 是函数名的占位符。udf_init 完成初始化工作。udf_destroy 完成清理工作。

:::note
如果对应的函数不需要具体的功能,也需要实现一个空函数。

:::
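按照上面的说明,即使不需要额外的初始化或清理逻辑,也应提供空实现,示意如下(udf 为函数名占位符):

```c
#include "taosudf.h"  // 假设:UDF 头文件

// 无需特殊处理时的空实现
int32_t udf_init() { return 0; }

int32_t udf_destroy() { return 0; }
```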
### UDF 数据结构

```c
typedef struct SUdfColumnMeta {
  int16_t type;
  int32_t bytes;
  uint8_t precision;
  uint8_t scale;
} SUdfColumnMeta;

typedef struct SUdfColumnData {
  int32_t numOfRows;
  int32_t rowsAlloc;
  union {
    struct {
      int32_t nullBitmapLen;
      char *nullBitmap;
      int32_t dataLen;
      char *data;
    } fixLenCol;

    struct {
      int32_t varOffsetsLen;
      int32_t *varOffsets;
      int32_t payloadLen;
      char *payload;
      int32_t payloadAllocLen;
    } varLenCol;
  };
} SUdfColumnData;

typedef struct SUdfColumn {
  SUdfColumnMeta colMeta;
  bool hasNull;
  SUdfColumnData colData;
} SUdfColumn;

typedef struct SUdfDataBlock {
  int32_t numOfRows;
  int32_t numOfCols;
  SUdfColumn **udfCols;
} SUdfDataBlock;

typedef struct SUdfInterBuf {
  int32_t bufLen;
  char* buf;
  int8_t numOfResult; //zero or one
} SUdfInterBuf;
```

为了更好的操作以上数据结构,提供了一些便利函数,定义在 taosudf.h。
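为便于理解上述结构,下面是一个读取定长列(fixLenCol)中某行 INT 值及其 NULL 标记的示意函数。注意:空值位图的具体位序在此为假设(每字节最高位对应该字节覆盖的第一行),编写实际 UDF 时应以 taosudf.h 提供的便利函数为准。

```c
#include <stdbool.h>
#include <stdint.h>
#include "taosudf.h"  // 假设:SUdfColumn 等结构在此定义

// 示意:判断定长列第 row 行是否为 NULL(位序为假设,实际请用 taosudf.h 的便利函数)
static bool demoRowIsNull(const SUdfColumn* col, int32_t row) {
  if (!col->hasNull || col->colData.fixLenCol.nullBitmap == NULL) {
    return false;
  }
  uint8_t byte = (uint8_t)col->colData.fixLenCol.nullBitmap[row >> 3];
  return (byte & (1u << (7 - (row & 7)))) != 0;
}

// 示意:读取定长列第 row 行的 INT 值(调用前应确认该行不为 NULL 且列类型为 INT)
static int32_t demoGetInt(const SUdfColumn* col, int32_t row) {
  return ((const int32_t*)col->colData.fixLenCol.data)[row];
}
```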
## 编译 UDF

用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为动态链接库,之后才能载入 TDengine 系统。
@@ -0,0 +1,481 @@
---
sidebar_label: 错误码
title: TDengine C/C++ 连接器错误码
---

本文中详细列举了在使用 TDengine C/C++ 连接器时客户端可能得到的错误码以及所要采取的相应动作。其它语言的连接器在使用原生连接方式时也会将所得到的返回码返回给连接器的调用者。

| **Error Code** | **说明** | 如何处理错误 |
| -------------- | ------------------ | ------------ |
| 0 | 请求处理成功 | None |
|
||||
| -1 | 请求失败,未知原因 | TODO |
|
||||
| 0x0003 | RPC 认证失败 | TODO |
|
||||
| 0x0004 | RPC 重定向 |TODO|
|
||||
| 0x000B | 无法建立连接 |TODO|
|
||||
| 0x0015 | FQDN 解析失败 |检查各个dnode配置的FQDN是否正确,并且能够从其它节点 ping 到 |
|
||||
| 0x0017 | 端口已经被占用 | 检查所配置的端口被哪个进程占用,关闭该进程以释放端口;或者更改配置使用另一端口号 |
|
||||
| 0x0018 | 连接被断开 | 检查 dnode 进程是否还在,如果无异常检查网络情况 |
|
||||
| 0x0013 | 客户端和服务端的时间未同步 | 检查客户端和服务端的时间设置是否同步|
|
||||
| 0x0014 | 数据库不可用 |检查数据库是否存在,检查数据库的vgroups的状态 |
|
||||
| 0x0100 | 该操作不支持 | 参考SQL手册,使用正确的操作 |
|
||||
| 0x0102 | 无法分配内存,系统内存耗尽 | 检查系统中内存被耗尽的原因,采取措施释放内存。如果内存是被 dnode 耗尽的话重启该进程 |
|
||||
| 0x0104 | 文件被破坏 | TODO |
|
||||
| 0x0111 | 操作在进行中 | 等待该操作完成 |
|
||||
| 0x0115 | 无效的消息 | TODO |
|
||||
| 0x0116 | 无效的消息长度 | TODO |
|
||||
| 0x0117 | 无效指针 | TODO |
|
||||
| 0x0118 | 无效参数 | |
|
||||
| 0x0119 | 无效配置 | 检查配置参数的值是否在合法值域范围内 |
|
||||
| 0x011A | 无效选项 | 检查配置中是否有不支持的无效配置项 |
|
||||
| 0x011B | 无效的JSON 格式 | 修正插入数据中的JSON值|
|
||||
| 0x011C | 无效版本号 | 检查客户端版本是否匹配,更换正确的客户端 |
|
||||
| 0x011D | 无效版本信息 | 检查客户端版本是否匹配,更换正确的客户端 |
|
||||
| 0x011E | 版本不匹配 | TODO |
|
||||
| 0x011F | 校验和错误 | TODO |
|
||||
| 0x0120 | 数据压缩失败 | TODO |
|
||||
| 0x0121 | 消息未处理 | TODO |
|
||||
| 0x0122 | 配置未找到 | TODO|
|
||||
| 0x0123 | 重复初始化 | TODO |
|
||||
| 0x0124 | 无法将重复的key加入hash| TODO |
|
||||
| 0x0125 | 需要重试 | 重试 |
|
||||
| 0x0126 | RPC 队列内存耗尽 | TODO |
|
||||
| 0x0127 | 无效的时间戳 | 修正插入数据 |
|
||||
| 0x0128 | 消息解码失败 | 检查客户端版本是否匹配和兼容 |
|
||||
| 0x0129 | 磁盘耗尽 | TODO |
|
||||
| 0x012A | TODO | TODO |
|
||||
| 0x0200 | 无效操作 | 检查SQL命令,根据SQL手册使用正确的操作|
|
||||
| 0x0201 | 无效的查询句柄 | TODO |
|
||||
| 0x0202 | 无效的客户端/服务端时间组合 | TODO |
|
||||
| 0x0203 | 无效值 | TODO |
|
||||
| 0x0204 | 无效的客户端版本 | 检查客户端版本是否匹配,更换为正确的客户端版本 |
|
||||
| 0x0205 | TODO | TODO |
|
||||
| 0x0206 | 无效的 FQDN | 检查服务器 fqdn 配置是否正确,是否能够从其它服务器访问到该 fqdn |
|
||||
| 0x0207 | 用户名长度过长 | 检查用户名长度是否过长 |
|
||||
| 0x0208 | 密码长度过长 | 检查密码长度是否过长 |
|
||||
| 0x0209 | 数据库名字长度过长 | 检查数据库名是否过长 |
|
||||
| 0x020A | 表名长度过长 | 检查表名是否过长 |
|
||||
| 0x020B | 无效连接 | 检查该连接是否已经断开 |
|
||||
| 0x020C | 系统内存耗尽 | 检查系统中内存被耗尽的原因,尝试释放出内存 |
|
||||
| 0x020D | 磁盘空间耗尽 | 检查系统中磁盘耗尽的原因,尝试释放或增加磁盘空间 |
|
||||
| 0x020E | 查询缓存被清除 |TODO |
|
||||
| 0x020F | 查询已经被结束 | 尝试优化查询条件再重启查询 |
|
||||
| 0x0210 | 结果集过大无法存储 | 尝试优化查询条件,缩小结果集,再重启查询 |
|
||||
| 0x0211 | 数据库不可用 | 查看数据库是否存在,查看数据库的 vgroups 状态 |
|
||||
| 0x0212 | 操作正在进行中 | 等待操作完成 |
|
||||
| 0x0213 | 连接被服务端断开 | 查看服务端进程是否发生 crash 等异常终止情况 |
|
||||
| 0x0214 | 没有写权限 | 只进行读操作或者尝试获取写权限 |
|
||||
| 0x0215 | 连接被 kill | 重新建立连接 |
|
||||
| 0x0216 | SQL 语法错误 | 参考 SQL手册,纠正语法错误再重试 |
|
||||
| 0x0217 | 未指定数据库或者指定的数据库不可用 | 指定数据库,或者检查指定数据库的状态 |
|
||||
| 0x0218 | 所查询的表不存在 | 确认表名后纠正查询语句 |
|
||||
| 0x0219 | SQL 语句超长 | 根据 maxSQLLength 缩短SQL语句或者加大maxSQLLength (如果还未配置到上限)|
|
||||
| 0x021A | 空文件 | TODO |
|
||||
| 0x021B | Line 协议语法错误 | 纠正插入语句 |
|
||||
| 0x021C | 元数据未被缓存 | TODO |
|
||||
| 0x021D | 重复的列名 | 纠正SQL语句中的相应错误 |
|
||||
| 0x021E | tag 过长 | 纠正过长的 tag |
|
||||
| 0x021F | 列名过长 | 纠正过长的列名 |
|
||||
| 0x0220 | 重复的表名**TODO** | TODO|
|
||||
| 0x0221 | JSON 格式错误 | 纠正错误的 JSON 结构 |
|
||||
| 0x0222 | JSON 中使用了无效的数据类型 | 纠正 JSON 结构中的错误 |
|
||||
| 0x0224 | 超出了所支持的值域 | 纠正到值域范围内 |
|
||||
| 0x0229 | 无效的 tsc 输入 **TODO** | TODO |
|
||||
| 0x022A | stmt API 使用错误 | 根据参考手册正确使用 |
|
||||
| 0x022B | stmt 使用时未指定表名 | 指定表名 |
|
||||
| 0x022C | 不支持 stmt 子名 | 根据参考手册纠正错误用法 |
|
||||
| 0x022D | 查询被 kill | 优化查询语句,尽量减小计算量和结果集,然后重新启动查询 |
|
||||
| 0x022E | 在当前配置的查询策略下没有可用的计算节点 | 创建新的 qnode |
|
||||
| 0x022F | 所指定的表不是超级表 | 确认下该查询场景适用于超级表还是子表/普通表,如果是前者则纠正为超级表名 |
|
||||
| 0x0303 | 没有权限进行所发起的操作 | 申请权限或调整操作 |
|
||||
| 0x0304 | 管理节点内部错误 | TODO |
|
||||
| 0x0305 | 无效连接 | TODO |
|
||||
| 0x030B | 所要展示的操作其数据已经因为超时而被删除 | 更换想要展示操作或者放弃此次操作 |
|
||||
| 0x030C | 无效的查询ID| 确认正确的查询ID再重新发起 |
|
||||
| 0x030D | 无效的流ID| 确认正确的流ID再重新发起|
|
||||
| 0x030E | 无效的连接ID| 确认正确的连接ID再重新发起|
|
||||
| 0x0310 | mnode已经在运行 | 无须采取任何动作 |
|
||||
| 0x0311 | 配置同步失败 | TODO |
|
||||
| 0x0312 | 无法启动同步 | TODO |
|
||||
| 0x0313 | 无法创建 mnode 对应的目录 | 确认磁盘上是否有可用空间以及是否有相应的写权限 |
|
||||
| 0x0314 | 启动组件失败 | TODO |
|
||||
| 0x0315 | 用户帐号被禁用 | 联系管理员激活该帐号 |
|
||||
| 0x0320 | 元数据中已经存在所要创建的对象 | 检查所要创建的对象,比如超级表或表,是否已经存在 |
|
||||
| 0x0321 | 元数据库中非预期的一般性错误 | TODO |
|
||||
| 0x0322 | 无效的表类型 | TODO |
|
||||
| 0x0323 | 所要查找的对象不存在 | TODO |
|
||||
| 0x0325 | 无效的 key 类型 | TODO |
|
||||
| 0x0326 | 无效的动作类型 | TODO |
|
||||
| 0x0327 | 无效的状态类型 | TODO |
|
||||
| 0x0328 | 无效的原始数据版本 | TODO |
|
||||
| 0x0329 | 无效的原始数据长度 | TODO |
|
||||
| 0x032A | 无效的原始数据内容 | TODO |
|
||||
| 0x032B | 无效的 wal 版本 | TODO |
|
||||
| 0x032C | 对象创建中 | TODO |
|
||||
| 0x032D | 对象停止中 | TODO |
|
||||
| 0x0330 | dnode 已经存在 | 无需任何动作,放弃重复创建dnode的操作 |
|
||||
| 0x0331 | dnode 不存在 | 确认所要查询或者操作的dnode ID 或者 end point 是否正确 |
|
||||
| 0x0332 | vgroup 不存在 | 确认所要查询或者操作的vgroup ID 是否正确 |
|
||||
| 0x0333 | 系统拒绝 drop 其角色是 leader 的 mnode | 放弃该操作 |
|
||||
| 0x0334 | 没有足够的 dnode 创建所指定的 vgroups | 增加 dnode 或者修改现有dnode的配置参数 `supportVgroups` |
|
||||
| 0x0335 | 集群中各个dnode的配置不一致 | 检查各个dnode的配置参数确保其一致 |
|
||||
| 0x0338 | 所要查询或操作的vgroup不在所指定的dnode中| 检查vgroup ID 和 dnode ID是否正确 |
|
||||
| 0x0339 | 所要查询或操作的 vgroup 已经在所指定的dnode中 | 检查 vgroup ID 和 dnode ID是否正确 |
|
||||
| 0x033B | 集群 ID 不匹配 | TODO |
|
||||
| 0x0340 | 该帐户已经存在 | 放弃重复创建帐户的操作 |
|
||||
| 0x0342 | 无效的帐户选项 | 检查创建帐户时的参数选项是否正确 |
|
||||
| 0x0343 | 帐户授权已经过期 | 联系管理员重新授权 |
|
||||
| 0x0344 | 无效帐户 | 联系管理员确认帐户 |
|
||||
| 0x0345 | 操作的帐户过多,无法支持 | 减少同时操作的帐户数 |
|
||||
| 0x0350 | 用户已经存在 | 放弃重复创建用户的操作 |
|
||||
| 0x0351 | 无效用户 | 检查并确认正确用户 |
|
||||
| 0x0352 | 无效的用户名格式 | 查看参考手册修改用户名 |
|
||||
| 0x0353 | 无效的密码格式 | 查看参考手册修改密码 |
|
||||
| 0x0354 | 无法从连接中获取用户名 | 检查客户端环境初始化是否使用了正确的用户名 |
|
||||
| 0x0355 | 一次尝试操作的用户过多 | 查看参考手册减少同时操作的用户数 |
|
||||
| 0x0356 | 无效的修改操作 | 查看参考手册使用正确的操作 |
|
||||
| 0x0357 | 认证失败 | 使用正确的用户名和密码 |
|
||||
| 0x0360 | 要创建的超级表已经存在 | 放弃重复创建的操作或者删除该超级表再重新创建 |
|
||||
| 0x0362 | 所使用或查询的超级表不存在 | 确认超级表名是否正确,如果正确则需要先创建超级表 |
|
||||
| 0x0364 | 标签过多 | 查看参考手册减少标签数量 |
|
||||
| 0x0365 | 列过多 | 查看参考手册减少列数量 |
|
||||
| 0x0369 | 要添加的标签已经存在 | 修改标签名 |
|
||||
| 0x036A | 要查询或修改的标签不存在 | 确认标签名是否正确 |
|
||||
| 0x036B | 要添加的列已经存在 | 修改列名或者放弃该操作 |
|
||||
| 0x036C | 要查询或修改的列不存在 | 确认列名是否正确 |
|
||||
| 0x036E | 无效的超级表操作 | 查看参考手册进行正确的操作 |
|
||||
| 0x036F | 错误的行字节数 | TODO |
|
||||
| 0x0370 | 无效的函数名 | 确认函数名是否正确 |
|
||||
| 0x0372 | 无效的函数代码 | 无效的函数编码 |
|
||||
| 0x0373 | 该函数已经存在 | 修改函数名或者放弃该操作|
|
||||
| 0x0374 | 所引用的函数不存在 | 确认函数名是否正确 |
|
||||
| 0x0375 | 无效的 bufSize | 查看参考手册修改 bufSize |
|
||||
| 0x0378 | 无效的函数注释 | 查看参考手册修改函数注释 |
|
||||
| 0x0379 | 无效的函数检索消息 | TODO |
|
||||
| 0x0380 | 未指定数据库或者指定的数据库不可用 | 指定数据库,或者检查所指定的数据库的状态 |
|
||||
| 0x0381 | 数据库已经存在 | 放弃重复创建,或者修改数据库名 |
|
||||
| 0x0382 | 无效的数据库参数 | 查看参考手册使用正确的参数 |
|
||||
| 0x0383 | 无效的数据库名称 | 查看参考手册使用正确的数据库名 |
|
||||
| 0x0385 | 该帐号下的数据库过多 | 删除旧的数据库再尝试创建新数据库 |
|
||||
| 0x0388 | 数据库不存在 | 确认数据库名是否正确 |
|
||||
| 0x0389 | 无效的数据库帐户 | 确认帐户是否正确 |
|
||||
| 0x038A | 数据库参数未修改 | 查看参考手册确认修改的参数和值是否正确 |
|
||||
| 0x038B | 索引不存在 |确认索引名称是否正确 |
|
||||
| 0x039A | 无效的系统表名 | 查看参考手册确认表名是否正确 |
|
||||
| 0x03A0 | mnode 已经存在 | 放弃该操作 |
|
||||
| 0x03A1 | mnode 不存在 | 确认要查看或操作的 mnode ID |
|
||||
| 0x03A2 | qnode 已经存在 | 放弃该操作 |
|
||||
| 0x03A3 | qnode 不存在 | 确认要查看或操作的 qnode ID 是否正确 |
|
||||
| 0x03A8 | mnode 的 replica 不能小于1 | 停止 drop mnode |
|
||||
| 0x03A9 | mnode 的 replica 不能大于3 | 停止 create mnode |
|
||||
| 0x03B0 | dnode 数量过多 | 停止添加新的 dnode |
|
||||
| 0x03B1 | dnode 没有足够的可用内存 | 检查所在系统的内存使用情况 ,尝试释放出内存 |
|
||||
| 0x03B2 | 无效的 dnode 配置 | 查看参考手册纠正配置 |
|
||||
| 0x03B3 | 无效的 dnode 地址 | 确认 dnode 的 FQDN 和 serverPort参数是否正确 |
|
||||
| 0x03B4 | 无效的 dnode ID | 确认正确的 dnode ID |
|
||||
| 0x03B5 | vgroup 的分布未发生变化 | TODO |
|
||||
| 0x03B6 | 存在状态为 offline 的 dnode | drop 这些 dnode 或者启动相应的 dnode 使其状态为 ready |
|
||||
| 0x03B7 | 无效的 vgroup 副本 | TODO |
|
||||
| 0x03C0 | topic 与超级表冲突 | TODO |
|
||||
| 0x03C1 | 订阅了过多的超级表 | 查看参考手册减少超级表数量 |
|
||||
| 0x03C2 | 无效的 超级表修改参数 | 查看参考手册进行纠正 |
|
||||
| 0x03C3 | 超级表参数未被修改 | 查看参考手册确认参数是否正确 |
|
||||
| 0x03C4 | 该字段被某个主题所使用 | TODO |
|
||||
| 0x03C5 | 该数据库是单超级表模式 | 修改数据库为多超级表模式或者放弃创建新的超级表 |
|
||||
| 0x03C6 | 修改超级表使用了无效的 schema 版本 | TODO |
|
||||
| 0x03C7 | 修改超级表使用了无效的超级表 ID | 确认超级表使用是否正确 |
|
||||
| 0x03C8 | 该字段被 tsma 所使用 | TODO |
|
||||
| 0x03D0 | 该事务已经存在 | TODO |
|
||||
| 0x03D1 | 该事务不存在 | TODO |
|
||||
| 0x03D2 | 要 kill 的 stage 不存在 | TODO |
|
||||
| 0x03D3 | 冲突的事务没有完成 | TODO |
|
||||
| 0x03D4 | 未知的事务错误 | TODO |
|
||||
| 0x03D5 | 事务提交日志已满 | TODO |
|
||||
| 0x03DF | 在执行事务时无法建立连接 | 等待事务完成尝试重新建立连接 |
|
||||
| 0x03E0 | Topic 已经存在 | 修改 topic 名字或者放弃创建重复的 topic |
|
||||
| 0x03E1 | Topic 不存在 | 确认 Topic 名字是否正确 |
|
||||
| 0x03E2 | Topic 过多 | 尝试删除不用的 topic 再建立新的,或者放弃此次操作 |
|
||||
| 0x03E3 | 无效的 Topic | 确认 Topic 是否正确 |
|
||||
| 0x03E4 | 建立 Topic 的查询子名无效 | 查看参考手册纠正查询子名 |
|
||||
| 0x03E5 | 建立 Topic 的参数无效 | 查看参考手册使用正确的参数 |
|
||||
| 0x03E6 | 消费者不存在 | 确认正确的消费者 ID |
|
||||
| 0x03E7 | 消费者未修改 | TODO |
|
||||
| 0x03E8 | 订阅不存在 | 确认正确的订阅 ID |
|
||||
| 0x03E9 | 偏移量不存在 | 纠正偏移量 |
|
||||
| 0x03EA | 消费者不可用 | TODO |
|
||||
| 0x03EB | 无法删除已经被订阅的 Topic | 先取消订阅再尝试删除 |
|
||||
| 0x03EC | Consumer group正在被某些消费者使用 | TODO |
|
||||
| 0x03F0 | 流已经存在 | 修改流名称或者放弃创建该流 |
|
||||
| 0x03F1 | 要查询或操作的流不存在 | 确认正确的流 ID |
|
||||
| 0x03F2 | 无效的流参数 | 查看参考手册纠正错误的参数 |
|
||||
| 0x0480 | SMA 已经存在 | 修改 SMA 名称或者放弃创建 |
|
||||
| 0x0481 | SMA 不存在 | 确认正确的 SMA 名称或者 ID |
|
||||
| 0x0482 | SMA 参数错误 | 查看参考手册纠正参数 |
|
||||
| 0x0408 | 节点不在线 | TODO |
|
||||
| 0x0409 | 节点已经部署 | TODO |
|
||||
| 0x040A | 节点未部署 | TODO |
|
||||
| 0x0500 | 该动作作在进行中| TODO |
|
||||
| 0x0501 | 消息未被处理 | TODO |
|
||||
| 0x0502 | 该动作需要被重新处理 | TODO |
|
||||
| 0x0503 | 无效的 vgroup ID | 检查确认正确的 vgroups ID |
|
||||
| 0x0504 | vnode 初始化失败 | TODO |
|
||||
| 0x0505 | 系统磁盘空间耗尽 | 尝试释放或者增加磁盘空间 |
|
||||
| 0x0506 | 对磁盘文件没有写权限 | 检查启动 TDengine 的系统帐号的写权限 |
|
||||
| 0x0507 | 数据文件缺失 | TODO |
|
||||
| 0x0508 | vnode 没有可用内存 | TODO |
|
||||
| 0x0509 | vnode 中未预期的一般性错误 | TODO |
|
||||
| 0x050C | 数据库无空闲内存 | TODO |
|
||||
| 0x050D | 数据库正在删除中 | TODO |
|
||||
| 0x050E | 数据库正在更新中 | TODO |
|
||||
| 0x0510 | 数据库正在关闭中 | TODO |
|
||||
| 0x0511 | 数据库被暂停操作 | TODO |
|
||||
| 0x0512 | 数据库写操作被拒绝 | 检查用户权限,申请写操作授权 |
|
||||
| 0x0513 | 数据库正在同步中 | TODO |
|
||||
| 0x0514 | 无效的 tsdb 状态 | TODO |
|
||||
| 0x0520 | 指定的表不存在 | 检查确认正确的表名 |
|
||||
| 0x0521 | 指定的SMA 不存在 | 检查确认正确的 SMA名称 |
|
||||
| 0x0522 | Hash 值不匹配 | TODO |
|
||||
| 0x0523 | 指定的表不存在 | 检查确认正确的表名 |
|
||||
| 0x0524 | 无效的表动作 | TODO |
|
||||
| 0x0525 | 列名已经存在 | 修改列名或放弃操作 |
|
||||
| 0x0526 | 列名不存在 | 确认正确的列名 |
|
||||
| 0x0527 | 该列已经被订阅 | 先取消订阅再操作或者放弃操作 |
|
||||
| 0x0528 | 无效的配置文件 | 检查配置文件的路径和访问权限 |
|
||||
| 0x0529 | 无效的 term 文件 | TODO |
|
||||
| 0x0600 | 无效的表 ID | 确认表名是否正确 |
|
||||
| 0x0601 | 无效的表 类型 | TODO |
|
||||
| 0x0602 | 无效的 schema 版本 | TODO |
|
||||
| 0x0603 | 表已经存在 | 修改表名或放弃操作 |
|
||||
| 0x0604 | 配置无效 | 查看参考手册纠正配置 |
|
||||
| 0x0605 | TSDB 初始化失败 | TODO |
|
||||
| 0x0606 | 磁盘空间耗尽 | 查看磁盘空间耗尽的原因,尝试释放或增加磁盘空间 |
|
||||
| 0x0607 | 磁盘文件没有访问权限 | 确认启动集群的系统帐户是否有相应的写权限 |
|
||||
| 0x0608 | 数据文件被破坏 | TODO |
|
||||
| 0x0609 | 内存耗尽 | 检查内存被耗尽的原因,尝试释放内存 |
|
||||
| 0x060A | 标签版本过老 | TODO |
|
||||
| 0x060B | 时间戳不在允许范围内 | 查看参考手册了解允许写入的时间戳规则 |
|
||||
| 0x060C | 提交消息被破坏 | TODO |
|
||||
| 0x060D | 无效操作 | TODO |
|
||||
| 0x060E | 建表消息无效 | TODO |
|
||||
| 0x060F | 内存跳表中没有表的数据 | TODO |
|
||||
| 0x0610 | 文件已经存在 | TODO |
|
||||
| 0x0611 | 需要重新配置该表 | TODO |
|
||||
| 0x0612 | 建表的信息无效 | TODO |
|
||||
| 0x0613 | 磁盘空间耗尽 | 尝试释放或增加磁盘空间 |
|
||||
| 0x0614 | 消息被破坏 |TODO |
|
||||
| 0x0615 | 无效的标签值 | 修正标签值 |
|
||||
| 0x0616 | 未缓存最后一行的原始数据 | 修改数据库的 cacheModel 参数 |
|
||||
| 0x0618 | 该表不存在 | 检查表名是否正确 |
|
||||
| 0x0619 | 超级表已经存在 | 修改超级表名再次尝试 |
|
||||
| 0x061A | 超级表不存在 | 检查超级表名是否正确 |
|
||||
| 0x061B | 表被重新创建 | TODO |
|
||||
| 0x061C | TDB 环境打开错误 | N/A |
|
||||
| 0x0700 | 无效的查询句柄 | N/A |
|
||||
| 0x0701 | 无效的消息 | TODO |
|
||||
| 0x0702 | 磁盘空间耗尽 | 尝试释放或增加磁盘空间 |
|
||||
| 0x0703 | 系统内存耗尽 | 尝试释放内存 |
|
||||
| 0x0704 | 未知错误 | TODO |
|
||||
| 0x0705 | 重复的 Join Key | 修正查询语句中的 Join Key |
|
||||
| 0x0706 | 标签过滤条件过多 | 减小查询语句中的标签过滤条件 |
|
||||
| 0x0707 | 查询不可用 | TODO |
|
||||
| 0x0708 | TODO | TODO |
|
||||
| 0x0709 | TODO | TODO |
|
||||
| 0x070A | 查询中的时间窗口过多 | 修改查询语句以减小时间窗口的数量 |
|
||||
| 0x070B | 查询缓冲区达到上限 | TODO |
|
||||
| 0x070C | 多副本数据不一致 | TODO |
|
||||
| 0x070D | 系统错误 | TODO |
|
||||
| 0x070E | 无效的时间范围 | 修正查询语句中的时间范围 |
|
||||
| 0x070F | 无效输入 | 修正查询语句 |
|
||||
| 0x0720 | 调度器不存在 | TODO |
|
||||
| 0x0721 | 任务不存在 | TODO |
|
||||
| 0x0722 | 任务已经存在 | TODO |
|
||||
| 0x0723 | 任务上下文不存在 | TODO |
|
||||
| 0x0724 | 任务被取消 | TODO |
|
||||
| 0x0725 | 任务被停止 | TODO |
|
||||
| 0x0726 | 任务正在取消中 | TODO |
|
||||
| 0x0727 | 任务正在停止中 | TODO |
|
||||
| 0x0728 | 重复操作 | TODO |
|
||||
| 0x0729 | 任务消息错误 | TODO |
|
||||
| 0x072A | 作业已经被释放 | TODO |
|
||||
| 0x072B | 任务状态错误 | TODO |
|
||||
| 0x072C | in 和 not in 操作符不支持 JSON 类型 | 修正查询语句 |
|
||||
| 0x072D | 此处不支持 JSON |修正查询语句 |
|
||||
| 0x072E | group 和 partition by 不支持 JSON 类型 |
|
||||
| 0x072F | 查询作业不存在 | TODO |
|
||||
| 0x0800 | License 已经过期 | 重新激活或获取 License |
|
||||
| 0x0801 | 受限于 License 无法创建 dnode | 获取新的License |
|
||||
| 0x0802 | 受限于 License 无法创建帐户 | 获取新的 License |
|
||||
| 0x0803 | 受限于 License 无法创建表 | 获取新的 License |
|
||||
| 0x0804 | 受限于 License 无法创建数据库 | 获取新的 License |
|
||||
| 0x0805 | 受限于 License 无法创建用户 | 获取新的 License |
|
||||
| 0x0806 | 受限于 License 无法创建连接 | 获取新的 License |
|
||||
| 0x0807 | 受限于 License 无法创建流 | 获取新的 License |
|
||||
| 0x0808 | 写入速度受限于 License | 获取新的 License |
|
||||
| 0x0809 | 存储容量受限于 License | 获取新的 License |
|
||||
| 0x080A | 查询时间受限于 License | 获取新的 License |
|
||||
| 0x080B | CPU 核数受限于 License | 获取新的 License |
|
||||
| 0x080C | 受限于 License 无法创建超级表 | 获取新的 License |
|
||||
| 0x080D | 受限于 License 无法创建表 | 获取新的 License |
|
||||
| 0x0A00 | TQ 无效配置 | TODO |
|
||||
| 0x0A01 | TQ 初始化失败 | TODO |
|
||||
| 0x0A02 | TQ 磁盘空间耗尽 | 尝试释放或增加磁盘空间 |
|
||||
| 0x0A03 | TQ 没有写磁盘权限 | 确认启动集群的系统帐号是否具有写磁盘权限 |
|
||||
| 0x0A04 | TQ 文件被破坏 | TODO |
|
||||
| 0x0A05 | TQ 内存耗尽 | 尝试释放内存 |
|
||||
| 0x0A06 | TQ 文件已经存在 | TODO |
|
||||
| 0x0A07 | TQ 创建目录失败 | TODO |
|
||||
| 0x0A08 | TQ meta 中不存在该 key | TODO |
|
||||
| 0x0A09 | meta key在事务中不存在 | TODO |
|
||||
| 0x0A0A | meta key在事务中重复 | TODO |
|
||||
| 0x0A0B | 消费组不存在 | 指定正确的消费组 |
|
||||
| 0x0A0C | 该表的 schema 不存在 | 确认表名是否正确 |
|
||||
| 0x0A0D | 没有已经提交的 offset | TODO |
|
||||
| 0x1000 | WAL 未知错误 | TODO |
|
||||
| 0x1001 | WAL 文件被破坏 | TODO |
|
||||
| 0x1002 | WAL 大小超出上限 | TODO |
|
||||
| 0x1003 | WAL 使用了错误的版本号 | TODO |
|
||||
| 0x1004 | 系统内存耗尽 | 尝试释放内存 |
|
||||
| 0x1005 | WAL 日志不存在 | TODO |
|
||||
| 0x2201 | 无效的 mount 配置 | 修正 mount 配置参数 |
|
||||
| 0x2202 | mount 点过多 | TODO |
|
||||
| 0x2203 | 重复的 primary mount | TODO |
|
||||
| 0x2204 | primary mount 缺失 | TODO |
|
||||
| 0x2205 | no mount at tier: TODO | TODO |
|
||||
| 0x2206 | 文件已经存在 | 更改文件名或者删除该文件 |
|
||||
| 0x2207 | 无效的级别 | TODO |
|
||||
| 0x2208 | 没有可用磁盘 | TODO |
|
||||
| 0x220F | 系统内存耗尽 | TODO |
|
||||
| 0x2400 | catalog 内部错误 | TODO |
|
||||
| 0x2401 | 无效的 catalog 输入参数 | TODO |
|
||||
| 0x2402 | catalog 不可用 | TODO |
|
||||
| 0x2403 | catalog 系统错误 | TODO |
|
||||
| 0x2404 | 数据库被删除 | TODO |
|
||||
| 0x2405 | catalog 不可用 | TODO |
|
||||
| 0x2406 | 表元数据和 vgroup 不匹配 | TODO |
|
||||
| 0x2407 | catalog 不存在 | TODO |
|
||||
| 0x2550 | 无效的消息顺序 | TODO |
|
||||
| 0x2501 | 调度器状态错误 | TODO |
|
||||
| 0x2502 | 调度器内部错误 | TODO |
|
||||
| 0x2504 | 任务超时 | TODO |
|
||||
| 0x2505 | 作业正在停止中 | TODO |
|
||||
| 0x2600 | 语法错误 | 参考 SQL 手册纠正 |
|
||||
| 0x2601 | 不完整的 SQL 语句 | 参考 SQL 手册纠正 |
|
||||
| 0x2602 | 无效列名 | 使用正确的列名 |
|
||||
| 0x2603 | 表不存在 | 使用正确的表名 |
|
||||
| 0x2604 | 表名定义有二义性 | 参考 SQL 手册纠正 |
|
||||
| 0x2605 | 无效的值类型 | 参考 SQL 手册纠正 |
|
||||
| 0x2608 | 此处不能使用聚合查询 | 参考 SQL 手册纠正 |
|
||||
| 0x2609 | ORDER BY 只能用于查询语句中的结果列 | 参考 SQL 手册纠正 |
|
||||
| 0x260A | GROUP BY 缺失表达式 (TODO) | 参考 SQL 手册纠正 |
|
||||
| 0x260B | 不是 SELECT 表达式 | 参考 SQL 手册纠正 |
|
||||
| 0x260C | 不是单一分组的分组函数 (TODO) | 参考 SQL 手册纠正 |
|
||||
| 0x260D | 标签数量不匹配 | 参考 SQL 手册纠正 |
|
||||
| 0x260E | 无效的标签名 | 改用正确的标签名 |
|
||||
| 0x2610 | 名字或密码过长 | 参考 SQL 手册纠正 |
|
||||
| 0x2611 | 密码不能为空 | 提供非空密码 |
|
||||
| 0x2612 | 端口无效 | 端口号必须在 (0,65535) 范围内 |
|
||||
| 0x2613 | 地址格式错误 | 正确格式是 "fqdn: port" |
|
||||
| 0x2614 | 该语句不再支持 | 参考 SQL 手册纠正 |
|
||||
| 0x2615 | 时间窗口过小 | 参考 SQL 手册纠正 |
|
||||
| 0x2616 | 未指定数据库 | 在表名或超级表名前添加 "<dbname\>." 指定数据库 |
|
||||
| 0x2617 | 标识符无效 | 参考 SQL 手册纠正 |
|
||||
| 0x2618 | 该数据库中不存在对应的超级表 | 使用正确的数据库名或者超级表名 |
|
||||
| 0x2619 | 数据库参数无效 | 参考 SQL 手册纠正 |
|
||||
| 0x261A | 建表参数无效 | 参考 SQL 手册纠正 |
|
||||
| 0x2624 | GROUP BY 和 窗口子句不能共用 | 参考 SQL 手册纠正 |
|
||||
| 0x2627 | 聚合函数不支持嵌套 | 参考 SQL 手册纠正 |
|
||||
| 0x2628 | 在 integer/bool/varchar 类型的列上只支持 状态窗口 | 参考 SQL 手册纠正 |
|
||||
| 0x2629 | 标签列上不支持状态窗口 | 参考 SQL 手册纠正 |
|
||||
| 0x262A | 状态窗口查询不支持超级表 | 参考 SQL 手册纠正 |
|
||||
| 0x262B | 会话之间的 gap 应该是大于 0 的固定大小的窗口 | 参考 SQL 手册纠正 |
|
||||
| 0x262C | 只在主键时间戳列上支持会话 | 参考 SQL 手册纠正 |
|
||||
| 0x262D | 窗口偏移量不能是负值 | 参考 SQL 手册纠正 |
|
||||
| 0x262E | 当 interval 的单位是 "year" 时 offset 的单位不能是 "month" | 参考 SQL 手册纠正 |
|
||||
| 0x262F | offset 所指定的时间长度应该小于 interval 所指定的时间长度 | 参考 SQL 手册纠正 |
|
||||
| 0x2630 | 当 interval 是自然年/月时不能使用 slidig | 参考 SQL 手册纠正 |
|
||||
| 0x2631 | sliding 所指定的时间长度不能大于 interval 所指定的时间长度 | 参考 SQL 手册纠正 |
|
||||
| 0x2632 | sliding 不能小于 interval 的 1%% | 参考 SQL 手册纠正 |
|
||||
| 0x2633 | 当使用 JSON 类型的 tag 时只允许这一个 tag 的存在 | 去除其它 tag |
|
||||
| 0x2634 | 查询块中包含的结果列的数量不正确 | TODO |
|
||||
| 0x2635 | 时间戳不正确 | TODO |
|
||||
| 0x2637 | offset/soffset 不能小于 0 | 纠正 offset/soffset |
|
||||
| 0x2638 | offset/soffset 只能用于 partition by | 参考 SQL 手册纠正 |
|
||||
| 0x2639 | 无效的 topic 查询 | TODO |
|
||||
| 0x263A | 不能批量删除超级表 | 请逐个删除 |
|
||||
| 0x263B | 查询时间范围未指定起止时间或者时间范围过大 | 参考 SQL 手册纠正 |
|
||||
| 0x263C | 重复的列表 | 参考 SQL 手册纠正 |
|
||||
| 0x263D | 标签长度超过上限 | 参考 SQL 手册纠正 |
|
||||
| 0x263E | 行长度超过上限 | 参考 SQL 手册纠正 |
|
||||
| 0x263F | 不合法的列数量 | 参考 SQL 手册纠正 |
|
||||
| 0x2640 | 列数过多 | 参考 SQL 手册纠正 |
|
||||
| 0x2641 | 首列必须是时间戳 | 参考 SQL 手册纠正 |
|
||||
| 0x2642 | binary/nchar 类型的列长度无效 | 参考 SQL 手册纠正 |
|
||||
| 0x2643 | 标签列数量无效 | 参考 SQL 手册纠正 |
|
||||
| 0x2644 | 无权进行该操作 | 参考 SQL 手册纠正 |
|
||||
| 0x2645 | 无效的流查询 | 参考 SQL 手册纠正 |
|
||||
| 0x2646 | 无效的 _c0 或 _rowts 表达式 | 参考 SQL 手册纠正 |
|
||||
| 0x2647 | 无效的时间线函数 | 参考 SQL 手册纠正 |
|
||||
| 0x2648 | 无效的密码 | 参考 SQL 手册纠正 |
|
||||
| 0x2649 | 无效的 alter table 语句 | 参考 SQL 手册纠正 |
|
||||
| 0x264A | 不能删除时间戳主列 | 参考 SQL 手册纠正 |
|
||||
| 0x264B | 只有 binary/nchar 类型的列能够修改长度 | 参考 SQL 手册纠正 |
|
||||
| 0x264C | 无效的 tbname 伪列 | 参考 SQL 手册纠正 |
|
||||
| 0x264D | 无效的函数名 | 参考 SQL 手册纠正 |
|
||||
| 0x264E | 注释过长 | 参考 SQL 手册纠正 |
|
||||
| 0x264F | 有些函数只能用在查询的 SELECT 列表中,且不能与其它非标量函数或列混用 | 参考 SQL 手册纠正 |
|
||||
| 0x2650 | 不支持窗口查询,因为子查询的结果不包含时间戳列 | 参考 SQL 手册纠正 |
|
||||
| 0x2651 | 任何列都不能被删除 | 参考 SQL 手册纠正 |
|
||||
| 0x2652 | 只有 标签列可以是 JSON 类型 | 参考 SQL 手册纠正 |
|
||||
| 0x2653 | 列或标签的值过长 | 参考 SQL 手册纠正 |
|
||||
| 0x2655 | DELETE 语句必须有一个确定的时间范围 | 参考 SQL 手册纠正 |
|
||||
| 0x2656 | REDISTRIBUTE VGROUP 语句只支持 1 到 3 个 vgroup | 参考 SQL 手册纠正 |
|
||||
| 0x2657 | 不支持 Fill | 参考 SQL 手册纠正 |
|
||||
| 0x2658 | 无效的窗口伪列 | 参考 SQL 手册纠正 |
|
||||
| 0x2659 | 不允许做窗口查询: TODO | 参考 SQL 手册纠正 |
|
||||
| 0x265A | 不允许做流计算: TODO | 参考 SQL 手册纠正 |
|
||||
| 0x265B | 不允许做 Group By | 参考 SQL 手册纠正 |
|
||||
| 0x265D | interp 子句错误 | 参考 SQL 手册纠正 |
|
||||
| 0x265E | 窗口查询中不支持该函数 | 参考 SQL 手册纠正 |
|
||||
| 0x265F | 只支持单表 | 参考 SQL 手册纠正 |
|
||||
| 0x2660 | 无效的 SMA 索引 | 参考 SQL 手册纠正 |
|
||||
| 0x2661 | 无效的 SELECT 表达式 | 参考 SQL 手册纠正 |
|
||||
| 0x2662 | 获取表的元数据失败 | TODO |
|
||||
| 0x2663 | 表名/表名不唯一 | 参考 SQL 手册纠正 |
|
||||
| 0x266F | 解析器内部错误 | TODO |
|
||||
| 0x2700 | 计划器内部错误 | TODO |
|
||||
| 0x2701 | TODO | TODO |
|
||||
| 0x2702 | 不支持 cross join | 参考 SQL 手册纠正 |
|
||||
| 0x2800 | 函数内部错误 | 参考 SQL 手册纠正 |
|
||||
| 0x2801 | 函数参数个数错误 | 参考 SQL 手册纠正 |
|
||||
| 0x2802 | 函数参数类型错误 | 参考 SQL 手册纠正 |
|
||||
| 0x2803 | 函数参数值错误 | 参考 SQL 手册纠正 |
|
||||
| 0x2804 | 非内置函数 | 参考 SQL 手册纠正 |
|
||||
| 0x2901 | UDF 正在停止 | TODO |
|
||||
| 0x2902 | UDF 管道读取错误 | TODO |
|
||||
| 0x2903 | UDF 连接错误 | TODO |
|
||||
| 0x2904 | UDF 管道缺失 | TODO |
|
||||
| 0x2905 | UDF 加载失败 | TODO |
|
||||
| 0x2906 | UDF 无效状态 | TODO |
|
||||
| 0x2907 | UDF 无效输入 | TODO |
|
||||
| 0x2908 | UDF 没有函数句柄 | TODO |
|
||||
| 0x2909 | UDF 无效的 bufsize | TODO |
|
||||
| 0x290A | UDF 无效的输出类型 | TODO |
|
||||
| 0x3000 | 无效的行协议类型 | 修正数据中的协议类型 |
|
||||
| 0x3001 | 无效的时间戳精度类型 | 修正时间戳精度类型 |
|
||||
| 0x3002 | 无效的数据格式 | 修正数据格式 |
|
||||
| 0x3003 | 无效的无模式数据库配置 | 修改配置 |
|
||||
| 0x3004 | 写入类型与之前的不同 | 修正写入类型 |
|
||||
| 0x3100 | TSMA 初始化失败 | TODO |
|
||||
| 0x3101 | TSMA 已经存在 | 放弃重复建立 TSMA |
|
||||
| 0x3102 | 元数据中没有 TSMA 索引 | TODO |
|
||||
| 0x3103 | 无效的 TSMA 环境 | TODO |
|
||||
| 0x3104 | 无效的 TSMA 状态 | TODO |
|
||||
| 0x3105 | 无效的 TSMA 指针 | TODO |
|
||||
| 0x3106 | 无效的 TSMA 参数 | 参考 SQL 手册纠正 |
|
||||
| 0x3107 | cache 中没有该 TSMA 的索引 | TODO |
|
||||
| 0x3150 | 无效的 RSMA 索引 | TODO |
|
||||
| 0x3151 | 无效的 RSMA 状态 | TODO |
|
||||
| 0x3152 | RSMA 创建 qtaskinfo 失败 | TODO |
|
||||
| 0x3153 | RSMA 文件被破坏 | TODO |
|
||||
| 0x3200 | 索引正在重建中 |TODO |
|
||||
| 0x3201 | 无效的索引文件 | TODO |
|
||||
| 0x4000 | 无效消息 | TODO |
|
|
@ -2,13 +2,18 @@
|
|||
|
||||
```text
|
||||
$ taos
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.0.5.0
|
||||
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
|
||||
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
Server is Community Edition.
|
||||
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB)| blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
|
||||
=========================================================================================================================================================================================================================
|
||||
test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16| 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
|
||||
log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1| 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
|
||||
Query OK, 2 row(s) in set (0.001198s)
|
||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
db | 2022-08-04 14:14:49.385 | 2 | 4 | 1 | off | 14400m | 5254560m,5254560m,5254560m | 96 | 4 | 256 | 100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
||||
Query OK, 3 rows in database (0.019154s)
|
||||
|
||||
taos>
|
||||
```
|
||||
|
|
|
@ -1,14 +1,19 @@
|
|||
在 cmd 下进入到 C:\TDengine 目录下直接执行 `taos.exe`,连接到 TDengine 服务,进入到 TDengine CLI 界面,示例如下:
|
||||
|
||||
```text
|
||||
C:\TDengine>taos
|
||||
Welcome to the TDengine shell from Linux, Client Version:2.0.5.0
|
||||
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
|
||||
taos> show databases;
|
||||
name | created_time | ntables | vgroups | replica | quorum | days | keep1,keep2,keep(D) | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | precision | status |
|
||||
===================================================================================================================================================================================================================================================================
|
||||
test | 2020-10-14 10:35:48.617 | 10 | 1 | 1 | 1 | 2 | 3650,3650,3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | ms | ready |
|
||||
log | 2020-10-12 09:08:21.651 | 4 | 1 | 1 | 1 | 10 | 30,30,30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | us | ready |
|
||||
Query OK, 2 row(s) in set (0.045000s)
|
||||
taos>
|
||||
Welcome to the TDengine shell from Windows, Client Version:3.0.0.0
|
||||
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||
|
||||
Server is Community Edition.
|
||||
|
||||
taos> show databases;
|
||||
name | create_time | vgroups | ntables | replica | strict | duration | keep | buffer | pagesize | pages | minrows | maxrows | comp | precision | status | retention | single_stable | cachemodel | cachesize | wal_level | wal_fsync_period | wal_retention_period | wal_retention_size | wal_roll_period | wal_seg_size |
|
||||
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
information_schema | NULL | NULL | 14 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
performance_schema | NULL | NULL | 3 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | ready | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
|
||||
test | 2022-08-04 16:46:40.506 | 2 | 0 | 1 | off | 14400m | 5256000m,5256000m,5256000m | 96 | 4 | 256 |
|
||||
100 | 4096 | 2 | ms | ready | NULL | false | none | 1 | 1 | 3000 | 0 | 0 | 0 | 0 |
|
||||
Query OK, 3 rows in database (0.123000s)
|
||||
|
||||
taos>
|
||||
```
|
||||
|
|
|
@@ -76,6 +76,13 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
 */
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type);

/**
 * @brief Cleanup SSDataBlock for StreamScanInfo
 *
 * @param tinfo
 */
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo);

/**
 * Update the table id list, add or remove.
 *
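For orientation, a minimal sketch (with a hypothetical helper name, not part of this commit) of how the new cleanup call is intended to pair with `qSetMultiStreamInput` when a caller feeds an `SSDataBlock` into a stream task; the rsma fetch-trigger change later in this commit follows the same pattern.

```c
#include "executor.h"  // assumed header providing these declarations (per this hunk)

// Illustrative only: demoPushOneBlock is a hypothetical caller-side helper.
static int32_t demoPushOneBlock(qTaskInfo_t taskInfo, SSDataBlock* pBlock) {
  // hand the block to the stream scan operator
  int32_t code = qSetMultiStreamInput(taskInfo, pBlock, 1, STREAM_INPUT__DATA_BLOCK);
  if (code != 0) {
    return code;
  }

  // ... execute the task and consume its output here ...

  // release the SSDataBlock copies held by the stream scan operator
  tdCleanupStreamInputDataBlock(taskInfo);
  return 0;
}
```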
@ -46,9 +46,10 @@ enum {
|
|||
};
|
||||
|
||||
enum {
|
||||
TASK_EXEC_STATUS__IDLE = 1,
|
||||
TASK_EXEC_STATUS__EXECUTING,
|
||||
TASK_EXEC_STATUS__CLOSING,
|
||||
TASK_SCHED_STATUS__INACTIVE = 1,
|
||||
TASK_SCHED_STATUS__WAITING,
|
||||
TASK_SCHED_STATUS__ACTIVE,
|
||||
TASK_SCHED_STATUS__FAILED,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -204,13 +205,11 @@ typedef struct {
|
|||
enum {
|
||||
TASK_SOURCE__SCAN = 1,
|
||||
TASK_SOURCE__PIPE,
|
||||
TASK_SOURCE__MERGE,
|
||||
};
|
||||
|
||||
enum {
|
||||
TASK_EXEC__NONE = 1,
|
||||
TASK_EXEC__PIPE,
|
||||
TASK_EXEC__MERGE,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@ -256,7 +255,7 @@ typedef struct SStreamTask {
|
|||
int16_t dispatchMsgType;
|
||||
|
||||
int8_t taskStatus;
|
||||
int8_t execStatus;
|
||||
int8_t schedStatus;
|
||||
|
||||
// node info
|
||||
int32_t selfChildId;
|
||||
|
@ -475,7 +474,6 @@ typedef struct {
|
|||
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
|
||||
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
|
||||
|
||||
int32_t streamLaunchByWrite(SStreamTask* pTask, int32_t vgId);
|
||||
int32_t streamSetupTrigger(SStreamTask* pTask);
|
||||
|
||||
int32_t streamProcessRunReq(SStreamTask* pTask);
|
||||
|
@ -487,6 +485,9 @@ int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp)
|
|||
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
|
||||
int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
|
||||
|
||||
int32_t streamTryExec(SStreamTask* pTask);
|
||||
int32_t streamSchedExec(SStreamTask* pTask);
|
||||
|
||||
typedef struct SStreamMeta SStreamMeta;
|
||||
|
||||
SStreamMeta* streamMetaOpen();
|
||||
|
|
|
@ -189,20 +189,6 @@ typedef struct {
|
|||
tsem_t rspSem;
|
||||
} SMqPollCbParam;
|
||||
|
||||
#if 0
|
||||
typedef struct {
|
||||
tmq_t* tmq;
|
||||
int8_t async;
|
||||
int8_t automatic;
|
||||
int8_t freeOffsets;
|
||||
tmq_commit_cb* userCb;
|
||||
tsem_t rspSem;
|
||||
int32_t rspErr;
|
||||
SArray* offsets;
|
||||
void* userParam;
|
||||
} SMqCommitCbParam;
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
tmq_t* tmq;
|
||||
int8_t automatic;
|
||||
|
@ -385,29 +371,6 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
|
|||
return sprintf(dst, "%s:%d", topicName, vg);
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t tmqCommitCb(void* param, const SDataBuf* pMsg, int32_t code) {
|
||||
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
|
||||
pParam->rspErr = code;
|
||||
if (pParam->async) {
|
||||
if (pParam->automatic && pParam->tmq->commitCb) {
|
||||
pParam->tmq->commitCb(pParam->tmq, pParam->rspErr, pParam->tmq->commitCbUserParam);
|
||||
} else if (!pParam->automatic && pParam->userCb) {
|
||||
pParam->userCb(pParam->tmq, pParam->rspErr, pParam->userParam);
|
||||
}
|
||||
|
||||
if (pParam->freeOffsets) {
|
||||
taosArrayDestroy(pParam->offsets);
|
||||
}
|
||||
|
||||
taosMemoryFree(pParam);
|
||||
} else {
|
||||
tsem_post(&pParam->rspSem);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t tmqCommitCb2(void* param, SDataBuf* pBuf, int32_t code) {
|
||||
SMqCommitCbParam2* pParam = (SMqCommitCbParam2*)param;
|
||||
SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
|
||||
|
@ -660,123 +623,6 @@ int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async,
|
||||
tmq_commit_cb* userCb, void* userParam) {
|
||||
SMqCMCommitOffsetReq req;
|
||||
SArray* pOffsets = NULL;
|
||||
void* buf = NULL;
|
||||
SMqCommitCbParam* pParam = NULL;
|
||||
SMsgSendInfo* sendInfo = NULL;
|
||||
int8_t freeOffsets;
|
||||
int32_t code = -1;
|
||||
|
||||
if (msg == NULL) {
|
||||
freeOffsets = 1;
|
||||
pOffsets = taosArrayInit(0, sizeof(SMqOffset));
|
||||
for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
|
||||
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
|
||||
for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {
|
||||
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
|
||||
SMqOffset offset;
|
||||
tstrncpy(offset.topicName, pTopic->topicName, TSDB_TOPIC_FNAME_LEN);
|
||||
tstrncpy(offset.cgroup, tmq->groupId, TSDB_CGROUP_LEN);
|
||||
offset.vgId = pVg->vgId;
|
||||
offset.offset = pVg->currentOffset;
|
||||
taosArrayPush(pOffsets, &offset);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
freeOffsets = 0;
|
||||
pOffsets = (SArray*)&msg->container;
|
||||
}
|
||||
|
||||
req.num = (int32_t)pOffsets->size;
|
||||
req.offsets = pOffsets->pData;
|
||||
|
||||
SEncoder encoder;
|
||||
|
||||
tEncoderInit(&encoder, NULL, 0);
|
||||
code = tEncodeSMqCMCommitOffsetReq(&encoder, &req);
|
||||
if (code < 0) {
|
||||
goto END;
|
||||
}
|
||||
int32_t tlen = encoder.pos;
|
||||
buf = taosMemoryMalloc(tlen);
|
||||
if (buf == NULL) {
|
||||
tEncoderClear(&encoder);
|
||||
goto END;
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
tEncoderInit(&encoder, buf, tlen);
|
||||
tEncodeSMqCMCommitOffsetReq(&encoder, &req);
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam));
|
||||
if (pParam == NULL) {
|
||||
goto END;
|
||||
}
|
||||
pParam->tmq = tmq;
|
||||
pParam->automatic = automatic;
|
||||
pParam->async = async;
|
||||
pParam->offsets = pOffsets;
|
||||
pParam->freeOffsets = freeOffsets;
|
||||
pParam->userCb = userCb;
|
||||
pParam->userParam = userParam;
|
||||
if (!async) tsem_init(&pParam->rspSem, 0, 0);
|
||||
|
||||
sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
|
||||
if (sendInfo == NULL) goto END;
|
||||
sendInfo->msgInfo = (SDataBuf){
|
||||
.pData = buf,
|
||||
.len = tlen,
|
||||
.handle = NULL,
|
||||
};
|
||||
|
||||
sendInfo->requestId = generateRequestId();
|
||||
sendInfo->requestObjRefId = 0;
|
||||
sendInfo->param = pParam;
|
||||
sendInfo->fp = tmqCommitCb;
|
||||
sendInfo->msgType = TDMT_MND_MQ_COMMIT_OFFSET;
|
||||
|
||||
SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);
|
||||
|
||||
int64_t transporterId = 0;
|
||||
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
|
||||
|
||||
if (!async) {
|
||||
tsem_wait(&pParam->rspSem);
|
||||
code = pParam->rspErr;
|
||||
tsem_destroy(&pParam->rspSem);
|
||||
taosMemoryFree(pParam);
|
||||
} else {
|
||||
code = 0;
|
||||
}
|
||||
|
||||
// avoid double free if msg is sent
|
||||
buf = NULL;
|
||||
|
||||
END:
|
||||
if (buf) taosMemoryFree(buf);
|
||||
/*if (pParam) taosMemoryFree(pParam);*/
|
||||
/*if (sendInfo) taosMemoryFree(sendInfo);*/
|
||||
|
||||
if (code != 0 && async) {
|
||||
if (automatic) {
|
||||
tmq->commitCb(tmq, code, (tmq_topic_vgroup_list_t*)pOffsets, tmq->commitCbUserParam);
|
||||
} else {
|
||||
userCb(tmq, code, (tmq_topic_vgroup_list_t*)pOffsets, userParam);
|
||||
}
|
||||
}
|
||||
|
||||
if (!async && freeOffsets) {
|
||||
taosArrayDestroy(pOffsets);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
#endif
|
||||
|
||||
void tmqAssignAskEpTask(void* param, void* tmrId) {
|
||||
tmq_t* tmq = (tmq_t*)param;
|
||||
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
|
||||
|
@ -1839,13 +1685,21 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
|
|||
return rsp;
|
||||
}
|
||||
|
||||
int32_t retryCnt = 0;
|
||||
tmq_list_t* lst = tmq_list_new();
|
||||
rsp = tmq_subscribe(tmq, lst);
|
||||
while (1) {
|
||||
rsp = tmq_subscribe(tmq, lst);
|
||||
if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
|
||||
break;
|
||||
} else {
|
||||
retryCnt++;
|
||||
taosMsleep(500);
|
||||
}
|
||||
}
|
||||
|
||||
tmq_list_destroy(lst);
|
||||
|
||||
if (rsp != 0) {
|
||||
return rsp;
|
||||
}
|
||||
return rsp;
|
||||
}
|
||||
// TODO: free resources
|
||||
return 0;
|
||||
|
|
|
@ -4908,7 +4908,6 @@ int32_t tDecodeSRSmaParam(SDecoder *pCoder, SRSmaParam *pRSmaParam) {
|
|||
if (tDecodeI64v(pCoder, &pRSmaParam->watermark[i]) < 0) return -1;
|
||||
if (tDecodeI32v(pCoder, &pRSmaParam->qmsgLen[i]) < 0) return -1;
|
||||
if (pRSmaParam->qmsgLen[i] > 0) {
|
||||
tDecoderMalloc(pCoder, pRSmaParam->qmsgLen[i]);
|
||||
if (tDecodeBinary(pCoder, (uint8_t **)&pRSmaParam->qmsg[i], NULL) < 0) return -1; // qmsgLen contains len of '\0'
|
||||
} else {
|
||||
pRSmaParam->qmsg[i] = NULL;
|
||||
|
|
|
@ -92,7 +92,7 @@ static FORCE_INLINE void tdRSmaQTaskInfoIterDestroy(SRSmaQTaskInfoIter *pIter) {
|
|||
|
||||
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
|
||||
// Note: free/kill may in RC
|
||||
if (!taskHandle) return;
|
||||
if (!taskHandle || !(*taskHandle)) return;
|
||||
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
|
||||
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
|
||||
smaDebug("vgId:%d, free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);
|
||||
|
@ -1336,6 +1336,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
|
||||
qSetMultiStreamInput(pItem->taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK);
|
||||
tdRSmaFetchAndSubmitResult(pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat, STREAM_INPUT__DATA_BLOCK);
|
||||
tdCleanupStreamInputDataBlock(pItem->taskInfo);
|
||||
|
||||
tdUnRefRSmaInfo(pSma, pRSmaInfo);
|
||||
// atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
|
||||
|
|
|
@ -216,9 +216,11 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) {
|
|||
|
||||
if (offset.val.type == TMQ_OFFSET__LOG) {
|
||||
STqHandle* pHandle = taosHashGet(pTq->handles, offset.subKey, strlen(offset.subKey));
|
||||
if (walRefVer(pHandle->pRef, offset.val.version) < 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
if (pHandle) {
|
||||
if (walRefVer(pHandle->pRef, offset.val.version) < 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -515,7 +517,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
|
|||
// todo lock
|
||||
STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey));
|
||||
if (pHandle == NULL) {
|
||||
ASSERT(req.oldConsumerId == -1);
|
||||
if (req.oldConsumerId != -1) {
|
||||
tqError("vgId:%d, build new consumer handle %s for consumer %d, but old consumerId is %ld", req.vgId, req.subKey,
|
||||
req.newConsumerId, req.oldConsumerId);
|
||||
}
|
||||
ASSERT(req.newConsumerId != -1);
|
||||
STqHandle tqHandle = {0};
|
||||
pHandle = &tqHandle;
|
||||
|
@ -604,7 +609,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
|
|||
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
|
||||
}
|
||||
|
||||
pTask->execStatus = TASK_EXEC_STATUS__IDLE;
|
||||
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
|
||||
|
||||
pTask->inputQueue = streamQueueOpen();
|
||||
pTask->outputQueue = streamQueueOpen();
|
||||
|
@ -720,7 +725,7 @@ int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (streamLaunchByWrite(pTask, TD_VID(pTq->pVnode)) < 0) {
|
||||
if (streamSchedExec(pTask) < 0) {
|
||||
qError("stream task launch failed, task id %d", pTask->taskId);
|
||||
continue;
|
||||
}
|
||||
|
@ -751,12 +756,13 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
}
|
||||
|
||||
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
|
||||
ASSERT(0);
|
||||
char* msgStr = pMsg->pCont;
|
||||
char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
|
||||
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
|
||||
SStreamDispatchReq req;
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, msgBody, msgLen);
|
||||
tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
|
||||
tDecodeStreamDispatchReq(&decoder, &req);
|
||||
int32_t taskId = req.taskId;
|
||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
|
||||
|
|
|
@ -136,31 +136,6 @@ int32_t tqSendExecReq(STQ* pTq, STqHandle* pHandle) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqEnqueueAll(STQ* pTq, SSubmitReq* pReq) {
|
||||
void* pIter = NULL;
|
||||
SStreamDataSubmit* pSubmit = streamDataSubmitNew(pReq);
|
||||
if (pSubmit == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
pIter = taosHashIterate(pTq->handles, pIter);
|
||||
if (pIter == NULL) break;
|
||||
STqHandle* pHandle = (STqHandle*)pIter;
|
||||
if (tqEnqueue(pHandle, pSubmit) < 0) {
|
||||
continue;
|
||||
}
|
||||
int8_t execStatus = atomic_load_8(&pHandle->pushHandle.execStatus);
|
||||
if (execStatus == TASK_EXEC_STATUS__IDLE || execStatus == TASK_EXEC_STATUS__CLOSING) {
|
||||
tqSendExecReq(pTq, pHandle);
|
||||
}
|
||||
}
|
||||
|
||||
streamDataSubmitRefDec(pSubmit);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) {
|
||||
if (msgType != TDMT_VND_SUBMIT) return 0;
|
||||
void* pIter = NULL;
|
||||
|
|
|
@ -437,14 +437,10 @@ static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *p
|
|||
goto _err;
|
||||
}
|
||||
|
||||
// taosMemoryFree(req.schemaRow.pSchema);
|
||||
// taosMemoryFree(req.schemaTag.pSchema);
|
||||
tDecoderClear(&coder);
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
taosMemoryFree(req.schemaRow.pSchema);
|
||||
taosMemoryFree(req.schemaTag.pSchema);
|
||||
tDecoderClear(&coder);
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -448,7 +448,8 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
|
|||
}
|
||||
}
|
||||
|
||||
vTrace("vgId:%d, sync msg:%p is processed, type:%s code:0x%x", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType), code);
|
||||
vTrace("vgId:%d, sync msg:%p is processed, type:%s code:0x%x", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType),
|
||||
code);
|
||||
syncNodeRelease(pSyncNode);
|
||||
if (code != 0 && terrno == 0) {
|
||||
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
|
||||
|
@ -629,8 +630,8 @@ static int32_t vnodeSnapshotStartWrite(struct SSyncFSM *pFsm, void *pParam, void
|
|||
static int32_t vnodeSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply, SSnapshot *pSnapshot) {
|
||||
#ifdef USE_TSDB_SNAPSHOT
|
||||
SVnode *pVnode = pFsm->data;
|
||||
vInfo("vgId:%d, stop write vnode snapshot, apply:%d, index:%" PRId64 " term:%" PRIu64 " config:%" PRId64, pVnode->config.vgId, isApply,
|
||||
pSnapshot->lastApplyIndex, pSnapshot->lastApplyTerm, pSnapshot->lastConfigIndex);
|
||||
vInfo("vgId:%d, stop write vnode snapshot, apply:%d, index:%" PRId64 " term:%" PRIu64 " config:%" PRId64,
|
||||
pVnode->config.vgId, isApply, pSnapshot->lastApplyIndex, pSnapshot->lastApplyTerm, pSnapshot->lastConfigIndex);
|
||||
|
||||
int32_t code = vnodeSnapWriterClose(pWriter, !isApply, pSnapshot);
|
||||
vInfo("vgId:%d, apply vnode snapshot finished, code:0x%x", pVnode->config.vgId, code);
|
||||
|
@ -707,7 +708,7 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
|
|||
}
|
||||
|
||||
setPingTimerMS(pVnode->sync, 5000);
|
||||
setElectTimerMS(pVnode->sync, 1300);
|
||||
setElectTimerMS(pVnode->sync, 2800);
|
||||
setHeartbeatTimerMS(pVnode->sync, 900);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -88,9 +88,11 @@ bool isResultRowClosed(SResultRow* pResultRow);
|
|||
|
||||
struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);
|
||||
|
||||
static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos) {
|
||||
static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos, bool forUpdate) {
|
||||
SFilePage* bufPage = (SFilePage*)getBufPage(pBuf, pos->pageId);
|
||||
setBufPageDirty(bufPage, true);
|
||||
if (forUpdate) {
|
||||
setBufPageDirty(bufPage, true);
|
||||
}
|
||||
SResultRow* pRow = (SResultRow*)((char*)bufPage + pos->offset);
|
||||
return pRow;
|
||||
}
|
||||
|
|
|
@ -632,7 +632,7 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
|
|||
tListLen(pExp->pExpr->_function.functionName));
|
||||
#if 1
|
||||
// todo refactor: add the parameter for tbname function
|
||||
if (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0) {
|
||||
if (!pFuncNode->pParameterList && (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0)) {
|
||||
pFuncNode->pParameterList = nodesMakeList();
|
||||
ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
|
||||
SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
|
||||
|
@ -953,7 +953,7 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI
|
|||
return w;
|
||||
}
|
||||
|
||||
w = getResultRowByPos(pBuf, &pResultRowInfo->cur)->win;
|
||||
w = getResultRowByPos(pBuf, &pResultRowInfo->cur, false)->win;
|
||||
|
||||
// in case of typical time window, we can calculate time window directly.
|
||||
if (w.skey > ts || w.ekey < ts) {
|
||||
|
|
|
@ -83,6 +83,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
|
|||
taosArrayClear(p->pDataBlock);
|
||||
taosArrayAddAll(p->pDataBlock, pDataBlock->pDataBlock);
|
||||
taosArrayPush(pInfo->pBlockLists, &p);
|
||||
|
||||
}
|
||||
pInfo->blockType = STREAM_INPUT__DATA_BLOCK;
|
||||
} else {
|
||||
|
@ -93,6 +94,29 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
|
|||
}
|
||||
}
|
||||
|
||||
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
if (!pTaskInfo || !pTaskInfo->pRoot || pTaskInfo->pRoot->numOfDownstream <= 0) {
|
||||
return;
|
||||
}
|
||||
SOperatorInfo* pOptrInfo = pTaskInfo->pRoot->pDownstream[0];
|
||||
|
||||
if (pOptrInfo->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
|
||||
SStreamScanInfo* pInfo = pOptrInfo->info;
|
||||
if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pBlockLists); ++i) {
|
||||
SSDataBlock* p = *(SSDataBlock**)taosArrayGet(pInfo->pBlockLists, i);
|
||||
taosArrayDestroy(p->pDataBlock);
|
||||
taosMemoryFreeClear(p);
|
||||
}
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
|
||||
if (tinfo == NULL) {
|
||||
return TSDB_CODE_QRY_APP_ERROR;
|
||||
|
@ -104,7 +128,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
|
|||
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
|
||||
int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo));
|
||||
int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void*)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo));
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo));
|
||||
} else {
|
||||
|
|
|
@ -258,7 +258,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
|
|||
// in case of repeat scan/reverse scan, no new time window added.
|
||||
if (isIntervalQuery) {
|
||||
if (masterscan && p1 != NULL) { // the *p1 may be NULL in case of sliding+offset exists.
|
||||
pResult = getResultRowByPos(pResultBuf, p1);
|
||||
pResult = getResultRowByPos(pResultBuf, p1, true);
|
||||
ASSERT(pResult->pageId == p1->pageId && pResult->offset == p1->offset);
|
||||
}
|
||||
} else {
|
||||
|
@ -266,7 +266,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
|
|||
// pResultRowInfo object.
|
||||
if (p1 != NULL) {
|
||||
// todo
|
||||
pResult = getResultRowByPos(pResultBuf, p1);
|
||||
pResult = getResultRowByPos(pResultBuf, p1, true);
|
||||
ASSERT(pResult->pageId == p1->pageId && pResult->offset == p1->offset);
|
||||
}
|
||||
}
|
||||
|
@ -3330,18 +3330,16 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {
|
||||
if (pExpr) {
|
||||
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||
SExprInfo* pExprInfo = &pExpr[i];
|
||||
for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) {
|
||||
if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_COLUMN) {
|
||||
taosMemoryFreeClear(pExprInfo->base.pParam[j].pCol);
|
||||
}
|
||||
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||
SExprInfo* pExprInfo = &pExpr[i];
|
||||
for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) {
|
||||
if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_COLUMN) {
|
||||
taosMemoryFreeClear(pExprInfo->base.pParam[j].pCol);
|
||||
}
|
||||
|
||||
taosMemoryFree(pExprInfo->base.pParam);
|
||||
taosMemoryFree(pExprInfo->pExpr);
|
||||
}
|
||||
|
||||
taosMemoryFree(pExprInfo->base.pParam);
|
||||
taosMemoryFree(pExprInfo->pExpr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3483,9 +3481,8 @@ void cleanupExprSupp(SExprSupp* pSupp) {
|
|||
destroySqlFunctionCtx(pSupp->pCtx, pSupp->numOfExprs);
|
||||
if (pSupp->pExprInfo != NULL) {
|
||||
destroyExprInfo(pSupp->pExprInfo, pSupp->numOfExprs);
|
||||
taosMemoryFreeClear(pSupp->pExprInfo);
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(pSupp->pExprInfo);
|
||||
taosMemoryFree(pSupp->rowEntryInfoOffset);
|
||||
}
|
||||
|
||||
|
@ -4076,6 +4073,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
|
|||
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
|
||||
ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pUser);
|
||||
if (ops[i] == NULL) {
|
||||
taosMemoryFree(ops);
|
||||
return NULL;
|
||||
} else {
|
||||
ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
|
||||
|
@ -4517,7 +4515,7 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
|
|||
return code;
|
||||
|
||||
_complete:
|
||||
taosMemoryFreeClear(*pTaskInfo);
|
||||
doDestroyTask(*pTaskInfo);
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -1493,6 +1493,11 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
if (pStreamScan->pColMatchInfo) {
|
||||
taosArrayDestroy(pStreamScan->pColMatchInfo);
|
||||
}
|
||||
if (pStreamScan->pPseudoExpr) {
|
||||
destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr);
|
||||
taosMemoryFreeClear(pStreamScan->pPseudoExpr);
|
||||
}
|
||||
|
||||
updateInfoDestroy(pStreamScan->pUpdateInfo);
|
||||
blockDataDestroy(pStreamScan->pRes);
|
||||
blockDataDestroy(pStreamScan->pUpdateRes);
|
||||
|
|
|
@ -611,7 +611,7 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
|
|||
break;
|
||||
}
|
||||
|
||||
SResultRow* pr = getResultRowByPos(pInfo->aggSup.pResultBuf, p1);
|
||||
SResultRow* pr = getResultRowByPos(pInfo->aggSup.pResultBuf, p1, false);
|
||||
ASSERT(pr->offset == p1->offset && pr->pageId == p1->pageId);
|
||||
|
||||
if (pr->closed) {
|
||||
|
@ -1345,7 +1345,7 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type
|
|||
}
|
||||
|
||||
void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) {
|
||||
SResultRow* pResult = getResultRowByPos(pResultBuf, p1);
|
||||
SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false);
|
||||
SqlFunctionCtx* pCtx = pSup->pCtx;
|
||||
for (int32_t i = 0; i < numOfOutput; ++i) {
|
||||
pCtx[i].resultInfo = getResultEntryInfo(pResult, i, pSup->rowEntryInfoOffset);
|
||||
|
@ -3481,7 +3481,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
|
|||
pWinInfo->pos.pageId = (*pResult)->pageId;
|
||||
pWinInfo->pos.offset = (*pResult)->offset;
|
||||
} else {
|
||||
*pResult = getResultRowByPos(pAggSup->pResultBuf, &pWinInfo->pos);
|
||||
*pResult = getResultRowByPos(pAggSup->pResultBuf, &pWinInfo->pos, true);
|
||||
if (!(*pResult)) {
|
||||
qError("getResultRowByPos return NULL, TID:%s", GET_TASKID(pTaskInfo));
|
||||
return TSDB_CODE_FAILED;
|
||||
|
|
|
@ -1094,6 +1094,7 @@ static SColumnInfoData* doVectorConvert(SScalarParam* pInput, int32_t* doConvert
|
|||
static void doReleaseVec(SColumnInfoData* pCol, int32_t type) {
|
||||
if (type == VECTOR_DO_CONVERT) {
|
||||
colDataDestroy(pCol);
|
||||
taosMemoryFree(pCol);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ void streamCleanUp() {
|
|||
}
|
||||
}
|
||||
|
||||
void streamTriggerByTimer(void* param, void* tmrId) {
|
||||
void streamSchedByTimer(void* param, void* tmrId) {
|
||||
SStreamTask* pTask = (void*)param;
|
||||
|
||||
if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
|
||||
|
@ -68,28 +68,30 @@ void streamTriggerByTimer(void* param, void* tmrId) {
|
|||
atomic_store_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__IN_ACTIVE);
|
||||
|
||||
streamTaskInput(pTask, (SStreamQueueItem*)trigger);
|
||||
streamLaunchByWrite(pTask, pTask->nodeId);
|
||||
streamSchedExec(pTask);
|
||||
}
|
||||
|
||||
taosTmrReset(streamTriggerByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer, &pTask->timer);
|
||||
taosTmrReset(streamSchedByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer, &pTask->timer);
|
||||
}
|
||||
|
||||
int32_t streamSetupTrigger(SStreamTask* pTask) {
|
||||
if (pTask->triggerParam != 0) {
|
||||
pTask->timer = taosTmrStart(streamTriggerByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer);
|
||||
pTask->timer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer);
|
||||
pTask->triggerStatus = TASK_TRIGGER_STATUS__IN_ACTIVE;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamLaunchByWrite(SStreamTask* pTask, int32_t vgId) {
|
||||
int8_t execStatus = atomic_load_8(&pTask->execStatus);
|
||||
if (execStatus == TASK_EXEC_STATUS__IDLE || execStatus == TASK_EXEC_STATUS__CLOSING) {
|
||||
int32_t streamSchedExec(SStreamTask* pTask) {
|
||||
int8_t schedStatus =
|
||||
atomic_val_compare_exchange_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE, TASK_SCHED_STATUS__WAITING);
|
||||
if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
|
||||
SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
|
||||
if (pRunReq == NULL) return -1;
|
||||
|
||||
// TODO: do we need htonl?
|
||||
pRunReq->head.vgId = vgId;
|
||||
if (pRunReq == NULL) {
|
||||
atomic_store_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||
return -1;
|
||||
}
|
||||
pRunReq->head.vgId = pTask->nodeId;
|
||||
pRunReq->streamId = pTask->streamId;
|
||||
pRunReq->taskId = pTask->taskId;
|
||||
SRpcMsg msg = {
|
||||
|
@ -182,14 +184,13 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
|
|||
streamTaskEnqueue(pTask, pReq, pRsp);
|
||||
|
||||
if (exec) {
|
||||
streamExec(pTask);
|
||||
streamTryExec(pTask);
|
||||
|
||||
if (pTask->dispatchType != TASK_DISPATCH__NONE) {
|
||||
ASSERT(pTask->sinkType == TASK_SINK__NONE);
|
||||
streamDispatch(pTask);
|
||||
}
|
||||
} else {
|
||||
streamLaunchByWrite(pTask, pTask->nodeId);
|
||||
streamSchedExec(pTask);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -219,7 +220,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {
|
|||
}
|
||||
|
||||
int32_t streamProcessRunReq(SStreamTask* pTask) {
|
||||
streamExec(pTask);
|
||||
streamTryExec(pTask);
|
||||
|
||||
if (pTask->dispatchType != TASK_DISPATCH__NONE) {
|
||||
streamDispatch(pTask);
|
||||
|
@ -272,10 +273,12 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S
|
|||
streamTaskEnqueueRetrieve(pTask, pReq, pRsp);
|
||||
|
||||
ASSERT(pTask->execType != TASK_EXEC__NONE);
|
||||
streamExec(pTask);
|
||||
streamSchedExec(pTask);
|
||||
|
||||
ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
|
||||
streamDispatch(pTask);
|
||||
/*streamTryExec(pTask);*/
|
||||
|
||||
/*ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);*/
|
||||
/*streamDispatch(pTask);*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -440,13 +440,13 @@ FAIL:
|
|||
|
||||
int32_t streamDispatch(SStreamTask* pTask) {
|
||||
ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
|
||||
#if 1
|
||||
ASSERT(pTask->sinkType == TASK_SINK__NONE);
|
||||
|
||||
int8_t old =
|
||||
atomic_val_compare_exchange_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL, TASK_OUTPUT_STATUS__WAIT);
|
||||
if (old != TASK_OUTPUT_STATUS__NORMAL) {
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
SStreamDataBlock* pBlock = streamQueueNextItem(pTask->outputQueue);
|
||||
if (pBlock == NULL) {
|
||||
|
@ -466,22 +466,8 @@ int32_t streamDispatch(SStreamTask* pTask) {
|
|||
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
|
||||
goto FREE;
|
||||
}
|
||||
/*atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);*/
|
||||
FREE:
|
||||
taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(pBlock);
|
||||
#if 0
|
||||
SRpcMsg dispatchMsg = {0};
|
||||
SEpSet* pEpSet = NULL;
|
||||
if (streamBuildDispatchMsg(pTask, pBlock, &dispatchMsg, &pEpSet) < 0) {
|
||||
ASSERT(0);
|
||||
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
|
||||
return -1;
|
||||
}
|
||||
taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(pBlock);
|
||||
|
||||
tmsgSendReq(pEpSet, &dispatchMsg);
|
||||
#endif
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -147,24 +147,23 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum) {
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
|
||||
// TODO: handle version
|
||||
int32_t streamExecForAll(SStreamTask* pTask) {
|
||||
while (1) {
|
||||
int32_t cnt = 1;
|
||||
void* data = NULL;
|
||||
while (1) {
|
||||
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
|
||||
if (qItem == NULL) {
|
||||
qDebug("stream exec over, queue empty");
|
||||
qDebug("stream task exec over, queue empty, task: %d", pTask->taskId);
|
||||
break;
|
||||
}
|
||||
if (data == NULL) {
|
||||
data = qItem;
|
||||
streamQueueProcessSuccess(pTask->inputQueue);
|
||||
if (pTask->execType == TASK_EXEC__NONE) break;
|
||||
/*if (qItem->type == STREAM_INPUT__DATA_BLOCK) {*/
|
||||
/*streamUpdateVer(pTask, (SStreamDataBlock*)qItem);*/
|
||||
/*}*/
|
||||
if (pTask->execType == TASK_EXEC__NONE) {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
void* newRet;
|
||||
if ((newRet = streamAppendQueueItem(data, qItem)) == NULL) {
|
||||
|
@ -181,11 +180,12 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
|
|||
|
||||
if (pTask->taskStatus == TASK_STATUS__DROPPING) {
|
||||
if (data) streamFreeQitem(data);
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
return NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (data == NULL) break;
|
||||
if (data == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (pTask->execType == TASK_EXEC__NONE) {
|
||||
ASSERT(((SStreamQueueItem*)data)->type == STREAM_INPUT__DATA_BLOCK);
|
||||
|
@ -193,6 +193,8 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
|
|||
continue;
|
||||
}
|
||||
|
||||
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
|
||||
qDebug("stream task %d exec begin, msg batch: %d", pTask->taskId, cnt);
|
||||
streamTaskExecImpl(pTask, data, pRes);
|
||||
qDebug("stream task %d exec end", pTask->taskId);
|
||||
|
@ -203,76 +205,44 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
|
|||
// TODO log failed ver
|
||||
streamQueueProcessFail(pTask->inputQueue);
|
||||
taosArrayDestroy(pRes);
|
||||
return NULL;
|
||||
return -1;
|
||||
}
|
||||
qRes->type = STREAM_INPUT__DATA_BLOCK;
|
||||
qRes->blocks = pRes;
|
||||
if (streamTaskOutput(pTask, qRes) < 0) {
|
||||
// TODO log failed ver
|
||||
/*streamQueueProcessFail(pTask->inputQueue);*/
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(qRes);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (((SStreamQueueItem*)data)->type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
|
||||
qRes->childId = pTask->selfChildId;
|
||||
qRes->sourceVer = pSubmit->ver;
|
||||
}
|
||||
|
||||
if (streamTaskOutput(pTask, qRes) < 0) {
|
||||
// TODO save failed ver
|
||||
/*streamQueueProcessFail(pTask->inputQueue);*/
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(qRes);
|
||||
return -1;
|
||||
}
|
||||
/*streamQueueProcessSuccess(pTask->inputQueue);*/
|
||||
pRes = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
}
|
||||
|
||||
streamFreeQitem(data);
|
||||
}
|
||||
return pRes;
|
||||
}
|
||||
|
||||
// TODO: handle version
|
||||
int32_t streamExec(SStreamTask* pTask) {
|
||||
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
if (pRes == NULL) return -1;
|
||||
while (1) {
|
||||
int8_t execStatus =
|
||||
atomic_val_compare_exchange_8(&pTask->execStatus, TASK_EXEC_STATUS__IDLE, TASK_EXEC_STATUS__EXECUTING);
|
||||
if (execStatus == TASK_EXEC_STATUS__IDLE) {
|
||||
// first run
|
||||
qDebug("stream exec, enter exec status");
|
||||
pRes = streamExecForQall(pTask, pRes);
|
||||
if (pRes == NULL) goto FAIL;
|
||||
|
||||
// temporarily disable status closing, since it runs out of threads
|
||||
#if 0
|
||||
// set status closing
|
||||
atomic_store_8(&pTask->execStatus, TASK_EXEC_STATUS__CLOSING);
|
||||
|
||||
// second run, make sure inputQ and qall are cleared
|
||||
qDebug("stream exec, enter closing status");
|
||||
pRes = streamExecForQall(pTask, pRes);
|
||||
if (pRes == NULL) goto FAIL;
|
||||
#endif
|
||||
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
atomic_store_8(&pTask->execStatus, TASK_EXEC_STATUS__IDLE);
|
||||
qDebug("stream exec, return result");
|
||||
return 0;
|
||||
} else if (execStatus == TASK_EXEC_STATUS__CLOSING) {
|
||||
continue;
|
||||
} else if (execStatus == TASK_EXEC_STATUS__EXECUTING) {
|
||||
ASSERT(taosArrayGetSize(pRes) == 0);
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
return 0;
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
FAIL:
|
||||
if (pRes) taosArrayDestroy(pRes);
|
||||
if (pTask->taskStatus == TASK_STATUS__DROPPING) {
|
||||
tFreeSStreamTask(pTask);
|
||||
return 0;
|
||||
} else {
|
||||
atomic_store_8(&pTask->execStatus, TASK_EXEC_STATUS__IDLE);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamTryExec(SStreamTask* pTask) {
|
||||
int8_t schedStatus =
|
||||
atomic_val_compare_exchange_8(&pTask->schedStatus, TASK_SCHED_STATUS__WAITING, TASK_SCHED_STATUS__ACTIVE);
|
||||
if (schedStatus == TASK_SCHED_STATUS__WAITING) {
|
||||
int32_t code = streamExecForAll(pTask);
|
||||
if (code < 0) {
|
||||
atomic_store_8(&pTask->schedStatus, TASK_SCHED_STATUS__FAILED);
|
||||
return -1;
|
||||
}
|
||||
atomic_store_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||
|
||||
if (!taosQueueEmpty(pTask->inputQueue->queue)) {
|
||||
streamSchedExec(pTask);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -35,9 +35,10 @@ FAIL:
|
|||
void streamQueueClose(SStreamQueue* queue) {
|
||||
while (1) {
|
||||
void* qItem = streamQueueNextItem(queue);
|
||||
if (qItem)
|
||||
if (qItem) {
|
||||
taosFreeQitem(qItem);
|
||||
else
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ SStreamTask* tNewSStreamTask(int64_t streamId) {
|
|||
}
|
||||
pTask->taskId = tGenIdPI32();
|
||||
pTask->streamId = streamId;
|
||||
pTask->execStatus = TASK_EXEC_STATUS__IDLE;
|
||||
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
|
||||
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
|
||||
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
|
||||
|
||||
|
@ -59,7 +59,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
|
|||
if (tEncodeI16(pEncoder, pTask->dispatchMsgType) < 0) return -1;
|
||||
|
||||
if (tEncodeI8(pEncoder, pTask->taskStatus) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pTask->execStatus) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pTask->schedStatus) < 0) return -1;
|
||||
|
||||
if (tEncodeI32(pEncoder, pTask->selfChildId) < 0) return -1;
|
||||
if (tEncodeI32(pEncoder, pTask->nodeId) < 0) return -1;
|
||||
|
@ -114,7 +114,7 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
|
|||
if (tDecodeI16(pDecoder, &pTask->dispatchMsgType) < 0) return -1;
|
||||
|
||||
if (tDecodeI8(pDecoder, &pTask->taskStatus) < 0) return -1;
|
||||
if (tDecodeI8(pDecoder, &pTask->execStatus) < 0) return -1;
|
||||
if (tDecodeI8(pDecoder, &pTask->schedStatus) < 0) return -1;
|
||||
|
||||
if (tDecodeI32(pDecoder, &pTask->selfChildId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pTask->nodeId) < 0) return -1;
|
||||
|
|
|
@ -28,13 +28,13 @@ extern "C" {
|
|||
#include "trpc.h"
|
||||
#include "ttimer.h"
|
||||
|
||||
#define TIMER_MAX_MS 0x7FFFFFFF
|
||||
#define ENV_TICK_TIMER_MS 1000
|
||||
#define PING_TIMER_MS 5000
|
||||
#define ELECT_TIMER_MS_MIN 1300
|
||||
#define ELECT_TIMER_MS_MAX (ELECT_TIMER_MS_MIN * 2)
|
||||
#define TIMER_MAX_MS 0x7FFFFFFF
|
||||
#define ENV_TICK_TIMER_MS 1000
|
||||
#define PING_TIMER_MS 5000
|
||||
#define ELECT_TIMER_MS_MIN 5000
|
||||
#define ELECT_TIMER_MS_MAX (ELECT_TIMER_MS_MIN * 2)
|
||||
#define ELECT_TIMER_MS_RANGE (ELECT_TIMER_MS_MAX - ELECT_TIMER_MS_MIN)
|
||||
#define HEARTBEAT_TIMER_MS 900
|
||||
#define HEARTBEAT_TIMER_MS 900
|
||||
|
||||
#define EMPTY_RAFT_ID ((SRaftId){.addr = 0, .vgId = 0})
|
||||
|
||||
|
|
|
@ -730,7 +730,8 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIs
|
|||
for (int i = 0; i < arrSize; ++i) {
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "propose message, type:%s batch:%d", TMSG_INFO(pMsgPArr[i]->msgType), arrSize);
|
||||
snprintf(eventLog, sizeof(eventLog), "propose message, type:%s batch:%d", TMSG_INFO(pMsgPArr[i]->msgType),
|
||||
arrSize);
|
||||
syncNodeEventLog(pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
|
@ -834,7 +835,8 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
|
|||
rpcFreeCont(rpcMsg.pCont);
|
||||
syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum);
|
||||
ret = 1;
|
||||
sDebug("vgId:%d, sync optimize index:%" PRId64 ", type:%s", pSyncNode->vgId, retIndex, TMSG_INFO(pMsg->msgType));
|
||||
sDebug("vgId:%d, sync optimize index:%" PRId64 ", type:%s", pSyncNode->vgId, retIndex,
|
||||
TMSG_INFO(pMsg->msgType));
|
||||
} else {
|
||||
ret = -1;
|
||||
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
|
||||
|
@ -1114,7 +1116,7 @@ void syncNodeStart(SSyncNode* pSyncNode) {
|
|||
}
|
||||
|
||||
int32_t ret = 0;
|
||||
ret = syncNodeStartPingTimer(pSyncNode);
|
||||
// ret = syncNodeStartPingTimer(pSyncNode);
|
||||
ASSERT(ret == 0);
|
||||
}
|
||||
|
||||
|
@ -1250,6 +1252,13 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
|
|||
taosTmrReset(pSyncNode->FpElectTimerCB, pSyncNode->electTimerMS, pSyncNode, gSyncEnv->pTimerManager,
|
||||
&pSyncNode->pElectTimer);
|
||||
atomic_store_64(&pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
|
||||
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "elect timer reset, ms:%d", ms);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
|
||||
} else {
|
||||
sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId);
|
||||
}
|
||||
|
@ -1281,6 +1290,14 @@ int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode) {
|
|||
electMS = syncUtilElectRandomMS(pSyncNode->electBaseLine, 2 * pSyncNode->electBaseLine);
|
||||
}
|
||||
ret = syncNodeRestartElectTimer(pSyncNode, electMS);
|
||||
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "reset elect timer, min:%d, max:%d, ms:%d", pSyncNode->electBaseLine,
|
||||
2 * pSyncNode->electBaseLine, electMS);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1293,6 +1310,13 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
|
|||
} else {
|
||||
sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
|
||||
}
|
||||
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", pSyncNode->heartbeatTimerMS);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1304,6 +1328,13 @@ int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) {
|
|||
} else {
|
||||
sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
|
||||
}
|
||||
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", 1);
|
||||
syncNodeEventLog(pSyncNode, logBuf);
|
||||
} while (0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1312,6 +1343,8 @@ int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) {
|
|||
atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1);
|
||||
taosTmrStop(pSyncNode->pHeartbeatTimer);
|
||||
pSyncNode->pHeartbeatTimer = NULL;
|
||||
sTrace("vgId:%d, stop heartbeat timer", pSyncNode->vgId);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1559,12 +1592,13 @@ void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
|
|||
", sby:%d, "
|
||||
"stgy:%d, bch:%d, "
|
||||
"r-num:%d, "
|
||||
"lcfg:%" PRId64 ", chging:%d, rsto:%d, %s",
|
||||
"lcfg:%" PRId64 ", chging:%d, rsto:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
|
||||
pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm,
|
||||
pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
|
||||
pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize,
|
||||
pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing,
|
||||
pSyncNode->restoreFinish, printStr);
|
||||
pSyncNode->restoreFinish, pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser,
|
||||
printStr);
|
||||
} else {
|
||||
snprintf(logBuf, sizeof(logBuf), "%s", str);
|
||||
}
|
||||
|
@ -1894,7 +1928,7 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
|
|||
|
||||
// Raft 3.6.2 Committing entries from previous terms
|
||||
syncNodeAppendNoop(pSyncNode);
|
||||
#if 0 // simon
|
||||
#if 0 // simon
|
||||
syncNodeReplicate(pSyncNode);
|
||||
#endif
|
||||
syncMaybeAdvanceCommitIndex(pSyncNode);
|
||||
|
|
|
@ -141,7 +141,8 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
|
|||
", match-index:%d, raftid:%" PRId64,
|
||||
pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr);
|
||||
|
||||
syncNodeRestartNowHeartbeatTimer(pSyncNode);
|
||||
// syncNodeRestartNowHeartbeatTimer(pSyncNode);
|
||||
syncNodeStartNowHeartbeatTimer(pSyncNode);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -48,14 +48,16 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
|
|||
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
|
||||
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
|
||||
++(ths->electTimerCounter);
|
||||
sInfo("vgId:%d, sync timeout, type:election count:%d", ths->vgId, ths->electTimerCounter);
|
||||
sInfo("vgId:%d, sync timeout, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
|
||||
ths->electTimerCounter, ths->electTimerLogicClockUser);
|
||||
syncNodeElect(ths);
|
||||
}
|
||||
|
||||
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
|
||||
if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
|
||||
++(ths->heartbeatTimerCounter);
|
||||
sInfo("vgId:%d, sync timeout, type:replicate count:%d", ths->vgId, ths->heartbeatTimerCounter);
|
||||
sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
|
||||
ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
|
||||
syncNodeReplicate(ths);
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -125,7 +125,10 @@ int32_t syncUtilRand(int32_t max) { return taosRand() % max; }
|
|||
|
||||
int32_t syncUtilElectRandomMS(int32_t min, int32_t max) {
|
||||
ASSERT(min > 0 && max > 0 && max >= min);
|
||||
return min + syncUtilRand(max - min);
|
||||
int32_t rdm = min + syncUtilRand(max - min);
|
||||
|
||||
// sDebug("random min:%d, max:%d, rdm:%d", min, max, rdm);
|
||||
return rdm;
|
||||
}
|
||||
|
||||
int32_t syncUtilQuorum(int32_t replicaNum) { return replicaNum / 2 + 1; }
|
||||
|
|
|
@ -176,6 +176,8 @@ int tdbBtreeInsert(SBTree *pBt, const void *pKey, int kLen, const void *pVal, in
|
|||
|
||||
tdbBtcOpen(&btc, pBt, pTxn);
|
||||
|
||||
tdbTrace("tdb insert, btc: %p, pTxn: %p", &btc, pTxn);
|
||||
|
||||
// move to the position to insert
|
||||
ret = tdbBtcMoveTo(&btc, pKey, kLen, &c);
|
||||
if (ret < 0) {
|
||||
|
@ -214,6 +216,8 @@ int tdbBtreeDelete(SBTree *pBt, const void *pKey, int kLen, TXN *pTxn) {
|
|||
|
||||
tdbBtcOpen(&btc, pBt, pTxn);
|
||||
|
||||
tdbTrace("tdb delete, btc: %p, pTxn: %p", &btc, pTxn);
|
||||
|
||||
// move the cursor
|
||||
ret = tdbBtcMoveTo(&btc, pKey, kLen, &c);
|
||||
if (ret < 0) {
|
||||
|
@ -244,6 +248,8 @@ int tdbBtreeUpsert(SBTree *pBt, const void *pKey, int nKey, const void *pData, i
|
|||
|
||||
tdbBtcOpen(&btc, pBt, pTxn);
|
||||
|
||||
tdbTrace("tdb upsert, btc: %p, pTxn: %p", &btc, pTxn);
|
||||
|
||||
// move the cursor
|
||||
ret = tdbBtcMoveTo(&btc, pKey, nKey, &c);
|
||||
if (ret < 0) {
|
||||
|
@ -283,10 +289,12 @@ int tdbBtreePGet(SBTree *pBt, const void *pKey, int kLen, void **ppKey, int *pkL
|
|||
int ret;
|
||||
void *pTKey = NULL;
|
||||
void *pTVal = NULL;
|
||||
SCellDecoder cd;
|
||||
SCellDecoder cd = {0};
|
||||
|
||||
tdbBtcOpen(&btc, pBt, NULL);
|
||||
|
||||
tdbTrace("tdb pget, btc: %p", &btc);
|
||||
|
||||
ret = tdbBtcMoveTo(&btc, pKey, kLen, &cret);
|
||||
if (ret < 0) {
|
||||
tdbBtcClose(&btc);
|
||||
|
@ -295,6 +303,7 @@ int tdbBtreePGet(SBTree *pBt, const void *pKey, int kLen, void **ppKey, int *pkL
|
|||
|
||||
if (btc.idx < 0 || cret) {
|
||||
tdbBtcClose(&btc);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -330,9 +339,13 @@ int tdbBtreePGet(SBTree *pBt, const void *pKey, int kLen, void **ppKey, int *pkL
|
|||
}
|
||||
|
||||
if (TDB_CELLDECODER_FREE_VAL(&cd)) {
|
||||
tdbDebug("tdb btc/pget/2 decoder: %p pVal free: %p", &cd, cd.pVal);
|
||||
|
||||
tdbFree(cd.pVal);
|
||||
}
|
||||
|
||||
tdbTrace("tdb pget end, btc decoder: %p/0x%x, local decoder:%p", &btc.coder, btc.coder.freeKV, &cd);
|
||||
|
||||
tdbBtcClose(&btc);
|
||||
|
||||
return 0;
|
||||
|
@ -722,7 +735,7 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
|
|||
int szCell;
|
||||
SBtreeInitPageArg iarg;
|
||||
int iNew, nNewCells;
|
||||
SCellDecoder cd;
|
||||
SCellDecoder cd = {0};
|
||||
|
||||
iarg.pBt = pBt;
|
||||
iarg.flags = TDB_BTREE_PAGE_GET_FLAGS(pOlds[0]);
|
||||
|
@ -1235,6 +1248,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
|
|||
}
|
||||
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
|
||||
|
||||
tdbDebug("tdb btc decoder: %p/0x%x pVal: %p ", pDecoder, pDecoder->freeKV, pDecoder->pVal);
|
||||
|
||||
memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno));
|
||||
|
||||
nLeft -= nLocal - kLen - sizeof(SPgno);
|
||||
|
@ -1376,6 +1391,9 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD
|
|||
leaf = TDB_BTREE_PAGE_IS_LEAF(pPage);
|
||||
|
||||
// Clear the state of decoder
|
||||
if (TDB_CELLDECODER_FREE_VAL(pDecoder)) {
|
||||
tdbFree(pDecoder->pVal);
|
||||
}
|
||||
pDecoder->kLen = -1;
|
||||
pDecoder->pKey = NULL;
|
||||
pDecoder->vLen = -1;
|
||||
|
@ -1383,6 +1401,8 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD
|
|||
pDecoder->pgno = 0;
|
||||
TDB_CELLDECODER_SET_FREE_NIL(pDecoder);
|
||||
|
||||
tdbDebug("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV);
|
||||
|
||||
// 1. Decode header part
|
||||
if (!leaf) {
|
||||
ASSERT(pPage->vLen == sizeof(SPgno));
|
||||
|
@ -1650,7 +1670,7 @@ int tdbBtcMoveToLast(SBTC *pBtc) {
|
|||
|
||||
int tdbBtreeNext(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) {
|
||||
SCell *pCell;
|
||||
SCellDecoder cd;
|
||||
SCellDecoder cd = {0};
|
||||
void *pKey, *pVal;
|
||||
int ret;
|
||||
|
||||
|
@ -1696,7 +1716,7 @@ int tdbBtreeNext(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) {
|
|||
|
||||
int tdbBtreePrev(SBTC *pBtc, void **ppKey, int *kLen, void **ppVal, int *vLen) {
|
||||
SCell *pCell;
|
||||
SCellDecoder cd;
|
||||
SCellDecoder cd = {0};
|
||||
void *pKey, *pVal;
|
||||
int ret;
|
||||
|
||||
|
@ -2037,7 +2057,7 @@ int tdbBtcMoveTo(SBTC *pBtc, const void *pKey, int kLen, int *pCRst) {
|
|||
const void *pTKey;
|
||||
int tkLen;
|
||||
|
||||
tdbTrace("ttl moveto, pager:%p, ipage:%d", pPager, pBtc->iPage);
|
||||
tdbTrace("tdb moveto, pager:%p, ipage:%d", pPager, pBtc->iPage);
|
||||
if (pBtc->iPage < 0) {
|
||||
// move from a clear cursor
|
||||
ret = tdbPagerFetchPage(pPager, &pBt->root, &(pBtc->pPage), tdbBtreeInitPage,
|
||||
|
@ -2093,6 +2113,7 @@ int tdbBtcMoveTo(SBTC *pBtc, const void *pKey, int kLen, int *pCRst) {
|
|||
}
|
||||
|
||||
// search downward to the leaf
|
||||
tdbTrace("tdb search downward, pager:%p, ipage:%d", pPager, pBtc->iPage);
|
||||
for (;;) {
|
||||
int lidx, ridx;
|
||||
SPage *pPage;
|
||||
|
@ -2127,6 +2148,7 @@ int tdbBtcMoveTo(SBTC *pBtc, const void *pKey, int kLen, int *pCRst) {
|
|||
}
|
||||
|
||||
// binary search
|
||||
tdbTrace("tdb binary search, pager:%p, ipage:%d", pPager, pBtc->iPage);
|
||||
for (;;) {
|
||||
if (lidx > ridx) break;
|
||||
|
||||
|
@ -2157,6 +2179,8 @@ int tdbBtcMoveTo(SBTC *pBtc, const void *pKey, int kLen, int *pCRst) {
|
|||
}
|
||||
}
|
||||
|
||||
tdbTrace("tdb moveto end, pager:%p, ipage:%d", pPager, pBtc->iPage);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2175,6 +2199,12 @@ int tdbBtcClose(SBTC *pBtc) {
|
|||
pBtc->idx = pBtc->idxStack[pBtc->iPage];
|
||||
}
|
||||
|
||||
if (TDB_CELLDECODER_FREE_VAL(&pBtc->coder)) {
|
||||
tdbDebug("tdb btc/close decoder: %p pVal free: %p", &pBtc->coder, pBtc->coder.pVal);
|
||||
|
||||
tdbFree(pBtc->coder.pVal);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1339,8 +1339,9 @@ class Task():
|
|||
0x03A1, # STable [does] not exist
|
||||
0x03AA, # Tag already exists
|
||||
0x0603, # Table already exists
|
||||
0x2603, # Table does not exist
|
||||
0x2603, # Table does not exist, replaced by 2662 below
|
||||
0x260d, # Tags number not matched
|
||||
0x2662, # Table does not exist #TODO: what about 2603 above?
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include "taosudf.h"
|
||||
|
||||
|
||||
DLL_EXPORT int32_t bit_and_init() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
DLL_EXPORT int32_t bit_and_destroy() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn *resultCol) {
|
||||
|
||||
if (block->numOfCols < 2) {
|
||||
return TSDB_CODE_UDF_INVALID_INPUT;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < block->numOfCols; ++i) {
|
||||
SUdfColumn* col = block->udfCols[i];
|
||||
if (!(col->colMeta.type == TSDB_DATA_TYPE_INT)) {
|
||||
return TSDB_CODE_UDF_INVALID_INPUT;
|
||||
}
|
||||
}
|
||||
|
||||
SUdfColumnMeta *meta = &resultCol->colMeta;
|
||||
meta->bytes = 4;
|
||||
meta->type = TSDB_DATA_TYPE_INT;
|
||||
meta->scale = 0;
|
||||
meta->precision = 0;
|
||||
|
||||
|
||||
SUdfColumnData *resultData = &resultCol->colData;
|
||||
|
||||
resultData->numOfRows = block->numOfRows;
|
||||
|
||||
for (int32_t i = 0; i < resultData->numOfRows; ++i) {
|
||||
if (udfColDataIsNull(block->udfCols[0], i)) {
|
||||
udfColDataSetNull(resultCol, i);
|
||||
continue;
|
||||
}
|
||||
int32_t result = *(int32_t*)udfColDataGetData(block->udfCols[0], i);
|
||||
int j = 1;
|
||||
for (; j < block->numOfCols; ++j) {
|
||||
if (udfColDataIsNull(block->udfCols[j], i)) {
|
||||
udfColDataSetNull(resultCol, i);
|
||||
break;
|
||||
}
|
||||
|
||||
char* colData = udfColDataGetData(block->udfCols[j], i);
|
||||
result &= *(int32_t*)colData;
|
||||
}
|
||||
if (j == block->numOfCols) {
|
||||
udfColDataSet(resultCol, i, (char*)&result, false);
|
||||
}
|
||||
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
set +e
|
||||
|
||||
rm -rf /tmp/udf/libbitand.so /tmp/udf/libsqrsum.so
|
||||
mkdir -p /tmp/udf
|
||||
echo "compile udf bit_and and sqr_sum"
|
||||
gcc -fPIC -shared sh/bit_and.c -o /tmp/udf/libbitand.so
|
||||
gcc -fPIC -shared sh/sqr_sum.c -o /tmp/udf/libsqrsum.so
|
||||
echo "debug show /tmp/udf/*.so"
|
||||
ls /tmp/udf/*.so
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <math.h>
|
||||
|
||||
#include "taosudf.h"
|
||||
|
||||
DLL_EXPORT int32_t sqr_sum_init() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
DLL_EXPORT int32_t sqr_sum_destroy() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
DLL_EXPORT int32_t sqr_sum_start(SUdfInterBuf *buf) {
|
||||
*(int64_t*)(buf->buf) = 0;
|
||||
buf->bufLen = sizeof(double);
|
||||
buf->numOfResult = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
DLL_EXPORT int32_t sqr_sum(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
|
||||
double sumSquares = *(double*)interBuf->buf;
|
||||
int8_t numNotNull = 0;
|
||||
for (int32_t i = 0; i < block->numOfCols; ++i) {
|
||||
SUdfColumn* col = block->udfCols[i];
|
||||
if (!(col->colMeta.type == TSDB_DATA_TYPE_INT ||
|
||||
col->colMeta.type == TSDB_DATA_TYPE_DOUBLE)) {
|
||||
return TSDB_CODE_UDF_INVALID_INPUT;
|
||||
}
|
||||
}
|
||||
for (int32_t i = 0; i < block->numOfCols; ++i) {
|
||||
for (int32_t j = 0; j < block->numOfRows; ++j) {
|
||||
SUdfColumn* col = block->udfCols[i];
|
||||
if (udfColDataIsNull(col, j)) {
|
||||
continue;
|
||||
}
|
||||
switch (col->colMeta.type) {
|
||||
case TSDB_DATA_TYPE_INT: {
|
||||
char* cell = udfColDataGetData(col, j);
|
||||
int32_t num = *(int32_t*)cell;
|
||||
sumSquares += (double)num * num;
|
||||
break;
|
||||
}
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
char* cell = udfColDataGetData(col, j);
|
||||
double num = *(double*)cell;
|
||||
sumSquares += num * num;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
++numNotNull;
|
||||
}
|
||||
}
|
||||
|
||||
*(double*)(newInterBuf->buf) = sumSquares;
|
||||
newInterBuf->bufLen = sizeof(double);
|
||||
|
||||
if (interBuf->numOfResult == 0 && numNotNull == 0) {
|
||||
newInterBuf->numOfResult = 0;
|
||||
} else {
|
||||
newInterBuf->numOfResult = 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
DLL_EXPORT int32_t sqr_sum_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) {
|
||||
if (buf->numOfResult == 0) {
|
||||
resultData->numOfResult = 0;
|
||||
return 0;
|
||||
}
|
||||
double sumSquares = *(double*)(buf->buf);
|
||||
*(double*)(resultData->buf) = sqrt(sumSquares);
|
||||
resultData->bufLen = sizeof(double);
|
||||
resultData->numOfResult = 1;
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,161 @@
|
|||
system_content printf %OS%
|
||||
if $system_content == Windows_NT then
|
||||
return 0;
|
||||
endi
|
||||
|
||||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c udf -v 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
print ======== step1 udf
|
||||
system sh/compile_udf.sh
|
||||
sql create database udf vgroups 3;
|
||||
sql use udf;
|
||||
sql show databases;
|
||||
|
||||
sql create table t (ts timestamp, f int);
|
||||
sql insert into t values(now, 1)(now+1s, 2);
|
||||
|
||||
system_content printf %OS%
|
||||
if $system_content == Windows_NT then
|
||||
return 0;
|
||||
endi
|
||||
if $system_content == Windows_NT then
|
||||
sql create function bit_and as 'C:\\Windows\\Temp\\bitand.dll' outputtype int bufSize 8;
|
||||
sql create aggregate function sqr_sum as 'C:\\Windows\\Temp\\sqrsum.dll' outputtype double bufSize 8;
|
||||
else
|
||||
sql create function bit_and as '/tmp/udf/libbitand.so' outputtype int bufSize 8;
|
||||
sql create aggregate function sqr_sum as '/tmp/udf/libsqrsum.so' outputtype double bufSize 8;
|
||||
endi
|
||||
sql show functions;
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
sql select bit_and(f, f) from t;
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select sqr_sum(f) from t;
|
||||
if $rows != 1 then
|
||||
print expect 1, actual $rows
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 2.236067977 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table t2 (ts timestamp, f1 int, f2 int);
|
||||
sql insert into t2 values(now, 0, 0)(now+1s, 1, 1);
|
||||
sql select bit_and(f1, f2) from t2;
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 0 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select sqr_sum(f1, f2) from t2;
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 1.414213562 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql insert into t2 values(now+2s, 1, null)(now+3s, null, 2);
|
||||
sql select bit_and(f1, f2) from t2;
|
||||
print $rows , $data00 , $data10 , $data20 , $data30
|
||||
if $rows != 4 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 0 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data20 != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data30 != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select sqr_sum(f1, f2) from t2;
|
||||
print $rows, $data00
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 2.645751311 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql insert into t2 values(now+4s, 4, 8)(now+5s, 5, 9);
|
||||
sql select sqr_sum(f1-f2), sqr_sum(f1+f2) from t2;
|
||||
print $rows , $data00 , $data01
|
||||
if $rows != 1 then
|
||||
return -1;
|
||||
endi
|
||||
if $data00 != 5.656854249 then
|
||||
return -1
|
||||
endi
|
||||
if $data01 != 18.547236991 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select sqr_sum(bit_and(f2, f1)), sqr_sum(bit_and(f1, f2)) from t2;
|
||||
print $rows , $data00 , $data01
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 1.414213562 then
|
||||
return -1
|
||||
endi
|
||||
if $data01 != 1.414213562 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select sqr_sum(f2) from udf.t2 group by 1-bit_and(f1, f2) order by 1-bit_and(f1,f2);
|
||||
print $rows , $data00 , $data10 , $data20
|
||||
if $rows != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 2.000000000 then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != 9.055385138 then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != 8.000000000 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql drop function bit_and;
|
||||
sql show functions;
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != @sqr_sum@ then
|
||||
return -1
|
||||
endi
|
||||
sql drop function sqr_sum;
|
||||
sql show functions;
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -78,7 +78,7 @@ sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4),
|
|||
sql select * from tb1 where tbcol not in (1,2,3,null);
|
||||
sql select * from tb1 where tbcol + 3 <> null;
|
||||
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
|
||||
#sql select tbcol5 - tbcol3 from tb1
|
||||
sql select tbcol5 - tbcol3 from tb1
|
||||
|
||||
print =============== step4: stb
|
||||
sql select avg(tbcol) as c from stb
|
||||
|
@ -109,7 +109,7 @@ sql select * from stb where tbcol + 3 <> null;
|
|||
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d)
|
||||
sql select _wstart, count(*) from tb1 session(ts, 1m)
|
||||
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
|
||||
#sql select tbcol5 - tbcol3 from stb
|
||||
sql select tbcol5 - tbcol3 from stb
|
||||
|
||||
print =============== step5: explain
|
||||
sql explain analyze select ts from stb where -2;
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
from tabnanny import check
|
||||
import taos
|
||||
import time
|
||||
import inspect
|
||||
import traceback
|
||||
import socket
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
from util.common import *
|
||||
|
||||
PRIVILEGES_ALL = "ALL"
|
||||
PRIVILEGES_READ = "READ"
|
||||
|
@ -21,17 +22,40 @@ WEIGHT_WRITE = 3
|
|||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c1"
|
||||
BINT_COL = "c2"
|
||||
SINT_COL = "c3"
|
||||
TINT_COL = "c4"
|
||||
FLOAT_COL = "c5"
|
||||
DOUBLE_COL = "c6"
|
||||
BOOL_COL = "c7"
|
||||
INT_COL = "c_int"
|
||||
BINT_COL = "c_bint"
|
||||
SINT_COL = "c_sint"
|
||||
TINT_COL = "c_tint"
|
||||
FLOAT_COL = "c_float"
|
||||
DOUBLE_COL = "c_double"
|
||||
BOOL_COL = "c_bool"
|
||||
TINT_UN_COL = "c_utint"
|
||||
SINT_UN_COL = "c_usint"
|
||||
BINT_UN_COL = "c_ubint"
|
||||
INT_UN_COL = "c_uint"
|
||||
BINARY_COL = "c_binary"
|
||||
NCHAR_COL = "c_nchar"
|
||||
TS_COL = "c_ts"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [BOOL_COL, ]
|
||||
TS_TYPE_COL = [TS_COL, ]
|
||||
|
||||
INT_TAG = "t_int"
|
||||
|
||||
ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
|
||||
TAG_COL = [INT_TAG]
|
||||
|
||||
# insert data args:
|
||||
TIME_STEP = 10000
|
||||
NOW = int(datetime.timestamp(datetime.now()) * 1000)
|
||||
|
||||
# init db/table
|
||||
DBNAME = "db"
|
||||
STBNAME = "stb1"
|
||||
CTBNAME = "ct1"
|
||||
NTBNAME = "nt1"
|
||||
|
||||
class TDconnect:
|
||||
def __init__(self,
|
||||
|
@ -247,25 +271,26 @@ class TDTestCase:
|
|||
with taos_connect(user=user.name, passwd=user.passwd) as use:
|
||||
time.sleep(2)
|
||||
if check_priv == PRIVILEGES_ALL:
|
||||
use.query("use db")
|
||||
use.query("show tables")
|
||||
use.query("select * from ct1")
|
||||
use.query("insert into t1 (ts) values (now())")
|
||||
use.query(f"use {DBNAME}")
|
||||
use.query(f"show {DBNAME}.tables")
|
||||
use.query(f"select * from {DBNAME}.{CTBNAME}")
|
||||
use.query(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
|
||||
elif check_priv == PRIVILEGES_READ:
|
||||
use.query("use db")
|
||||
use.query("show tables")
|
||||
use.query("select * from ct1")
|
||||
use.error("insert into t1 (ts) values (now())")
|
||||
use.query(f"use {DBNAME}")
|
||||
use.query(f"show {DBNAME}.tables")
|
||||
use.query(f"select * from {DBNAME}.{CTBNAME}")
|
||||
use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
|
||||
elif check_priv == PRIVILEGES_WRITE:
|
||||
use.query("use db")
|
||||
use.query("show tables")
|
||||
use.error("select * from ct1")
|
||||
use.query("insert into t1 (ts) values (now())")
|
||||
use.query(f"use {DBNAME}")
|
||||
use.query(f"show {DBNAME}.tables")
|
||||
use.error(f"select * from {DBNAME}.{CTBNAME}")
|
||||
use.query(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
|
||||
elif check_priv is None:
|
||||
use.error("use db")
|
||||
use.error("show tables")
|
||||
use.error("select * from db.ct1")
|
||||
use.error("insert into db.t1 (ts) values (now())")
|
||||
use.error(f"use {DBNAME}")
|
||||
# use.error(f"show {DBNAME}.tables")
|
||||
use.error(f"show tables")
|
||||
use.error(f"select * from {DBNAME}.{CTBNAME}")
|
||||
use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
|
||||
|
||||
def __change_user_priv(self, user: User, pre_priv, invoke=False):
|
||||
if user.priv == pre_priv and invoke :
|
||||
|
@ -418,7 +443,7 @@ class TDTestCase:
|
|||
self.__grant_user_privileges(privilege="", dbname="db", user_name=self.__user_list[0]) ,
|
||||
self.__grant_user_privileges(privilege=" ".join(self.__privilege), user_name=self.__user_list[0]) ,
|
||||
f"GRANT {self.__privilege[0]} ON * TO {self.__user_list[0]}" ,
|
||||
f"GRANT {self.__privilege[0]} ON db.t1 TO {self.__user_list[0]}" ,
|
||||
f"GRANT {self.__privilege[0]} ON {DBNAME}.{NTBNAME} TO {self.__user_list[0]}" ,
|
||||
]
|
||||
|
||||
def __revoke_err(self):
|
||||
|
@ -430,7 +455,7 @@ class TDTestCase:
|
|||
self.__revoke_user_privileges(privilege="", dbname="db", user_name=self.__user_list[0]) ,
|
||||
self.__revoke_user_privileges(privilege=" ".join(self.__privilege), user_name=self.__user_list[0]) ,
|
||||
f"REVOKE {self.__privilege[0]} ON * FROM {self.__user_list[0]}" ,
|
||||
f"REVOKE {self.__privilege[0]} ON db.t1 FROM {self.__user_list[0]}" ,
|
||||
f"REVOKE {self.__privilege[0]} ON {DBNAME}.{NTBNAME} FROM {self.__user_list[0]}" ,
|
||||
]
|
||||
|
||||
def test_grant_err(self):
|
||||
|
@ -505,101 +530,48 @@ class TDTestCase:
|
|||
self.drop_user_error()
|
||||
self.drop_user_current()
|
||||
|
||||
def __create_tb(self):
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
create_stb_sql = f'''create table stb1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
) tags (t1 int)
|
||||
'''
|
||||
create_ntb_sql = f'''create table t1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
)
|
||||
def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, dbname=DBNAME):
|
||||
tdLog.printNoPrefix("==========step: create table")
|
||||
create_stb_sql = f'''create table {dbname}.{stb}(
|
||||
{PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
|
||||
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
|
||||
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
|
||||
) tags ({INT_TAG} int)
|
||||
'''
|
||||
tdSql.execute(create_stb_sql)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
{ i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
|
||||
|
||||
def __insert_data(self, rows):
|
||||
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
for i in range(rows):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f'''insert into ct1 values
|
||||
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
|
||||
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct4 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
|
||||
for i in range(ntbnum):
|
||||
create_ntb_sql = f'''create table {dbname}.nt{i+1}(
|
||||
{PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
|
||||
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
|
||||
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct2 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
|
||||
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
|
||||
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
for i in range(rows):
|
||||
insert_data = f'''insert into t1 values
|
||||
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
|
||||
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
|
||||
'''
|
||||
tdSql.execute(insert_data)
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
|
||||
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
|
||||
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(ctb_num):
|
||||
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
|
||||
|
||||
def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, star_time=NOW):
|
||||
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
|
||||
# from ...pytest.util.common import DataSet
|
||||
data = DataSet()
|
||||
data.get_order_set(rows)
|
||||
|
||||
for i in range(rows):
|
||||
row_data = f'''
|
||||
{data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
|
||||
{data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.utint_data[i]},
|
||||
{data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]}
|
||||
'''
|
||||
)
|
||||
tdSql.execute( f"insert into {dbname}.{NTBNAME} values ( {star_time - i * int(TIME_STEP * 1.2)}, {row_data} )" )
|
||||
|
||||
for j in range(ctb_num):
|
||||
tdSql.execute( f"insert into {dbname}.ct{j+1} values ( {star_time - j * i * TIME_STEP}, {row_data} )" )
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
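The reworked `__insert_data` above walks rows backwards from `star_time` in `TIME_STEP`-millisecond increments. A small editorial illustration (not part of the commit) of the timestamp series the normal-table loop produces, using the constants defined later in the file:

```python
# Editorial sketch: per-row timestamps used for the normal table (nt1) inserts.
from datetime import datetime

TIME_STEP = 10000                                     # 10 s in ms, as in the test constants
NOW = int(datetime.timestamp(datetime.now()) * 1000)  # current epoch time in milliseconds

def ntb_timestamps(rows: int, star_time: int = NOW, step: int = TIME_STEP):
    """Timestamps decrease by int(step * 1.2) per row, matching the insert loop above."""
    return [star_time - i * int(step * 1.2) for i in range(rows)]

print(ntb_timestamps(3))   # three strictly decreasing millisecond timestamps
```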
@@ -656,27 +628,81 @@ class TDTestCase:
with taos_connect(user=self.__user_list[0], passwd=f"new{self.__passwd_list[0]}") as user:
# user = conn
# a normal user cannot create users
tdLog.printNoPrefix("==========step5: normal user can not create user")
tdLog.printNoPrefix("==========step4.1: normal user can not create user")
user.error("create use utest1 pass 'utest1pass'")
# a normal user can list users
tdLog.printNoPrefix("==========step6: normal user can show user")
tdLog.printNoPrefix("==========step4.2: normal user can show user")
user.query("show users")
assert user.queryRows == self.users_count + 1
# a normal user cannot change another user's password
tdLog.printNoPrefix("==========step7: normal user can not alter other user pass")
tdLog.printNoPrefix("==========step4.3: normal user can not alter other user pass")
user.error(self.__alter_pass_sql(self.__user_list[1], self.__passwd_list[1] ))
user.error(self.__alter_pass_sql("root", "taosdata_root" ))
# a normal user can change its own password
tdLog.printNoPrefix("==========step8: normal user can alter owner pass")
tdLog.printNoPrefix("==========step4.4: normal user can alter owner pass")
user.query(self.__alter_pass_sql(self.__user_list[0], self.__passwd_list[0]))
# a normal user cannot drop any user, including itself
tdLog.printNoPrefix("==========step9: normal user can not drop any user ")
tdLog.printNoPrefix("==========step4.5: normal user can not drop any user ")
user.error(f"drop user {self.__user_list[0]}")
user.error(f"drop user {self.__user_list[1]}")
user.error("drop user root")

tdLog.printNoPrefix("==========step5: enable info")
taos1_conn = taos.connect(user=self.__user_list[1], password=f"new{self.__passwd_list[1]}")
taos1_conn.query(f"show databases")
tdSql.execute(f"alter user {self.__user_list[1]} enable 0")
tdSql.execute(f"alter user {self.__user_list[2]} enable 0")
taos1_except = True
try:
taos1_conn.query("show databases")
except BaseException:
taos1_except = False
if taos1_except:
tdLog.exit("taos 1 connect except error not occured, when enable == 0, should not r/w ")
else:
tdLog.info("taos 1 connect except error occured, enable == 0")

taos2_except = True
try:
taos.connect(user=self.__user_list[2], password=f"new{self.__passwd_list[2]}")
except BaseException:
taos2_except = False
if taos2_except:
tdLog.exit("taos 2 connect except error not occured, when enable == 0, should not connect")
else:
tdLog.info("taos 2 connect except error occured, enable == 0, can not login")

tdLog.printNoPrefix("==========step6: sysinfo info")
taos3_conn = taos.connect(user=self.__user_list[3], password=f"new{self.__passwd_list[3]}")
taos3_conn.query(f"show dnodes")
taos3_conn.query(f"show {DBNAME}.vgroups")
tdSql.execute(f"alter user {self.__user_list[3]} sysinfo 0")
tdSql.execute(f"alter user {self.__user_list[4]} sysinfo 0")
taos3_except = True
try:
taos3_conn.query(f"show dnodes")
taos3_conn.query(f"show {DBNAME}.vgroups")
except BaseException:
taos3_except = False
if taos3_except:
tdLog.exit("taos 3 query except error not occured, when sysinfo == 0, should not show info:dnode/monde/qnode ")
else:
tdLog.info("taos 3 query except error occured, sysinfo == 0, can not show dnode/vgroups")

taos4_conn = taos.connect(user=self.__user_list[4], password=f"new{self.__passwd_list[4]}")
taos4_except = True
try:
taos4_conn.query(f"show mnodes")
taos4_conn.query(f"show {DBNAME}.vgroups")
except BaseException:
taos4_except = False
if taos4_except:
tdLog.exit("taos 4 query except error not occured, when sysinfo == 0, when enable == 0, should not show info:dnode/monde/qnode")
else:
tdLog.info("taos 4 query except error occured, sysinfo == 0, can not show dnode/vgroups")

# test: root drops a normal user
tdLog.printNoPrefix("==========step10: super user drop normal user")
tdLog.printNoPrefix("==========step7: super user drop normal user")
self.test_drop_user()

tdSql.query("show users")
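The enable/sysinfo hunk above repeats one probe pattern: flip the attribute with `alter user ... enable 0` (or `sysinfo 0`), then confirm that queries over an existing or new connection raise. A minimal standalone sketch of that probe, assuming a locally reachable TDengine server with default root credentials, the `taos` Python connector, and a placeholder account `utest`/`utestpass` (names are illustrative, not from the commit):

```python
# Editorial sketch, not part of the commit: verify that a disabled user can no longer query.
import taos

def query_fails(conn, sql: str) -> bool:
    """Return True when the query raises, i.e. the privilege change took effect."""
    try:
        conn.query(sql)
        return False
    except BaseException:
        return True

root = taos.connect(user="root", password="taosdata")       # default superuser credentials
root.execute("create user utest pass 'utestpass'")          # placeholder account
user_conn = taos.connect(user="utest", password="utestpass")

root.execute("alter user utest enable 0")                   # same statement shape as the test
assert query_fails(user_conn, "show databases"), "disabled user could still query"

root.execute("drop user utest")                             # clean up the placeholder account
```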
@@ -1,4 +1,4 @@
import datetime
from datetime import datetime
import time

from dataclasses import dataclass

@@ -8,6 +8,7 @@ from util.sql import *
from util.cases import *
from util.dnodes import *
from util.constant import *
from util.common import *

PRIMARY_COL = "ts"


@@ -38,7 +39,7 @@ TAG_COL = [INT_TAG]

# insert data args:
TIME_STEP = 10000
NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
NOW = int(datetime.timestamp(datetime.now()) * 1000)

# init db/table
DBNAME = "db"
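The hunks above switch `import datetime` to `from datetime import datetime` and shorten the `NOW` expression accordingly. A quick editorial check (not part of the commit) that the two spellings yield the same epoch-millisecond value:

```python
# Editorial sketch: the old and new NOW expressions are equivalent.
import datetime as dt_module            # stands in for the removed `import datetime`
from datetime import datetime           # the import the commit switches to

old_now = int(dt_module.datetime.timestamp(dt_module.datetime.now()) * 1000)
new_now = int(datetime.timestamp(datetime.now()) * 1000)

# Both are current Unix time in milliseconds; they differ only by the instants
# elapsed between the two calls.
assert abs(new_now - old_now) < 1000
```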
@@ -47,40 +48,6 @@ CTBNAME = "ct1"
NTBNAME = "nt1"


@dataclass
class DataSet:
ts_data : List[int] = None
int_data : List[int] = None
bint_data : List[int] = None
sint_data : List[int] = None
tint_data : List[int] = None
int_un_data : List[int] = None
bint_un_data: List[int] = None
sint_un_data: List[int] = None
tint_un_data: List[int] = None
float_data : List[float] = None
double_data : List[float] = None
bool_data : List[int] = None
binary_data : List[str] = None
nchar_data : List[str] = None

def __post_init__(self):
self.ts_data = []
self.int_data = []
self.bint_data = []
self.sint_data = []
self.tint_data = []
self.int_un_data = []
self.bint_un_data = []
self.sint_un_data = []
self.tint_un_data = []
self.float_data = []
self.double_data = []
self.bool_data = []
self.binary_data = []
self.nchar_data = []


@dataclass
class SMAschema:
creation : str = "CREATE"
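The removed `DataSet` dataclass declares every field as `None` and fills in empty lists in `__post_init__`; the test now apparently pulls an equivalent helper from `util.common` (see the added `from util.common import *` and the commented `DataSet` import earlier). For reference, a trimmed editorial sketch of the same idea using `field(default_factory=list)`, which avoids the `__post_init__` boilerplate; the class and field subset here are illustrative only:

```python
# Editorial sketch: a DataSet-style container with per-instance empty lists.
from dataclasses import dataclass, field
from typing import List

@dataclass
class DataSetSketch:
    ts_data:    List[int]   = field(default_factory=list)
    int_data:   List[int]   = field(default_factory=list)
    float_data: List[float] = field(default_factory=list)
    nchar_data: List[str]   = field(default_factory=list)

ds = DataSetSketch()
ds.int_data.append(1)   # each instance gets its own lists, no __post_init__ needed
```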
@@ -164,10 +131,6 @@ class SMAschema:
del self.other[k]



# from ...pytest.util.sql import *
# from ...pytest.util.constant import *

class TDTestCase:
updatecfgDict = {"querySmaOptimize": 1}


@@ -469,14 +432,12 @@ class TDTestCase:
err_sqls.append( SMAschema(index_flag="SMA INDEX ,", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
err_sqls.append( SMAschema(index_name="tbname", tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )


# current_set

cur_sqls.append( SMAschema(max_delay="",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
cur_sqls.append( SMAschema(watermark="",index_name="sma_index_2",tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )
cur_sqls.append( SMAschema(sliding="",index_name='sma_index_3',tbname=STBNAME, func=(f"min({INT_COL})",f"max({INT_COL})") ) )


return err_sqls, cur_sqls

def test_create_sma(self):

@@ -512,102 +473,48 @@ class TDTestCase:
self.test_create_sma()
self.test_drop_sma()

pass

def __create_tb(self):
def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, dbname=DBNAME):
tdLog.printNoPrefix("==========step: create table")
create_stb_sql = f'''create table {STBNAME}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
create_stb_sql = f'''create table {dbname}.{stb}(
{PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
) tags ({INT_TAG} int)
'''
create_ntb_sql = f'''create table {NTBNAME}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)

for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
for i in range(ntbnum):
create_ntb_sql = f'''create table {dbname}.nt{i+1}(
{PRIMARY_COL} timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
)
'''
tdSql.execute(create_ntb_sql)

def __data_set(self, rows):
data_set = DataSet()
for i in range(ctb_num):
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')

def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, star_time=NOW):
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
# from ...pytest.util.common import DataSet
data = DataSet()
data.get_order_set(rows, bint_step=2)

for i in range(rows):
data_set.ts_data.append(NOW + 1 * (rows - i))
data_set.int_data.append(rows - i)
data_set.bint_data.append(11111 * (rows - i))
data_set.sint_data.append(111 * (rows - i) % 32767)
data_set.tint_data.append(11 * (rows - i) % 127)
data_set.int_un_data.append(rows - i)
data_set.bint_un_data.append(11111 * (rows - i))
data_set.sint_un_data.append(111 * (rows - i) % 32767)
data_set.tint_un_data.append(11 * (rows - i) % 127)
data_set.float_data.append(1.11 * (rows - i))
data_set.double_data.append(1100.0011 * (rows - i))
data_set.bool_data.append((rows - i) % 2)
data_set.binary_data.append(f'binary{(rows - i)}')
data_set.nchar_data.append(f'nchar_测试_{(rows - i)}')

return data_set

def __insert_data(self):
tdLog.printNoPrefix("==========step: start inser data into tables now.....")
data = self.__data_set(rows=self.rows)

# now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null'''
zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0"

for i in range(self.rows):
row_data = f'''
{data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
{data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]},
{data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]}
'''
neg_row_data = f'''
{-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]},
{data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]},
{1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]}
{data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.utint_data[i]},
{data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]}
'''
tdSql.execute( f"insert into {dbname}.{NTBNAME} values ( {star_time - i * int(TIME_STEP * 1.2)}, {row_data} )" )

tdSql.execute(
f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")

tdSql.execute(
f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29) }, {null_data} )")

tdSql.execute(
f"insert into ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )")

tdSql.execute(
f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )")
for j in range(ctb_num):
tdSql.execute( f"insert into {dbname}.ct{j+1} values ( {star_time - j * i * TIME_STEP}, {row_data} )" )

def run(self):
self.rows = 10
@@ -616,14 +523,60 @@ class TDTestCase:

tdLog.printNoPrefix("==========step1:create table in normal database")
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.__create_tb(dbname=DBNAME)
self.__insert_data(rows=self.rows)
self.all_test()

# # from ...pytest.util.sql import *

# drop databases, create same name db, stb and sma index
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.__create_tb(dbname=DBNAME)
self.__insert_data(rows=self.rows,star_time=NOW + self.rows * 2 * TIME_STEP)
tdLog.printNoPrefix("==========step1.1 : create a tsma index and checkdata")
tdSql.execute(f"create sma index {DBNAME}.sma_index_name1 on {DBNAME}.{STBNAME} function(max({INT_COL}),max({BINT_COL}),min({INT_COL})) interval(6m,10s) sliding(6m)")
self.__insert_data(rows=self.rows)
tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
tdSql.checkData(0, 0, self.rows - 1)
tdSql.checkData(0, 1, (self.rows - 1) * 2 )
tdSql.checkData(tdSql.queryRows - 1, 2, 0)
# tdSql.checkData(0, 2, 0)

tdLog.printNoPrefix("==========step1.2 : alter table schema, drop col without index")
tdSql.execute(f"alter stable {DBNAME}.{STBNAME} drop column {BINARY_COL}")
tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
tdSql.checkData(0, 0, self.rows - 1)
tdSql.checkData(0, 1, (self.rows - 1) * 2 )
tdSql.checkData(tdSql.queryRows - 1, 2, 0)

tdLog.printNoPrefix("==========step1.3 : alter table schema, drop col with index")
# TODO: TD-18047, can not drop col, when col in tsma-index and tsma-index is not dropped.
tdSql.error(f"alter stable {DBNAME}.stb1 drop column {BINT_COL}")

tdLog.printNoPrefix("==========step1.4 : alter table schema, add col")
tdSql.execute(f"alter stable {DBNAME}.{STBNAME} add column {BINT_COL}_1 bigint")
tdSql.execute(f"insert into {DBNAME}.{CTBNAME} ({PRIMARY_COL}, {BINT_COL}_1) values(now(), 111)")
tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
tdSql.checkData(0, 0, self.rows - 1)
tdSql.checkData(0, 1, (self.rows - 1) * 2 )
tdSql.checkData(tdSql.queryRows - 1, 2, 0)
# tdSql.checkData(0, 2, 0)
tdSql.query(f"select max({BINT_COL}_1) from {DBNAME}.{STBNAME} ")
tdSql.checkData(0, 0 , 111)

tdSql.execute(f"flush database {DBNAME}")

tdLog.printNoPrefix("==========step1.5 : drop child table")
tdSql.execute(f"drop table {CTBNAME}")
tdSql.query(f"select max({INT_COL}), max({BINT_COL}), min({INT_COL}) from {DBNAME}.{STBNAME} interval(6m,10s) sliding(6m)")
tdSql.checkData(0, 0, self.rows - 1)
tdSql.checkData(0, 1, (self.rows - 1) * 2 )
tdSql.checkData(tdSql.queryRows - 1, 2, 0)

tdLog.printNoPrefix("==========step1.6 : drop stable")
tdSql.execute(f"drop table {STBNAME}")
tdSql.error(f"select * from {DBNAME}.{STBNAME}")

self.all_test()

tdLog.printNoPrefix("==========step2:create table in rollup database")

@@ -640,7 +593,6 @@ class TDTestCase:

tdSql.execute("flush database db ")


tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
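Step 1.1 above creates a time-range SMA (tsma) index and then checks that an `interval(6m,10s) sliding(6m)` aggregate still returns the expected extrema. A hedged sketch of the same flow through the plain `taos` connector rather than the `tdSql` test wrapper; the database, super table, and column names below are placeholders, since the test builds them from constants defined elsewhere in the file:

```python
# Editorial sketch: create an SMA index and run the aggregate the test checks.
import taos

conn = taos.connect(user="root", password="taosdata", database="db")   # placeholder db name
conn.execute(
    "create sma index db.sma_index_name1 on db.stb1 "                  # stb1/c_* are placeholders
    "function(max(c_int), max(c_bigint), min(c_int)) "
    "interval(6m,10s) sliding(6m)"
)
result = conn.query(
    "select max(c_int), max(c_bigint), min(c_int) "
    "from db.stb1 interval(6m,10s) sliding(6m)"
)
for row in result.fetch_all():
    print(row)
```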
@@ -27,7 +27,7 @@ python3 ./test.py -f 1-insert/alter_stable.py
python3 ./test.py -f 1-insert/alter_table.py
python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
python3 ./test.py -f 1-insert/table_comment.py
python3 ./test.py -f 1-insert/time_range_wise.py
#python3 ./test.py -f 1-insert/time_range_wise.py #TD-18130
python3 ./test.py -f 1-insert/block_wise.py
python3 ./test.py -f 1-insert/create_retentions.py
python3 ./test.py -f 1-insert/table_param_ttl.py

@@ -196,28 +196,28 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3

# vnode case
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1
# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1
python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1


python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3