test: finish 1-insert/opentsdb_json_taosc_insert.py

This commit is contained in:
jiajingbin 2022-05-31 19:01:42 +08:00
commit 4abcac3907
111 changed files with 5309 additions and 4107 deletions

View File

@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.16)
cmake_minimum_required(VERSION 3.0)
project(
TDengine

View File

@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.16)
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)

View File

@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.16)
cmake_minimum_required(VERSION 3.0)
MESSAGE("Current system is ${CMAKE_SYSTEM_NAME}")

View File

@ -4,6 +4,8 @@ title: Supported Data Types
description: "Data types supported by TDengine: timestamp, float, JSON, and more"
---
## Timestamp
When using TDengine, the most important thing is the timestamp. A timestamp must be specified when creating tables and inserting records, and when querying historical records. Timestamps follow these rules:
- The time format is `YYYY-MM-DD HH:mm:ss.MS`, with a default time precision of milliseconds. For example: `2017-08-12 18:25:58.128`
@ -12,39 +14,59 @@ description: "Data types supported by TDengine: timestamp, float, JSON, and more"
- Epoch time: a timestamp can also be a long integer representing the number of milliseconds since 1970-01-01 00:00:00.000 (UTC/GMT). (Accordingly, if the time precision of the database is set to "microseconds", a long-integer timestamp represents the number of microseconds since 1970-01-01 00:00:00.000 (UTC/GMT); the logic for nanosecond precision is similar.)
- Times can be added and subtracted. For example, now-2h means the query time is moved back 2 hours (the most recent 2 hours). The time unit after the number can be b (nanoseconds), u (microseconds), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), or w (weeks). For example, `select * from t1 where ts > now-2w and ts <= now-1w` queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the time unit can also be n (natural month) or y (natural year).
TDengine's default timestamp precision is milliseconds, but microseconds and nanoseconds are also supported via the PRECISION parameter of `CREATE DATABASE`. (Nanosecond precision is supported since version 2.1.5.0.)
TDengine's default timestamp precision is milliseconds, but microseconds and nanoseconds are also supported via the PRECISION parameter of `CREATE DATABASE`.
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
## Data Types
In TDengine, the following 10 data types can be used in the data model of an ordinary table.
In TDengine, the following data types can be used in the data model of an ordinary table.
| # | **Type** | **Bytes** | **Description** |
| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds and nanoseconds are supported. Counted from 1970-01-01 00:00:00.000 (UTC/GMT); times earlier than that were not allowed. (This time range restriction was removed in version 2.0.18.0.) (Nanosecond precision is supported since version 2.1.5.0.) |
| 2 | INT | 4 | Integer, range [-2^31+1, 2^31-1]; -2^31 is used as NULL |
| 3 | BIGINT | 8 | Long integer, range [-2^63+1, 2^63-1]; -2^63 is used as NULL |
| 4 | FLOAT | 4 | Floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double-precision floating point, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 6 | BINARY | User Defined | Single-byte string, recommended only for ASCII visible characters; multi-byte characters such as Chinese must use nchar. Theoretically up to 16374 bytes long. binary only supports string input, and strings must be enclosed in single quotes. A size must be specified when used; e.g., binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte of storage, for a fixed total of 20 bytes; an error is reported if the string exceeds 20 bytes. A single quote inside the string can be represented by the escape character backslash followed by a single quote, i.e. `\'`. |
| 7 | SMALLINT | 2 | Short integer, range [-32767, 32767]; -32768 is used as NULL |
| 8 | TINYINT | 1 | Single-byte integer, range [-127, 127]; -128 is used as NULL |
| 9 | BOOL | 1 | Boolean, {true, false} |
| 10 | NCHAR | User Defined | String that can contain multi-byte characters such as Chinese. Each nchar character occupies 4 bytes of storage. Strings are enclosed in single quotes, and single quotes inside the string are escaped with `\'`. A size must be specified when using nchar; a column of type nchar(10) stores at most 10 nchar characters and occupies a fixed 40 bytes. An error is reported if the string exceeds the declared length. |
| 11 | JSON | | json data type; only tags can be in json format |
| 1 | TIMESTAMP | 8 | Timestamp. Default precision is milliseconds; microseconds and nanoseconds are supported. See the previous section for details. |
| 2 | INT | 4 | Integer, range [-2^31, 2^31-1] |
| 3 | INT UNSIGNED | 4 | Unsigned integer, range [0, 2^32-1] |
| 4 | BIGINT | 8 | Long integer, range [-2^63, 2^63-1] |
| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, range [0, 2^64-1] |
| 6 | FLOAT | 4 | Floating point, 6-7 significant digits, range [-3.4E38, 3.4E38] |
| 7 | DOUBLE | 8 | Double-precision floating point, 15-16 significant digits, range [-1.7E308, 1.7E308] |
| 8 | BINARY | User Defined | Single-byte string, recommended only for ASCII visible characters; multi-byte characters such as Chinese must use nchar. |
| 9 | SMALLINT | 2 | Short integer, range [-32768, 32767] |
| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, range [0, 65535] |
| 11 | TINYINT | 1 | Single-byte integer, range [-128, 127] |
| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, range [0, 255] |
| 13 | BOOL | 1 | Boolean, {true, false} |
| 14 | NCHAR | User Defined | String that can contain multi-byte characters such as Chinese. Each nchar character occupies 4 bytes of storage. Strings are enclosed in single quotes, and single quotes inside the string are escaped with `\'`. A size must be specified when using nchar; a column of type nchar(10) stores at most 10 nchar characters and occupies a fixed 40 bytes. An error is reported if the string exceeds the declared length. |
| 15 | JSON | | json data type; only tags can be in json format |
| 16 | VARCHAR | User Defined | Alias of the BINARY type |
:::tip
TDengine is case-insensitive to English characters in SQL statements and automatically converts them to lowercase for execution. Case-sensitive strings and passwords therefore need to be enclosed in single quotes.
:::
:::note
Although the BINARY type supports byte-level binary characters in the underlying storage, different programming languages do not guarantee consistent handling of binary data, so it is recommended to store only ASCII visible characters in BINARY columns and avoid invisible characters. Multi-byte data such as Chinese characters must be stored with the NCHAR type. If Chinese characters are forced into a BINARY column, reads and writes may sometimes work, but the data carries no character-set information and can easily become garbled or even corrupted.
- TDengine is case-insensitive to English characters in SQL statements and automatically converts them to lowercase for execution. Case-sensitive strings and passwords therefore need to be enclosed in single quotes.
- Although the BINARY type supports byte-level binary characters in the underlying storage, different programming languages do not guarantee consistent handling of binary data, so it is recommended to store only ASCII visible characters in BINARY columns and avoid invisible characters. Multi-byte data such as Chinese characters must be stored with the NCHAR type. If Chinese characters are forced into a BINARY column, reads and writes may sometimes work, but the data carries no character-set information and can easily become garbled or even corrupted.
- A BINARY value can theoretically be up to 16374 bytes long. binary only supports string input, and strings must be enclosed in single quotes. A size must be specified when used; e.g., binary(20) defines a string of at most 20 single-byte characters, each occupying 1 byte of storage, for a fixed total of 20 bytes; an error is reported if the string exceeds 20 bytes. A single quote inside the string can be represented by the escape character backslash followed by a single quote, i.e. `\'`.
- Numeric values in SQL statements are treated as integer or floating-point depending on whether a decimal point is present or scientific notation is used, so beware of overflow for the corresponding type. For example, 9999999999999999999 is considered to exceed the upper bound of a long integer and overflows, while 9999999999999999999.0 is considered a valid floating-point number.
:::
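As an illustration of combining these types (a sketch, not from the original docs; the super table `sensors` is hypothetical), note that per the table above a JSON tag must be the only tag of its table:
```sql
-- Hypothetical super table mixing several of the types above.
-- JSON may only be used for tags, and a JSON tag stands alone.
CREATE STABLE sensors (ts TIMESTAMP, val DOUBLE, note NCHAR(16)) TAGS (info JSON);
```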
## Constants
TDengine supports constants of multiple types, detailed in the table below:
| # | **Syntax** | **Type** | **Description** |
| --- | :-------: | --------- | -------------------------------------- |
| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. If the input exceeds the range of BIGINT, TDengine truncates the value as BIGINT. |
| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. TDengine decides whether a numeric value is an integer or a float by whether a decimal point is present or scientific notation is used. |
| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
| 4 | 'abc' | BINARY | Content enclosed in single quotes is a string literal of type BINARY, and the size of the BINARY is the actual number of characters. A single quote inside the string can be represented by the escape character backslash followed by a single quote, i.e. \'. |
| 5 | "abc" | BINARY | Content enclosed in double quotes is a string literal of type BINARY, and the size of the BINARY is the actual number of characters. A double quote inside the string can be represented by the escape character backslash followed by a double quote, i.e. \". |
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the string literal that follows is interpreted as the TIMESTAMP type. The string must be in the format YYYY-MM-DD HH:mm:ss.MS, and its time precision is that of the current database. |
| 7 | {TRUE \| FALSE} | BOOL | Boolean literal. |
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL literal. Can be used with any type. |
:::note
Numeric values in SQL statements are treated as integer or floating-point depending on whether a decimal point is present or scientific notation is used, so beware of overflow for the corresponding type. For example, 9999999999999999999 is considered to exceed the upper bound of a long integer and overflows, while 9999999999999999999.0 is considered a valid floating-point number.
- TDengine decides whether a numeric value is an integer or a float by whether a decimal point is present or scientific notation is used, so beware of overflow for the corresponding type. For example, 9999999999999999999 is considered to exceed the upper bound of a long integer and overflows, while 9999999999999999999.0 is considered a valid floating-point number.
:::

View File

@ -321,6 +321,32 @@ taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3
{"lower_bin":27, "upper_bin":inf, "count":1} |
```
### ELAPSED
```mysql
SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
```
**Description**: The elapsed function expresses the continuous time span within a statistical period; combined with the twa function, it can be used to compute the area under a statistical curve. When a window is specified with the INTERVAL clause, it computes the time range covered by data within each window of the given time range; without an INTERVAL clause, it returns the time range covered by data across the entire given time range. Note that ELAPSED returns not the absolute length of the time range, but the number of units obtained by dividing that length by time_unit (see the usage sketch below).
**Return value type**: Double
**Applicable column type**: Timestamp
**Supported versions**: 2.6.0.0 and later.
**Applicable to**: tables, super tables, and the outer query of a nested query
**Explanations**:
- The field_name parameter can only be the first column of a table, i.e., the timestamp primary key column.
- The result is returned in the time unit specified by the time_unit parameter, with the database's time precision as the minimum unit. If time_unit is not specified, the database's time precision is used as the time unit.
- It can be combined with interval to return the timestamp difference of each time window. Note in particular that, except for the first and last windows, the timestamp difference of every intermediate window equals the window length.
- order by asc/desc does not affect the computed difference.
- For super tables, it must be combined with a group by tbname clause and cannot be used directly.
- For ordinary tables, it cannot be combined with a group by clause.
- For nested queries, it is valid only when the inner query outputs an implicit timestamp column. For example, in the statement select elapsed(ts) from (select diff(value) from sub1), the diff function makes the inner query output an implicit timestamp column, which is the primary key column and can be used as the first parameter of elapsed. By contrast, in select elapsed(ts) from (select * from sub1), the ts column no longer carries the meaning of a primary key once output to the outer query, so elapsed cannot be used. In addition, since elapsed is strongly dependent on the timeline, a form such as select elapsed(ts) from (select diff(value) from st group by tbname) returns a result but has no practical meaning, and such usage will be restricted later.
- It cannot be mixed with the leastsquares, diff, derivative, top, bottom, last_row, or interp functions.
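A minimal usage sketch (not from the original document), assuming a hypothetical subtable `d1001` whose first column `ts` is the timestamp primary key; it returns how many 10-second units are covered by data within the given range:
```mysql
SELECT ELAPSED(ts, 10s) FROM d1001
  WHERE ts >= '2022-03-28 09:00:00' AND ts < '2022-03-28 10:00:00';
```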
## Selection Functions
When using any selection function, you can also output the ts column or tag columns (including tbname), which makes it easy to tell which data rows the selected values came from.
@ -1438,35 +1464,6 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- This function applies to both inner and outer queries.
- Supported since version 2.6.0.x
### Arithmetic Operations
```
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
```
**Description**: The result of addition, subtraction, multiplication, division, or modulo between values of one or more columns of a table/super table.
**Return data type**: double-precision floating point.
**Applicable columns**: cannot be applied to fields of timestamp, binary, nchar, or bool type.
**Applicable to**: tables and super tables.
**Usage notes**:
- Computation between two or more columns is supported, and parentheses can be used to control operation precedence;
- NULL fields do not participate in the computation; if a row involved contains NULL, the result for that row is NULL.
```
taos> SELECT current + voltage * phase FROM d1001;
(current+(voltage*phase)) |
============================
78.190000713 |
84.540003240 |
80.810000718 |
Query OK, 3 row(s) in set (0.001046s)
```
### STATECOUNT
```

View File

@ -85,3 +85,44 @@ title: TDengine Parameter Limits and Reserved Keywords
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
| _WSTOP | _WDURATION | _ROWTS | | |
## Special Notes
### TBNAME
`TBNAME` can be regarded as a special tag of a super table, representing the name of a subtable.
To get the names of all subtables of a super table together with their tag information:
```mysql
SELECT TBNAME, location FROM meters;
```
To count the number of subtables under a super table:
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
Both of the above queries support only tag (TAGS) filter conditions in their WHERE clause. For example:
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
d1004 | California.SanFrancisco |
d1003 | California.SanFrancisco |
d1002 | California.LosAngeles |
d1001 | California.LosAngeles |
Query OK, 4 row(s) in set (0.000881s)
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
count(tbname) |
========================
2 |
Query OK, 1 row(s) in set (0.001091s)
```
### _QSTART/_QSTOP/_QDURATION
These represent the start, end, and duration of the query's filter window.
### _WSTART/_WSTOP/_WDURATION
In window-partitioned aggregate queries (e.g., interval/session window/state window), these represent the start, end, and duration of each partition window.
### _c0/_ROWTS
_c0 _ROWTS 等价,表示表或超级表的第一列

View File

@ -1 +0,0 @@
label: Parameter Limits and Reserved Keywords

View File

@ -0,0 +1,66 @@
---
sidebar_label: Operators
title: Operators
---
## Arithmetic Operators
| # | **Operator** | **Supported Types** | **Description** |
| --- | :--------: | -------------- | -------------------------- |
| 1 | +, - | Numeric types | Express positive and negative numbers, unary operators |
| 2 | +, - | Numeric types | Addition and subtraction, binary operators |
| 3 | \*, / | Numeric types | Multiplication and division, binary operators |
| 4 | % | Numeric types | Modulo operation, binary operator |
## Bitwise Operators
| # | **Operator** | **Supported Types** | **Description** |
| --- | :--------: | -------------- | ------------------ |
| 1 | & | Numeric types | Bitwise AND, binary operator |
| 2 | \| | Numeric types | Bitwise OR, binary operator |
## JSON Operator
The `->` operator extracts values by key from a column of JSON type. The left operand of `->` is the column identifier and the right operand is a string constant for the key; for example, `col->'name'` returns the value of the key `'name'`.
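A sketch (not from the original text), assuming a hypothetical super table `st` with a JSON tag `info`:
```sql
-- filter on the value of the key 'city' in the JSON tag "info"
SELECT * FROM st WHERE info->'city' = 'LosAngeles';
```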
## Set Operators
Set operators combine the results of two queries into one result. A query containing set operators is called a compound query. In a compound query, the corresponding expressions in each query's select list must match in number, and the result type is determined by the first query; the result types of subsequent queries must be convertible to those of the first query, with the same conversion rules as the CAST function.
TDengine supports the `UNION ALL` and `UNION` operators. UNION ALL combines the result sets returned by the queries without deduplication, while UNION combines the result sets and removes duplicates before returning. A single SQL statement supports at most 100 set operators.
## Comparison Operators
| # | **Operator** | **Supported Types** | **Description** |
| --- | :---------------: | -------------------------------------------------------------------- | -------------------- |
| 1 | = | All types except BLOB, MEDIUMBLOB, and JSON | Equal |
| 2 | <\>, != | All types except BLOB, MEDIUMBLOB, and JSON, and not the table's timestamp primary key column | Not equal |
| 3 | \>, \< | All types except BLOB, MEDIUMBLOB, and JSON | Greater than, less than |
| 4 | \>=, \<= | All types except BLOB, MEDIUMBLOB, and JSON | Greater than or equal to, less than or equal to |
| 5 | IS [NOT] NULL | All types | Whether the value is NULL |
| 6 | [NOT] BETWEEN AND | All types except BOOL, BLOB, MEDIUMBLOB, and JSON | Closed-interval comparison |
| 7 | IN | All types except BLOB, MEDIUMBLOB, and JSON, and not the table's timestamp primary key column | Equal to any value in the list |
| 8 | LIKE | BINARY, NCHAR, and VARCHAR | Wildcard matching |
| 9 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression matching |
| 10 | CONTAINS | JSON | Whether a key exists in the JSON |
A LIKE condition performs matching with a wildcard string, under the following rules:
- '%' (percent sign) matches 0 to any number of characters; '\_' (underscore) matches a single arbitrary ASCII character.
- To match an underscore that is literally part of the string, write \_ in the wildcard string, i.e., escape it with a backslash.
- A wildcard string can be at most 100 bytes long. Overly long wildcard strings are not recommended, as they may severely degrade the performance of the LIKE operation.
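For example (a sketch, assuming the hypothetical super table `meters` with a string tag `location` used elsewhere in these docs):
```sql
-- matches locations beginning with "California."
SELECT * FROM meters WHERE location LIKE 'California.%';
```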
MATCH and NMATCH conditions perform matching with regular expressions, under the following rules:
- Regular expressions conforming to the POSIX standard are supported; see Regular Expressions for the exact specification.
- Regular-expression filtering applies only to subtable names (i.e., tbname) and string-typed tag values; filtering on ordinary columns is not supported.
- The string to be matched by a regular expression cannot exceed 128 bytes. The maximum allowed length can be adjusted with the maxRegexStringLen parameter, a client-side configuration parameter that takes effect after the client is restarted.
## Logical Operators
| # | **Operator** | **Supported Types** | **Description** |
| --- | :--------: | -------------- | --------------------------------------------------------------------------- |
| 1 | AND | BOOL | Logical AND; returns TRUE if both conditions are TRUE, and FALSE if either is FALSE |
| 2 | OR | BOOL | Logical OR; returns TRUE if either condition is TRUE, and FALSE if both are FALSE |
When evaluating logical conditions, TDengine performs short-circuit optimization: for AND, if the first condition is FALSE, the second is not evaluated and FALSE is returned directly; for OR, if the first condition is TRUE, the second is not evaluated and TRUE is returned directly.

View File

@ -38,7 +38,7 @@ taosdump can be installed in two ways:
:::tip
- taosdump versions after 1.4.1 provide the `-I` parameter for parsing the schema and data of avro files; if the `-s` parameter is specified, only the schema is parsed.
- Backups with taosdump versions after 1.4.2 use the batch size specified by the `-B` parameter, which defaults to 16384. If "Error actual dump .. batch .." occurs in some environments because of insufficient network speed or disk performance, you can try challenging the `-B` parameter to a smaller value.
- Backups with taosdump versions after 1.4.2 use the batch size specified by the `-B` parameter, which defaults to 16384. If "Error actual dump .. batch .." occurs in some environments because of insufficient network speed or disk performance, you can try adjusting the `-B` parameter to a smaller value.
:::

View File

@ -7,7 +7,7 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDeng
## What is Kafka Connect?
Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, and file systems, to connect to Kafka easily. Data can flow from other systems to Kafka via Kafka Connect, and from Kafka to other systems via Kafka Connect. Plugins that read data from other systems are called Source Connectors, and plugins that write data to other systems are called Sink Connectors. Neither Source Connectors nor Sink Connectors connect to the Kafka Broker directly: a Source Connector hands its data to Kafka Connect, and a Sink Connector receives data from Kafka Connect.
Kafka Connect is a component of [Apache Kafka](https://kafka.apache.org/) that enables other systems, such as databases, cloud services, and file systems, to connect to Kafka easily. Data can flow from other systems to Kafka via Kafka Connect, and from Kafka to other systems via Kafka Connect. Plugins that read data from other systems are called Source Connectors, and plugins that write data to other systems are called Sink Connectors. Neither Source Connectors nor Sink Connectors connect to the Kafka Broker directly: a Source Connector hands its data to Kafka Connect, and a Sink Connector receives data from Kafka Connect.
![TDengine Database Kafka Connector -- Kafka Connect structure](kafka/Kafka_Connect.webp)
@ -17,7 +17,7 @@ TDengine Source Connector is used to read data from TDengine in real time and send
## What is Confluent?
Confluent adds many extensions on top of Kafka, including:
[Confluent](https://www.confluent.io/) adds many extensions on top of Kafka, including:
1. Schema Registry
2. REST Proxy
@ -81,10 +81,10 @@ Development: false
git clone https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
The script above first clones the project source code and then compiles and packages it with Maven. After packaging, the plugin's zip package is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin installation path. The plugin installation path is set in the configuration file `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties`; the default path is `$CONFLUENT_HOME/share/confluent-hub-components/`.
The script above first clones the project source code and then compiles and packages it with Maven. After packaging, the plugin's zip package is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin installation path. The example above uses the built-in plugin installation path `$CONFLUENT_HOME/share/java/`.
### Install with confluent-hub
@ -98,7 +98,7 @@ confluent local services start
```
:::note
Be sure to install the plugin before starting Confluent; otherwise a class-not-found error will occur. The Kafka Connect log (default path: /tmp/confluent.xxxx/connect/logs/connect.log) lists the successfully installed plugins, which you can use to check whether the installation succeeded.
Be sure to install the plugin before starting Confluent; otherwise loading the plugin will fail.
:::
:::tip
@ -125,6 +125,61 @@ Control Center is [UP]
To clear the data, execute `rm -rf /tmp/confluent.106668`.
:::
### Verify that all components started successfully
Enter the command:
```
confluent local services status
```
If all components started successfully, you will see the following output:
```
Connect is [UP]
Control Center is [UP]
Kafka is [UP]
Kafka REST is [UP]
ksqlDB Server is [UP]
Schema Registry is [UP]
ZooKeeper is [UP]
```
### Verify that the plugin was installed successfully
After the Kafka Connect component is fully started, list the successfully loaded plugins with the following command:
```
confluent local services connect plugin list
```
If the installation succeeded, the output will be as follows:
```txt {4,9}
Available Connect Plugins:
[
{
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
"type": "sink",
"version": "1.0.0"
},
{
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
"type": "source",
"version": "1.0.0"
},
......
```
If the plugin installation failed, check the Kafka Connect startup log for exceptions; print the log path with the following command:
```
echo `cat /tmp/confluent.current`/connect/connect.stdout
```
The output of this command looks like: `/tmp/confluent.104086/connect/connect.stdout`
In the same directory as the log file `connect.stdout` there is also a file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, a series of comma-separated paths. If the plugin installation failed, it is most likely because the actual installation path is not included in `plugin.path`.
## Using the TDengine Sink Connector
The TDengine Sink Connector synchronizes the data of a specified topic to TDengine. Users do not need to create the database and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database) or generated by rule (see the configuration parameter connection.database.prefix).
@ -144,7 +199,7 @@ vi sink-demo.properties
The content of sink-demo.properties is as follows:
```ini title="sink-demo.properties"
name=tdengine-sink-demo
name=TDengineSinkConnector
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
tasks.max=1
topics=meters
@ -153,6 +208,7 @@ connection.user=root
connection.password=taosdata
connection.database=power
db.schemaless=line
data.precision=ns
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
@ -179,6 +235,7 @@ confluent local services connect connector load TDengineSinkConnector --config .
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
"connection.user": "root",
"connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
"data.precision": "ns",
"db.schemaless": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"tasks.max": "1",
@ -223,10 +280,10 @@ Database changed.
taos> select * from meters;
ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
Query OK, 4 row(s) in set (0.004208s)
```
@ -356,21 +413,33 @@ confluent local services connect connector unload TDengineSourceConnector
2. `connection.database.prefix`: the prefix of the target database name when connection.database is null. It can contain the placeholder '${topic}'. For example, with kafka_${topic}, data for the topic 'orders' is written to the database 'kafka_orders'. Default is null; when null, the name of the target database is the same as the name of the topic.
3. `batch.size`: the number of records per write batch. When the Sink Connector receives more data than this value at once, it writes in batches.
4. `max.retries`: the maximum number of retries when an error occurs. Default is 1.
5. `retry.backoff.ms`: the retry interval after a send error, in milliseconds. Default 3000.
6. `db.schemaless`: the data format, which must be one of line, json, or telnet, representing the InfluxDB line protocol format, the OpenTSDB JSON format, and the OpenTSDB Telnet line protocol format respectively.
5. `retry.backoff.ms`: the retry interval after a send error, in milliseconds. The default is 3000.
6. `db.schemaless`: the data format. The available values are:
   1. line: the InfluxDB line protocol format
   2. json: the OpenTSDB JSON format
   3. telnet: the OpenTSDB Telnet line protocol format
7. `data.precision`: the timestamp precision when using the InfluxDB line protocol format. The available values are:
   1. ms: milliseconds
   2. us: microseconds
   3. ns: nanoseconds. The default is nanoseconds.
### TDengine Source Connector specific configuration
1. `connection.database`: the name of the source database; no default value.
2. `topic.prefix`: the prefix of the topic name after data is imported into Kafka. The full topic name is `topic.prefix` + `connection.database`. The default is the empty string "".
3. `timestamp.initial`: the start time of data synchronization, in the format 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
4. `poll.interval.ms`: the polling interval for fetching data, in ms. Default 1000.
3. `timestamp.initial`: the start time of data synchronization, in the format 'yyyy-MM-dd HH:mm:ss'. The default is "1970-01-01 00:00:00".
4. `poll.interval.ms`: the polling interval for fetching data, in ms. The default is 1000.
5. `fetch.max.rows`: the maximum number of rows retrieved per database query. Default is 100.
6. `out.format`: the data format, line or json; line means the InfluxDB Line protocol format, and json means the OpenTSDB JSON format. Default line.
6. `out.format`: the data format, line or json; line means the InfluxDB Line protocol format, and json means the OpenTSDB JSON format. The default is line.
## Other Notes
1. The plugin installation location can be customized; see the official documentation: https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
2. The examples in this tutorial use the Confluent platform, but the TDengine Kafka Connector itself also works with a standalone Kafka installation, with the same configuration method. For how to use Kafka Connect plugins in a standalone Kafka environment, see the official documentation: https://kafka.apache.org/documentation/#connect.
## Feedback
https://github.com/taosdata/kafka-connect-tdengine/issues
Whatever problem you encounter, you are welcome to report it in the Github repository of this project: https://github.com/taosdata/kafka-connect-tdengine/issues
## References

View File

@ -12,6 +12,6 @@ Between two major release versions, some beta versions may be delivered for user
For the details please refer to [Install and Uninstall](/operation/pkg-install).
To see the details of versions, please refer to [Download List](https://www.taosdata.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).
To see the details of versions, please refer to [Download List](https://tdengine.com/all-downloads) and [Release Notes](https://github.com/taosdata/TDengine/releases).

View File

@ -3,6 +3,8 @@ title: Data Types
description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
---
## TIMESTAMP
When using TDengine to store and query data, the most important part of the data is the timestamp. A timestamp must be specified when creating and inserting data rows. Timestamps must follow the rules below:
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
@ -17,33 +19,51 @@ Time precision in TDengine can be set by the `PRECISION` parameter when executin
CREATE DATABASE db_name PRECISION 'ns';
```
## Data Types
In TDengine, the data types below can be used when specifying a column or tag.
| # | **type** | **Bytes** | **Description** |
| --- | :-------: | --------- | ------------------------- |
| 1 | TIMESTAMP | 8 | Default precision is millisecond; microsecond and nanosecond are also supported |
| 2 | INT | 4 | Integer, the value range is [-2^31+1, 2^31-1], while -2^31 is treated as NULL |
| 3 | BIGINT | 8 | Long integer, the value range is [-2^63+1, 2^63-1], while -2^63 is treated as NULL |
| 4 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
| 5 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
| 6 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
| 7 | SMALLINT | 2 | Short integer, the value range is [-32767, 32767], while -32768 is treated as NULL |
| 8 | TINYINT | 1 | Single-byte integer, the value range is [-127, 127], while -128 is treated as NULL |
| 9 | BOOL | 1 | Bool, the value range is {true, false} |
| 10 | NCHAR | User Defined| Multi-byte string that can include multi-byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes of storage. The string value should be quoted with single quotes. A literal single quote inside the string must be preceded with a backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 11 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
:::tip
TDengine is case insensitive and treats any characters in the sql command as lower case by default; case-sensitive strings must be quoted with single quotes.
:::
| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1] |
| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
| 5 | BIGINT UNSIGNED | 8 | Unsigned long integer, the value range is [0, 2^64-1] |
| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. The string length can be up to 16374 bytes. The string value must be quoted with single quotes. The literal single quote inside the string must be preceded with back slash like `\'` |
| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
| 10 | SMALLINT UNSIGNED | 2 | Unsigned short integer, the value range is [0, 65535] |
| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
| 12 | TINYINT UNSIGNED | 1 | Unsigned single-byte integer, the value range is [0, 255] |
| 13 | BOOL | 1 | Bool, the value range is {true, false} |
| 14 | NCHAR | User Defined| Multi-byte string that can include multi-byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes of storage. The string value should be quoted with single quotes. A literal single quote inside the string must be preceded with a backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
| 15 | JSON | | JSON type can only be used on tags. A JSON tag excludes any other tag of any other type |
| 16 | VARCHAR | User Defined| Alias of BINARY type |
:::note
Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
- TDengine is case insensitive and treats any characters in the sql command as lower case by default; case-sensitive strings must be quoted with single quotes.
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
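As a quick illustration (a sketch, not from the original docs; the table `readings` is hypothetical), signed and unsigned integer columns can be mixed freely:
```sql
-- hypothetical table mixing signed and unsigned integer columns
CREATE TABLE readings (ts TIMESTAMP, cnt INT UNSIGNED, delta TINYINT, note BINARY(20));
```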
## Constants
TDengine supports constants of multiple data types.
| # | **Syntax** | **Type** | **Description** |
| --- | :-------: | --------- | -------------------------------------- |
| 1 | [{+ \| -}]123 | BIGINT | Numeric constants are treated as BIGINT type. The value will be truncated if it exceeds the range of BIGINT type. |
| 2 | 123.45 | DOUBLE | Floating number constants are treated as DOUBLE type. TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. |
| 3 | 1.2E3 | DOUBLE | Constants in scientific notation are treated as DOUBLE type. |
| 4 | 'abc' | BINARY | String constants enclosed by single quotes are treated as BINARY type. Its size is determined as the actual length. Single quote itself can be included by preceding backslash, i.e. `\'`, in a string constant. |
| 5 | "abc" | BINARY | String constants enclosed by double quotes are treated as BINARY type. Its size is determined as the actual length. Double quote itself can be included by preceding backslash, i.e. `\"`, in a string constant. |
| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | A string constant following the `TIMESTAMP` keyword is treated as TIMESTAMP type. The string should be in the format of "YYYY-MM-DD HH:mm:ss.MS". Its time precision is the same as that of the current database being used. |
| 7 | {TRUE \| FALSE} | BOOL | BOOL type constant. |
| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | NULL constant, it can be used for any type.|
:::note
Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
- TDengine determines whether it's a floating number based on if decimal point or scientific notation is used. So whether the value is determined as overflow depends on both the value and the determined type. For example, 9999999999999999999 is determined as overflow because it exceeds the upper limit of BIGINT type, while 9999999999999999999.0 is considered as a valid floating number because it is within the range of DOUBLE type.
:::
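A sketch of these literals in use (the table `d1001` is hypothetical): the first comparison uses a TIMESTAMP literal, the second a DOUBLE literal:
```sql
SELECT * FROM d1001
  WHERE ts >= TIMESTAMP '2022-03-28 09:56:51.249' AND current > 10.5;
```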

View File

@ -327,6 +327,32 @@ taos> SELECT HISTOGRAM(voltage, 'log_bin', '{"start": 1, "factor": 3, "count": 3
{"lower_bin":27, "upper_bin":inf, "count":1} |
```
### ELAPSED
```mysql
SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
```
**Description**: The `elapsed` function calculates the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` in the calculated time length (see the usage sketch below).
**Return value type**: Double
**Applicable column type**: Timestamp
**Applicable versions**: Since version 2.6.0.0
**Applicable tables**: table, STable, outer query of a nested query
**Explanations**:
- The `field_name` parameter can only be the first column of a table, i.e. the timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit.
- It can be used with `INTERVAL` to get the valid time length of each time window. Note that the return value equals the window length for all time windows except the first and the last one.
- `order by asc/desc` has no effect on the result.
- `group by tbname` must be used together with `elapsed` when it is applied to a STable.
- `group by` must NOT be used together with `elapsed` when it is applied to a table or subtable.
- When used in a nested query, it's only applicable when the inner query outputs an implicit timestamp column as the primary key. For example, `select elapsed(ts) from (select diff(value) from sub1)` is legal usage while `select elapsed(ts) from (select * from sub1)` is not.
- It can't be used with `leastsquares`, `diff`, `derivative`, `top`, `bottom`, `last_row`, or `interp`.
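A minimal sketch, assuming a hypothetical STable `meters` whose first column `ts` is the timestamp primary key; `group by tbname` is required because the query targets a STable:
```mysql
SELECT ELAPSED(ts, 1s) FROM meters GROUP BY tbname;
```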
## Selection Functions
When any select function is used, the timestamp column or tag columns (including `tbname`) can also be specified to show which rows the selected values came from.
@ -1511,37 +1537,6 @@ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
- Parameter `pos` can be a positive or negative integer; if it's positive, the starting position is counted from the beginning of the string; if it's negative, the starting position is counted from the end of the string.
- If `len` is not specified, it means from `pos` to the end.
### Arithmetic Operations
```
SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WHERE clause];
```
**Description**: The sum, difference, product, quotient, or remainder between one or more columns
**Return value type**: Double precision floating point
**Applicable column types**: Data types except for timestamp, binary, nchar, bool
**Applicable table types**: table, STable
**More explanations**:
- Arithmetic operations can be performed on two or more columns, Parentheses `()` can be used to control the order of precedence.
- NULL doesn't participate in the operation, i.e. if one of the operands is NULL then the result is NULL.
**Examples**:
```
taos> SELECT current + voltage * phase FROM d1001;
(current+(voltage*phase)) |
============================
78.190000713 |
84.540003240 |
80.810000718 |
Query OK, 3 row(s) in set (0.001046s)
```
### STATECOUNT
```

View File

@ -46,3 +46,44 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES |
| CONNS | ID | NOTNULL | STABLE | WAL |
| COPY | IF | NOW | STABLES | WHERE |
| _C0 | _QSTART | _QSTOP | _QDURATION | _WSTART |
| _WSTOP | _WDURATION | _ROWTS | | |
## Explanations
### TBNAME
`TBNAME` can be considered as a special tag in a STable, representing the name of a subtable.
Get the table names and tag values of all subtables in a STable:
```mysql
SELECT TBNAME, location FROM meters;
```
Count the number of subtables in a STable:
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
Only filters on TAGS can be used in the WHERE clause of the above two query statements. For example:
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
==================================================================
d1004 | California.SanFrancisco |
d1003 | California.SanFrancisco |
d1002 | California.LosAngeles |
d1001 | California.LosAngeles |
Query OK, 4 row(s) in set (0.000881s)
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
count(tbname) |
========================
2 |
Query OK, 1 row(s) in set (0.001091s)
```
### _QSTART/_QSTOP/_QDURATION
The start, stop and duration of a query time window.
### _WSTART/_WSTOP/_WDURATION
The start, stop and duration of an aggregate query by time window, like interval, session window, or state window.
### _c0/_ROWTS
_c0 is equivalent to _ROWTS; both represent the first column of a table or STable.
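A sketch of the pseudo column in use (the subtable `d1001` is hypothetical):
```mysql
SELECT _rowts, current FROM d1001 LIMIT 3;
```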

View File

@ -0,0 +1,66 @@
---
sidebar_label: Operators
title: Operators
---
## Arithmetic Operators
| # | **Operator** | **Data Types** | **Description** |
| --- | :----------: | -------------- | --------------------------------------------------------- |
| 1 | +, - | Numeric Types | Representing positive or negative numbers, unary operator |
| 2 | +, - | Numeric Types | Addition and subtraction, binary operator |
| 3 | \*, / | Numeric Types | Multiplication and division, binary operator |
| 4 | % | Numeric Types | Taking the remainder, binary operator |
## Bitwise Operators
| # | **Operator** | **Data Types** | **Description** |
| --- | :----------: | -------------- | ----------------------------- |
| 1 | & | Numeric Types | Bitwise AND, binary operator |
| 2 | \| | Numeric Types | Bitwise OR, binary operator |
## JSON Operator
The `->` operator can be used to get the value of a key in a column of JSON type. The left operand is the column name and the right operand is a string constant. For example, `col->'name'` returns the value of the key `'name'`.
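A sketch (assuming a hypothetical STable `st` with a JSON tag `info`):
```sql
-- value of the key 'city' in the JSON tag "info"
SELECT info->'city' FROM st;
```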
## Set Operator
Set operators combine the results of two queries into a single result. A query including set operators is called a combined query. The select lists of the queries in a combined query must contain the same number of expressions, and the result type is determined by the first query: the result types of the following queries must be convertible to those of the first query, with the same conversion rules as the `CAST` function.
TDengine provides 2 set operators: `UNION ALL` and `UNION`. `UNION ALL` combines the results without removing duplicate data; `UNION` combines the results and removes duplicate data rows. In a single SQL statement, at most 100 set operators can be used.
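For example (a sketch; the subtables `d1001` and `d1002` are hypothetical and share the same schema):
```sql
SELECT ts, current FROM d1001 UNION ALL SELECT ts, current FROM d1002;
```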
## Comparison Operators
| # | **Operator** | **Data Types** | **Description** |
| --- | :---------------: | ------------------------------------------------------------------- | ----------------------------------------------- |
| 1 | = | Except for BLOB, MEDIUMBLOB and JSON | Equal |
| 2 | <\>, != | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | Not equal |
| 3 | \>, \< | Except for BLOB, MEDIUMBLOB and JSON | Greater than, less than |
| 4 | \>=, \<= | Except for BLOB, MEDIUMBLOB and JSON | Greater than or equal to, less than or equal to |
| 5 | IS [NOT] NULL | Any types | Is NULL or NOT |
| 6 | [NOT] BETWEEN AND | Except for BLOB, MEDIUMBLOB and JSON | In a value range or not |
| 7 | IN | Except for BLOB, MEDIUMBLOB, JSON and primary key of timestamp type | In a list of values or not |
| 8 | LIKE | BINARY, NCHAR and VARCHAR | Wildcard matching |
| 9 | MATCH, NMATCH | BINARY, NCHAR and VARCHAR | Regular expression matching |
| 10 | CONTAINS | JSON | If A key exists in JSON |
The `LIKE` operator uses wildcards to match a string. The rules are:
- '%' matches 0 to any number of characters; '\_' matches any single ASCII character.
- `\_` can be used to match a literal `_` in the string, i.e. the underscore is escaped with a backslash.
- A wildcard string is at most 100 bytes. The longer the wildcard string, the worse the performance of the LIKE operator.
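For example (a sketch; the table `t1` and column `name` are hypothetical):
```sql
-- '\_' matches a literal underscore, so this matches names starting with "a_"
SELECT * FROM t1 WHERE name LIKE 'a\_%';
```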
The `MATCH` and `NMATCH` operators use regular expressions to match a string. The rules are:
- Regular expressions of POSIX standard are supported.
- Only `tbname`, i.e. subtable names, and tag columns of string types can be matched with a regular expression; data columns are not supported.
- A regular expression string is at most 128 bytes. The maximum length can be adjusted by setting the parameter `maxRegexStringLen`, which is a client-side configuration and requires a client restart to take effect.
## Logical Operators
| # | **Operator** | **Data Types** | **Description** |
| --- | :----------: | -------------- | ---------------------------------------------------------------------------------------- |
| 1 | AND | BOOL | Logical AND, return TRUE if both conditions are TRUE; return FALSE if any one is FALSE. |
| 2 | OR | BOOL | Logical OR, return TRUE if any condition is TRUE; return FALSE if both are FALSE |
TDengine uses short-circuit optimization when performing logical operations. For the AND operator, if the first condition is evaluated to FALSE, the second one is not evaluated. For the OR operator, if the first condition is evaluated to TRUE, the second one is not evaluated.

View File

@ -4,7 +4,7 @@ sidebar_label: C/C++
title: C/C++ Connector
---
C/C++ developers can use TDengine's client driver and the C/C++ connector, to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use it, you need to include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs; the application also needs to link to the corresponding dynamic libraries on the platform where it is located.
C/C++ developers can use TDengine's client driver and the C/C++ connector to develop their applications to connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector, you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located.
```c
#include <taos.h>
@ -26,7 +26,7 @@ Please refer to [list of supported platforms](/reference/connector#supported-pla
## Supported versions
The version number of the TDengine client driver and the version number of the TDengine server require one-to-one correspondence and recommend using the same version of client driver as what the TDengine server version is. Although a lower version of the client driver is compatible to work with a higher version of the server, if the first three version numbers are the same (i.e., only the fourth version number is different), but it is not recommended. It is strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server.
The version number of the TDengine client driver and the version number of the TDengine server should be the same. A lower version of the client driver is compatible with a higher version of the server if the first three version numbers are the same (i.e., only the fourth version number is different). For example, if the client version is x.y.z.1 and the server version is x.y.z.2, the client and server are compatible. But in general we do not recommend using a lower client version with a newer server version. It is also strongly discouraged to use a higher version of the client driver to access a lower version of the TDengine server.
## Installation steps
@ -55,7 +55,7 @@ In the above example code, `taos_connect()` establishes a connection to port 603
:::note
- If not specified, when the return value of the API is an integer, _0_ means success, the others are error codes representing the reason for failure, and when the return value is a pointer, _NULL_ means failure.
- If not specified, when the return value of the API is an integer, _0_ means success. All others are error codes representing the reason for failure. When the return value is a pointer, _NULL_ means failure.
- All error codes and their corresponding causes are described in the `taoserror.h` file.
:::
@ -140,13 +140,12 @@ The base API is used to do things like create database connections and provide a
- `void taos_cleanup()`
Clean up the runtime environment and should be called before the application exits.
Cleans up the runtime environment and should be called before the application exits.
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
Set client options, currently supports region setting (`TSDB_OPTION_LOCALE`), character set
(`TSDB_OPTION_CHARSET`), time zone
(`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`) . The region setting, character set, and time zone default to the current settings of the operating system.
(`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), configuration file path (`TSDB_OPTION_CONFIGDIR`). The region setting, character set, and time zone default to the current settings of the operating system.
- `char *taos_get_client_info()`
@ -159,7 +158,7 @@ The base API is used to do things like create database connections and provide a
- host: FQDN of any node in the TDengine cluster
- user: user name
- pass: password
- db: database name, if the user does not provide, it can also be connected correctly, the user can create a new database through this connection, if the user provides the database name, it means that the database user has already created, the default use of the database
- db: the database name. Even if the user does not provide this, the connection will still work correctly. The user can create a new database through this connection. If the user provides the database name, it means that the database has already been created and the connection can be used for regular operations on the database.
- port: the port the taosd program is listening on
NULL indicates a failure. The application needs to save the returned parameters for subsequent use.
@ -187,7 +186,7 @@ The APIs described in this subsection are all synchronous interfaces. After bein
- `TAOS_RES* taos_query(TAOS *taos, const char *sql)`
Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. You can't tell if the result failed by whether the return value is `NULL`, but by parsing the error code in the result set with the `taos_errno()` function.
Executes an SQL command, either a DQL, DML, or DDL statement. The `taos` parameter is a handle obtained with `taos_connect()`. If the return value is `NULL` this does not necessarily indicate a failure. You can get the error code, if any, by parsing the error code in the result set with the `taos_errno()` function.
- `int taos_result_precision(TAOS_RES *res)`
@ -231,7 +230,7 @@ typedef struct taosField {
- ` void taos_free_result(TAOS_RES *res)`
Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Otherwise, it may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources.
Frees the query result set and the associated resources. Be sure to call this API to free the resources after the query is completed. Failing to call this, may lead to a memory leak in the application. However, note that the application will crash if you call a function like `taos_consume()` to get the query results after freeing the resources.
- `char *taos_errstr(TAOS_RES *res)`
@ -242,7 +241,7 @@ typedef struct taosField {
Get the reason for the last API call failure. The return value is the error code.
:::note
TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, etc., issued based on TAOS structures are multi-thread safe, but state quantities such as "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection.
TDengine version 2.0 and above recommends that each thread of a database application create a separate connection or a connection pool based on threads. It is not recommended to pass the connection (TAOS\*) structure to different threads for shared use in the application. Queries, writes, and other operations issued that are based on TAOS structures are multi-thread safe, but state quantities such as the "USE statement" may interfere between threads. In addition, the C connector can dynamically create new database-oriented connections on demand (this procedure is not visible to the user), and it is recommended that `taos_close()` be called only at the final exit of the program to close the connection.
:::
@ -274,12 +273,12 @@ All TDengine's asynchronous APIs use a non-blocking call pattern. Applications c
### Parameter Binding API
In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL, and currently only supports using a question mark `?` to represent the parameter to be bound.
In addition to direct calls to `taos_query()` to perform queries, TDengine also provides a set of `bind` APIs that supports parameter binding, similar in style to MySQL. TDengine currently only supports using a question mark `?` to represent the parameter to be bound.
Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support for data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows.
Starting with versions 2.1.1.0 and 2.1.2.0, TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. This avoids the resource consumption of SQL syntax parsing when writing data through the parameter binding interface, thus significantly improving write performance in most cases. A typical operation, in this case, is as follows.
1. call `taos_stmt_init()` to create the parameter binding object.
2. call `taos_stmt_prepare()` to parse the INSERT statement. 3.
2. call `taos_stmt_prepare()` to parse the INSERT statement.
3. call `taos_stmt_set_tbname()` to set the table name if it is reserved in the INSERT statement but not the TAGS.
4. call `taos_stmt_set_tbname_tags()` to set the table name and TAGS values if the table name and TAGS are reserved in the INSERT statement (for example, if the INSERT statement takes an automatic table build).
5. call `taos_stmt_bind_param_batch()` to set the value of VALUES in multiple columns, or call `taos_stmt_bind_param()` to set the value of VALUES in a single row.
@ -383,7 +382,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
**return value**
TAOS_RES structure. The application can get the error message with `taos_errstr()` and the error code with `taos_errno()`.
In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information.
The returned TAOS_RES needs to be freed by the caller. Otherwise, a memory leak will occur.
The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks.
**Description**
The protocol type is enumerated and contains the following three formats.
@ -416,13 +415,13 @@ The Subscription API currently supports subscribing to one or more tables and co
This function is responsible for starting the subscription service, returning the subscription object on success and `NULL` on failure, with the following parameters.
- taos: the database connection that has been established
- restart: if the subscription already exists, whether to restart or continue the previous subscription
- topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription
- sql: the query statement of the subscription, this statement can only be _select_ statement, only the original data should be queried, only the data can be queried in time order
- fp: the callback function when the query result is received (the function prototype will be introduced later), only used when called asynchronously. This parameter should be passed `NULL` when called synchronously
- param: additional parameter when calling the callback function, the system API will pass it to the callback function as it is, without any processing
- interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. not recommended to set this parameter too small To avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period.
- taos: the database connection that has been established.
- restart: if the subscription already exists, whether to restart or continue the previous subscription.
- topic: the topic of the subscription (i.e., the name). This parameter is the unique identifier of the subscription.
- sql: the query statement of the subscription which can only be a _select_ statement. Only the original data should be queried, and data can only be queried in temporal order.
- fp: the callback function invoked when the query result is received; only used when called asynchronously. This parameter should be passed `NULL` when called synchronously. The function prototype is described below.
- param: additional parameter when calling the callback function. The system API will pass it to the callback function as is, without any processing.
- interval: polling period in milliseconds. The callback function will be called periodically according to this parameter when called asynchronously. The interval should not be too small to avoid impact on system performance when called synchronously. If the interval between two calls to `taos_consume()` is less than this period, the API will block until the interval exceeds this period.
- ` typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`

View File

@ -179,7 +179,7 @@ namespace TDengineExample
1. "Unable to establish connection", "Unable to resolve FQDN"
Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it.
Usually, it's caused by an incorrect FQDN configuration. Please refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot.
2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found.

View File

@ -225,7 +225,7 @@ See [video tutorial](https://www.taosdata.com/blog/2020/11/11/1957.html) for the
2. "Unable to establish connection", "Unable to resolve FQDN"
Usually, root cause is the FQDN is not configured correctly. You can refer to [How to understand TDengine's FQDN (In Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html).
Usually, the root cause is an incorrect FQDN configuration. You can refer to this section in the [FAQ](https://docs.tdengine.com/2.4/train-faq/faq/#2-how-to-handle-unable-to-establish-connection) to troubleshoot.
## Important Updates

View File

@ -7,7 +7,7 @@ TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDe
## What is Kafka Connect?
Kafka Connect is a component of Apache Kafka that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect.
Kafka Connect is a component of [Apache Kafka](https://kafka.apache.org/) that enables other systems, such as databases, cloud services, file systems, etc., to connect to Kafka easily. Data can flow from other software to Kafka via Kafka Connect and Kafka to other systems via Kafka Connect. Plugins that read data from other software are called Source Connectors, and plugins that write data to other software are called Sink Connectors. Neither Source Connector nor Sink Connector will directly connect to Kafka Broker, and Source Connector transfers data to Kafka Connect. Sink Connector receives data from Kafka Connect.
![TDengine Database Kafka Connector -- Kafka Connect](kafka/Kafka_Connect.webp)
@ -17,7 +17,7 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
## What is Confluent?
Confluent adds many extensions to Kafka, including:
[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including:
1. Schema Registry
2. REST Proxy
@ -79,10 +79,10 @@ Development: false
git clone https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
unzip -d $CONFLUENT_HOME/share/confluent-hub-components/ target/components/packages/taosdata-kafka-connect-tdengine-0.1.0.zip
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the path where the plugin is installed. The path to install the plugin is in the configuration file `$CONFLUENT_HOME/etc/kafka/connect-standalone.properties`. The default path is `$CONFLUENT_HOME/share/confluent-hub-components/`.
The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a built-in plugin path.
### Install with confluent-hub
@ -96,7 +96,7 @@ confluent local services start
```
:::note
Be sure to install the plugin before starting Confluent. Otherwise, there will be a class not found error. The log of Kafka Connect (default path: /tmp/confluent.xxxx/connect/logs/connect.log) will output the successfully installed plugin, which users can use to determine whether the plugin is installed successfully.
Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
:::
:::tip
@ -123,6 +123,59 @@ Control Center is [UP]
To clear data, execute `rm -rf /tmp/confluent.106668`.
:::
### Check Confluent Services Status
Use the command below to check the status of all services:
```
confluent local services status
```
The expected output is:
```
Connect is [UP]
Control Center is [UP]
Kafka is [UP]
Kafka REST is [UP]
ksqlDB Server is [UP]
Schema Registry is [UP]
ZooKeeper is [UP]
```
### Check Successfully Loaded Plugins
After Kafka Connect has completely started, you can use the command below to check whether the plugins are installed successfully:
```
confluent local services connect plugin list
```
The output should contain `TDengineSinkConnector` and `TDengineSourceConnector`, as below:
```
Available Connect Plugins:
[
{
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
"type": "sink",
"version": "1.0.0"
},
{
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
"type": "source",
"version": "1.0.0"
},
......
```
If not, please check the log file of Kafka Connect. To view the log file path, please execute:
```
echo `cat /tmp/confluent.current`/connect/connect.stdout
```
It should produce a path like: `/tmp/confluent.104086/connect/connect.stdout`
Besides the log file `connect.stdout`, there is a file named `connect.properties` in the same directory. At the end of this file, you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect did not find the plugins, it is probably because the installation path is not included in `plugin.path`.
## The use of TDengine Sink Connector
The role of the TDengine Sink Connector is to synchronize the data of a specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter `connection.database`), or it can be generated according to specific rules (see the configuration parameter `connection.database.prefix`).
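As an illustration of the second option, the sketch below derives the database name from the topic instead of fixing it. The exact placeholder syntax of `connection.database.prefix` is an assumption here, so please verify it against the configuration reference further down:

```ini
# Hypothetical sketch: leave connection.database unset and derive the target
# database from the topic name, so topic "meters" would map to "kafka_meters".
connection.database.prefix=kafka_${topic}
```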
@ -142,7 +195,7 @@ vi sink-demo.properties
The content of sink-demo.properties is as follows:
```ini title="sink-demo.properties"
name=tdengine-sink-demo
name=TDengineSinkConnector
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
tasks.max=1
topics=meters
@ -151,6 +204,7 @@ connection.user=root
connection.password=taosdata
connection.database=power
db.schemaless=line
data.precision=ns
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```
@ -177,6 +231,7 @@ If the above command is executed successfully, the output is as follows:
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
"connection.user": "root",
"connector.class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
"data.precision": "ns",
"db.schemaless": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"tasks.max": "1",
@ -221,10 +276,10 @@ Database changed.
taos> select * from meters;
ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LoSangeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LoSangeles |
2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LoSangeles |
2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LoSangeles |
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
Query OK, 4 row(s) in set (0.004208s)
```
@ -356,6 +411,7 @@ The following configuration items apply to TDengine Sink Connector and TDengine
4. `max.retries`: The maximum number of retries when an error occurs. Defaults to 1.
5. `retry.backoff.ms`: The time interval for retry when sending an error. The unit is milliseconds. The default is 3000.
6. `db.schemaless`: Data format, which can be one of `line`, `json`, or `telnet`, representing the InfluxDB line protocol format, the OpenTSDB JSON format, and the OpenTSDB Telnet line protocol format respectively.
7. `data.precision`: The time precision used when ingesting data in InfluxDB line protocol format; it can be one of `ms`, `us`, or `ns`. The default is `ns`. See the example below.
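As an example of how `db.schemaless` and `data.precision` interact, the record below is InfluxDB line protocol with a millisecond timestamp, so it would require `data.precision=ms`; with the default `ns`, the same instant would be written as `1648432611249000000` (the timestamp value is taken from the demo data above):

```
meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249
```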
### TDengine Source Connector specific configuration
@ -366,7 +422,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
5. `fetch.max.rows`: The maximum number of rows fetched per query against the database. Default is 100.
6. `out.format`: The data format. The value can be `line` or `json`: `line` represents the InfluxDB line protocol format, and `json` represents the OpenTSDB JSON format. Default is `line`. A configuration sketch combining these options follows below.
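Putting these options together, a minimal source-connector configuration could look like the following sketch. It reuses the shared connection parameters from the sink demo and leaves the remaining source-specific options at their defaults, so treat it as a starting point rather than a complete reference:

```ini title="source-demo.properties"
name=TDengineSourceConnector
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
tasks.max=1
connection.url=jdbc:TAOS://127.0.0.1:6030
connection.user=root
connection.password=taosdata
connection.database=power
fetch.max.rows=100
out.format=line
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```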
## feedback
## Other notes
1. To install the plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
2. To use Kafka Connect without Confluent, refer to https://kafka.apache.org/documentation/#connect; a launch sketch follows below.
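For the second point, a plain Apache Kafka distribution ships a standalone-mode launcher. A sketch (paths and the properties file name are illustrative, and `plugin.path` in `connect-standalone.properties` must point to where the connector zip was unpacked):

```
bin/connect-standalone.sh config/connect-standalone.properties sink-demo.properties
```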
## Feedback
https://github.com/taosdata/kafka-connect-tdengine/issues
View File
@ -5,38 +5,38 @@ title: Frequently Asked Questions
## Submit an Issue
If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem description, including TDengine version, hardware and OS information, the steps to reproduce the problem, etc. It would be very helpful if you package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine, if they have been changed in your configuration, please use according to the actual configuration. It's recommended to firstly set `debugFlag` to 135 in `taos.cfg`, restart `taosd`, then reproduce the problem and collect logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode <dnode_id> debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload them. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommend setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing the `alter dnode <dnode_id> debugFlag 135` command in the TDengine CLI `taos`. During normal operation, however, please make sure `debugFlag` is set to 131.
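For instance, the collection steps might look like this (the dnode id and archive name are illustrative):

```
# Raise the debug level without restarting taosd; dnode id 1 is an example
taos -s "alter dnode 1 debugFlag 135"
# ...reproduce the problem, then package the logs and configuration
tar -czf taos-debug.tar.gz /var/log/taos /etc/taos
# Restore the normal level afterwards
taos -s "alter dnode 1 debugFlag 131"
```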
## Frequently Asked Questions
### 1. How to upgrade to TDengine 2.0 from older version?
version 2.x is not compatible with version 1.x regarding configuration file and data file, please do following before upgrading:
Version 2.x is not compatible with version 1.x with regard to configuration and data files, so please perform the following steps before upgrading. Follow data integrity, security, backup, and other relevant SOPs and best practices before removing or deleting any data.
1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
2. Delete log files: `sudo rm -rf /var/log/taos/`
3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/`
4. Install latests 2.x version
5. If the data needs to be kept and migrated to newer version, please contact professional service of TDengine for assistance
4. Install the latest 2.x version
5. If the data needs to be kept and migrated to the newer version, please contact TDengine's professional services for assistance.
### 2. How to handle "Unable to establish connection"
When the client is unable to connect to the server, you can try following ways to find out why.
When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem.
1. Check the network
- Check if the hosts where the client and server are running can be accessible to each other, for example by `ping` command.
- Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. It's better to firstly disable firewall for diagnostics.
- Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side
- Check if the `firstEp` is set properly in the `taos.cfg` used by the client side
- Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command.
- Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols.
- Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side.
- Check if the `firstEp` is set properly in the `taos.cfg` used by the client side.
2. Make sure the client version and server version are the same.
3. On the server side, check the running status of `taosd` by executing `systemctl status taosd`. If your server was started by means other than `systemctl`, use the appropriate method to check whether the server process is running normally.
4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect toe the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
4. If using a connector for Python, Java, Go, Rust, C# or Node.js on Linux to connect to the server, please make sure `libtaos.so` is in the directory `/usr/local/taos/driver` and that `/usr/local/taos/driver` is in the system library search path `LD_LIBRARY_PATH`.
5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path, it's suggested to put `taos.dll` under `C:\Windows\System32`.
5. If using a connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system library search path. We recommend putting `taos.dll` under `C:\Windows\System32`.
6. Some advanced network diagnostic tools
@ -45,7 +45,7 @@ When the client is unable to connect to the server, you can try following ways t
Check whether a TCP port on server side is open: `nc -l {port}`
Check whether a TCP port on client side is open: `nc {hostIP} {port}`
- On Windows system `Net-TestConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on serer side is open for access.
- On Windows systems, `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell can be used to check whether the port on the server side is open for access.
7. TDengine CLI `taos` can also be used to check the network; please refer to [TDengine CLI](/reference/taos-shell).
View File
@ -3,15 +3,15 @@ sidebar_label: TDengine in Docker
title: Deploy TDengine in Docker
---
Even though it's not recommended to deploy TDengine using docker in production system, docker is still very useful in development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine can support X86-64, X86, arm64, and rm32 .
We do not recommend deploying TDengine using Docker in a production system. However, Docker is still very useful in a development environment, especially when your host is not Linux. From version 2.0.14.0, the official image of TDengine supports X86-64, X86, arm64, and arm32.
In this chapter a simple step by step guide of using TDengine in docker is introduced.
In this chapter we introduce a simple step-by-step guide to using TDengine in Docker.
## Install Docker
The installation of docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
To install Docker please refer to [Get Docker](https://docs.docker.com/get-docker/).
After docker is installed, you can check whether Docker is installed properly by displaying Docker version.
After Docker is installed, you can check whether Docker is installed properly by displaying Docker version.
```bash
$ docker -v
@ -27,7 +27,7 @@ $ docker run -d -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdeng
526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd
```
In the above command, a docker container is started to run TDengine server, the port range 6030-6049 of the container is mapped to host port range 6030-6049. If port range 6030-6049 has been occupied on the host, please change to an available host port range. Regarding the requirements about ports on the host, please refer to [Port Configuration](/reference/config/#serverport).
In the above command, a docker container is started to run the TDengine server; the port range 6030-6049 of the container is mapped to the host port range 6030-6049. If the port range 6030-6049 is already occupied on the host, please change to an available host port range. For port requirements on the host, please refer to [Port Configuration](/reference/config/#serverport).
- **docker run**: Launch a docker container
- **-d**: the container will run in background mode
@ -95,7 +95,7 @@ In TDengine CLI, SQL commands can be executed to create/drop databases, tables,
### Access TDengine from host
If `-p` used to map ports properly between host and container, it's also able to access TDengine in container from the host as long as `firstEp` is configured correctly for the client on host.
If option `-p` is used to map ports properly between the host and the container, it is also possible to access TDengine in the container from the host, as long as `firstEp` is configured correctly for the client on the host.
```
$ taos
@ -271,7 +271,7 @@ Below is an example output:
### Access TDengine from 3rd party tools
A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter` , for details please refer to [3rd party tools](/third-party/).
A lot of 3rd party tools can be used to write data into TDengine through `taosAdapter`; for details please refer to [3rd party tools](/third-party/).
From the 3rd party tool's perspective, accessing a TDengine server inside a container is no different, as long as the endpoint is specified correctly: the endpoint should be the FQDN of the host and the mapped port.
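For example, if the REST port of `taosAdapter` is mapped (6041 by default), a 3rd party tool on the host, or a plain `curl`, can reach the server like this (a sketch assuming the default credentials):

```bash
curl -u root:taosdata -d "show databases;" http://localhost:6041/rest/sql
```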
View File
@ -106,7 +106,7 @@ int32_t create_topic() {
}
taos_free_result(pRes);
/*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/
/*pRes = taos_query(pConn, "create topic topic_ctb_column as database abc1");*/
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
View File
@ -1439,8 +1439,10 @@ typedef struct {
int32_t code;
} STaskDropRsp;
#define STREAM_TRIGGER_AT_ONCE 1
#define STREAM_TRIGGER_WINDOW_CLOSE 2
#define STREAM_TRIGGER_AT_ONCE_SMA 0
#define STREAM_TRIGGER_AT_ONCE 1
#define STREAM_TRIGGER_WINDOW_CLOSE 2
#define STREAM_TRIGGER_WINDOW_CLOSE_SMA 3
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
@ -1472,15 +1474,22 @@ typedef struct {
int64_t streamId;
} SMVCreateStreamRsp, SMSCreateStreamRsp;
enum {
TOPIC_SUB_TYPE__DB = 1,
TOPIC_SUB_TYPE__TABLE,
TOPIC_SUB_TYPE__COLUMN,
};
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN]; // accout.topic
int8_t igExists;
int8_t withTbName;
int8_t withSchema;
int8_t withTag;
int8_t subType;
char* sql;
char* ast;
char subscribeDbName[TSDB_DB_NAME_LEN];
char subDbName[TSDB_DB_FNAME_LEN];
union {
char* ast;
char subStbName[TSDB_TABLE_FNAME_LEN];
};
} SCMCreateTopicReq;
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
@ -2144,11 +2153,6 @@ static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) {
return buf;
}
enum {
TOPIC_SUB_TYPE__DB = 1,
TOPIC_SUB_TYPE__TABLE,
};
typedef struct {
SMsgHead head;
int64_t leftForVer;
@ -2168,10 +2172,10 @@ typedef struct {
int64_t newConsumerId;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t subType;
int8_t withTbName;
int8_t withSchema;
int8_t withTag;
char* qmsg;
// int8_t withTbName;
// int8_t withSchema;
// int8_t withTag;
char* qmsg;
} SMqRebVgReq;
static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) {
@ -2182,10 +2186,10 @@ static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pR
tlen += taosEncodeFixedI64(buf, pReq->newConsumerId);
tlen += taosEncodeString(buf, pReq->subKey);
tlen += taosEncodeFixedI8(buf, pReq->subType);
tlen += taosEncodeFixedI8(buf, pReq->withTbName);
tlen += taosEncodeFixedI8(buf, pReq->withSchema);
tlen += taosEncodeFixedI8(buf, pReq->withTag);
if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
// tlen += taosEncodeFixedI8(buf, pReq->withTbName);
// tlen += taosEncodeFixedI8(buf, pReq->withSchema);
// tlen += taosEncodeFixedI8(buf, pReq->withTag);
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
tlen += taosEncodeString(buf, pReq->qmsg);
}
return tlen;
@ -2198,10 +2202,10 @@ static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq)
buf = taosDecodeFixedI64(buf, &pReq->newConsumerId);
buf = taosDecodeStringTo(buf, pReq->subKey);
buf = taosDecodeFixedI8(buf, &pReq->subType);
buf = taosDecodeFixedI8(buf, &pReq->withTbName);
buf = taosDecodeFixedI8(buf, &pReq->withSchema);
buf = taosDecodeFixedI8(buf, &pReq->withTag);
if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
// buf = taosDecodeFixedI8(buf, &pReq->withTbName);
// buf = taosDecodeFixedI8(buf, &pReq->withSchema);
// buf = taosDecodeFixedI8(buf, &pReq->withTag);
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
buf = taosDecodeString(buf, &pReq->qmsg);
}
return (void*)buf;
View File
@ -127,134 +127,131 @@
#define TK_BLOB 109
#define TK_VARBINARY 110
#define TK_DECIMAL 111
#define TK_DELAY 112
#define TK_FILE_FACTOR 113
#define TK_NK_FLOAT 114
#define TK_ROLLUP 115
#define TK_TTL 116
#define TK_SMA 117
#define TK_SHOW 118
#define TK_DATABASES 119
#define TK_TABLES 120
#define TK_STABLES 121
#define TK_MNODES 122
#define TK_MODULES 123
#define TK_QNODES 124
#define TK_FUNCTIONS 125
#define TK_INDEXES 126
#define TK_ACCOUNTS 127
#define TK_APPS 128
#define TK_CONNECTIONS 129
#define TK_LICENCE 130
#define TK_GRANTS 131
#define TK_QUERIES 132
#define TK_SCORES 133
#define TK_TOPICS 134
#define TK_VARIABLES 135
#define TK_BNODES 136
#define TK_SNODES 137
#define TK_CLUSTER 138
#define TK_TRANSACTIONS 139
#define TK_LIKE 140
#define TK_INDEX 141
#define TK_FULLTEXT 142
#define TK_FUNCTION 143
#define TK_INTERVAL 144
#define TK_TOPIC 145
#define TK_AS 146
#define TK_CGROUP 147
#define TK_WITH 148
#define TK_SCHEMA 149
#define TK_DESC 150
#define TK_DESCRIBE 151
#define TK_RESET 152
#define TK_QUERY 153
#define TK_CACHE 154
#define TK_EXPLAIN 155
#define TK_ANALYZE 156
#define TK_VERBOSE 157
#define TK_NK_BOOL 158
#define TK_RATIO 159
#define TK_COMPACT 160
#define TK_VNODES 161
#define TK_IN 162
#define TK_OUTPUTTYPE 163
#define TK_AGGREGATE 164
#define TK_BUFSIZE 165
#define TK_STREAM 166
#define TK_INTO 167
#define TK_TRIGGER 168
#define TK_AT_ONCE 169
#define TK_WINDOW_CLOSE 170
#define TK_WATERMARK 171
#define TK_KILL 172
#define TK_CONNECTION 173
#define TK_TRANSACTION 174
#define TK_MERGE 175
#define TK_VGROUP 176
#define TK_REDISTRIBUTE 177
#define TK_SPLIT 178
#define TK_SYNCDB 179
#define TK_NULL 180
#define TK_NK_QUESTION 181
#define TK_NK_ARROW 182
#define TK_ROWTS 183
#define TK_TBNAME 184
#define TK_QSTARTTS 185
#define TK_QENDTS 186
#define TK_WSTARTTS 187
#define TK_WENDTS 188
#define TK_WDURATION 189
#define TK_CAST 190
#define TK_NOW 191
#define TK_TODAY 192
#define TK_TIMEZONE 193
#define TK_COUNT 194
#define TK_FIRST 195
#define TK_LAST 196
#define TK_LAST_ROW 197
#define TK_BETWEEN 198
#define TK_IS 199
#define TK_NK_LT 200
#define TK_NK_GT 201
#define TK_NK_LE 202
#define TK_NK_GE 203
#define TK_NK_NE 204
#define TK_MATCH 205
#define TK_NMATCH 206
#define TK_CONTAINS 207
#define TK_JOIN 208
#define TK_INNER 209
#define TK_SELECT 210
#define TK_DISTINCT 211
#define TK_WHERE 212
#define TK_PARTITION 213
#define TK_BY 214
#define TK_SESSION 215
#define TK_STATE_WINDOW 216
#define TK_SLIDING 217
#define TK_FILL 218
#define TK_VALUE 219
#define TK_NONE 220
#define TK_PREV 221
#define TK_LINEAR 222
#define TK_NEXT 223
#define TK_GROUP 224
#define TK_HAVING 225
#define TK_ORDER 226
#define TK_SLIMIT 227
#define TK_SOFFSET 228
#define TK_LIMIT 229
#define TK_OFFSET 230
#define TK_ASC 231
#define TK_NULLS 232
#define TK_ID 233
#define TK_NK_BITNOT 234
#define TK_INSERT 235
#define TK_VALUES 236
#define TK_IMPORT 237
#define TK_NK_SEMI 238
#define TK_FILE 239
#define TK_FILE_FACTOR 112
#define TK_NK_FLOAT 113
#define TK_ROLLUP 114
#define TK_TTL 115
#define TK_SMA 116
#define TK_SHOW 117
#define TK_DATABASES 118
#define TK_TABLES 119
#define TK_STABLES 120
#define TK_MNODES 121
#define TK_MODULES 122
#define TK_QNODES 123
#define TK_FUNCTIONS 124
#define TK_INDEXES 125
#define TK_ACCOUNTS 126
#define TK_APPS 127
#define TK_CONNECTIONS 128
#define TK_LICENCE 129
#define TK_GRANTS 130
#define TK_QUERIES 131
#define TK_SCORES 132
#define TK_TOPICS 133
#define TK_VARIABLES 134
#define TK_BNODES 135
#define TK_SNODES 136
#define TK_CLUSTER 137
#define TK_TRANSACTIONS 138
#define TK_LIKE 139
#define TK_INDEX 140
#define TK_FULLTEXT 141
#define TK_FUNCTION 142
#define TK_INTERVAL 143
#define TK_TOPIC 144
#define TK_AS 145
#define TK_CONSUMER 146
#define TK_GROUP 147
#define TK_DESC 148
#define TK_DESCRIBE 149
#define TK_RESET 150
#define TK_QUERY 151
#define TK_CACHE 152
#define TK_EXPLAIN 153
#define TK_ANALYZE 154
#define TK_VERBOSE 155
#define TK_NK_BOOL 156
#define TK_RATIO 157
#define TK_COMPACT 158
#define TK_VNODES 159
#define TK_IN 160
#define TK_OUTPUTTYPE 161
#define TK_AGGREGATE 162
#define TK_BUFSIZE 163
#define TK_STREAM 164
#define TK_INTO 165
#define TK_TRIGGER 166
#define TK_AT_ONCE 167
#define TK_WINDOW_CLOSE 168
#define TK_WATERMARK 169
#define TK_KILL 170
#define TK_CONNECTION 171
#define TK_TRANSACTION 172
#define TK_MERGE 173
#define TK_VGROUP 174
#define TK_REDISTRIBUTE 175
#define TK_SPLIT 176
#define TK_SYNCDB 177
#define TK_NULL 178
#define TK_NK_QUESTION 179
#define TK_NK_ARROW 180
#define TK_ROWTS 181
#define TK_TBNAME 182
#define TK_QSTARTTS 183
#define TK_QENDTS 184
#define TK_WSTARTTS 185
#define TK_WENDTS 186
#define TK_WDURATION 187
#define TK_CAST 188
#define TK_NOW 189
#define TK_TODAY 190
#define TK_TIMEZONE 191
#define TK_COUNT 192
#define TK_FIRST 193
#define TK_LAST 194
#define TK_LAST_ROW 195
#define TK_BETWEEN 196
#define TK_IS 197
#define TK_NK_LT 198
#define TK_NK_GT 199
#define TK_NK_LE 200
#define TK_NK_GE 201
#define TK_NK_NE 202
#define TK_MATCH 203
#define TK_NMATCH 204
#define TK_CONTAINS 205
#define TK_JOIN 206
#define TK_INNER 207
#define TK_SELECT 208
#define TK_DISTINCT 209
#define TK_WHERE 210
#define TK_PARTITION 211
#define TK_BY 212
#define TK_SESSION 213
#define TK_STATE_WINDOW 214
#define TK_SLIDING 215
#define TK_FILL 216
#define TK_VALUE 217
#define TK_NONE 218
#define TK_PREV 219
#define TK_LINEAR 220
#define TK_NEXT 221
#define TK_HAVING 222
#define TK_ORDER 223
#define TK_SLIMIT 224
#define TK_SOFFSET 225
#define TK_LIMIT 226
#define TK_OFFSET 227
#define TK_ASC 228
#define TK_NULLS 229
#define TK_ID 230
#define TK_NK_BITNOT 231
#define TK_INSERT 232
#define TK_VALUES 233
#define TK_IMPORT 234
#define TK_NK_SEMI 235
#define TK_FILE 236
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
View File
@ -80,8 +80,7 @@ typedef struct SAlterDatabaseStmt {
typedef struct STableOptions {
ENodeType type;
char comment[TSDB_TB_COMMENT_LEN];
int32_t delay;
float filesFactor;
double filesFactor;
SNodeList* pRollupFuncs;
int32_t ttl;
SNodeList* pSma;
@ -239,20 +238,13 @@ typedef struct SDropComponentNodeStmt {
int32_t dnodeId;
} SDropComponentNodeStmt;
typedef struct STopicOptions {
ENodeType type;
bool withTable;
bool withSchema;
bool withTag;
} STopicOptions;
typedef struct SCreateTopicStmt {
ENodeType type;
char topicName[TSDB_TABLE_NAME_LEN];
char subscribeDbName[TSDB_DB_NAME_LEN];
bool ignoreExists;
SNode* pQuery;
STopicOptions* pOptions;
ENodeType type;
char topicName[TSDB_TABLE_NAME_LEN];
char subDbName[TSDB_DB_NAME_LEN];
char subSTbName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SNode* pQuery;
} SCreateTopicStmt;
typedef struct SDropTopicStmt {
View File
@ -95,7 +95,6 @@ typedef enum ENodeType {
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_TOPIC_OPTIONS,
QUERY_NODE_LEFT_VALUE,
// Statement nodes are used in parser and planner module.
View File
@ -59,6 +59,7 @@ typedef struct SScanLogicNode {
int8_t triggerType;
int64_t watermark;
int16_t tsColId;
double filesFactor;
} SScanLogicNode;
typedef struct SJoinLogicNode {
@ -113,6 +114,7 @@ typedef struct SWindowLogicNode {
SNode* pStateExpr;
int8_t triggerType;
int64_t watermark;
double filesFactor;
} SWindowLogicNode;
typedef struct SFillLogicNode {
@ -222,6 +224,7 @@ typedef struct STableScanPhysiNode {
int8_t triggerType;
int64_t watermark;
int16_t tsColId;
double filesFactor;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;
@ -272,6 +275,7 @@ typedef struct SWinodwPhysiNode {
SNode* pTspk; // timestamp primary key
int8_t triggerType;
int64_t watermark;
double filesFactor;
} SWinodwPhysiNode;
typedef struct SIntervalPhysiNode {
View File
@ -36,6 +36,7 @@ typedef struct SPlanContext {
int64_t watermark;
char* pMsg;
int32_t msgLen;
double filesFactor;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
View File
@ -254,6 +254,7 @@ typedef enum ELogicConditionType {
#define TSDB_TRANS_STAGE_LEN 12
#define TSDB_TRANS_TYPE_LEN 16
#define TSDB_TRANS_ERROR_LEN 64
#define TSDB_TRANS_DESC_LEN 128
#define TSDB_STEP_NAME_LEN 32
#define TSDB_STEP_DESC_LEN 128
@ -344,9 +345,6 @@ typedef enum ELogicConditionType {
#define TSDB_MIN_ROLLUP_FILE_FACTOR 0
#define TSDB_MAX_ROLLUP_FILE_FACTOR 1
#define TSDB_DEFAULT_ROLLUP_FILE_FACTOR 0.1
#define TSDB_MIN_ROLLUP_DELAY 1
#define TSDB_MAX_ROLLUP_DELAY 10
#define TSDB_DEFAULT_ROLLUP_DELAY 2
#define TSDB_MIN_TABLE_TTL 0
#define TSDB_DEFAULT_TABLE_TTL 0
View File
@ -1,32 +1,25 @@
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
RUN tar -zxf ${pkgFile}
WORKDIR /root/
RUN cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root
RUN rm /root/${pkgFile}
RUN rm -rf /root/${dirName}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get clean && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./bin/* /usr/bin/
ENV TINI_VERSION v0.19.0
RUN bash -c 'echo -e "Downloading tini-${cpuType} ..."'
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
FROM ubuntu:18.04
WORKDIR /root
ARG pkgFile
ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
COPY ${pkgFile} /root/
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /root/
RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
COPY ./bin/* /usr/bin/
ENTRYPOINT ["/tini", "--", "/usr/bin/entrypoint.sh"]
CMD ["taosd"]
VOLUME [ "/var/lib/taos", "/var/log/taos", "/corefile" ]
View File
@ -11,39 +11,22 @@ DISABLE_ADAPTER=${TAOS_DISABLE_ADAPTER:-0}
unset TAOS_DISABLE_ADAPTER
# to get mnodeEpSet from data dir
DATA_DIR=${TAOS_DATA_DIR:-/var/lib/taos}
DATA_DIR=$(taosd -C|grep -E 'dataDir.*(\S+)' -o |head -n1|sed 's/dataDir *//')
DATA_DIR=${DATA_DIR:-/var/lib/taos}
# append env to custom taos.cfg
CFG_DIR=/tmp/taos
CFG_FILE=$CFG_DIR/taos.cfg
mkdir -p $CFG_DIR >/dev/null 2>&1
[ -f /etc/taos/taos.cfg ] && cat /etc/taos/taos.cfg | grep -E -v "^#|^\s*$" >$CFG_FILE
env-to-cfg >>$CFG_FILE
FQDN=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep fqdn | tail -n1 | sed -E 's/.*fqdn\s+//')
FQDN=$(taosd -C|grep -E 'fqdn.*(\S+)' -o |head -n1|sed 's/fqdn *//')
# ensure the fqdn is resolved as localhost
grep "$FQDN" /etc/hosts >/dev/null || echo "127.0.0.1 $FQDN" >>/etc/hosts
FIRST_EP=$(taosd -C|grep -E 'firstEp.*(\S+)' -o |head -n1|sed 's/firstEp *//')
# parse first ep host and port
FIRST_EP_HOST=${TAOS_FIRST_EP%:*}
FIRST_EP_PORT=${TAOS_FIRST_EP#*:}
FIRST_EP_HOST=${FIRST_EP%:*}
FIRST_EP_PORT=${FIRST_EP#*:}
# in case of custom server port
SERVER_PORT=$(cat $CFG_FILE | grep -E -v "^#|^$" | grep serverPort | tail -n1 | sed -E 's/.*serverPort\s+//')
SERVER_PORT=$(taosd -C|grep -E 'serverPort.*(\S+)' -o |head -n1|sed 's/serverPort *//')
SERVER_PORT=${SERVER_PORT:-6030}
# for other binaries like interpreters
if echo $1 | grep -E "taosd$" - >/dev/null; then
true # will run taosd
else
cp -f $CFG_FILE /etc/taos/taos.cfg || true
$@
exit $?
fi
set +e
ulimit -c unlimited
# set core files pattern, maybe failed
@ -62,22 +45,23 @@ fi
# if has mnode ep set or the host is first ep or not for cluster, just start.
if [ -f "$DATA_DIR/dnode/mnodeEpSet.json" ] ||
[ "$TAOS_FQDN" = "$FIRST_EP_HOST" ]; then
$@ -c $CFG_DIR
$@
# others will first wait the first ep ready.
else
if [ "$TAOS_FIRST_EP" = "" ]; then
echo "run TDengine with single node."
$@ -c $CFG_DIR
$@
exit $?
fi
while true; do
es=0
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -n startup >/dev/null || es=$?
if [ "$es" -eq 0 ]; then
es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check)
echo ${es}
if [ "${es%%:*}" -eq 2 ]; then
echo "execute create dnode"
taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT -s "create dnode \"$FQDN:$SERVER_PORT\";"
break
fi
sleep 1s
done
$@ -c $CFG_DIR
$@
fi
View File
@ -0,0 +1,8 @@
#!/bin/sh
es=$(taos --check)
code=${es%%:*}
if [ "$code" -ne "0" ] && [ "$code" -ne "4" ]; then
exit 0
fi
echo $es
exit 1
View File
@ -2668,25 +2668,23 @@ int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *
}
int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTopicReq *pReq) {
int32_t sqlLen = 0;
int32_t astLen = 0;
if (pReq->sql != NULL) sqlLen = (int32_t)strlen(pReq->sql);
if (pReq->ast != NULL) astLen = (int32_t)strlen(pReq->ast);
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igExists) < 0) return -1;
if (tEncodeI8(&encoder, pReq->withTbName) < 0) return -1;
if (tEncodeI8(&encoder, pReq->withSchema) < 0) return -1;
if (tEncodeI8(&encoder, pReq->withTag) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->subscribeDbName) < 0) return -1;
if (tEncodeI32(&encoder, sqlLen) < 0) return -1;
if (tEncodeI32(&encoder, astLen) < 0) return -1;
if (sqlLen > 0 && tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
if (astLen > 0 && tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
if (tEncodeI8(&encoder, pReq->subType) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->subDbName) < 0) return -1;
if (TOPIC_SUB_TYPE__DB == pReq->subType) {
} else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
if (tEncodeCStr(&encoder, pReq->subStbName) < 0) return -1;
} else {
if (tEncodeI32(&encoder, strlen(pReq->ast)) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
}
if (tEncodeI32(&encoder, strlen(pReq->sql)) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
tEndEncode(&encoder);
@ -2705,26 +2703,26 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igExists) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->withTbName) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->withSchema) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->withTag) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->subscribeDbName) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->subType) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->subDbName) < 0) return -1;
if (TOPIC_SUB_TYPE__DB == pReq->subType) {
} else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
if (tDecodeCStrTo(&decoder, pReq->subStbName) < 0) return -1;
} else {
if (tDecodeI32(&decoder, &astLen) < 0) return -1;
if (astLen > 0) {
pReq->ast = taosMemoryCalloc(1, astLen + 1);
if (pReq->ast == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
}
}
if (tDecodeI32(&decoder, &sqlLen) < 0) return -1;
if (tDecodeI32(&decoder, &astLen) < 0) return -1;
if (sqlLen > 0) {
pReq->sql = taosMemoryCalloc(1, sqlLen + 1);
if (pReq->sql == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
}
if (astLen > 0) {
pReq->ast = taosMemoryCalloc(1, astLen + 1);
if (pReq->ast == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
} else {
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -2733,7 +2731,9 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) {
taosMemoryFreeClear(pReq->sql);
taosMemoryFreeClear(pReq->ast);
if (TOPIC_SUB_TYPE__COLUMN == pReq->subType) {
taosMemoryFreeClear(pReq->ast);
}
}
int32_t tSerializeSCMCreateTopicRsp(void *buf, int32_t bufLen, const SCMCreateTopicRsp *pRsp) {
View File
@ -130,7 +130,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
_OVER:
if (code != 0) {
dError("msg:%p, failed to process since %s", pMsg, terrstr());
dTrace("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pRpc->msgType));
if (terrno != 0) code = terrno;
if (IsReq(pRpc)) {
View File
@ -60,14 +60,12 @@ typedef enum {
typedef enum {
TRN_STAGE_PREPARE = 0,
TRN_STAGE_REDO_LOG = 1,
TRN_STAGE_REDO_ACTION = 2,
TRN_STAGE_ROLLBACK = 3,
TRN_STAGE_UNDO_ACTION = 4,
TRN_STAGE_UNDO_LOG = 5,
TRN_STAGE_COMMIT = 6,
TRN_STAGE_COMMIT_LOG = 7,
TRN_STAGE_FINISHED = 8
TRN_STAGE_REDO_ACTION = 1,
TRN_STAGE_ROLLBACK = 2,
TRN_STAGE_UNDO_ACTION = 3,
TRN_STAGE_COMMIT = 4,
TRN_STAGE_COMMIT_ACTION = 5,
TRN_STAGE_FINISHED = 6
} ETrnStage;
typedef enum {
@ -131,7 +129,7 @@ typedef enum {
typedef enum {
TRN_EXEC_PARALLEL = 0,
TRN_EXEC_ONE_BY_ONE = 1,
TRN_EXEC_NO_PARALLEL = 1,
} ETrnExecType;
typedef enum {
@ -168,16 +166,16 @@ typedef struct {
SRpcHandleInfo rpcInfo;
void* rpcRsp;
int32_t rpcRspLen;
SArray* redoLogs;
SArray* undoLogs;
SArray* commitLogs;
int32_t redoActionPos;
SArray* redoActions;
SArray* undoActions;
SArray* commitActions;
int64_t createdTime;
int64_t lastExecTime;
int64_t dbUid;
char dbname[TSDB_DB_FNAME_LEN];
char lastError[TSDB_TRANS_ERROR_LEN];
char desc[TSDB_TRANS_DESC_LEN];
int32_t startFunc;
int32_t stopFunc;
int32_t paramLen;
@ -454,17 +452,17 @@ int32_t tEncodeSMqOffsetObj(void** buf, const SMqOffsetObj* pOffset);
void* tDecodeSMqOffsetObj(void* buf, SMqOffsetObj* pOffset);
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
int64_t createTime;
int64_t updateTime;
int64_t uid;
int64_t dbUid;
int32_t version;
int8_t subType; // db or table
int8_t withTbName;
int8_t withSchema;
int8_t withTag;
char name[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
int64_t createTime;
int64_t updateTime;
int64_t uid;
int64_t dbUid;
int32_t version;
int8_t subType; // column, db or stable
// int8_t withTbName;
// int8_t withSchema;
// int8_t withTag;
SRWLatch lock;
int32_t consumerCnt;
int32_t sqlLen;
@ -527,14 +525,14 @@ int32_t tEncodeSMqConsumerEp(void** buf, const SMqConsumerEp* pEp);
void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp);
typedef struct {
char key[TSDB_SUBSCRIBE_KEY_LEN];
SRWLatch lock;
int64_t dbUid;
int32_t vgNum;
int8_t subType;
int8_t withTbName;
int8_t withSchema;
int8_t withTag;
char key[TSDB_SUBSCRIBE_KEY_LEN];
SRWLatch lock;
int64_t dbUid;
int32_t vgNum;
int8_t subType;
// int8_t withTbName;
// int8_t withSchema;
// int8_t withTag;
SHashObj* consumerHash; // consumerId -> SMqConsumerEp
SArray* unassignedVgs; // SArray<SMqVgEp*>
} SMqSubscribeObj;
View File
@ -30,7 +30,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream);
int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr,
int32_t* pLen);
int32_t* pLen, double filesFactor);
#ifdef __cplusplus
}
View File
@ -26,31 +26,24 @@ typedef enum {
TRANS_START_FUNC_TEST = 1,
TRANS_STOP_FUNC_TEST = 2,
TRANS_START_FUNC_MQ_REB = 3,
TRANS_STOP_FUNC_TEST_MQ_REB = 4,
TRANS_STOP_FUNC_MQ_REB = 4,
} ETrnFunc;
typedef struct {
SEpSet epSet;
tmsg_t msgType;
int8_t msgSent;
int8_t msgReceived;
int32_t errCode;
int32_t acceptableCode;
int32_t contLen;
void *pCont;
} STransAction;
typedef struct {
int32_t id;
int32_t errCode;
int32_t acceptableCode;
int8_t stage;
int8_t isRaw;
int8_t rawWritten;
int8_t msgSent;
int8_t msgReceived;
tmsg_t msgType;
SEpSet epSet;
int32_t contLen;
void *pCont;
SSdbRaw *pRaw;
} STransLog;
typedef struct {
ETrnStep stepType;
STransAction redoAction;
STransAction undoAction;
STransLog redoLog;
STransLog undoLog;
} STransStep;
} STransAction;
typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen);
@ -69,7 +62,7 @@ int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
void mndTransSetExecOneByOne(STrans *pTrans);
void mndTransSetNoParallel(STrans *pTrans);
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
void mndTransProcessRsp(SRpcMsg *pRsp);
View File
@ -78,10 +78,8 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
mDebug("acct:%s, will be created while deploy sdb, raw:%p", acctObj.acct, pRaw);
#if 0
return sdbWrite(pMnode->pSdb, pRaw);
#else
mDebug("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw);
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_ACCT, NULL);
if (pTrans == NULL) {
mError("acct:%s, failed to create since %s", acctObj.acct, terrstr());
@ -94,7 +92,6 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
mndTransDrop(pTrans);
return -1;
}
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
if (mndTransPrepare(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
@ -104,7 +101,6 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
mndTransDrop(pTrans);
return 0;
#endif
}
static SSdbRaw *mndAcctActionEncode(SAcctObj *pAcct) {
View File
@ -172,13 +172,13 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) {
clusterObj.id = mndGenerateUid(clusterObj.name, TSDB_CLUSTER_ID_LEN);
clusterObj.id = (clusterObj.id >= 0 ? clusterObj.id : -clusterObj.id);
pMnode->clusterId = clusterObj.id;
mDebug("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name);
mInfo("cluster:%" PRId64 ", name is %s", clusterObj.id, clusterObj.name);
SSdbRaw *pRaw = mndClusterActionEncode(&clusterObj);
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
mDebug("cluster:%" PRId64 ", will be created while deploy sdb, raw:%p", clusterObj.id, pRaw);
mDebug("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw);
#if 0
return sdbWrite(pMnode->pSdb, pRaw);
#else
View File
@ -1314,7 +1314,7 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
SDbObj *pDb = mndAcquireDb(pMnode, pDbVgVersion->dbFName);
if (pDb == NULL) {
mDebug("db:%s, no exist", pDbVgVersion->dbFName);
mTrace("db:%s, no exist", pDbVgVersion->dbFName);
memcpy(usedbRsp.db, pDbVgVersion->dbFName, TSDB_DB_FNAME_LEN);
usedbRsp.uid = pDbVgVersion->dbId;
usedbRsp.vgVersion = -1;
View File
@ -396,9 +396,9 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {
pSubNew->dbUid = pSub->dbUid;
pSubNew->subType = pSub->subType;
pSubNew->withTbName = pSub->withTbName;
pSubNew->withSchema = pSub->withSchema;
pSubNew->withTag = pSub->withTag;
/*pSubNew->withTbName = pSub->withTbName;*/
/*pSubNew->withSchema = pSub->withSchema;*/
/*pSubNew->withTag = pSub->withTag;*/
pSubNew->vgNum = pSub->vgNum;
pSubNew->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
@ -431,9 +431,9 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
tlen += taosEncodeFixedI64(buf, pSub->dbUid);
tlen += taosEncodeFixedI32(buf, pSub->vgNum);
tlen += taosEncodeFixedI8(buf, pSub->subType);
tlen += taosEncodeFixedI8(buf, pSub->withTbName);
tlen += taosEncodeFixedI8(buf, pSub->withSchema);
tlen += taosEncodeFixedI8(buf, pSub->withTag);
/*tlen += taosEncodeFixedI8(buf, pSub->withTbName);*/
/*tlen += taosEncodeFixedI8(buf, pSub->withSchema);*/
/*tlen += taosEncodeFixedI8(buf, pSub->withTag);*/
void *pIter = NULL;
int32_t sz = taosHashGetSize(pSub->consumerHash);
@ -458,9 +458,9 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
buf = taosDecodeFixedI64(buf, &pSub->dbUid);
buf = taosDecodeFixedI32(buf, &pSub->vgNum);
buf = taosDecodeFixedI8(buf, &pSub->subType);
buf = taosDecodeFixedI8(buf, &pSub->withTbName);
buf = taosDecodeFixedI8(buf, &pSub->withSchema);
buf = taosDecodeFixedI8(buf, &pSub->withTag);
/*buf = taosDecodeFixedI8(buf, &pSub->withTbName);*/
/*buf = taosDecodeFixedI8(buf, &pSub->withSchema);*/
/*buf = taosDecodeFixedI8(buf, &pSub->withTag);*/
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
View File
@ -98,7 +98,7 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) {
if (pRaw == NULL) return -1;
if (sdbSetRawStatus(pRaw, SDB_STATUS_READY) != 0) return -1;
mDebug("dnode:%d, will be created while deploy sdb, raw:%p", dnodeObj.id, pRaw);
mDebug("dnode:%d, will be created when deploying, raw:%p", dnodeObj.id, pRaw);
#if 0
return sdbWrite(pMnode->pSdb, pRaw);
@ -388,9 +388,10 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
mndReleaseMnode(pMnode, pObj);
}
int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE);
int64_t curMs = taosGetTimestampMs();
bool online = mndIsDnodeOnline(pMnode, pDnode, curMs);
bool dnodeChanged = (statusReq.dnodeVer != sdbGetTableVer(pMnode->pSdb, SDB_DNODE));
bool dnodeChanged = (statusReq.dnodeVer != dnodeVer);
bool reboot = (pDnode->rebootTime != statusReq.rebootTime);
bool needCheck = !online || dnodeChanged || reboot;
@ -433,7 +434,8 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
if (!online) {
mInfo("dnode:%d, from offline to online", pDnode->id);
} else {
mDebug("dnode:%d, send dnode eps", pDnode->id);
mDebug("dnode:%d, send dnode epset, online:%d ver:% " PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online,
statusReq.dnodeVer, dnodeVer, reboot);
}
pDnode->rebootTime = statusReq.rebootTime;
@ -441,7 +443,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
pDnode->numOfSupportVnodes = statusReq.numOfSupportVnodes;
SStatusRsp statusRsp = {0};
statusRsp.dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE);
statusRsp.dnodeVer = dnodeVer;
statusRsp.dnodeCfg.dnodeId = pDnode->id;
statusRsp.dnodeCfg.clusterId = pMnode->clusterId;
statusRsp.pDnodeEps = taosArrayInit(mndGetDnodeSize(pMnode), sizeof(SDnodeEp));
View File
@ -472,7 +472,7 @@ int32_t mndProcessRpcMsg(SRpcMsg *pMsg) {
} else if (code == 0) {
mTrace("msg:%p, successfully processed and response", pMsg);
} else {
mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
mDebug("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
TMSG_INFO(pMsg->msgType));
}
View File
@ -90,7 +90,7 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
mDebug("mnode:%d, will be created while deploy sdb, raw:%p", mnodeObj.id, pRaw);
mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
#if 0
return sdbWrite(pMnode->pSdb, pRaw);
@ -367,7 +367,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
mndTransSetExecOneByOne(pTrans);
mndTransSetNoParallel(pTrans);
if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER;
@ -539,7 +539,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
if (pTrans == NULL) goto _OVER;
mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
mndTransSetExecOneByOne(pTrans);
mndTransSetNoParallel(pTrans);
if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER;
View File
@ -36,7 +36,7 @@
extern bool tsStreamSchedV;
int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int64_t watermark, char** pStr,
int32_t* pLen) {
int32_t* pLen, double filesFactor) {
SNode* pAst = NULL;
SQueryPlan* pPlan = NULL;
terrno = TSDB_CODE_SUCCESS;
@ -58,6 +58,7 @@ int32_t mndConvertRSmaTask(const char* ast, int64_t uid, int8_t triggerType, int
.rSmaQuery = true,
.triggerType = triggerType,
.watermark = watermark,
.filesFactor = filesFactor,
};
if (qCreateQueryPlan(&cxt, &pPlan, NULL) < 0) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
@ -506,7 +507,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
SQueryPlan* pPlan = NULL;
SSubplan* plan = NULL;
if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) {
pPlan = qStringToQueryPlan(pTopic->physicalPlan);
if (pPlan == NULL) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
@ -552,7 +553,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
mDebug("init subscription %s, assign vg: %d", pSub->key, pVgEp->vgId);
if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) {
int32_t msgLen;
plan->execNode.epSet = pVgEp->epSet;

View File
mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name);
mndTransSetDbInfo(pTrans, pDb);
mndTransSetExecOneByOne(pTrans);
mndTransSetNoParallel(pTrans);
if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
View File
@ -397,13 +397,13 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt
req.pRSmaParam.xFilesFactor = pStb->xFilesFactor;
req.pRSmaParam.delay = pStb->delay;
if (pStb->ast1Len > 0) {
if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len) !=
if (mndConvertRSmaTask(pStb->pAst1, pStb->uid, 0, 0, &req.pRSmaParam.qmsg1, &req.pRSmaParam.qmsg1Len, req.pRSmaParam.xFilesFactor) !=
TSDB_CODE_SUCCESS) {
return NULL;
}
}
if (pStb->ast2Len > 0) {
if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len) !=
if (mndConvertRSmaTask(pStb->pAst2, pStb->uid, 0, 0, &req.pRSmaParam.qmsg2, &req.pRSmaParam.qmsg2Len, req.pRSmaParam.xFilesFactor) !=
TSDB_CODE_SUCCESS) {
return NULL;
}
@ -1597,7 +1597,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
pReq->info.rspLen = rspLen;
code = 0;
mDebug("stb:%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName);
mTrace("%s.%s, meta is retrieved", infoReq.dbFName, infoReq.tbName);
_OVER:
if (code != 0) {
View File
@ -93,9 +93,9 @@ static SMqSubscribeObj *mndCreateSub(SMnode *pMnode, const SMqTopicObj *pTopic,
}
pSub->dbUid = pTopic->dbUid;
pSub->subType = pTopic->subType;
pSub->withTbName = pTopic->withTbName;
pSub->withSchema = pTopic->withSchema;
pSub->withTag = pTopic->withTag;
/*pSub->withTbName = pTopic->withTbName;*/
/*pSub->withSchema = pTopic->withSchema;*/
/*pSub->withTag = pTopic->withTag;*/
ASSERT(pSub->unassignedVgs->size == 0);
ASSERT(taosHashGetSize(pSub->consumerHash) == 0);
@ -120,9 +120,9 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
req.vgId = pRebVg->pVgEp->vgId;
req.qmsg = pRebVg->pVgEp->qmsg;
req.subType = pSub->subType;
req.withTbName = pSub->withTbName;
req.withSchema = pSub->withSchema;
req.withTag = pSub->withTag;
/*req.withTbName = pSub->withTbName;*/
/*req.withSchema = pSub->withSchema;*/
/*req.withTag = pSub->withTag;*/
strncpy(req.subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
int32_t tlen = sizeof(SMsgHead) + tEncodeSMqRebVgReq(NULL, &req);
@ -501,7 +501,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
// 4. TODO commit log: modification log
// 5. set cb
mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_TEST_MQ_REB, NULL, 0);
mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_MQ_REB, NULL, 0);
// 6. execution
if (mndTransPrepare(pMnode, pTrans) != 0) {
View File
@ -65,7 +65,7 @@ int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {
void mndRestoreFinish(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
if (!pMnode->deploy) {
mInfo("mnode sync restore finished");
mInfo("mnode sync restore finished, and will handle outstanding transactions");
mndTransPullup(pMnode);
mndSetRestore(pMnode, true);
} else {
@ -244,7 +244,7 @@ void mndSyncStart(SMnode *pMnode) {
} else {
syncStart(pMgmt->sync);
}
mDebug("sync:%" PRId64 " is started, standby:%d", pMgmt->sync, pMgmt->standby);
mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
}
void mndSyncStop(SMnode *pMnode) {}
View File
@ -96,9 +96,9 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
SDB_SET_INT64(pRaw, dataPos, pTopic->dbUid, TOPIC_ENCODE_OVER);
SDB_SET_INT32(pRaw, dataPos, pTopic->version, TOPIC_ENCODE_OVER);
SDB_SET_INT8(pRaw, dataPos, pTopic->subType, TOPIC_ENCODE_OVER);
SDB_SET_INT8(pRaw, dataPos, pTopic->withTbName, TOPIC_ENCODE_OVER);
SDB_SET_INT8(pRaw, dataPos, pTopic->withSchema, TOPIC_ENCODE_OVER);
SDB_SET_INT8(pRaw, dataPos, pTopic->withTag, TOPIC_ENCODE_OVER);
/*SDB_SET_INT8(pRaw, dataPos, pTopic->withTbName, TOPIC_ENCODE_OVER);*/
/*SDB_SET_INT8(pRaw, dataPos, pTopic->withSchema, TOPIC_ENCODE_OVER);*/
/*SDB_SET_INT8(pRaw, dataPos, pTopic->withTag, TOPIC_ENCODE_OVER);*/
SDB_SET_INT32(pRaw, dataPos, pTopic->consumerCnt, TOPIC_ENCODE_OVER);
SDB_SET_INT32(pRaw, dataPos, pTopic->sqlLen, TOPIC_ENCODE_OVER);
@ -168,9 +168,9 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pTopic->dbUid, TOPIC_DECODE_OVER);
SDB_GET_INT32(pRaw, dataPos, &pTopic->version, TOPIC_DECODE_OVER);
SDB_GET_INT8(pRaw, dataPos, &pTopic->subType, TOPIC_DECODE_OVER);
SDB_GET_INT8(pRaw, dataPos, &pTopic->withTbName, TOPIC_DECODE_OVER);
SDB_GET_INT8(pRaw, dataPos, &pTopic->withSchema, TOPIC_DECODE_OVER);
SDB_GET_INT8(pRaw, dataPos, &pTopic->withTag, TOPIC_DECODE_OVER);
/*SDB_GET_INT8(pRaw, dataPos, &pTopic->withTbName, TOPIC_DECODE_OVER);*/
/*SDB_GET_INT8(pRaw, dataPos, &pTopic->withSchema, TOPIC_DECODE_OVER);*/
/*SDB_GET_INT8(pRaw, dataPos, &pTopic->withTag, TOPIC_DECODE_OVER);*/
SDB_GET_INT32(pRaw, dataPos, &pTopic->consumerCnt, TOPIC_DECODE_OVER);
@ -308,11 +308,19 @@ static SDDropTopicReq *mndBuildDropTopicMsg(SMnode *pMnode, SVgObj *pVgroup, SMq
}
static int32_t mndCheckCreateTopicReq(SCMCreateTopicReq *pCreate) {
if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->subscribeDbName[0] == 0) {
terrno = TSDB_CODE_MND_INVALID_TOPIC;
return -1;
terrno = TSDB_CODE_MND_INVALID_TOPIC;
if (pCreate->sql == NULL) return -1;
if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) {
if (pCreate->ast == NULL || pCreate->ast[0] == 0) return -1;
} else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) {
if (pCreate->subStbName[0] == 0) return -1;
} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {
if (pCreate->subDbName[0] == 0) return -1;
}
terrno = TSDB_CODE_SUCCESS;
return 0;
}
@ -328,14 +336,13 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
topicObj.version = 1;
topicObj.sql = strdup(pCreate->sql);
topicObj.sqlLen = strlen(pCreate->sql) + 1;
/*topicObj.refConsumerCnt = 0;*/
topicObj.subType = pCreate->subType;
if (pCreate->ast && pCreate->ast[0]) {
if (pCreate->subType == TOPIC_SUB_TYPE__COLUMN) {
topicObj.ast = strdup(pCreate->ast);
topicObj.astLen = strlen(pCreate->ast) + 1;
topicObj.subType = TOPIC_SUB_TYPE__TABLE;
topicObj.withTbName = pCreate->withTbName;
topicObj.withSchema = pCreate->withSchema;
/*topicObj.withTbName = pCreate->withTbName;*/
/*topicObj.withSchema = pCreate->withSchema;*/
SNode *pAst = NULL;
if (nodesStringToNode(pCreate->ast, &pAst) != 0) {
@ -368,13 +375,12 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
taosMemoryFree(topicObj.sql);
return -1;
}
} else {
topicObj.ast = NULL;
topicObj.astLen = 0;
topicObj.physicalPlan = NULL;
topicObj.subType = TOPIC_SUB_TYPE__DB;
topicObj.withTbName = 1;
topicObj.withSchema = 1;
/*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/
/*topicObj.ast = NULL;*/
/*topicObj.astLen = 0;*/
/*topicObj.physicalPlan = NULL;*/
/*topicObj.withTbName = 1;*/
/*topicObj.withSchema = 1;*/
}
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_TYPE_CREATE_TOPIC, pReq);
@ -442,7 +448,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
goto CREATE_TOPIC_OVER;
}
pDb = mndAcquireDb(pMnode, createTopicReq.subscribeDbName);
pDb = mndAcquireDb(pMnode, createTopicReq.subDbName);
if (pDb == NULL) {
terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
goto CREATE_TOPIC_OVER;
File diff suppressed because it is too large

View File
@ -77,7 +77,7 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
mDebug("user:%s, will be created while deploy sdb, raw:%p", userObj.user, pRaw);
mDebug("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw);
#if 0
return sdbWrite(pMnode->pSdb, pRaw);
View File
@ -501,7 +501,7 @@ int32_t mndAllocVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj **ppVgroups) {
*ppVgroups = pVgroups;
code = 0;
mInfo("db:%s, %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications);
mInfo("db:%s, total %d vgroups is alloced, replica:%d", pDb->name, pDb->cfg.numOfVgroups, pDb->cfg.replications);
_OVER:
if (code != 0) taosMemoryFree(pVgroups);
@ -539,7 +539,7 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) {
pVgid->role = TAOS_SYNC_STATE_FOLLOWER;
pDnode->numOfVnodes++;
mInfo("db:%s, vgId:%d, vn:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId);
mInfo("db:%s, vgId:%d, vnode_index:%d dnode:%d is added", pVgroup->dbName, pVgroup->vgId, maxPos, pVgid->dnodeId);
maxPos++;
if (maxPos == 3) return 0;
}
View File
@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME dbTest
COMMAND dbTest
)
if(NOT TD_WINDOWS)
add_test(
NAME dbTest
COMMAND dbTest
)
endif(NOT TD_WINDOWS)
View File
@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME smaTest
COMMAND smaTest
)
if(NOT TD_WINDOWS)
add_test(
NAME smaTest
COMMAND smaTest
)
endif(NOT TD_WINDOWS)
View File
@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME stbTest
COMMAND stbTest
)
if(NOT TD_WINDOWS)
add_test(
NAME stbTest
COMMAND stbTest
)
endif(NOT TD_WINDOWS)
View File
@ -168,6 +168,7 @@ typedef struct SSdb {
char *currDir;
char *tmpDir;
int64_t lastCommitVer;
int64_t lastCommitTerm;
int64_t curVer;
int64_t curTerm;
int64_t tableVer[SDB_MAX];
View File
@ -55,6 +55,7 @@ SSdb *sdbInit(SSdbOpt *pOption) {
pSdb->curVer = -1;
pSdb->curTerm = -1;
pSdb->lastCommitVer = -1;
pSdb->lastCommitTerm = -1;
pSdb->pMnode = pOption->pMnode;
taosThreadMutexInit(&pSdb->filelock, NULL);
mDebug("sdb init successfully");
View File
@ -70,6 +70,7 @@ static void sdbResetData(SSdb *pSdb) {
pSdb->curVer = -1;
pSdb->curTerm = -1;
pSdb->lastCommitVer = -1;
pSdb->lastCommitTerm = -1;
mDebug("sdb reset successfully");
}
@ -211,12 +212,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
mDebug("start to read file:%s", file);
mDebug("start to read sdb file:%s", file);
SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100);
if (pRaw == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
mError("failed read file since %s", terrstr());
mError("failed read sdb file since %s", terrstr());
return -1;
}
@ -224,12 +225,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
if (pFile == NULL) {
taosMemoryFree(pRaw);
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to read file:%s since %s", file, terrstr());
mError("failed to read sdb file:%s since %s", file, terrstr());
return 0;
}
if (sdbReadFileHead(pSdb, pFile) != 0) {
mError("failed to read file:%s head since %s", file, terrstr());
mError("failed to read sdb file:%s head since %s", file, terrstr());
taosMemoryFree(pRaw);
taosCloseFile(&pFile);
return -1;
@ -245,13 +246,13 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
if (ret < 0) {
code = TAOS_SYSTEM_ERROR(errno);
mError("failed to read file:%s since %s", file, tstrerror(code));
mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
if (ret != readLen) {
code = TSDB_CODE_FILE_CORRUPTED;
mError("failed to read file:%s since %s", file, tstrerror(code));
mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
@ -259,34 +260,36 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
ret = taosReadFile(pFile, pRaw->pData, readLen);
if (ret < 0) {
code = TAOS_SYSTEM_ERROR(errno);
mError("failed to read file:%s since %s", file, tstrerror(code));
mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
if (ret != readLen) {
code = TSDB_CODE_FILE_CORRUPTED;
mError("failed to read file:%s since %s", file, tstrerror(code));
mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
int32_t totalLen = sizeof(SSdbRaw) + pRaw->dataLen + sizeof(int32_t);
if ((!taosCheckChecksumWhole((const uint8_t *)pRaw, totalLen)) != 0) {
code = TSDB_CODE_CHECKSUM_ERROR;
mError("failed to read file:%s since %s", file, tstrerror(code));
mError("failed to read sdb file:%s since %s", file, tstrerror(code));
break;
}
code = sdbWriteWithoutFree(pSdb, pRaw);
if (code != 0) {
mError("failed to read file:%s since %s", file, terrstr());
mError("failed to read sdb file:%s since %s", file, terrstr());
goto _OVER;
}
}
code = 0;
pSdb->lastCommitVer = pSdb->curVer;
pSdb->lastCommitTerm = pSdb->curTerm;
memcpy(pSdb->tableVer, tableVer, sizeof(tableVer));
mDebug("read file:%s successfully, ver:%" PRId64, file, pSdb->lastCommitVer);
mDebug("read sdb file:%s successfully, ver:%" PRId64 " term:%" PRId64, file, pSdb->lastCommitVer,
pSdb->lastCommitTerm);
_OVER:
taosCloseFile(&pFile);
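The read loop above implies the on-disk record framing: an SSdbRaw header, dataLen bytes of payload, and a trailing int32 checksum covering the whole record. A condensed sketch of the validation step, assuming that layout:

// sketch: each record is [SSdbRaw header][dataLen payload][int32_t checksum]
int32_t totalLen = sizeof(SSdbRaw) + pRaw->dataLen + sizeof(int32_t);
if (!taosCheckChecksumWhole((const uint8_t *)pRaw, totalLen)) {
  // checksum mismatch: treat the file as corrupted and stop loading
}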
@ -302,7 +305,7 @@ int32_t sdbReadFile(SSdb *pSdb) {
sdbResetData(pSdb);
int32_t code = sdbReadFileImp(pSdb);
if (code != 0) {
mError("failed to read sdb since %s", terrstr());
mError("failed to read sdb file since %s", terrstr());
sdbResetData(pSdb);
}
@ -318,18 +321,19 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
char curfile[PATH_MAX] = {0};
snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
mDebug("start to write file:%s, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64, curfile, pSdb->curVer,
pSdb->curTerm, pSdb->lastCommitVer);
mDebug("start to write sdb file, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64 " term:%" PRId64
" file:%s",
pSdb->curVer, pSdb->curTerm, pSdb->lastCommitVer, pSdb->lastCommitTerm, curfile);
TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to open file:%s for write since %s", tmpfile, terrstr());
mError("failed to open sdb file:%s for write since %s", tmpfile, terrstr());
return -1;
}
if (sdbWriteFileHead(pSdb, pFile) != 0) {
mError("failed to write file:%s head since %s", tmpfile, terrstr());
mError("failed to write sdb file:%s head since %s", tmpfile, terrstr());
taosCloseFile(&pFile);
return -1;
}
@ -338,7 +342,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
SdbEncodeFp encodeFp = pSdb->encodeFps[i];
if (encodeFp == NULL) continue;
mTrace("write %s to file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i));
mTrace("write %s to sdb file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i));
SHashObj *hash = pSdb->hashObjs[i];
TdThreadRwlock *pLock = &pSdb->locks[i];
@ -394,7 +398,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
code = taosFsyncFile(pFile);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
mError("failed to sync file:%s since %s", tmpfile, tstrerror(code));
mError("failed to sync sdb file:%s since %s", tmpfile, tstrerror(code));
}
}
@ -404,15 +408,17 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
code = taosRenameFile(tmpfile, curfile);
if (code != 0) {
code = TAOS_SYSTEM_ERROR(errno);
mError("failed to write file:%s since %s", curfile, tstrerror(code));
mError("failed to write sdb file:%s since %s", curfile, tstrerror(code));
}
}
if (code != 0) {
mError("failed to write file:%s since %s", curfile, tstrerror(code));
mError("failed to write sdb file:%s since %s", curfile, tstrerror(code));
} else {
pSdb->lastCommitVer = pSdb->curVer;
mDebug("write file:%s successfully, ver:%" PRId64 " term:%" PRId64, curfile, pSdb->lastCommitVer, pSdb->curTerm);
pSdb->lastCommitTerm = pSdb->curTerm;
mDebug("write sdb file successfully, ver:%" PRId64 " term:%" PRId64 " file:%s", pSdb->lastCommitVer,
pSdb->lastCommitTerm, curfile);
}
terrno = code;
@ -427,7 +433,7 @@ int32_t sdbWriteFile(SSdb *pSdb) {
taosThreadMutexLock(&pSdb->filelock);
int32_t code = sdbWriteFileImp(pSdb);
if (code != 0) {
mError("failed to write sdb since %s", terrstr());
mError("failed to write sdb file since %s", terrstr());
}
taosThreadMutexUnlock(&pSdb->filelock);
return code;
@ -493,7 +499,7 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
if (taosCopyFile(datafile, pIter->name) < 0) {
taosThreadMutexUnlock(&pSdb->filelock);
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr());
mError("failed to copy sdb file %s to %s since %s", datafile, pIter->name, terrstr());
sdbCloseIter(pIter);
return -1;
}
@ -502,7 +508,7 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
pIter->file = taosOpenFile(pIter->name, TD_FILE_READ);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to open file:%s since %s", pIter->name, terrstr());
mError("failed to open sdb file:%s since %s", pIter->name, terrstr());
sdbCloseIter(pIter);
return -1;
}

View File

@ -51,6 +51,7 @@ target_sources(
# tq
"src/tq/tq.c"
"src/tq/tqExec.c"
"src/tq/tqCommit.c"
"src/tq/tqOffset.c"
"src/tq/tqPush.c"

View File

@ -44,21 +44,27 @@ extern "C" {
typedef struct STqOffsetCfg STqOffsetCfg;
typedef struct STqOffsetStore STqOffsetStore;
// tqRead
struct STqReadHandle {
int64_t ver;
SHashObj* tbIdHash;
const SSubmitReq* pMsg;
SSubmitBlk* pBlock;
SSubmitMsgIter msgIter;
SSubmitBlkIter blkIter;
SMeta* pVnodeMeta;
SArray* pColIdList; // SArray<int16_t>
int32_t sver;
int64_t cachedSchemaUid;
SSchemaWrapper* pSchemaWrapper;
STSchema* pSchema;
SMeta* pVnodeMeta;
SHashObj* tbIdHash;
SArray* pColIdList; // SArray<int16_t>
int32_t cachedSchemaVer;
int64_t cachedSchemaUid;
SSchemaWrapper* pSchemaWrapper;
STSchema* pSchema;
};
// tqPush
typedef struct {
int64_t consumerId;
int32_t epoch;
@ -68,14 +74,15 @@ typedef struct {
SRpcMsg* handle;
} STqPushHandle;
#if 0
typedef struct {
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int64_t consumerId;
int32_t epoch;
int8_t subType;
int8_t withTbName;
int8_t withSchema;
int8_t withTag;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int64_t consumerId;
int32_t epoch;
int8_t subType;
// int8_t withTbName;
// int8_t withSchema;
// int8_t withTag;
char* qmsg;
SHashObj* pDropTbUid;
STqPushHandle pushHandle;
@ -85,15 +92,55 @@ typedef struct {
STqReadHandle* pExecReader[5];
qTaskInfo_t task[5];
} STqExec;
#endif
int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec);
int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec);
// tqExec
typedef struct {
char* qmsg;
qTaskInfo_t task[5];
} STqExecCol;
typedef struct {
int64_t suid;
} STqExecTb;
typedef struct {
SHashObj* pFilterOutTbUid;
} STqExecDb;
typedef struct {
int8_t subType;
STqReadHandle* pExecReader[5];
union {
STqExecCol execCol;
STqExecTb execTb;
STqExecDb execDb;
} exec;
} STqExecHandle;
typedef struct {
// info
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int64_t consumerId;
int32_t epoch;
// reader
SWalReadHandle* pWalReader;
// push
STqPushHandle pushHandle;
// exec
STqExecHandle execHandle;
} STqHandle;
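STqExecHandle replaces the flat STqExec with a union selected by subType, so each subscription kind carries only its own state. A minimal dispatch sketch over the types declared above (tqUseExec is a hypothetical name for illustration; the TOPIC_SUB_TYPE__* constants are the ones used elsewhere in this commit):

// sketch: subType picks the active union member
static void tqUseExec(STqExecHandle* pExec) {
  switch (pExec->subType) {
    case TOPIC_SUB_TYPE__COLUMN:
      // column subscription: pExec->exec.execCol.qmsg and .task[]
      break;
    case TOPIC_SUB_TYPE__TABLE:
      // table subscription: pExec->exec.execTb.suid
      break;
    case TOPIC_SUB_TYPE__DB:
      // db subscription: pExec->exec.execDb.pFilterOutTbUid
      break;
  }
}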
struct STQ {
char* path;
SHashObj* pushMgr; // consumerId -> STqExec*
SHashObj* execs; // subKey -> STqExec
SHashObj* pStreamTasks;
SHashObj* pushMgr; // consumerId -> STqHandle*
SHashObj* handles; // subKey -> STqHandle
SHashObj* pStreamTasks; // taskId -> SStreamTask
SVnode* pVnode;
SWal* pWal;
TDB* pMetaStore;
@ -111,6 +158,16 @@ static STqMgmt tqMgmt = {0};
int tqInit();
void tqCleanUp();
// int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec);
// int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec);
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** pHeadWithCkSum);
int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId);
// tqOffset
STqOffsetStore* STqOffsetOpen(STqOffsetCfg*);
void STqOffsetClose(STqOffsetStore*);

View File

@ -149,7 +149,7 @@ int32_t tdUpdateExpireWindow(SSma* pSma, SSubmitReq* pMsg, int64_t version);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
int32_t tdProcessRSmaCreate(SSma* pSma, SMeta* pMeta, SVCreateStbReq* pReq, SMsgCb* pMsgCb);
int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq* pReq);
int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType);
int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore);

View File

@ -165,7 +165,10 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
* @param pReq
* @return int32_t
*/
int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsgCb *pMsgCb) {
int32_t tdProcessRSmaCreate(SVnode *pVnode, SVCreateStbReq *pReq) {
SSma *pSma = pVnode->pSma;
SMeta *pMeta = pVnode->pMeta;
SMsgCb *pMsgCb = &pVnode->msgCb;
if (!pReq->rollup) {
smaTrace("vgId:%d return directly since no rollup for stable %s %" PRIi64, SMA_VID(pSma), pReq->name, pReq->suid);
return TSDB_CODE_SUCCESS;
@ -210,6 +213,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SMeta *pMeta, SVCreateStbReq *pReq, SMsg
.reader = pReadHandle,
.meta = pMeta,
.pMsgCb = pMsgCb,
.vnode = pVnode,
};
if (param->qmsg1) {

View File

@ -51,10 +51,10 @@ int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_
return strcmp(pKey1, pKey2);
}
int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) {
int32_t tqStoreHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
int32_t code;
int32_t vlen;
tEncodeSize(tEncodeSTqExec, pExec, vlen, code);
tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
ASSERT(code == 0);
void* buf = taosMemoryCalloc(1, vlen);
@ -65,7 +65,7 @@ int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) {
SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
if (tEncodeSTqExec(&encoder, pExec) < 0) {
if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
ASSERT(0);
}
@ -102,7 +102,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
pTq->pVnode = pVnode;
pTq->pWal = pWal;
pTq->execs = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
pTq->pStreamTasks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
@ -112,7 +112,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
ASSERT(0);
}
if (tdbTbOpen("exec", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) {
if (tdbTbOpen("handles", -1, -1, tqExecKeyCompare, pTq->pMetaStore, &pTq->pExecStore) < 0) {
ASSERT(0);
}
@ -122,10 +122,6 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
ASSERT(0);
}
/*if (tdbBegin(pTq->pMetaStore, &txn) < 0) {*/
/*ASSERT(0);*/
/*}*/
TBC* pCur;
if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) {
ASSERT(0);
@ -138,30 +134,31 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
tdbTbcMoveToFirst(pCur);
SDecoder decoder;
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqExec exec;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqExec(&decoder, &exec);
exec.pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
if (exec.subType == TOPIC_SUB_TYPE__TABLE) {
for (int32_t i = 0; i < 5; i++) {
exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
SReadHandle handle = {
.reader = exec.pExecReader[i],
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqHandle handle;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqHandle(&decoder, &handle);
handle.pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
for (int32_t i = 0; i < 5; i++) {
handle.execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
}
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
for (int32_t i = 0; i < 5; i++) {
SReadHandle reader = {
.reader = handle.execHandle.pExecReader[i],
.meta = pTq->pVnode->pMeta,
.pMsgCb = &pTq->pVnode->msgCb,
};
exec.task[i] = qCreateStreamExecTaskInfo(exec.qmsg, &handle);
ASSERT(exec.task[i]);
handle.execHandle.exec.execCol.task[i] =
qCreateStreamExecTaskInfo(handle.execHandle.exec.execCol.qmsg, &reader);
ASSERT(handle.execHandle.exec.execCol.task[i]);
}
} else {
for (int32_t i = 0; i < 5; i++) {
exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
}
exec.pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
handle.execHandle.exec.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
}
taosHashPut(pTq->execs, pKey, kLen, &exec, sizeof(STqExec));
taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle));
}
if (tdbTxnClose(&txn) < 0) {
@ -174,7 +171,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
void tqClose(STQ* pTq) {
if (pTq) {
taosMemoryFreeClear(pTq->path);
taosHashCleanup(pTq->execs);
taosHashCleanup(pTq->handles);
taosHashCleanup(pTq->pStreamTasks);
taosHashCleanup(pTq->pushMgr);
tdbClose(pTq->pMetaStore);
@ -183,16 +180,17 @@ void tqClose(STQ* pTq) {
// TODO
}
#if 0
int32_t tEncodeSTqExec(SEncoder* pEncoder, const STqExec* pExec) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pExec->subKey) < 0) return -1;
if (tEncodeI64(pEncoder, pExec->consumerId) < 0) return -1;
if (tEncodeI32(pEncoder, pExec->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pExec->subType) < 0) return -1;
if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1;
if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1;
if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1;
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
/*if (tEncodeI8(pEncoder, pExec->withTbName) < 0) return -1;*/
/*if (tEncodeI8(pEncoder, pExec->withSchema) < 0) return -1;*/
/*if (tEncodeI8(pEncoder, pExec->withTag) < 0) return -1;*/
if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) {
if (tEncodeCStr(pEncoder, pExec->qmsg) < 0) return -1;
}
tEndEncode(pEncoder);
@ -205,34 +203,64 @@ int32_t tDecodeSTqExec(SDecoder* pDecoder, STqExec* pExec) {
if (tDecodeI64(pDecoder, &pExec->consumerId) < 0) return -1;
if (tDecodeI32(pDecoder, &pExec->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pExec->subType) < 0) return -1;
if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1;
if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1;
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
/*if (tDecodeI8(pDecoder, &pExec->withTbName) < 0) return -1;*/
/*if (tDecodeI8(pDecoder, &pExec->withSchema) < 0) return -1;*/
/*if (tDecodeI8(pDecoder, &pExec->withTag) < 0) return -1;*/
if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) {
if (tDecodeCStrAlloc(pDecoder, &pExec->qmsg) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
}
#endif
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tEncodeCStr(pEncoder, pHandle->execHandle.exec.execCol.qmsg) < 0) return -1;
}
tEndEncode(pEncoder);
return pEncoder->pos;
}
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.exec.execCol.qmsg) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
}
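Together with tqStoreHandle above, the codec follows the usual two-pass pattern: size, allocate, then encode. A condensed sketch with error handling elided:

// sketch: persist a handle with the codec above (mirrors tqStoreHandle)
int32_t code = 0, vlen = 0;
tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);  // pass 1: compute size
void* buf = taosMemoryCalloc(1, vlen);
SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
tEncodeSTqHandle(&encoder, pHandle);                 // pass 2: serialize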
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->execs, pIter);
pIter = taosHashIterate(pTq->handles, pIter);
if (pIter == NULL) break;
STqExec* pExec = (STqExec*)pIter;
if (pExec->subType == TOPIC_SUB_TYPE__DB) {
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
for (int32_t i = 0; i < 5; i++) {
int32_t code = qUpdateQualifiedTableId(pExec->execHandle.exec.execCol.task[i], tbUidList, isAdd);
ASSERT(code == 0);
}
} else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
int32_t sz = taosArrayGetSize(tbUidList);
for (int32_t i = 0; i < sz; i++) {
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
taosHashPut(pExec->pDropTbUid, &tbUid, sizeof(int64_t), NULL, 0);
taosHashPut(pExec->execHandle.exec.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
}
}
} else {
for (int32_t i = 0; i < 5; i++) {
int32_t code = qUpdateQualifiedTableId(pExec->task[i], tbUidList, isAdd);
ASSERT(code == 0);
}
// tq update id
}
}
while (1) {
@ -250,7 +278,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver, SRpcHandleInfo handleInfo) {
if (msgType != TDMT_VND_SUBMIT) return 0;
void* pIter = NULL;
STqExec* pExec = NULL;
STqHandle* pHandle = NULL;
SSubmitReq* pReq = (SSubmitReq*)msg;
int32_t workerId = 4;
int64_t fetchOffset = ver;
@ -258,84 +286,27 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
while (1) {
pIter = taosHashIterate(pTq->pushMgr, pIter);
if (pIter == NULL) break;
pExec = *(STqExec**)pIter;
pHandle = *(STqHandle**)pIter;
taosWLockLatch(&pExec->pushHandle.lock);
taosWLockLatch(&pHandle->pushHandle.lock);
SRpcMsg* pMsg = atomic_load_ptr(&pExec->pushHandle.handle);
SRpcMsg* pMsg = atomic_load_ptr(&pHandle->pushHandle.handle);
ASSERT(pMsg);
SMqDataBlkRsp rsp = {0};
rsp.reqOffset = pExec->pushHandle.reqOffset;
rsp.reqOffset = pHandle->pushHandle.reqOffset;
rsp.blockData = taosArrayInit(0, sizeof(void*));
rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
qTaskInfo_t task = pExec->task[workerId];
ASSERT(task);
qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
if (pDataBlock == NULL) break;
ASSERT(pDataBlock->info.rows != 0);
ASSERT(pDataBlock->info.numOfCols != 0);
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
pRetrieve->useconds = ts;
pRetrieve->precision = TSDB_DEFAULT_PRECISION;
pRetrieve->compressed = 0;
pRetrieve->completed = 1;
pRetrieve->numOfRows = htonl(pDataBlock->info.rows);
// TODO enable compress
int32_t actualLen = 0;
blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false);
actualLen += sizeof(SRetrieveTableRsp);
ASSERT(actualLen <= dataStrLen);
taosArrayPush(rsp.blockDataLen, &actualLen);
taosArrayPush(rsp.blockData, &buf);
rsp.blockNum++;
}
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
STqReadHandle* pReader = pExec->pExecReader[workerId];
tqReadHandleSetMsg(pReader, pReq, 0);
while (tqNextDataBlock(pReader)) {
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
&block.info.numOfCols) < 0) {
ASSERT(0);
}
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block);
void* buf = taosMemoryCalloc(1, dataStrLen);
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
/*pRetrieve->useconds = 0;*/
pRetrieve->precision = TSDB_DEFAULT_PRECISION;
pRetrieve->compressed = 0;
pRetrieve->completed = 1;
pRetrieve->numOfRows = htonl(block.info.rows);
// TODO enable compress
int32_t actualLen = 0;
blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false);
actualLen += sizeof(SRetrieveTableRsp);
ASSERT(actualLen <= dataStrLen);
taosArrayPush(rsp.blockDataLen, &actualLen);
taosArrayPush(rsp.blockData, &buf);
rsp.blockNum++;
}
if (msgType == TDMT_VND_SUBMIT) {
tqDataExec(pTq, &pHandle->execHandle, pReq, &rsp, workerId);
} else {
// TODO
ASSERT(0);
}
if (rsp.blockNum == 0) {
taosWUnLockLatch(&pExec->pushHandle.lock);
taosWUnLockLatch(&pHandle->pushHandle.lock);
continue;
}
@ -352,8 +323,8 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
}
((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
((SMqRspHead*)buf)->epoch = pExec->pushHandle.epoch;
((SMqRspHead*)buf)->consumerId = pExec->pushHandle.consumerId;
((SMqRspHead*)buf)->epoch = pHandle->pushHandle.epoch;
((SMqRspHead*)buf)->consumerId = pHandle->pushHandle.consumerId;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqDataBlkRsp(&abuf, &rsp);
@ -361,11 +332,11 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
SRpcMsg resp = {.info = handleInfo, .pCont = buf, .contLen = tlen, .code = 0};
tmsgSendRsp(&resp);
atomic_store_ptr(&pExec->pushHandle.handle, NULL);
taosWUnLockLatch(&pExec->pushHandle.lock);
atomic_store_ptr(&pHandle->pushHandle.handle, NULL);
taosWUnLockLatch(&pHandle->pushHandle.lock);
tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
TD_VID(pTq->pVnode), fetchOffset, pExec->pushHandle.consumerId, pExec->pushHandle.epoch, rsp.blockNum,
TD_VID(pTq->pVnode), fetchOffset, pHandle->pushHandle.consumerId, pHandle->pushHandle.epoch, rsp.blockNum,
rsp.reqOffset, rsp.rspOffset);
// TODO destroy
@ -419,12 +390,12 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req %ld %ld", consumerId, pReq->epoch,
TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset);
STqExec* pExec = taosHashGet(pTq->execs, pReq->subKey, strlen(pReq->subKey));
ASSERT(pExec);
STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
ASSERT(pHandle);
int32_t consumerEpoch = atomic_load_32(&pExec->epoch);
int32_t consumerEpoch = atomic_load_32(&pHandle->epoch);
while (consumerEpoch < reqEpoch) {
consumerEpoch = atomic_val_compare_exchange_32(&pExec->epoch, consumerEpoch, reqEpoch);
consumerEpoch = atomic_val_compare_exchange_32(&pHandle->epoch, consumerEpoch, reqEpoch);
}
SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048);
@ -432,50 +403,46 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
return -1;
}
walSetReaderCapacity(pExec->pWalReader, 2048);
walSetReaderCapacity(pHandle->pWalReader, 2048);
SMqDataBlkRsp rsp = {0};
rsp.reqOffset = pReq->currentOffset;
rsp.withSchema = pExec->withSchema;
rsp.blockData = taosArrayInit(0, sizeof(void*));
rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
rsp.blockSchema = taosArrayInit(0, sizeof(void*));
rsp.blockTbName = taosArrayInit(0, sizeof(void*));
int8_t withTbName = pExec->withTbName;
if (pReq->withTbName != -1) {
withTbName = pReq->withTbName;
rsp.withTbName = pReq->withTbName;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
rsp.withSchema = false;
rsp.withTag = false;
} else {
rsp.withSchema = true;
rsp.withTag = false;
}
rsp.withTbName = withTbName;
/*int8_t withTbName = pExec->withTbName;*/
/*if (pReq->withTbName != -1) {*/
/*withTbName = pReq->withTbName;*/
/*}*/
/*rsp.withTbName = withTbName;*/
while (1) {
consumerEpoch = atomic_load_32(&pExec->epoch);
consumerEpoch = atomic_load_32(&pHandle->epoch);
if (consumerEpoch > reqEpoch) {
tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, found new consumer epoch %d discard req epoch %d",
consumerId, pReq->epoch, TD_VID(pTq->pVnode), fetchOffset, consumerEpoch, reqEpoch);
break;
}
taosThreadMutexLock(&pExec->pWalReader->mutex);
if (walFetchHead(pExec->pWalReader, fetchOffset, pHeadWithCkSum) < 0) {
tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", consumerId, pReq->epoch,
TD_VID(pTq->pVnode), fetchOffset);
taosThreadMutexUnlock(&pExec->pWalReader->mutex);
if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHeadWithCkSum) < 0) {
// TODO add push mgr
break;
}
if (pHeadWithCkSum->head.msgType != TDMT_VND_SUBMIT) {
ASSERT(walSkipFetchBody(pExec->pWalReader, pHeadWithCkSum) == 0);
} else {
ASSERT(walFetchBody(pExec->pWalReader, &pHeadWithCkSum) == 0);
}
SWalReadHead* pHead = &pHeadWithCkSum->head;
taosThreadMutexUnlock(&pExec->pWalReader->mutex);
#if 0
SWalReadHead* pHead;
if (walReadWithHandle_s(pExec->pWalReader, fetchOffset, &pHead) < 0) {
@ -515,122 +482,28 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
// table subscribe
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
qTaskInfo_t task = pExec->task[workerId];
ASSERT(task);
qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
if (pDataBlock == NULL) break;
ASSERT(pDataBlock->info.rows != 0);
ASSERT(pDataBlock->info.numOfCols != 0);
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pDataBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
pRetrieve->useconds = ts;
pRetrieve->precision = TSDB_DEFAULT_PRECISION;
pRetrieve->compressed = 0;
pRetrieve->completed = 1;
pRetrieve->numOfRows = htonl(pDataBlock->info.rows);
// TODO enable compress
int32_t actualLen = 0;
blockCompressEncode(pDataBlock, pRetrieve->data, &actualLen, pDataBlock->info.numOfCols, false);
actualLen += sizeof(SRetrieveTableRsp);
ASSERT(actualLen <= dataStrLen);
taosArrayPush(rsp.blockDataLen, &actualLen);
taosArrayPush(rsp.blockData, &buf);
if (pExec->withSchema) {
SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper);
taosArrayPush(rsp.blockSchema, &pSW);
}
if (withTbName) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
if (metaGetTableEntryByUid(&mr, uid) < 0) {
ASSERT(0);
}
char* tbName = strdup(mr.me.name);
taosArrayPush(rsp.blockTbName, &tbName);
metaReaderClear(&mr);
}
rsp.blockNum++;
}
// db subscribe
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
rsp.withSchema = 1;
STqReadHandle* pReader = pExec->pExecReader[workerId];
tqReadHandleSetMsg(pReader, pCont, 0);
while (tqNextDataBlockFilterOut(pReader, pExec->pDropTbUid)) {
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
&block.info.numOfCols) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
ASSERT(0);
}
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(&block);
void* buf = taosMemoryCalloc(1, dataStrLen);
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
/*pRetrieve->useconds = 0;*/
pRetrieve->precision = TSDB_DEFAULT_PRECISION;
pRetrieve->compressed = 0;
pRetrieve->completed = 1;
pRetrieve->numOfRows = htonl(block.info.rows);
// TODO enable compress
int32_t actualLen = 0;
blockCompressEncode(&block, pRetrieve->data, &actualLen, block.info.numOfCols, false);
actualLen += sizeof(SRetrieveTableRsp);
ASSERT(actualLen <= dataStrLen);
taosArrayPush(rsp.blockDataLen, &actualLen);
taosArrayPush(rsp.blockData, &buf);
if (withTbName) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
if (metaGetTableEntryByUid(&mr, block.info.uid) < 0) {
ASSERT(0);
}
char* tbName = strdup(mr.me.name);
taosArrayPush(rsp.blockTbName, &tbName);
metaReaderClear(&mr);
}
SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper);
taosArrayPush(rsp.blockSchema, &pSW);
rsp.blockNum++;
}
} else {
ASSERT(0);
}
tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId);
} else {
// TODO
ASSERT(0);
}
// TODO batch optimization:
// TODO continue scan until meeting batch requirement
if (rsp.blockNum != 0) break;
rsp.skipLogNum++;
fetchOffset++;
if (rsp.blockNum > 0 /* threshold */) {
break;
} else {
fetchOffset++;
}
}
taosMemoryFree(pHeadWithCkSum);
ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum);
ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum);
if (rsp.blockNum != 0)
rsp.rspOffset = fetchOffset;
else
rsp.rspOffset = fetchOffset - 1;
rsp.rspOffset = fetchOffset;
int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp);
void* buf = rpcMallocCont(tlen);
@ -646,13 +519,18 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
tEncodeSMqDataBlkRsp(&abuf, &rsp);
SRpcMsg resp = {.info = pMsg->info, .pCont = buf, .contLen = tlen, .code = 0};
SRpcMsg resp = {
.info = pMsg->info,
.pCont = buf,
.contLen = tlen,
.code = 0,
};
tmsgSendRsp(&resp);
tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset);
// TODO destroy
// TODO wrap in destroy func
taosArrayDestroy(rsp.blockData);
taosArrayDestroy(rsp.blockDataLen);
taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
@ -664,7 +542,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
int32_t code = taosHashRemove(pTq->execs, pReq->subKey, strlen(pReq->subKey));
int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey));
ASSERT(code == 0);
TXN txn;
@ -693,63 +571,59 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
SMqRebVgReq req = {0};
tDecodeSMqRebVgReq(msg, &req);
// todo lock
STqExec* pExec = taosHashGet(pTq->execs, req.subKey, strlen(req.subKey));
if (pExec == NULL) {
STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey));
if (pHandle == NULL) {
ASSERT(req.oldConsumerId == -1);
ASSERT(req.newConsumerId != -1);
STqExec exec = {0};
pExec = &exec;
STqHandle tqHandle = {0};
pHandle = &tqHandle;
/*taosInitRWLatch(&pExec->lock);*/
memcpy(pExec->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
pExec->consumerId = req.newConsumerId;
pExec->epoch = -1;
memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
pHandle->consumerId = req.newConsumerId;
pHandle->epoch = -1;
pExec->subType = req.subType;
pExec->withTbName = req.withTbName;
pExec->withSchema = req.withSchema;
pExec->withTag = req.withTag;
pHandle->execHandle.subType = req.subType;
/*pExec->withTbName = req.withTbName;*/
/*pExec->withSchema = req.withSchema;*/
/*pExec->withTag = req.withTag;*/
pExec->qmsg = req.qmsg;
pHandle->execHandle.exec.execCol.qmsg = req.qmsg;
req.qmsg = NULL;
pExec->pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
pHandle->pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
for (int32_t i = 0; i < 5; i++) {
pHandle->execHandle.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
}
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
for (int32_t i = 0; i < 5; i++) {
pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
SReadHandle handle = {
.reader = pExec->pExecReader[i],
.reader = pHandle->execHandle.pExecReader[i],
.meta = pTq->pVnode->pMeta,
.pMsgCb = &pTq->pVnode->msgCb,
};
pExec->task[i] = qCreateStreamExecTaskInfo(pExec->qmsg, &handle);
ASSERT(pExec->task[i]);
pHandle->execHandle.exec.execCol.task[i] =
qCreateStreamExecTaskInfo(pHandle->execHandle.exec.execCol.qmsg, &handle);
ASSERT(pHandle->execHandle.exec.execCol.task[i]);
}
} else {
for (int32_t i = 0; i < 5; i++) {
pExec->pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
}
pExec->pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->execHandle.exec.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
}
taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec));
if (tqStoreExec(pTq, req.subKey, pExec) < 0) {
// TODO
}
return 0;
taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
} else {
/*ASSERT(pExec->consumerId == req.oldConsumerId);*/
// TODO handle qmsg and exec modification
atomic_store_32(&pExec->epoch, -1);
atomic_store_64(&pExec->consumerId, req.newConsumerId);
atomic_add_fetch_32(&pExec->epoch, 1);
if (tqStoreExec(pTq, req.subKey, pExec) < 0) {
// TODO
}
return 0;
atomic_store_32(&pHandle->epoch, -1);
atomic_store_64(&pHandle->consumerId, req.newConsumerId);
atomic_add_fetch_32(&pHandle->epoch, 1);
}
if (tqStoreHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
}
return 0;
}
void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {

View File

@ -0,0 +1,124 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tq.h"
static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataBlkRsp* pRsp) {
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) return -1;
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)buf;
pRetrieve->useconds = 0;
pRetrieve->precision = TSDB_DEFAULT_PRECISION;
pRetrieve->compressed = 0;
pRetrieve->completed = 1;
pRetrieve->numOfRows = htonl(pBlock->info.rows);
// TODO enable compress
int32_t actualLen = 0;
blockCompressEncode(pBlock, pRetrieve->data, &actualLen, pBlock->info.numOfCols, false);
actualLen += sizeof(SRetrieveTableRsp);
ASSERT(actualLen <= dataStrLen);
taosArrayPush(pRsp->blockDataLen, &actualLen);
taosArrayPush(pRsp->blockData, &buf);
return 0;
}
static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, int32_t workerId, SMqDataBlkRsp* pRsp) {
SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader[workerId]->pSchemaWrapper);
taosArrayPush(pRsp->blockSchema, &pSW);
return 0;
}
static int32_t tqAddTbNameToRsp(const STQ* pTq, const STqExecHandle* pExec, SMqDataBlkRsp* pRsp, int32_t workerId) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
int64_t uid = pExec->pExecReader[workerId]->msgIter.uid;
if (metaGetTableEntryByUid(&mr, uid) < 0) {
ASSERT(0);
return -1;
}
char* tbName = strdup(mr.me.name);
taosArrayPush(pRsp->blockTbName, &tbName);
metaReaderClear(&mr);
return 0;
}
int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId) {
if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) {
qTaskInfo_t task = pExec->exec.execCol.task[workerId];
ASSERT(task);
qSetStreamInput(task, pReq, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
if (pDataBlock == NULL) break;
ASSERT(pDataBlock->info.rows != 0);
ASSERT(pDataBlock->info.numOfCols != 0);
tqAddBlockDataToRsp(pDataBlock, pRsp);
if (pRsp->withTbName) {
tqAddTbNameToRsp(pTq, pExec, pRsp, workerId);
}
pRsp->blockNum++;
}
} else if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
pRsp->withSchema = 1;
STqReadHandle* pReader = pExec->pExecReader[workerId];
tqReadHandleSetMsg(pReader, pReq, 0);
while (tqNextDataBlock(pReader)) {
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
&block.info.numOfCols) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
ASSERT(0);
}
tqAddBlockDataToRsp(&block, pRsp);
if (pRsp->withTbName) {
tqAddTbNameToRsp(pTq, pExec, pRsp, workerId);
}
tqAddBlockSchemaToRsp(pExec, workerId, pRsp);
pRsp->blockNum++;
}
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
pRsp->withSchema = 1;
STqReadHandle* pReader = pExec->pExecReader[workerId];
tqReadHandleSetMsg(pReader, pReq, 0);
while (tqNextDataBlockFilterOut(pReader, pExec->exec.execDb.pFilterOutTbUid)) {
SSDataBlock block = {0};
if (tqRetrieveDataBlock(&block.pDataBlock, pReader, &block.info.groupId, &block.info.uid, &block.info.rows,
&block.info.numOfCols) < 0) {
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
ASSERT(0);
}
tqAddBlockDataToRsp(&block, pRsp);
if (pRsp->withTbName) {
tqAddTbNameToRsp(pTq, pExec, pRsp, workerId);
}
tqAddBlockSchemaToRsp(pExec, workerId, pRsp);
pRsp->blockNum++;
}
}
if (pRsp->blockNum == 0) {
pRsp->skipLogNum++;
return -1;
}
return 0;
}

View File

@ -0,0 +1,14 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

View File

@ -15,6 +15,48 @@
#include "tq.h"
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead** ppHeadWithCkSum) {
int32_t code = 0;
taosThreadMutexLock(&pHandle->pWalReader->mutex);
int64_t offset = *fetchOffset;
while (1) {
if (walFetchHead(pHandle->pWalReader, offset, *ppHeadWithCkSum) < 0) {
tqDebug("tmq poll: consumer %ld (epoch %d) vg %d offset %ld, no more log to return", pHandle->consumerId,
pHandle->epoch, TD_VID(pTq->pVnode), offset);
*fetchOffset = offset - 1;
code = -1;
goto END;
}
if ((*ppHeadWithCkSum)->head.msgType == TDMT_VND_SUBMIT) {
code = walFetchBody(pHandle->pWalReader, ppHeadWithCkSum);
if (code < 0) {
ASSERT(0);
*fetchOffset = offset;
code = -1;
goto END;
}
*fetchOffset = offset;
code = 0;
goto END;
} else {
code = walSkipFetchBody(pHandle->pWalReader, *ppHeadWithCkSum);
if (code < 0) {
ASSERT(0);
*fetchOffset = offset;
code = -1;
goto END;
}
offset++;
}
}
END:
taosThreadMutexUnlock(&pHandle->pWalReader->mutex);
return code;
}
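tqFetchLog centralizes the WAL loop that tqProcessPollReq used to inline: it skips non-submit entries, fetches the body for submits, and reports the offset it stopped at through fetchOffset. A condensed sketch of the caller side, adapted from the poll path in this commit (rsp setup and cleanup elided):

// sketch: scan the log until a submit yields data blocks
SWalHead* pHead = taosMemoryMalloc(sizeof(SWalHead) + 2048);
int64_t fetchOffset = pReq->currentOffset;
while (1) {
  if (tqFetchLog(pTq, pHandle, &fetchOffset, &pHead) < 0) break;  // no more log
  SSubmitReq* pCont = (SSubmitReq*)&pHead->head.body;
  tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId);
  if (rsp.blockNum > 0) break;  // enough for this poll
  fetchOffset++;                // the submit produced nothing, keep scanning
}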
STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
STqReadHandle* pReadHandle = taosMemoryMalloc(sizeof(STqReadHandle));
if (pReadHandle == NULL) {
@ -24,7 +66,7 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) {
pReadHandle->pMsg = NULL;
pReadHandle->ver = -1;
pReadHandle->pColIdList = NULL;
pReadHandle->sver = -1;
pReadHandle->cachedSchemaVer = -1;
pReadHandle->cachedSchemaUid = -1;
pReadHandle->pSchema = NULL;
pReadHandle->pSchemaWrapper = NULL;
@ -88,11 +130,11 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p
// TODO set to real sversion
/*int32_t sversion = 1;*/
int32_t sversion = htonl(pHandle->pBlock->sversion);
if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) {
if (pHandle->cachedSchemaVer != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) {
pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion);
if (pHandle->pSchema == NULL) {
tqWarn("cannot found tsschema for table: uid: %ld (suid: %ld), version %d, possibly dropped table",
pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->sver);
pHandle->msgIter.uid, pHandle->msgIter.suid, pHandle->cachedSchemaVer);
/*ASSERT(0);*/
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
@ -102,12 +144,12 @@ int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* p
pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, pHandle->msgIter.suid, sversion, true);
if (pHandle->pSchemaWrapper == NULL) {
tqWarn("cannot found schema wrapper for table: suid: %ld, version %d, possibly dropped table",
pHandle->msgIter.suid, pHandle->sver);
pHandle->msgIter.suid, pHandle->cachedSchemaVer);
/*ASSERT(0);*/
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
}
pHandle->sver = sversion;
pHandle->cachedSchemaVer = sversion;
pHandle->cachedSchemaUid = pHandle->msgIter.suid;
}
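The rename from sver to cachedSchemaVer makes the caching explicit: the reader keeps the last (schema version, super-table uid) pair and only hits the meta store when either changes. The pattern, condensed from the code above:

// sketch: (version, suid) is the cache key for pSchema/pSchemaWrapper
if (pHandle->cachedSchemaVer != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) {
  // cache miss: reload via metaGetTbTSchema/metaGetTableSchema, then
  pHandle->cachedSchemaVer = sversion;
  pHandle->cachedSchemaUid = pHandle->msgIter.suid;
}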

View File

@ -360,7 +360,7 @@ static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq,
goto _err;
}
tdProcessRSmaCreate(pVnode->pSma, pVnode->pMeta, &req, &pVnode->msgCb);
tdProcessRSmaCreate(pVnode, &req);
tDecoderClear(&coder);
return 0;

View File

@ -440,6 +440,7 @@ typedef struct STimeWindowSupp {
int64_t waterMark;
TSKEY maxTs;
SColumnInfoData timeWindowData; // query time window info for scalar function execution.
SHashObj *winMap;
} STimeWindowAggSupp;
typedef struct SIntervalAggOperatorInfo {
@ -758,7 +759,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo*
SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
STimeWindowAggSupp* pTwSup, int16_t tsColId);
STimeWindowAggSupp* pTwSup);
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
@ -837,6 +838,8 @@ SResultWindowInfo* getSessionTimeWindow(SArray* pWinInfos, TSKEY ts, int64_t gap
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pTs, int32_t rows,
int32_t start, int64_t gap, SHashObj* pStDeleted);
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
int64_t getSmaWaterMark(int64_t interval, double filesFactor);
bool isSmaStream(int8_t triggerType);
int32_t compareTimeWindow(const void* p1, const void* p2, const void* param);
#ifdef __cplusplus

View File

@ -4529,8 +4529,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
qDebug("%s pDataReader is not NULL", GET_TASKID(pTaskInfo));
}
SArray* tableIdList = extractTableIdList(pTableListInfo);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle, tableIdList, pTableScanNode,
pTaskInfo, &twSup, pTableScanNode->tsColId);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pDataReader, pHandle,
tableIdList, pTableScanNode, pTaskInfo, &twSup);
taosArrayDestroy(tableIdList);
return pOperator;
@ -4631,7 +4631,19 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STimeWindowAggSupp as = {.waterMark = pIntervalPhyNode->window.watermark,
.calTrigger = pIntervalPhyNode->window.triggerType,
.maxTs = INT64_MIN};
.maxTs = INT64_MIN,
.winMap = NULL,};
if (isSmaStream(pIntervalPhyNode->window.triggerType)) {
if (FLT_LESS(pIntervalPhyNode->window.filesFactor, 1.000000)) {
as.calTrigger = STREAM_TRIGGER_AT_ONCE_SMA;
} else {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
as.winMap = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
as.waterMark = getSmaWaterMark(interval.interval,
pIntervalPhyNode->window.filesFactor);
as.calTrigger = STREAM_TRIGGER_WINDOW_CLOSE_SMA;
}
}
int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pTaskInfo);
@ -5300,3 +5312,18 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey) {
}
return createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, TD_TMP_DIR_PATH);
}
int64_t getSmaWaterMark(int64_t interval, double filesFactor) {
int64_t waterMark = 0;
ASSERT(FLT_GREATEREQUAL(filesFactor,0.000000));
waterMark = -1 * filesFactor;
return waterMark;
}
bool isSmaStream(int8_t triggerType) {
if (triggerType == STREAM_TRIGGER_AT_ONCE ||
triggerType == STREAM_TRIGGER_WINDOW_CLOSE) {
return false;
}
return true;
}
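These two helpers gate the new *_SMA trigger modes wired up in createOperatorTree above: a trigger that is neither AT_ONCE nor WINDOW_CLOSE is treated as an SMA stream, and filesFactor then decides between the two SMA variants. A condensed sketch (pickSmaTrigger is a hypothetical name; the WINDOW_CLOSE_SMA branch additionally derives its watermark via getSmaWaterMark):

// sketch: trigger selection for SMA streams, condensed from createOperatorTree
static int32_t pickSmaTrigger(int8_t triggerType, double filesFactor) {
  if (!isSmaStream(triggerType)) return triggerType;
  return FLT_LESS(filesFactor, 1.0) ? STREAM_TRIGGER_AT_ONCE_SMA
                                    : STREAM_TRIGGER_WINDOW_CLOSE_SMA;
}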

View File

@ -875,7 +875,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
if (rows == 0) {
pOperator->status = OP_EXEC_DONE;
} else if (pInfo->pUpdateInfo) {
SSDataBlock* upRes = getUpdateDataBlock(pInfo, true); // TODO(liuyao) get invertible from plan
SSDataBlock* upRes = getUpdateDataBlock(pInfo, true);
if (upRes) {
pInfo->pUpdateRes = upRes;
if (upRes->info.type == STREAM_REPROCESS) {
@ -894,7 +894,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
SArray* pTableIdList, STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
STimeWindowAggSupp* pTwSup, int16_t tsColId) {
STimeWindowAggSupp* pTwSup) {
SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -939,8 +939,12 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
goto _error;
}
pInfo->primaryTsIndex = tsColId;
if (pSTInfo->interval.interval > 0) {
if (isSmaStream(pTableScanNode->triggerType)) {
pTwSup->waterMark = getSmaWaterMark(pSTInfo->interval.interval,
pTableScanNode->filesFactor);
}
pInfo->primaryTsIndex = 0; // pTableScanNode->tsColId;
if (pSTInfo->interval.interval > 0 && pDataReader) {
pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, pTwSup->waterMark);
} else {
pInfo->pUpdateInfo = NULL;

View File

@ -748,10 +748,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM &&
(pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == 0) ) {
saveResult(pResult, tableGroupId, pUpdated);
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) {
saveResult(pResult, tableGroupId, pUpdated);
}
if (pInfo->twAggSup.winMap) {
taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY));
}
}
int32_t forwardStep = 0;
@ -824,10 +828,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM &&
(pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == 0) ) {
saveResult(pResult, tableGroupId, pUpdated);
if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE ||
pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE_SMA) {
saveResult(pResult, tableGroupId, pUpdated);
}
if (pInfo->twAggSup.winMap) {
taosHashRemove(pInfo->twAggSup.winMap, &win.skey, sizeof(TSKEY));
}
}
ekey = ascScan? nextWin.ekey:nextWin.skey;
@ -1172,15 +1180,23 @@ static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup,
void* key = taosHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*) key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(uint64_t*) ((char*)key + sizeof(uint64_t));
TSKEY ts = *(int64_t*) ((char*)key + sizeof(uint64_t));
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval,
pInterval->precision, NULL);
if (win.ekey < pSup->maxTs - pSup->waterMark) {
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) {
if (taosHashGet(pSup->winMap, &win.skey, sizeof(TSKEY))) {
continue;
}
}
char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
taosHashRemove(pHashMap, keyBuf, keyLen);
if (pSup->calTrigger != STREAM_TRIGGER_AT_ONCE_SMA &&
pSup->calTrigger != STREAM_TRIGGER_WINDOW_CLOSE_SMA) {
taosHashRemove(pHashMap, keyBuf, keyLen);
}
SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
if (pos == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -1192,6 +1208,7 @@ static int32_t closeIntervalWindow(SHashObj *pHashMap, STimeWindowAggSupp *pSup,
taosMemoryFree(pos);
return TSDB_CODE_OUT_OF_MEMORY;
}
taosHashPut(pSup->winMap, &win.skey, sizeof(TSKEY), NULL, 0);
}
}
return TSDB_CODE_SUCCESS;
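winMap is the dedup set for the new WINDOW_CLOSE_SMA mode: a closed window's start key is recorded so a later pass over the same hash entry does not emit it again, while the result row itself is kept (not removed) for the *_SMA triggers. The pattern in isolation:

// sketch: emit each closed window at most once under WINDOW_CLOSE_SMA
if (taosHashGet(pSup->winMap, &win.skey, sizeof(TSKEY)) == NULL) {
  // first close of this window: collect it into pClosed, then remember it
  taosHashPut(pSup->winMap, &win.skey, sizeof(TSKEY), NULL, 0);
}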
@ -1248,7 +1265,8 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
&pInfo->interval, pClosed);
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pClosed,
pInfo->binfo.rowCellInfoOffset);
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) {
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE ||
pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE_SMA) {
taosArrayAddAll(pUpdated, pClosed);
}
taosArrayDestroy(pClosed);
@ -2412,7 +2430,7 @@ int32_t closeSessionWindow(SArray *pWins, STimeWindowAggSupp *pTwSup, SArray *pC
return TSDB_CODE_OUT_OF_MEMORY;
}
pSeWin->isClosed = true;
if (calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) {
if (calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
pSeWin->isOutput = true;
}
}
@ -2486,7 +2504,7 @@ static SSDataBlock* doStreamSessionWindowAgg(SOperatorInfo* pOperator) {
SArray* pUpdated = taosArrayInit(16, POINTER_BYTES);
copyUpdateResult(pStUpdated, pUpdated, pBInfo->pRes->info.groupId);
taosHashCleanup(pStUpdated);
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER__WINDOW_CLOSE) {
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
taosArrayAddAll(pUpdated, pClosed);
}

View File

@ -305,6 +305,7 @@ static SNode* logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) {
CLONE_NODE_FIELD(pConditions);
CLONE_NODE_LIST_FIELD(pChildren);
COPY_SCALAR_FIELD(optimizedFlag);
COPY_SCALAR_FIELD(precision);
return (SNode*)pDst;
}
@ -328,6 +329,10 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
COPY_SCALAR_FIELD(intervalUnit);
COPY_SCALAR_FIELD(slidingUnit);
CLONE_NODE_FIELD(pTagCond);
COPY_SCALAR_FIELD(triggerType);
COPY_SCALAR_FIELD(watermark);
COPY_SCALAR_FIELD(tsColId);
COPY_SCALAR_FIELD(filesFactor);
return (SNode*)pDst;
}
@ -384,6 +389,7 @@ static SNode* logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pD
CLONE_NODE_FIELD(pStateExpr);
COPY_SCALAR_FIELD(triggerType);
COPY_SCALAR_FIELD(watermark);
COPY_SCALAR_FIELD(filesFactor);
return (SNode*)pDst;
}

View File

@ -1133,6 +1133,7 @@ static const char* jkTableScanPhysiPlanSlidingUnit = "slidingUnit";
static const char* jkTableScanPhysiPlanTriggerType = "triggerType";
static const char* jkTableScanPhysiPlanWatermark = "watermark";
static const char* jkTableScanPhysiPlanTsColId = "tsColId";
static const char* jkTableScanPhysiPlanFilesFactor = "FilesFactor";
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
@ -1183,6 +1184,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddDoubleToObject(pJson, jkTableScanPhysiPlanFilesFactor, pNode->filesFactor);
}
return code;
}
@ -1242,7 +1246,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableScanPhysiPlanTsColId, pNode->tsColId, code);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanFilesFactor, &pNode->filesFactor);
}
return code;
}
@ -1496,6 +1502,7 @@ static const char* jkWindowPhysiPlanFuncs = "Funcs";
static const char* jkWindowPhysiPlanTsPk = "TsPk";
static const char* jkWindowPhysiPlanTriggerType = "TriggerType";
static const char* jkWindowPhysiPlanWatermark = "Watermark";
static const char* jkWindowPhysiPlanFilesFactor = "FilesFactor";
static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
@ -1516,6 +1523,9 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanWatermark, pNode->watermark);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddDoubleToObject(pJson, jkWindowPhysiPlanFilesFactor, pNode->filesFactor);
}
return code;
}
@ -1541,6 +1551,9 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code);
;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetDoubleValue(pJson, jkWindowPhysiPlanFilesFactor, &pNode->filesFactor);
}
return code;
}
@ -3276,7 +3289,7 @@ static int32_t createTopicStmtToJson(const void* pObj, SJson* pJson) {
int32_t code = tjsonAddStringToObject(pJson, jkCreateTopicStmtTopicName, pNode->topicName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName);
code = tjsonAddStringToObject(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkCreateTopicStmtIgnoreExists, pNode->ignoreExists);
@ -3293,7 +3306,7 @@ static int32_t jsonToCreateTopicStmt(const SJson* pJson, void* pObj) {
int32_t code = tjsonGetStringValue(pJson, jkCreateTopicStmtTopicName, pNode->topicName);
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subscribeDbName);
code = tjsonGetStringValue(pJson, jkCreateTopicStmtSubscribeDbName, pNode->subDbName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkCreateTopicStmtIgnoreExists, &pNode->ignoreExists);

View File

@ -86,8 +86,6 @@ SNodeptr nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SExplainOptions));
case QUERY_NODE_STREAM_OPTIONS:
return makeNode(type, sizeof(SStreamOptions));
case QUERY_NODE_TOPIC_OPTIONS:
return makeNode(type, sizeof(STopicOptions));
case QUERY_NODE_LEFT_VALUE:
return makeNode(type, sizeof(SLeftValueNode));
case QUERY_NODE_SET_OPERATOR:

View File

@ -59,7 +59,6 @@ typedef enum EDatabaseOptionType {
typedef enum ETableOptionType {
TABLE_OPTION_COMMENT = 1,
TABLE_OPTION_DELAY,
TABLE_OPTION_FILE_FACTOR,
TABLE_OPTION_ROLLUP,
TABLE_OPTION_TTL,
@ -168,7 +167,7 @@ SNode* createCreateComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, co
SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pDnodeId);
SNode* createTopicOptions(SAstCreateContext* pCxt);
SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery,
const SToken* pSubscribeDbName, SNode* pOptions);
const SToken* pSubDbName, SNode* pRealTable);
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pTopicName);
SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId,
const SToken* pTopicName);

View File

@ -313,7 +313,6 @@ tags_def(A) ::= TAGS NK_LP column_def_list(B) NK_RP.
table_options(A) ::= . { A = createDefaultTableOptions(pCxt); }
table_options(A) ::= table_options(B) COMMENT NK_STRING(C). { A = setTableOption(pCxt, B, TABLE_OPTION_COMMENT, &C); }
table_options(A) ::= table_options(B) DELAY NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_DELAY, &C); }
table_options(A) ::= table_options(B) FILE_FACTOR NK_FLOAT(C). { A = setTableOption(pCxt, B, TABLE_OPTION_FILE_FACTOR, &C); }
table_options(A) ::= table_options(B) ROLLUP NK_LP func_name_list(C) NK_RP. { A = setTableOption(pCxt, B, TABLE_OPTION_ROLLUP, C); }
table_options(A) ::= table_options(B) TTL NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_TTL, &C); }
@ -403,17 +402,12 @@ func_list(A) ::= func_list(B) NK_COMMA func(C).
func(A) ::= function_name(B) NK_LP expression_list(C) NK_RP. { A = createFunctionNode(pCxt, &B, C); }
/************************************************ create/drop topic ***************************************************/
cmd ::= CREATE TOPIC not_exists_opt(A)
topic_name(B) topic_options(D) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, D); }
cmd ::= CREATE TOPIC not_exists_opt(A)
topic_name(B) topic_options(D) AS db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, D); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, C, NULL, NULL); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, &C, NULL); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B)
AS STABLE full_table_name(C). { pCxt->pRootNode = createCreateTopicStmt(pCxt, A, &B, NULL, NULL, C); }
cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); }
cmd ::= DROP CGROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); }
topic_options(A) ::= . { A = createTopicOptions(pCxt); }
topic_options(A) ::= topic_options(B) WITH TABLE. { ((STopicOptions*)B)->withTable = true; A = B; }
topic_options(A) ::= topic_options(B) WITH SCHEMA. { ((STopicOptions*)B)->withSchema = true; A = B; }
topic_options(A) ::= topic_options(B) WITH TAG. { ((STopicOptions*)B)->withTag = true; A = B; }
cmd ::= DROP CONSUMER GROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); }
/************************************************ desc/describe *******************************************************/
cmd ::= DESC full_table_name(A). { pCxt->pRootNode = createDescribeStmt(pCxt, A); }
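The three CREATE TOPIC productions above map to column, database, and super-table subscriptions, and DROP CGROUP becomes DROP CONSUMER GROUP. A sketch of the new statement forms via the Python connector (topic and table names are made up):

    import taos  # TDengine Python connector

    conn = taos.connect(host="localhost", user="root", password="taosdata")
    conn.execute("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT ts, c1 FROM test.t1")  # TOPIC_SUB_TYPE__COLUMN
    conn.execute("CREATE TOPIC IF NOT EXISTS tp2 AS DATABASE test")               # TOPIC_SUB_TYPE__DB
    conn.execute("CREATE TOPIC IF NOT EXISTS tp3 AS STABLE test.st1")             # TOPIC_SUB_TYPE__TABLE
    conn.execute("DROP CONSUMER GROUP IF EXISTS cg1 ON tp1")                      # was: DROP CGROUP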

View File

@ -857,7 +857,6 @@ SNode* createDefaultTableOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->delay = TSDB_DEFAULT_ROLLUP_DELAY;
pOptions->filesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR;
pOptions->ttl = TSDB_DEFAULT_TABLE_TTL;
return (SNode*)pOptions;
@ -867,7 +866,6 @@ SNode* createAlterTableOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->delay = -1;
pOptions->filesFactor = -1;
pOptions->ttl = -1;
return (SNode*)pOptions;
@ -882,11 +880,8 @@ SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType
sizeof(((STableOptions*)pOptions)->comment));
}
break;
case TABLE_OPTION_DELAY:
((STableOptions*)pOptions)->delay = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case TABLE_OPTION_FILE_FACTOR:
((STableOptions*)pOptions)->filesFactor = taosStr2Float(((SToken*)pVal)->z, NULL);
((STableOptions*)pOptions)->filesFactor = taosStr2Double(((SToken*)pVal)->z, NULL);
break;
case TABLE_OPTION_ROLLUP:
((STableOptions*)pOptions)->pRollupFuncs = pVal;
@ -1266,28 +1261,22 @@ SNode* createDropComponentNodeStmt(SAstCreateContext* pCxt, ENodeType type, cons
return (SNode*)pStmt;
}
SNode* createTopicOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
STopicOptions* pOptions = nodesMakeNode(QUERY_NODE_TOPIC_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->withTable = false;
pOptions->withSchema = false;
pOptions->withTag = false;
return (SNode*)pOptions;
}
SNode* createCreateTopicStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pTopicName, SNode* pQuery,
const SToken* pSubscribeDbName, SNode* pOptions) {
const SToken* pSubDbName, SNode* pRealTable) {
CHECK_PARSER_STATUS(pCxt);
SCreateTopicStmt* pStmt = nodesMakeNode(QUERY_NODE_CREATE_TOPIC_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->topicName, pTopicName->z, pTopicName->n);
pStmt->ignoreExists = ignoreExists;
pStmt->pQuery = pQuery;
if (NULL != pSubscribeDbName) {
strncpy(pStmt->subscribeDbName, pSubscribeDbName->z, pSubscribeDbName->n);
if (NULL != pRealTable) {
strcpy(pStmt->subDbName, ((SRealTableNode*)pRealTable)->table.dbName);
strcpy(pStmt->subSTbName, ((SRealTableNode*)pRealTable)->table.tableName);
nodesDestroyNode(pRealTable);
} else if (NULL != pSubDbName) {
strncpy(pStmt->subDbName, pSubDbName->z, pSubDbName->n);
} else {
pStmt->pQuery = pQuery;
}
pStmt->pOptions = (STopicOptions*)pOptions;
return (SNode*)pStmt;
}

View File

@ -53,7 +53,6 @@ static SKeyword keywordTable[] = {
{"CACHE", TK_CACHE},
{"CACHELAST", TK_CACHELAST},
{"CAST", TK_CAST},
{"CGROUP", TK_CGROUP},
{"CLUSTER", TK_CLUSTER},
{"COLUMN", TK_COLUMN},
{"COMMENT", TK_COMMENT},
@ -62,13 +61,13 @@ static SKeyword keywordTable[] = {
{"CONNS", TK_CONNS},
{"CONNECTION", TK_CONNECTION},
{"CONNECTIONS", TK_CONNECTIONS},
{"CONSUMER", TK_CONSUMER},
{"COUNT", TK_COUNT},
{"CREATE", TK_CREATE},
{"DATABASE", TK_DATABASE},
{"DATABASES", TK_DATABASES},
{"DAYS", TK_DAYS},
{"DBS", TK_DBS},
{"DELAY", TK_DELAY},
{"DESC", TK_DESC},
{"DESCRIBE", TK_DESCRIBE},
{"DISTINCT", TK_DISTINCT},
@ -156,7 +155,6 @@ static SKeyword keywordTable[] = {
{"RETENTIONS", TK_RETENTIONS},
{"REVOKE", TK_REVOKE},
{"ROLLUP", TK_ROLLUP},
{"SCHEMA", TK_SCHEMA},
{"SCHEMALESS", TK_SCHEMALESS},
{"SCORES", TK_SCORES},
{"SELECT", TK_SELECT},
@ -214,7 +212,6 @@ static SKeyword keywordTable[] = {
{"WATERMARK", TK_WATERMARK},
{"WHERE", TK_WHERE},
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
{"WITH", TK_WITH},
{"WRITE", TK_WRITE},
{"_C0", TK_ROWTS},
{"_QENDTS", TK_QENDTS},

View File

@ -465,20 +465,22 @@ static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) {
return isPrimaryKeyImpl(pTable, pExpr);
}
static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) {
static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, const STableNode* pTable,
bool* pFound) {
SColumnNode* pCol = *pColRef;
bool found = false;
*pFound = false;
if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta;
if (isInternalPrimaryKey(pCol)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema, false, pCol);
return true;
*pFound = true;
return TSDB_CODE_SUCCESS;
}
int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
for (int32_t i = 0; i < nums; ++i) {
if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i >= pMeta->tableInfo.numOfColumns), pCol);
found = true;
*pFound = true;
break;
}
}
@ -489,13 +491,15 @@ static bool findAndSetColumn(SColumnNode** pColRef, const STableNode* pTable) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp(pCol->colName, pExpr->aliasName) ||
(isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol))) {
if (*pFound) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName);
}
setColumnInfoByExpr(pTable, pExpr, pColRef);
found = true;
break;
*pFound = true;
}
}
}
return found;
return TSDB_CODE_SUCCESS;
}
static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode** pCol) {
@ -506,7 +510,12 @@ static EDealRes translateColumnWithPrefix(STranslateContext* pCxt, SColumnNode**
STableNode* pTable = taosArrayGetP(pTables, i);
if (belongTable(pCxt->pParseCxt->db, (*pCol), pTable)) {
foundTable = true;
if (findAndSetColumn(pCol, pTable)) {
bool foundCol = false;
pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol);
if (TSDB_CODE_SUCCESS != pCxt->errCode) {
return DEAL_RES_ERROR;
}
if (foundCol) {
break;
}
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName);
@ -525,14 +534,19 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
bool isInternalPk = isInternalPrimaryKey(*pCol);
for (size_t i = 0; i < nums; ++i) {
STableNode* pTable = taosArrayGetP(pTables, i);
if (findAndSetColumn(pCol, pTable)) {
bool foundCol = false;
pCxt->errCode = findAndSetColumn(pCxt, pCol, pTable, &foundCol);
if (TSDB_CODE_SUCCESS != pCxt->errCode) {
return DEAL_RES_ERROR;
}
if (foundCol) {
if (found) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, (*pCol)->colName);
}
found = true;
if (isInternalPk) {
break;
}
}
if (isInternalPk) {
break;
}
}
if (!found) {
@ -1939,7 +1953,9 @@ static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* p
}
pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
strcpy(pCol->colName, PK_TS_COL_INTERNAL_NAME);
if (!findAndSetColumn(&pCol, pTable)) {
bool found = false;
int32_t code = findAndSetColumn(pCxt, &pCol, pTable, &found);
if (TSDB_CODE_SUCCESS != code || !found) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_FUNC);
}
*pPrimaryKey = (SNode*)pCol;
@ -2617,10 +2633,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt
}
static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
int32_t code = checkRangeOption(pCxt, "delay", pStmt->pOptions->delay, TSDB_MIN_ROLLUP_DELAY, TSDB_MAX_ROLLUP_DELAY);
if (TSDB_CODE_SUCCESS == code) {
code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
}
int32_t code = checTableFactorOption(pCxt, pStmt->pOptions->filesFactor);
if (TSDB_CODE_SUCCESS == code) {
code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs);
}
@ -2861,7 +2874,6 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,
static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStmt, SMCreateStbReq* pReq) {
pReq->igExists = pStmt->ignoreExists;
pReq->xFilesFactor = pStmt->pOptions->filesFactor;
pReq->delay = pStmt->pOptions->delay;
pReq->ttl = pStmt->pOptions->ttl;
columnDefNodeToField(pStmt->pCols, &pReq->pColumns);
columnDefNodeToField(pStmt->pTags, &pReq->pTags);
@ -3287,9 +3299,6 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->topicName, strlen(pStmt->topicName));
tNameGetFullDbName(&name, pReq->name);
pReq->igExists = pStmt->ignoreExists;
pReq->withTbName = pStmt->pOptions->withTable;
pReq->withSchema = pStmt->pOptions->withSchema;
pReq->withTag = pStmt->pOptions->withTag;
pReq->sql = strdup(pCxt->pParseCxt->pSql);
if (NULL == pReq->sql) {
@ -3298,19 +3307,25 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
int32_t code = TSDB_CODE_SUCCESS;
const char* dbName;
if (NULL != pStmt->pQuery) {
dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName;
if ('\0' != pStmt->subSTbName[0]) {
pReq->subType = TOPIC_SUB_TYPE__TABLE;
toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name);
tNameExtractFullName(&name, pReq->subStbName);
} else if ('\0' != pStmt->subDbName[0]) {
pReq->subType = TOPIC_SUB_TYPE__DB;
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName));
tNameGetFullDbName(&name, pReq->subDbName);
} else {
pReq->subType = TOPIC_SUB_TYPE__COLUMN;
char* dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName;
tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName));
tNameGetFullDbName(&name, pReq->subDbName);
pCxt->pParseCxt->topicQuery = true;
code = translateQuery(pCxt, pStmt->pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
}
} else {
dbName = pStmt->subscribeDbName;
}
tNameSetDbName(&name, pCxt->pParseCxt->acctId, dbName, strlen(dbName));
tNameGetFullDbName(&name, pReq->subscribeDbName);
return code;
}

File diff suppressed because it is too large.

View File

@ -298,14 +298,12 @@ TEST_F(ParserInitialCTest, createStable) {
auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0,
float xFilesFactor = TSDB_DEFAULT_ROLLUP_FILE_FACTOR,
int32_t delay = TSDB_DEFAULT_ROLLUP_DELAY, int32_t ttl = TSDB_DEFAULT_TABLE_TTL,
const char* pComment = nullptr) {
int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) {
memset(&expect, 0, sizeof(SMCreateStbReq));
int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname);
expect.name[len] = '\0';
expect.igExists = igExists;
expect.xFilesFactor = xFilesFactor;
expect.delay = delay;
expect.ttl = ttl;
if (nullptr != pComment) {
expect.comment = strdup(pComment);
@ -393,7 +391,7 @@ TEST_F(ParserInitialCTest, createStable) {
addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT);
run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)");
setCreateStbReqFunc("t1", 1, 0.1, 2, 100, "test create table");
setCreateStbReqFunc("t1", 1, 0.1, 100, "test create table");
addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0);
addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT);
addFieldToCreateStbReqFunc(true, "c2", TSDB_DATA_TYPE_UINT);
@ -431,7 +429,7 @@ TEST_F(ParserInitialCTest, createStable) {
"TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, "
"a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, "
"a12 TINYINT UNSIGNED, a13 BOOL, a14 NCHAR(30), a15 VARCHAR(50)) "
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2");
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1");
}
TEST_F(ParserInitialCTest, createStream) {
@ -464,7 +462,7 @@ TEST_F(ParserInitialCTest, createTable) {
"TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, a8 BINARY(20), "
"a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, "
"a14 NCHAR(30), a15 VARCHAR(50)) "
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1 DELAY 2");
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) FILE_FACTOR 0.1");
run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy')");
@ -477,13 +475,62 @@ TEST_F(ParserInitialCTest, createTable) {
TEST_F(ParserInitialCTest, createTopic) {
useDb("root", "test");
SCMCreateTopicReq expect = {0};
auto setCreateTopicReqFunc = [&](const char* pTopicName, int8_t igExists, const char* pSql, const char* pAst,
const char* pDbName = nullptr, const char* pTbname = nullptr) {
memset(&expect, 0, sizeof(SCMCreateTopicReq));
snprintf(expect.name, sizeof(expect.name), "0.%s", pTopicName);
expect.igExists = igExists;
expect.sql = (char*)pSql;
if (nullptr != pTbname) {
expect.subType = TOPIC_SUB_TYPE__TABLE;
snprintf(expect.subStbName, sizeof(expect.subStbName), "0.%s.%s", pDbName, pTbname);
} else if (nullptr != pAst) {
expect.subType = TOPIC_SUB_TYPE__COLUMN;
expect.ast = (char*)pAst;
} else {
expect.subType = TOPIC_SUB_TYPE__DB;
snprintf(expect.subDbName, sizeof(expect.subDbName), "0.%s", pDbName);
}
};
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_TOPIC_STMT);
SCMCreateTopicReq req = {0};
ASSERT_TRUE(TSDB_CODE_SUCCESS ==
tDeserializeSCMCreateTopicReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
ASSERT_EQ(std::string(req.name), std::string(expect.name));
ASSERT_EQ(req.igExists, expect.igExists);
ASSERT_EQ(req.subType, expect.subType);
ASSERT_EQ(std::string(req.sql), std::string(expect.sql));
switch (expect.subType) {
case TOPIC_SUB_TYPE__DB:
ASSERT_EQ(std::string(req.subDbName), std::string(expect.subDbName));
break;
case TOPIC_SUB_TYPE__TABLE:
ASSERT_EQ(std::string(req.subStbName), std::string(expect.subStbName));
break;
case TOPIC_SUB_TYPE__COLUMN:
ASSERT_NE(req.ast, nullptr);
break;
default:
ASSERT_TRUE(false);
}
});
setCreateTopicReqFunc("tp1", 0, "create topic tp1 as select * from t1", "ast");
run("CREATE TOPIC tp1 AS SELECT * FROM t1");
run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT * FROM t1");
setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as select ts, ceil(c1) from t1", "ast");
run("CREATE TOPIC IF NOT EXISTS tp1 AS SELECT ts, CEIL(c1) FROM t1");
run("CREATE TOPIC tp1 AS test");
setCreateTopicReqFunc("tp1", 0, "create topic tp1 as database test", nullptr, "test");
run("CREATE TOPIC tp1 AS DATABASE test");
run("CREATE TOPIC IF NOT EXISTS tp1 AS test");
setCreateTopicReqFunc("tp1", 1, "create topic if not exists tp1 as stable st1", nullptr, "test", "st1");
run("CREATE TOPIC IF NOT EXISTS tp1 AS STABLE st1");
}
TEST_F(ParserInitialCTest, createUser) {

View File

@ -32,7 +32,7 @@ TEST_F(ParserInitialDTest, dropBnode) {
run("DROP BNODE ON DNODE 1");
}
// DROP CGROUP [ IF EXISTS ] cgroup_name ON topic_name
// DROP CONSUMER GROUP [ IF EXISTS ] cgroup_name ON topic_name
TEST_F(ParserInitialDTest, dropCGroup) {
useDb("root", "test");
@ -56,10 +56,10 @@ TEST_F(ParserInitialDTest, dropCGroup) {
});
setDropCgroupReqFunc("tp1", "cg1");
run("DROP CGROUP cg1 ON tp1");
run("DROP CONSUMER GROUP cg1 ON tp1");
setDropCgroupReqFunc("tp1", "cg1", 1);
run("DROP CGROUP IF EXISTS cg1 ON tp1");
run("DROP CONSUMER GROUP IF EXISTS cg1 ON tp1");
}
// todo drop database

View File

@ -252,6 +252,8 @@ TEST_F(ParserSelectTest, semanticError) {
// TSDB_CODE_PAR_AMBIGUOUS_COLUMN
run("SELECT c2 FROM t1 tt1, t1 tt2 WHERE tt1.c1 = tt2.c1", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE);
run("SELECT c2 FROM (SELECT c1 c2, c2 FROM t1)", TSDB_CODE_PAR_AMBIGUOUS_COLUMN, PARSER_STAGE_TRANSLATE);
// TSDB_CODE_PAR_WRONG_VALUE_TYPE
run("SELECT timestamp '2010a' FROM t1", TSDB_CODE_PAR_WRONG_VALUE_TYPE, PARSER_STAGE_TRANSLATE);

View File

@ -124,6 +124,7 @@ static int32_t createChildLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelec
SLogicNode* pNode = NULL;
int32_t code = func(pCxt, pSelect, &pNode);
if (TSDB_CODE_SUCCESS == code && NULL != pNode) {
pNode->precision = pSelect->precision;
code = pushLogicNode(pCxt, pRoot, pNode);
}
if (TSDB_CODE_SUCCESS != code) {
@ -400,6 +401,7 @@ static int32_t createLogicNodeByTable(SLogicPlanContext* pCxt, SSelectStmt* pSel
nodesDestroyNode(pNode);
return TSDB_CODE_OUT_OF_MEMORY;
}
pNode->precision = pSelect->precision;
*pLogicNode = pNode;
}
return code;
@ -485,6 +487,10 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm
pWindow->watermark = pCxt->pPlanCxt->watermark;
}
if (pCxt->pPlanCxt->rSmaQuery) {
pWindow->filesFactor = pCxt->pPlanCxt->filesFactor;
}
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExprForSelect(pWindow->pFuncs, pSelect, SQL_CLAUSE_WINDOW);
}

View File

@ -99,7 +99,8 @@ static bool osdMayBeOptimized(SLogicNode* pNode) {
return false;
}
// todo: release after function splitting
if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType) {
if (TSDB_SUPER_TABLE == ((SScanLogicNode*)pNode)->pMeta->tableType &&
SCAN_TYPE_STREAM != ((SScanLogicNode*)pNode)->scanType) {
return false;
}
if (NULL == pNode->pParent || (QUERY_NODE_LOGIC_PLAN_WINDOW != nodeType(pNode->pParent) &&
@ -226,6 +227,7 @@ static void setScanWindowInfo(SScanLogicNode* pScan) {
pScan->triggerType = ((SWindowLogicNode*)pScan->node.pParent)->triggerType;
pScan->watermark = ((SWindowLogicNode*)pScan->node.pParent)->watermark;
pScan->tsColId = ((SColumnNode*)((SWindowLogicNode*)pScan->node.pParent)->pTspk)->colId;
pScan->filesFactor = ((SWindowLogicNode*)pScan->node.pParent)->filesFactor;
}
}

View File

@ -506,6 +506,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pTableScan->triggerType = pScanLogicNode->triggerType;
pTableScan->watermark = pScanLogicNode->watermark;
pTableScan->tsColId = pScanLogicNode->tsColId;
pTableScan->filesFactor = pScanLogicNode->filesFactor;
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
}
@ -917,6 +918,7 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
pWindow->triggerType = pWindowLogicNode->triggerType;
pWindow->watermark = pWindowLogicNode->watermark;
pWindow->filesFactor = pWindowLogicNode->filesFactor;
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pWindow;

View File

@ -79,7 +79,11 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {
if (taskHandle) {
code = qExecTask(taskHandle, &pRes, &useconds);
if (code) {
QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
if (code != TSDB_CODE_OPS_NOT_SUPPORT) {
QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
} else {
QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
}
QW_ERR_RET(code);
}
}
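The qworker change only demotes the log level when the failure is the expected TSDB_CODE_OPS_NOT_SUPPORT; everything else still logs at error level. The same pattern in Python terms (a sketch; the error-code constant is illustrative, not the real value):

    import logging

    log = logging.getLogger("qworker")
    TSDB_CODE_OPS_NOT_SUPPORT = 0x0100  # illustrative value only

    def log_exec_failure(code: int, msg: str) -> None:
        # expected unsupported-operation failures are debug noise, not errors
        level = logging.DEBUG if code == TSDB_CODE_OPS_NOT_SUPPORT else logging.ERROR
        log.log(level, "qExecTask failed, code:%x - %s", code, msg)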

View File

@ -17,9 +17,7 @@ TARGET_INCLUDE_DIRECTORIES(
PUBLIC "${TD_SOURCE_DIR}/source/libs/parser/inc"
PRIVATE "${TD_SOURCE_DIR}/source/libs/scalar/inc"
)
if(NOT TD_WINDOWS)
add_test(
NAME scalarTest
COMMAND scalarTest
)
endif(NOT TD_WINDOWS)
add_test(
NAME scalarTest
COMMAND scalarTest
)

View File

@ -2498,7 +2498,7 @@ TEST(ScalarFunctionTest, tanFunction_column) {
code = tanFunction(pInput, 1, pOutput);
ASSERT_EQ(code, TSDB_CODE_SUCCESS);
for (int32_t i = 0; i < rowNum; ++i) {
ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]);
ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15);
PRINTF("tiny_int after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i)));
}
scltDestroyDataBlock(pInput);
@ -2517,7 +2517,7 @@ TEST(ScalarFunctionTest, tanFunction_column) {
code = tanFunction(pInput, 1, pOutput);
ASSERT_EQ(code, TSDB_CODE_SUCCESS);
for (int32_t i = 0; i < rowNum; ++i) {
ASSERT_EQ(*((double *)colDataGetData(pOutput->columnData, i)), result[i]);
ASSERT_NEAR(*((double *)colDataGetData(pOutput->columnData, i)), result[i], 1e-15);
PRINTF("float after TAN:%f\n", *((double *)colDataGetData(pOutput->columnData, i)));
}
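Switching from exact equality to ASSERT_NEAR with a 1e-15 tolerance absorbs last-ULP differences between libm implementations. The same idea in Python (a sketch):

    import math

    computed = math.tan(2.0)
    expected = -2.185039863261519  # reference value; the last bit can differ across platforms
    assert math.isclose(computed, expected, abs_tol=1e-15)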

View File

@ -42,7 +42,7 @@ static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
}
static void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) {
if (count < pInfo->numSBFs - 1) {
if (count < pInfo->numSBFs) {
for (uint64_t i = 0; i < count; ++i) {
SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, 0);
tScalableBfDestroy(pTsSBFs);
@ -73,8 +73,8 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
}
static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
if (watermark <= 0) {
watermark = TMIN(originInt/adjInterval, 1) * adjInterval;
if (watermark <= adjInterval) {
watermark = TMAX(originInt/adjInterval, 1) * adjInterval;
} else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
watermark = MAX_NUM_SCALABLE_BF * adjInterval;
}/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
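The TMIN-to-TMAX swap is what makes the default watermark scale with the original interval; with TMIN the result could never exceed one adjusted interval. Checking the arithmetic with assumed values:

    adj_interval = 10_000     # 10 s expressed in ms (assumed)
    origin_interval = 30_000  # 30 s expressed in ms (assumed)

    old_default = min(origin_interval // adj_interval, 1) * adj_interval  # capped at one interval: 10_000
    new_default = max(origin_interval // adj_interval, 1) * adj_interval  # scales up: 30_000
    assert (old_default, new_default) == (10_000, 30_000)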

View File

@ -494,6 +494,7 @@ class TDDnodes:
self.simDeployed = False
self.testCluster = False
self.valgrind = 0
self.killValgrind = 1
def init(self, path, remoteIP = ""):
psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
@ -505,14 +506,15 @@ class TDDnodes:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
if self.killValgrind == 1:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -9 %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
binPath = self.dnodes[0].getPath() + "/../../../"
# tdLog.debug("binPath %s" % (binPath))
@ -549,6 +551,9 @@ class TDDnodes:
def setValgrind(self, value):
self.valgrind = value
def setKillValgrind(self, value):
self.killValgrind = value
def deploy(self, index, *updatecfgDict):
self.sim.setTestCluster(self.testCluster)
@ -622,14 +627,15 @@ class TDDnodes:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
if self.killValgrind == 1:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
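A test that wants to keep its valgrind processes alive can now opt out before init. Typical use (a sketch, assuming the framework's usual tdDnodes singleton and an arbitrary deploy path):

    from util.dnodes import tdDnodes  # TDengine test framework

    tdDnodes.setKillValgrind(0)      # skip the kill of valgrind.bin during init and shutdown
    tdDnodes.init("/tmp/td_deploy")  # deploy path is hypothetical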

View File

@ -9,7 +9,7 @@ sql create database d0 keep 365000d,365000d,365000d
sql use d0
print =============== create super table and register rsma
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1;
sql show stables
if $rows != 1 then

View File

@ -9,7 +9,7 @@ sql create database d0 retentions 15s:7d,1m:21d,15m:365d;
sql use d0
print =============== create super table and register rsma
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1 delay 2;
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min) file_factor 0.1;
sql show stables
if $rows != 1 then

View File

@ -5,7 +5,7 @@ sleep 50
sql connect
print =============== create database
sql create database d1
sql create database d1 vgroups 1
sql use d1
print =============== create super table, include column type for count/sum/min/max/first

View File

@ -23,7 +23,7 @@ sql insert into t1 values(1648791223001,10,2,3,1.1,2);
sql insert into t1 values(1648791233002,3,2,3,2.1,3);
sql insert into t1 values(1648791243003,NULL,NULL,NULL,NULL,4);
sql insert into t1 values(1648791213002,NULL,NULL,NULL,NULL,5) (1648791233012,NULL,NULL,NULL,NULL,6);
sleep 300
sql select * from streamt order by s desc;
# row 0
@ -115,7 +115,7 @@ sql insert into t1 values(1648791233002,3,2,3,2.1,9);
sql insert into t1 values(1648791243003,4,2,3,3.1,10);
sql insert into t1 values(1648791213002,4,2,3,4.1,11) ;
sql insert into t1 values(1648791213002,4,2,3,4.1,12) (1648791223009,4,2,3,4.1,13);
sleep 300
sql select * from streamt order by s desc ;
# row 0

View File

@ -22,7 +22,7 @@ sql insert into t1 values(1648791210000,1,1,1,1.1,1);
sql insert into t1 values(1648791220000,2,2,2,2.1,2);
sql insert into t1 values(1648791230000,3,3,3,3.1,3);
sql insert into t1 values(1648791240000,4,4,4,4.1,4);
sleep 300
sql select * from streamt order by s desc;
# row 0
@ -50,7 +50,7 @@ sql insert into t1 values(1648791250005,5,5,5,5.1,5);
sql insert into t1 values(1648791260006,6,6,6,6.1,6);
sql insert into t1 values(1648791270007,7,7,7,7.1,7);
sql insert into t1 values(1648791240005,5,5,5,5.1,8) (1648791250006,6,6,6,6.1,9);
sleep 300
sql select * from streamt order by s desc;
# row 0
@ -100,7 +100,7 @@ sql insert into t1 values(1648791260007,7,7,7,7.1,12) (1648791290008,7,7,7,7.1,1
sql insert into t1 values(1648791500000,7,7,7,7.1,15) (1648791520000,8,8,8,8.1,16) (1648791540000,8,8,8,8.1,17);
sql insert into t1 values(1648791530000,8,8,8,8.1,18);
sql insert into t1 values(1648791220000,10,10,10,10.1,19) (1648791290008,2,2,2,2.1,20) (1648791540000,17,17,17,17.1,21) (1648791500001,22,22,22,22.1,22);
sleep 300
sql select * from streamt order by s desc;
# row 0

View File

@ -94,92 +94,4 @@ if $data11 != 5 then
return -1
endi
sql create table t2(ts timestamp, a int, b int , c int, d double);
sql create stream streams2 trigger window_close watermark 20s into streamt2 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t2 interval(10s);
sql insert into t2 values(1648791213000,1,2,3,1.0);
sql insert into t2 values(1648791239999,1,2,3,1.0);
sleep 300
sql select * from streamt2;
if $rows != 0 then
print ======$rows
return -1
endi
sql insert into t2 values(1648791240000,1,2,3,1.0);
sleep 300
sql select * from streamt2;
if $rows != 1 then
print ======$rows
return -1
endi
if $data01 != 1 then
print ======$data01
return -1
endi
sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791240000,1,2,3,1.0);
sleep 300
sql select * from streamt2;
if $rows != 1 then
print ======$rows
return -1
endi
if $data01 != 1 then
print ======$data01
return -1
endi
sql insert into t2 values(1648791280000,1,2,3,1.0);
sleep 300
sql select * from streamt2;
if $rows != 4 then
print ======$rows
return -1
endi
if $data01 != 1 then
print ======$data01
return -1
endi
if $data11 != 1 then
print ======$data11
return -1
endi
if $data21 != 1 then
print ======$data21
return -1
endi
if $data31 != 3 then
print ======$data31
return -1
endi
sql insert into t2 values(1648791250001,1,2,3,1.0) (1648791250002,1,2,3,1.0) (1648791250003,1,2,3,1.0) (1648791280000,1,2,3,1.0) (1648791280001,1,2,3,1.0) (1648791280002,1,2,3,1.0) (1648791310000,1,2,3,1.0) (1648791280001,1,2,3,1.0);
sleep 300
sql select * from streamt2;
if $rows != 5 then
print ======$rows
return -1
endi
if $data01 != 1 then
print ======$data01
return -1
endi
if $data11 != 1 then
print ======$data11
return -1
endi
if $data21 != 1 then
print ======$data21
return -1
endi
if $data31 != 3 then
print ======$data31
return -1
endi
if $data41 != 3 then
print ======$data31
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -0,0 +1,79 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos/",
"host": "test216",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 8,
"thread_count_create_tbl": 8,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 1000,
"num_of_records_per_req": 100000,
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 24
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 100000,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"batch_create_tbl_num": 50000,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 5,
"interlace_rows": 100000,
"insert_interval": 0,
"max_sql_len": 10000000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2022-05-01 00:00:00.000",
"sample_format": "csv",
"use_sample_ts": "no",
"tags_file": "",
"columns": [
{
"type": "INT"
},
{
"type": "TINYINT",
"count": 1
},
{"type": "DOUBLE"},
{
"type": "BINARY",
"len": 40,
"count": 1
},
{
"type": "nchar",
"len": 20,
"count": 1
}
],
"tags": [
{
"type": "TINYINT",
"count": 1
},
{
"type": "BINARY",
"len": 16,
"count": 1
}
]
}
]
}
]
}

View File

@ -0,0 +1,42 @@
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "test216",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "db",
"query_times": 100,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 0,
"threads": 8,
"sqls": [
{
"sql": "select count(*) from stb_0 ",
"result": "./query_res0.txt"
},
{
"sql": "select last_row(*) from stb_1 ",
"result": "./query_res1.txt"
},
{
"sql": "select last(*) from stb_2 ",
"result": "./query_res2.txt"
},
{
"sql": "select first(*) from stb_3 ",
"result": "./query_res3.txt"
},
{
"sql": "select avg(c0),min(c2),max(c1) from stb_4",
"result": "./query_res4.txt"
},
{
"sql": "select avg(c0),min(c2),max(c1) from stb_5 where ts <= '2022-05-01 20:00:00.500' and ts >= '2022-05-01 00:00:00.000' ",
"result": "./query_res5.txt"
}
]
}
}
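Both new JSON files are taosBenchmark job descriptions, one for the bulk insert and one for the query mix. A run sketch (the file names are assumptions; -f points taosBenchmark at a JSON job file):

    import subprocess

    subprocess.run(["taosBenchmark", "-f", "manual_insert.json"], check=True)
    subprocess.run(["taosBenchmark", "-f", "manual_query.json"], check=True)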

Some files were not shown because too many files have changed in this diff.