Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/hzcheng_3.0
commit beda425248
@@ -287,7 +287,7 @@ pipeline {
 '''
 sh '''
 cd ${WKC}/tests/parallel_test
-export DEFAULT_RETRY_TIME=2
+export DEFAULT_RETRY_TIME=1
 date
 timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
 '''
@@ -71,8 +71,8 @@ ELSE ()
 ENDIF ()

 IF (${SANITIZER} MATCHES "true")
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -g3")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -g3")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3")
 MESSAGE(STATUS "Will compile with Address Sanitizer!")
 ELSE ()
 SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3")
@@ -29,7 +29,7 @@ title: 数据模型和基本概念
 <td>10.3</td>
 <td>219</td>
 <td>0.31</td>
-<td>Beijing.Chaoyang</td>
+<td>California.SanFrancisco</td>
 <td>2</td>
 </tr>
 <tr>
@@ -38,7 +38,7 @@ title: 数据模型和基本概念
 <td>10.2</td>
 <td>220</td>
 <td>0.23</td>
-<td>Beijing.Chaoyang</td>
+<td>California.SanFrancisco</td>
 <td>3</td>
 </tr>
 <tr>
@@ -47,7 +47,7 @@ title: 数据模型和基本概念
 <td>11.5</td>
 <td>221</td>
 <td>0.35</td>
-<td>Beijing.Haidian</td>
+<td>California.LosAngeles</td>
 <td>3</td>
 </tr>
 <tr>
@@ -56,7 +56,7 @@ title: 数据模型和基本概念
 <td>13.4</td>
 <td>223</td>
 <td>0.29</td>
-<td>Beijing.Haidian</td>
+<td>California.LosAngeles</td>
 <td>2</td>
 </tr>
 <tr>
@@ -65,7 +65,7 @@ title: 数据模型和基本概念
 <td>12.6</td>
 <td>218</td>
 <td>0.33</td>
-<td>Beijing.Chaoyang</td>
+<td>California.SanFrancisco</td>
 <td>2</td>
 </tr>
 <tr>
@@ -74,7 +74,7 @@ title: 数据模型和基本概念
 <td>11.8</td>
 <td>221</td>
 <td>0.28</td>
-<td>Beijing.Haidian</td>
+<td>California.LosAngeles</td>
 <td>2</td>
 </tr>
 <tr>
@@ -83,7 +83,7 @@ title: 数据模型和基本概念
 <td>10.3</td>
 <td>218</td>
 <td>0.25</td>
-<td>Beijing.Chaoyang</td>
+<td>California.SanFrancisco</td>
 <td>3</td>
 </tr>
 <tr>
@@ -92,7 +92,7 @@ title: 数据模型和基本概念
 <td>12.3</td>
 <td>221</td>
 <td>0.31</td>
-<td>Beijing.Chaoyang</td>
+<td>California.SanFrancisco</td>
 <td>2</td>
 </tr>
 </tbody>
@@ -132,7 +132,7 @@ Query OK, 2 row(s) in set (0.003128s)
 taosBenchmark
 ```

-该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。

 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。

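For reference, the schema described in the paragraph above corresponds roughly to the following SQL (a sketch only; taosBenchmark creates these objects automatically, and the tag types shown are taken from the super-table definition used elsewhere in these docs):

```sql
-- Hypothetical equivalent of what taosBenchmark sets up in database `test`
CREATE DATABASE IF NOT EXISTS test;
USE test;
CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);
-- Subtables d0 .. d9999 then carry tag values such as ("California.SanFrancisco", 1)
CREATE TABLE IF NOT EXISTS d0 USING meters TAGS ("California.SanFrancisco", 1);
```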
@@ -154,10 +154,10 @@ taos> select count(*) from test.meters;
 taos> select avg(current), max(voltage), min(phase) from test.meters;
 ```

-查询 location="beijing" 的记录总条数:
+查询 location="California.SanFrancisco" 的记录总条数:

 ```sql
-taos> select count(*) from test.meters where location="beijing";
+taos> select count(*) from test.meters where location="California.SanFrancisco";
 ```

 查询 groupId=10 的所有记录的平均值、最大值、最小值等:
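The groupId=10 aggregate mentioned in the last context line above can be written as follows (a sketch, assuming the `test.meters` super table created by taosBenchmark):

```sql
-- Hypothetical query for the "groupId=10" aggregates mentioned above
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId = 10;
```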
@@ -55,10 +55,10 @@ CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAG
 TDengine 对每个数据采集点需要独立建表。与标准的关系型数据库一样,一张表有表名,Schema,但除此之外,还可以带有一到多个标签。创建时,需要使用超级表做模板,同时指定标签的具体值。以[表 1](/tdinternal/arch#model_table1)中的智能电表为例,可以使用如下的 SQL 命令建表:

 ```sql
-CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
 ```

-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 ”Beijing.Chaoyang",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。

 :::warning
 目前 TDengine 没有从技术层面限制使用一个 database (db1) 的超级表作为模板建立另一个 database (db2) 的子表,后续会禁止这种用法,不建议使用这种方法建表。
@@ -72,10 +72,10 @@ TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序
 在某些特殊场景中,用户在写数据时并不确定某个数据采集点的表是否存在,此时可在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表且后面的 USING 语句被忽略。比如:

 ```sql
-INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32);
 ```

-上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"Beijing.Chaoyang", 2`。
+上述 SQL 语句将记录`(now, 10.2, 219, 0.32)`插入表 d1001。如果表 d1001 还未创建,则使用超级表 meters 做模板自动创建,同时打上标签值 `"California.SanFrancisco", 2`。

 关于自动建表的详细语法请参见 [插入记录时自动建表](/taos-sql/insert#插入记录时自动建表) 章节。

@@ -29,7 +29,7 @@ measurement,tag_set field_set timestamp
 例如:

 ```
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
+meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
 ```

 :::note
@@ -42,7 +42,6 @@ meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 16

 要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)

-
 ## 示例代码

 <Tabs defaultValue="java" groupId="lang">
@@ -29,10 +29,10 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
 例如:

 ```txt
-meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3
+meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
 ```

-参考[OpenTSDB Telnet API文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
+参考[OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。

 ## 示例代码

@@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s)
 taos> select tbname, * from `meters.current`;
 tbname | ts | value | groupid | location |
 ==================================================================================================================================
-t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian |
-t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian |
-t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang |
-t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang |
+t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
+t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles |
+t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco |
+t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
 Query OK, 4 row(s) in set (0.005399s)
 ```
@@ -19,33 +19,33 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据

 ```json
 [
 {
 "metric": "sys.cpu.nice",
 "timestamp": 1346846400,
 "value": 18,
 "tags": {
 "host": "web01",
 "dc": "lga"
-}
-},
-{
-"metric": "sys.cpu.nice",
-"timestamp": 1346846400,
-"value": 9,
-"tags": {
-"host": "web02",
-"dc": "lga"
-}
 }
+},
+{
+"metric": "sys.cpu.nice",
+"timestamp": 1346846400,
+"value": 9,
+"tags": {
+"host": "web02",
+"dc": "lga"
+}
+}
 ]
 ```

 与 OpenTSDB 行协议类似, metric 将作为超级表名, timestamp 表示时间戳,value 表示度量值, tags 表示标签集。

-参考[OpenTSDB HTTP API文档](http://opentsdb.net/docs/build/html/api_http/put.html)。
+参考[OpenTSDB HTTP API 文档](http://opentsdb.net/docs/build/html/api_http/put.html)。

 :::note

 - 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 nchar 类型, 字符串将将转为 nchar 类型, 数值将同样转换为 double 类型。
 - TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。

@@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s)
 taos> select * from `meters.current`;
 ts | value | groupid | location |
 ===================================================================================================================
-2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang |
-2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang |
+2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
+2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
 Query OK, 2 row(s) in set (0.004076s)
 ```
@@ -50,14 +50,14 @@ Query OK, 2 row(s) in set (0.001100s)

 ### 示例一

-在 TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照 location 分组。
+在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。

 ```
 taos> SELECT AVG(voltage) FROM meters GROUP BY location;
 avg(voltage) | location |
 =============================================================
-222.000000000 | Beijing.Haidian |
-219.200000000 | Beijing.Chaoyang |
+222.000000000 | California.LosAngeles |
+219.200000000 | California.SanFrancisco |
 Query OK, 2 row(s) in set (0.002136s)
 ```

@@ -88,10 +88,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
 Query OK, 2 row(s) in set (0.000883s)
 ```

-降采样操作也适用于超级表,比如:将北京所有智能电表采集的电流值每秒钟求和
+降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和

 ```
-taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
+taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
 ts | sum(current) |
 ======================================================
 2018-10-03 14:38:04.000 | 10.199999809 |
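A related sketch: the same down-sampling can use a longer window, for example a 10-second average voltage over the same California meters (assumes the `meters` super table used throughout this section):

```sql
-- Hypothetical 10-second down-sampling variant of the query above
taos> SELECT AVG(voltage) FROM meters WHERE location LIKE "California%" INTERVAL(10s);
```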
@@ -34,8 +34,8 @@ SLIDING: 连续查询的时间窗口向前滑动的时间间隔

 ```sql
 create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
-create table D1001 using meters tags ("Beijing.Chaoyang", 2);
-create table D1002 using meters tags ("Beijing.Haidian", 2);
+create table D1001 using meters tags ("California.SanFrancisco", 2);
+create table D1002 using meters tags ("California.LosAngeles", 2);
 ...
 ```

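For context, a continuous query over this schema is typically defined along the following lines (a sketch based on the INTERVAL/SLIDING parameters named in the hunk header; the target table name `avg_vol` is hypothetical):

```sql
-- Hypothetical continuous query: 1-minute average voltage, sliding every 30 seconds
create table avg_vol as
  select avg(voltage) from meters interval(1m) sliding(30s);
```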
@@ -184,8 +184,8 @@ taos> use power;
 # create super table "meters"
 taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
 # create tabes using the schema defined by super table "meters"
-taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
-taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
+taos> create table d1001 using meters tags ("California.SanFrancisco", 2);
+taos> create table d1002 using meters tags ("California.LosAngeles", 2);
 # insert some rows
 taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
 taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
@@ -193,27 +193,28 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08
 taos> select * from meters where current > 10;
 ts | current | voltage | phase | location | groupid |
 ===========================================================================================================
-2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 |
-2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 |
-2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 |
-2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 |
-2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 |
+2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 |
+2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 |
+2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 |
+2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 |
+2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 |
 Query OK, 5 row(s) in set (0.004896s)
 ```

 ### 示例代码

 <Tabs defaultValue="java" groupId="lang">
 <TabItem label="Java" value="java">
-<Java/>
+<Java />
 </TabItem>
 <TabItem label="Python" value="Python">
-<Python/>
+<Python />
 </TabItem>
 {/* <TabItem label="Go" value="go">
 <Go/>
 </TabItem> */}
 <TabItem label="Rust" value="rust">
-<Rust/>
+<Rust />
 </TabItem>
 {/* <TabItem label="Node.js" value="nodejs">
 <Node/>
@@ -222,20 +223,20 @@ Query OK, 5 row(s) in set (0.004896s)
 <CSharp/>
 </TabItem> */}
 <TabItem label="C" value="c">
-<CDemo/>
+<CDemo />
 </TabItem>
 </Tabs>

 ### 运行示例程序

 示例程序会先消费符合查询条件的所有历史数据:

 ```bash
-ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
-ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
-ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
-ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
-ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
+ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2
+ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
+ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
+ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
 ```

 接着,使用 TDengine CLI 向表中新增一条数据:
@@ -249,5 +250,5 @@ taos> insert into d1001 values(now, 12.4, 220, 1);
 因为这条数据的电流大于 10A,示例程序会将其消费:

 ```
-ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
+ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2
 ```
@@ -1,6 +1,6 @@
 ---
 sidebar_label: 缓存
 title: 缓存
 description: "提供写驱动的缓存管理机制,将每个表最近写入的一条记录持续保存在缓存中,可以提供高性能的最近状态查询。"
 ---

@@ -15,7 +15,7 @@ TDengine 将内存池按块划分进行管理,数据在内存块里是以行
 你可以通过函数 last_row() 快速获取一张表或一张超级表的最后一条记录,这样很便于在大屏显示各设备的实时状态或采集值。例如:

 ```sql
-select last_row(voltage) from meters where location='Beijing.Chaoyang';
+select last_row(voltage) from meters where location='California.SanFrancisco';
 ```

-该 SQL 语句将获取所有位于北京朝阳区的电表最后记录的电压值。
+该 SQL 语句将获取所有位于加利福尼亚州旧金山市的电表最后记录的电压值。
@@ -67,7 +67,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-
 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如:

 ```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
 ```

 也可以在自动建表时,只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如:
@@ -79,7 +79,7 @@ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.
 自动建表语法也支持在一条语句中向多个表插入记录。例如:

 ```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
 d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
 d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
 ```
@@ -108,13 +108,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
 从 2.1.5.0 版本开始,支持在插入来自 CSV 文件的数据时,以超级表为模板来自动创建不存在的数据表。例如:

 ```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv';
 ```

 也可以在一条语句中向多个表以自动建表的方式插入记录。例如:

 ```
-INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
+INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
 d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
 ```

@@ -137,7 +137,7 @@ Query OK, 1 row(s) in set (0.001029s)
 taos> SHOW TABLES;
 Query OK, 0 row(s) in set (0.000946s)

-taos> INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
+taos> INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a');

 DB error: invalid SQL: 'a' (invalid timestamp) (0.039494s)

@@ -40,15 +40,15 @@ Query OK, 3 row(s) in set (0.001165s)
 taos> SELECT * FROM meters;
 ts | current | voltage | phase | location | groupid |
 =====================================================================================================================================
-2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 |
-2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 |
-2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 |
-2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 |
-2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 |
-2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 |
-2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 |
-2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 |
-2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 |
+2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 |
+2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 |
+2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 |
+2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 |
+2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 |
+2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 |
+2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 |
+2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 |
+2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 |
 Query OK, 9 row(s) in set (0.002022s)
 ```

@@ -104,8 +104,8 @@ Query OK, 1 row(s) in set (0.000849s)
 taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
 location | groupid | current |
 ======================================================================
-Beijing.Chaoyang | 2 | 10.30000 |
-Beijing.Chaoyang | 2 | 12.60000 |
+California.SanFrancisco | 2 | 10.30000 |
+California.SanFrancisco | 2 | 12.60000 |
 Query OK, 2 row(s) in set (0.003112s)
 ```

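As a small companion to the LIMIT example above, the same query can also skip rows with OFFSET (a sketch; standard TDengine LIMIT/OFFSET syntax is assumed):

```sql
-- Hypothetical variant: return only the second matching row
taos> SELECT location, groupid, current FROM d1001 LIMIT 1 OFFSET 1;
```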
@@ -284,10 +284,10 @@ SELECT COUNT(TBNAME) FROM meters;
 taos> SELECT TBNAME, location FROM meters;
 tbname | location |
 ==================================================================
-d1004 | Beijing.Haidian |
-d1003 | Beijing.Haidian |
-d1002 | Beijing.Chaoyang |
-d1001 | Beijing.Chaoyang |
+d1004 | California.LosAngeles |
+d1003 | California.LosAngeles |
+d1002 | California.SanFrancisco |
+d1001 | California.SanFrancisco |
 Query OK, 4 row(s) in set (0.000881s)

 taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
@@ -327,15 +327,15 @@ Query OK, 1 row(s) in set (0.001091s)

 - <\> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
 - like 算子使用通配符字符串进行匹配检查。
 - 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意 ASCII 字符。
 - 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 `\_`,也即加一个反斜线来进行转义。(从 2.2.0.0 版本开始支持)
 - 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。)
 - 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
 - 从 2.3.0.0 版本开始,已支持完整的同一列和/或不同列间的 AND/OR 运算。
 - 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。
 - 从 2.3.0.0 版本开始,允许使用多个时间过滤条件,但首列时间戳的过滤运算结果只能包含一个区间。
 - 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
-- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
+- 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('California.SanFrancisco', 'California.SanDieo')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。
 - 从 2.3.0.0 版本开始,条件过滤开始支持正则表达式,关键字 match/nmatch,不区分大小写。

 ## 正则表达式过滤
@@ -380,7 +380,7 @@ WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;

 :::note

-JOIN语句存在如下限制要求:
+JOIN 语句存在如下限制要求:

 - 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。
 - 在包含 JOIN 操作的查询语句中不支持 FILL。
@@ -409,13 +409,13 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
 - 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
 - 目前内层查询、外层查询均不支持 UNION 操作。
 - 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
 - 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
 - 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
 - 计算函数部分:
 - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
 - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
 - 外层查询中不支持 IN 算子,但在内层中可以使用。
 - 外层查询不支持 GROUP BY。

 :::

@@ -208,10 +208,10 @@ url 中的配置参数如下:
 - 与原生连接方式不同,REST 接口是无状态的。在使用 JDBC REST 连接时,需要在 SQL 中指定表、超级表的数据库名称。例如:

 ```sql
-INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
+INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
 ```

-- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6);
+- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);

 :::

@@ -563,7 +563,7 @@ public class ParameterBindingDemo {
 // set table name
 pstmt.setTableName("t5_" + i);
 // set tags
-pstmt.setTagNString(0, "北京-abc");
+pstmt.setTagNString(0, "California.SanFrancisco");

 // set columns
 ArrayList<Long> tsList = new ArrayList<>();
@@ -574,7 +574,7 @@ public class ParameterBindingDemo {

 ArrayList<String> f1List = new ArrayList<>();
 for (int j = 0; j < numOfRow; j++) {
-f1List.add("北京-abc");
+f1List.add("California.LosAngeles");
 }
 pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);

@@ -633,7 +633,7 @@ public class SchemalessInsertTest {
 private static final String host = "127.0.0.1";
 private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
 private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
-private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}";
+private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";

 public static void main(String[] args) throws SQLException {
 final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
@@ -196,10 +196,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
 准备测试数据的文本文件,内容如下:

 ```txt title="test-data.txt"
-meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
-meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
-meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
+meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
+meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
+meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
+meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
 ```

 使用 kafka-console-producer 向主题 meters 添加测试数据。
@@ -223,10 +223,10 @@ Database changed.
 taos> select * from meters;
 ts | current | voltage | phase | groupid | location |
 ===============================================================================================================================================================
-2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian |
-2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian |
-2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian |
-2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian |
+2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
+2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
+2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
+2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
 Query OK, 4 row(s) in set (0.004208s)
 ```

@@ -275,7 +275,7 @@ DROP DATABASE IF EXISTS test;
 CREATE DATABASE test;
 USE test;
 CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
-INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
+INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
 ```

 使用 TDengine CLI, 执行 SQL 文件。
@@ -302,8 +302,8 @@ kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topi

 ```
 ......
-meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
-meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
+meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
 ......
 ```

@@ -209,7 +209,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0
 Press enter key to continue or Ctrl-C to stop
 ```

-回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。
+回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.SanDieo"。

 最后共插入 1 亿条记录。

@@ -279,7 +279,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0
 $ taos> select groupid, location from test.d0;
 groupid | location |
 =================================
-0 | shanghai |
+0 | California.SanDieo |
 Query OK, 1 row(s) in set (0.003490s)
 ```

@@ -29,7 +29,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>10.3</td>
 <td>219</td>
 <td>0.31</td>
-<td>San Jose</td>
+<td>California.SanFrancisco</td>
 <td>2</td>
 </tr>
 <tr>
@@ -38,7 +38,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>10.2</td>
 <td>220</td>
 <td>0.23</td>
-<td>San Jose</td>
+<td>California.SanFrancisco</td>
 <td>3</td>
 </tr>
 <tr>
@@ -47,7 +47,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>11.5</td>
 <td>221</td>
 <td>0.35</td>
-<td>Mountain View</td>
+<td>California.LosAngeles</td>
 <td>3</td>
 </tr>
 <tr>
@@ -56,7 +56,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>13.4</td>
 <td>223</td>
 <td>0.29</td>
-<td>Mountain View</td>
+<td>California.LosAngeles</td>
 <td>2</td>
 </tr>
 <tr>
@@ -65,7 +65,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>12.6</td>
 <td>218</td>
 <td>0.33</td>
-<td>San Jose</td>
+<td>California.SanFrancisco</td>
 <td>2</td>
 </tr>
 <tr>
@@ -74,7 +74,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>11.8</td>
 <td>221</td>
 <td>0.28</td>
-<td>Mountain View</td>
+<td>California.LosAngeles</td>
 <td>2</td>
 </tr>
 <tr>
@@ -83,7 +83,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>10.3</td>
 <td>218</td>
 <td>0.25</td>
-<td>San Jose</td>
+<td>California.SanFrancisco</td>
 <td>3</td>
 </tr>
 <tr>
@@ -92,7 +92,7 @@ In order to explain the basic concepts and provide some sample code, the TDengin
 <td>12.3</td>
 <td>221</td>
 <td>0.31</td>
-<td>San Jose</td>
+<td>California.SanFrancisco</td>
 <td>2</td>
 </tr>
 </tbody>
@@ -130,7 +130,7 @@ After TDengine server is running,execute `taosBenchmark` (previously named tao
 taosBenchmark
 ```

-This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "beijing" or "shanghai".
+This command will create a super table "meters" under database "test". Under "meters", 10000 tables are created with names from "d0" to "d9999". Each table has 10000 rows and each row has four columns (ts, current, voltage, phase). Time stamp is starting from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table has tags "location" and "groupId". groupId is set 1 to 10 randomly, and location is set to "California.SanFrancisco" or "California.SanDieo".

 This command will insert 100 million rows into the database quickly. Time to insert depends on the hardware configuration, it only takes a dozen seconds for a regular PC server.

@@ -152,10 +152,10 @@ query the average, maximum, minimum of 100 million rows:
 taos> select avg(current), max(voltage), min(phase) from test.meters;
 ```

-query the total number of rows with location="beijing":
+query the total number of rows with location="California.SanFrancisco":

 ```sql
-taos> select count(*) from test.meters where location="beijing";
+taos> select count(*) from test.meters where location="California.SanFrancisco";
 ```

 query the average, maximum, minimum of all rows with groupId=10:
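The groupId=10 aggregate mentioned in the last context line above can be written as follows (a sketch, assuming the `test.meters` super table created by taosBenchmark):

```sql
-- Hypothetical query for the "groupId=10" aggregates mentioned above
taos> select avg(current), max(voltage), min(phase) from test.meters where groupId = 10;
```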
@@ -52,10 +52,10 @@ At most 4096 (or 1024 prior to version 2.1.7.0) columns are allowed in a STable.
 A specific table needs to be created for each data collection point. Similar to RDBMS, table name and schema are required to create a table. Beside, one or more tags can be created for each table. To create a table, a STable needs to be used as template and the values need to be specified for the tags. For example, for the meters in [Table 1](/tdinternal/arch#model_table1), the table can be created using below SQL statement.

 ```sql
-CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);
+CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
 ```

-In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "Beijing.Chaoyang" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
+In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.

 In TDengine system, it's recommended to create a table for a data collection point via STable. A table created via STable is called subtable in some parts of the TDengine documentation. All SQL commands applied on regular tables can be applied on subtables.

@@ -70,10 +70,10 @@ It's suggested to use the global unique ID of a data collection point as the tab
 In some circumstances, it's unknown whether the table already exists when inserting rows. The table can be created automatically using the SQL statement below, and nothing will happen if the table already exist.

 ```sql
-INSERT INTO d1001 USING meters TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);
+INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now, 10.2, 219, 0.32);
 ```

-In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"Beijing.Chaoyang", 2`.
+In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"California.SanFrancisco", 2`.

 For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting).

@@ -29,7 +29,7 @@ measurement,tag_set field_set timestamp
 For example:

 ```
-meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
+meters,location=California.LoSangeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611249500
 ```

 :::note

@@ -29,7 +29,7 @@ A single line of text is used in OpenTSDB line protocol to represent one row of
 For example:

 ```txt
-meters.current 1648432611250 11.3 location=Beijing.Haidian groupid=3
+meters.current 1648432611250 11.3 location=California.LoSangeles groupid=3
 ```

 Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
@@ -76,9 +76,9 @@ Query OK, 2 row(s) in set (0.002544s)
 taos> select tbname, * from `meters.current`;
 tbname | ts | value | groupid | location |
 ==================================================================================================================================
-t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | Beijing.Haidian |
-t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | Beijing.Haidian |
-t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | Beijing.Chaoyang |
-t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | Beijing.Chaoyang |
+t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LoSangeles |
+t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LoSangeles |
+t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.249 | 10.300000000 | 2 | California.SanFrancisco |
+t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
 Query OK, 4 row(s) in set (0.005399s)
 ```
@@ -93,7 +93,7 @@ Query OK, 2 row(s) in set (0.001954s)
 taos> select * from `meters.current`;
 ts | value | groupid | location |
 ===================================================================================================================
-2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | Beijing.Chaoyang |
-2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | Beijing.Chaoyang |
+2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
+2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
 Query OK, 2 row(s) in set (0.004076s)
 ```
@ -58,14 +58,14 @@ In summary, for a STable, its subtables can be aggregated by a simple query on t
|
||||||
|
|
||||||
### Example 1
|
### Example 1
|
||||||
|
|
||||||
In TDengine CLI `taos`, use below SQL to get the average voltage of all the meters in BeiJing grouped by location.
|
In TDengine CLI `taos`, use below SQL to get the average voltage of all the meters in California grouped by location.
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
|
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
|
||||||
avg(voltage) | location |
|
avg(voltage) | location |
|
||||||
=============================================================
|
=============================================================
|
||||||
222.000000000 | Beijing.Haidian |
|
222.000000000 | California.LosAngeles |
|
||||||
219.200000000 | Beijing.Chaoyang |
|
219.200000000 | California.SanFrancisco |
|
||||||
Query OK, 2 row(s) in set (0.002136s)
|
Query OK, 2 row(s) in set (0.002136s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -96,10 +96,10 @@ taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
|
||||||
Query OK, 2 row(s) in set (0.000883s)
|
Query OK, 2 row(s) in set (0.000883s)
|
||||||
```
|
```
|
||||||
|
|
||||||
Down sampling can also be used for STable. For example, the SQL statement below can be used to get the sum of current from all meters in BeiJing.
|
Down sampling can also be used for STable. For example, the SQL statement below can be used to get the sum of current from all meters in California.
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
|
taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
|
||||||
ts | sum(current) |
|
ts | sum(current) |
|
||||||
======================================================
|
======================================================
|
||||||
2018-10-03 14:38:04.000 | 10.199999809 |
|
2018-10-03 14:38:04.000 | 10.199999809 |
|
||||||
|
|
|
@ -34,8 +34,8 @@ In this section the use case of meters will be used to introduce how to use cont
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
|
create table meters (ts timestamp, current float, voltage int, phase float) tags (location binary(64), groupId int);
|
||||||
create table D1001 using meters tags ("Beijing.Chaoyang", 2);
|
create table D1001 using meters tags ("California.SanFrancisco", 2);
|
||||||
create table D1002 using meters tags ("Beijing.Haidian", 2);
|
create table D1002 using meters tags ("California.LosAngeles", 2);
|
||||||
```
|
```
|
||||||
|
|
||||||
The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds.
|
The SQL statement below retrieves the average voltage for a one minute time window, with each time window moving forward by 30 seconds.
|
||||||
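The referenced statement falls outside this hunk; a minimal sketch of such a sliding-window query, assuming the `meters` STable created above, would be:

```sql
-- average voltage per one-minute window, with windows advancing every 30 seconds
SELECT AVG(voltage) FROM meters INTERVAL(1m) SLIDING(30s);
```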
|
|
|
@ -187,8 +187,8 @@ taos> use power;
|
||||||
# create super table "meters"
|
# create super table "meters"
|
||||||
taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
|
taos> create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int);
|
||||||
# create tables using the schema defined by super table "meters"
|
# create tables using the schema defined by super table "meters"
|
||||||
taos> create table d1001 using meters tags ("Beijing.Chaoyang", 2);
|
taos> create table d1001 using meters tags ("California.SanFrancisco", 2);
|
||||||
taos> create table d1002 using meters tags ("Beijing.Haidian", 2);
|
taos> create table d1002 using meters tags ("California.LosAngeles", 2);
|
||||||
# insert some rows
|
# insert some rows
|
||||||
taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
|
taos> insert into d1001 values("2020-08-15 12:00:00.000", 12, 220, 1),("2020-08-15 12:10:00.000", 12.3, 220, 2),("2020-08-15 12:20:00.000", 12.2, 220, 1);
|
||||||
taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
|
taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08-15 12:10:00.000", 10.3, 220, 1),("2020-08-15 12:20:00.000", 11.2, 220, 1);
|
||||||
|
@ -196,11 +196,11 @@ taos> insert into d1002 values("2020-08-15 12:00:00.000", 9.9, 220, 1),("2020-08
|
||||||
taos> select * from meters where current > 10;
|
taos> select * from meters where current > 10;
|
||||||
ts | current | voltage | phase | location | groupid |
|
ts | current | voltage | phase | location | groupid |
|
||||||
===========================================================================================================
|
===========================================================================================================
|
||||||
2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | Beijing.Haidian | 2 |
|
2020-08-15 12:10:00.000 | 10.30000 | 220 | 1 | California.LosAngeles | 2 |
|
||||||
2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | Beijing.Haidian | 2 |
|
2020-08-15 12:20:00.000 | 11.20000 | 220 | 1 | California.LosAngeles | 2 |
|
||||||
2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | Beijing.Chaoyang | 2 |
|
2020-08-15 12:00:00.000 | 12.00000 | 220 | 1 | California.SanFrancisco | 2 |
|
||||||
2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | Beijing.Chaoyang | 2 |
|
2020-08-15 12:10:00.000 | 12.30000 | 220 | 2 | California.SanFrancisco | 2 |
|
||||||
2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | Beijing.Chaoyang | 2 |
|
2020-08-15 12:20:00.000 | 12.20000 | 220 | 1 | California.SanFrancisco | 2 |
|
||||||
Query OK, 5 row(s) in set (0.004896s)
|
Query OK, 5 row(s) in set (0.004896s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -235,11 +235,11 @@ Query OK, 5 row(s) in set (0.004896s)
|
||||||
The example programs first consume all historical data matching the criteria.
|
The example programs first consume all historical data matching the criteria.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
|
ts: 1597464000000 current: 12.0 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
|
||||||
ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: Beijing.Chaoyang groupid : 2
|
ts: 1597464600000 current: 12.3 voltage: 220 phase: 2 location: California.SanFrancisco groupid : 2
|
||||||
ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid : 2
|
ts: 1597465200000 current: 12.2 voltage: 220 phase: 1 location: California.SanFrancisco groupid : 2
|
||||||
ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
|
ts: 1597464600000 current: 10.3 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
|
||||||
ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian groupid : 2
|
ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: California.LosAngeles groupid : 2
|
||||||
```
|
```
|
||||||
|
|
||||||
Next, use TDengine CLI to insert a new row.
|
Next, use TDengine CLI to insert a new row.
|
||||||
|
@ -253,5 +253,5 @@ taos> insert into d1001 values(now, 12.4, 220, 1);
|
||||||
Because the current in the inserted row exceeds 10A, it will be consumed by the example program.
|
Because the current in the inserted row exceeds 10A, it will be consumed by the example program.
|
||||||
|
|
||||||
```
|
```
|
||||||
ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang groupid: 2
|
ts: 1651146662805 current: 12.4 voltage: 220 phase: 1 location: California.SanFrancisco groupid: 2
|
||||||
```
|
```
|
||||||
|
|
|
@ -12,8 +12,8 @@ The memory space used by TDengine cache is fixed in size, according to the confi
|
||||||
|
|
||||||
The memory pool is divided into blocks; data is stored in memory in row format, and each block follows a FIFO policy. The size of each block is determined by the configuration parameter `cache`, and the number of blocks for each vnode is determined by `blocks`. For each vnode, the total cache size is `cache * blocks`. To be efficient, a cache block needs to be large enough for each table to store at least dozens of records.
|
The memory pool is divided into blocks; data is stored in memory in row format, and each block follows a FIFO policy. The size of each block is determined by the configuration parameter `cache`, and the number of blocks for each vnode is determined by `blocks`. For each vnode, the total cache size is `cache * blocks`. To be efficient, a cache block needs to be large enough for each table to store at least dozens of records.
|
||||||
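As a rough illustration of the sizing arithmetic, with hypothetical (non-default) values in `taos.cfg`:

```
cache  16    # hypothetical: each cache block is 16 MB
blocks 6     # hypothetical: 6 blocks per vnode
# total cache per vnode = cache * blocks = 96 MB
```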
|
|
||||||
`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example the below SQL statement retrieves the latest voltage of all meters in Chaoyang district of Beijing.
|
`last_row` function can be used to retrieve the last row of a table or a STable to quickly show the current state of devices on monitoring screen. For example the below SQL statement retrieves the latest voltage of all meters in San Francisco of California.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
select last_row(voltage) from meters where location='Beijing.Chaoyang';
|
select last_row(voltage) from meters where location='California.SanFrancisco';
|
||||||
```
|
```
|
||||||
|
|
|
@ -69,7 +69,7 @@ INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-
|
||||||
If you are not sure whether the table already exists, it can be created automatically while inserting data using the SQL statement below. To use this functionality, a STable must be used as a template and tag values must be provided.
|
If you are not sure whether the table already exists, it can be created automatically while inserting data using the SQL statement below. To use this functionality, a STable must be used as a template and tag values must be provided.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
|
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32);
|
||||||
```
|
```
|
||||||
|
|
||||||
It's not necessary to provide values for all tags when creating tables automatically; tags without values will be set to NULL.
|
It's not necessary to provide values for all tags when creating tables automatically; tags without values will be set to NULL.
|
||||||
|
@ -81,7 +81,7 @@ INSERT INTO d21001 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:33.
|
||||||
Multiple rows can also be inserted into the same table in a single SQL statement in this way.
|
Multiple rows can also be inserted into the same table in a single SQL statement in this way.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
d21002 USING meters (groupId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33)
|
||||||
d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
d21003 USING meters (groupId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||||
```
|
```
|
||||||
|
@ -110,13 +110,13 @@ INSERT INTO d1001 FILE '/tmp/csvfile.csv';
|
||||||
From version 2.1.5.0, tables can be automatically created using a super table as a template when inserting data from a CSV file, as shown below:
|
From version 2.1.5.0, tables can be automatically created using a super table as a template when inserting data from a CSV file, as shown below:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv';
|
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile.csv';
|
||||||
```
|
```
|
||||||
|
|
||||||
Multiple tables can be automatically created and populated in a single SQL statement, as shown below:
|
Multiple tables can be automatically created and populated in a single SQL statement, as shown below:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv'
|
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
|
||||||
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
|
d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -146,7 +146,7 @@ Query OK, 0 row(s) in set (0.000946s)
|
||||||
Then, try to create table d1001 automatically when inserting data into it.
|
Then, try to create table d1001 automatically when inserting data into it.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d1001 USING meters TAGS('Beijing.Chaoyang', 2) VALUES('a');
|
INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('a');
|
||||||
```
|
```
|
||||||
|
|
||||||
The output shows that the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement.
|
The output shows that the value to be inserted is invalid. But `SHOW TABLES` proves that the table has been created automatically by the `INSERT` statement.
|
||||||
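A minimal check, assuming the table name from the example above:

```sql
SHOW TABLES LIKE 'd1001';
```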
|
|
|
@ -39,15 +39,15 @@ The result includes both data columns and tag columns for super table.
|
||||||
taos> SELECT * FROM meters;
|
taos> SELECT * FROM meters;
|
||||||
ts | current | voltage | phase | location | groupid |
|
ts | current | voltage | phase | location | groupid |
|
||||||
=====================================================================================================================================
|
=====================================================================================================================================
|
||||||
2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | Beijing.Haidian | 2 |
|
2018-10-03 14:38:05.500 | 11.80000 | 221 | 0.28000 | California.LosAngeles | 2 |
|
||||||
2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | Beijing.Haidian | 2 |
|
2018-10-03 14:38:16.600 | 13.40000 | 223 | 0.29000 | California.LosAngeles | 2 |
|
||||||
2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | Beijing.Haidian | 3 |
|
2018-10-03 14:38:05.000 | 10.80000 | 223 | 0.29000 | California.LosAngeles | 3 |
|
||||||
2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | Beijing.Haidian | 3 |
|
2018-10-03 14:38:06.500 | 11.50000 | 221 | 0.35000 | California.LosAngeles | 3 |
|
||||||
2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | Beijing.Chaoyang | 3 |
|
2018-10-03 14:38:04.000 | 10.20000 | 220 | 0.23000 | California.SanFrancisco | 3 |
|
||||||
2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | Beijing.Chaoyang | 3 |
|
2018-10-03 14:38:16.650 | 10.30000 | 218 | 0.25000 | California.SanFrancisco | 3 |
|
||||||
2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | Beijing.Chaoyang | 2 |
|
2018-10-03 14:38:05.000 | 10.30000 | 219 | 0.31000 | California.SanFrancisco | 2 |
|
||||||
2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | Beijing.Chaoyang | 2 |
|
2018-10-03 14:38:15.000 | 12.60000 | 218 | 0.33000 | California.SanFrancisco | 2 |
|
||||||
2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | Beijing.Chaoyang | 2 |
|
2018-10-03 14:38:16.800 | 12.30000 | 221 | 0.31000 | California.SanFrancisco | 2 |
|
||||||
Query OK, 9 row(s) in set (0.002022s)
|
Query OK, 9 row(s) in set (0.002022s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -102,8 +102,8 @@ Starting from version 2.0.14, tag columns can be selected together with data col
|
||||||
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
|
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
|
||||||
location | groupid | current |
|
location | groupid | current |
|
||||||
======================================================================
|
======================================================================
|
||||||
Beijing.Chaoyang | 2 | 10.30000 |
|
California.SanFrancisco | 2 | 10.30000 |
|
||||||
Beijing.Chaoyang | 2 | 12.60000 |
|
California.SanFrancisco | 2 | 12.60000 |
|
||||||
Query OK, 2 row(s) in set (0.003112s)
|
Query OK, 2 row(s) in set (0.003112s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -271,10 +271,10 @@ Only filter on `TAGS` are allowed in the `where` clause for above two query stat
|
||||||
taos> SELECT TBNAME, location FROM meters;
|
taos> SELECT TBNAME, location FROM meters;
|
||||||
tbname | location |
|
tbname | location |
|
||||||
==================================================================
|
==================================================================
|
||||||
d1004 | Beijing.Haidian |
|
d1004 | California.LosAngeles |
|
||||||
d1003 | Beijing.Haidian |
|
d1003 | California.LosAngeles |
|
||||||
d1002 | Beijing.Chaoyang |
|
d1002 | California.SanFrancisco |
|
||||||
d1001 | Beijing.Chaoyang |
|
d1001 | California.SanFrancisco |
|
||||||
Query OK, 4 row(s) in set (0.000881s)
|
Query OK, 4 row(s) in set (0.000881s)
|
||||||
|
|
||||||
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
|
taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
|
||||||
|
@ -323,7 +323,7 @@ Logical operations in below table can be used in `where` clause to filter the re
|
||||||
- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`.
|
- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`.
|
||||||
- From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range.
|
- From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range.
|
||||||
- From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25".
|
- From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25".
|
||||||
- From version 2.1.4.0, operator `IN` can be used in where clause. For example, `WHERE city IN ('Beijing', 'Shanghai')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
|
- From version 2.1.4.0, operator `IN` can be used in where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
|
||||||
- From version 2.3.0.0, regular expression is supported in where clause with keyword `match` or `nmatch`, the regular expression is case insensitive.
|
- From version 2.3.0.0, regular expression is supported in where clause with keyword `match` or `nmatch`, the regular expression is case insensitive.
|
||||||
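Taken together, these filters can be combined in one `WHERE` clause. A sketch, assuming the `meters` STable used throughout these docs:

```sql
-- BETWEEN on a data column, IN and MATCH on the location tag
SELECT * FROM meters
  WHERE voltage BETWEEN 215 AND 225
    AND location IN ('California.SanFrancisco', 'California.LosAngeles');

SELECT * FROM meters WHERE location MATCH '^California';
```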
|
|
||||||
## Regular Expression
|
## Regular Expression
|
||||||
|
|
|
@ -206,10 +206,10 @@ The configuration parameters in the URL are as follows.
|
||||||
- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example:
|
- Unlike the native connection method, the REST interface is stateless. When using the JDBC REST connection, you need to specify the database name of the table and super table in SQL. For example:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6);
|
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
|
||||||
```
|
```
|
||||||
|
|
||||||
- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('beijing') values(now, 24.6);
|
- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
@ -565,7 +565,7 @@ public class ParameterBindingDemo {
|
||||||
// set table name
|
// set table name
|
||||||
pstmt.setTableName("t5_" + i);
|
pstmt.setTableName("t5_" + i);
|
||||||
// set tags
|
// set tags
|
||||||
pstmt.setTagNString(0, "Beijing-abc");
|
pstmt.setTagNString(0, "California-abc");
|
||||||
|
|
||||||
// set columns
|
// set columns
|
||||||
ArrayList<Long> tsList = new ArrayList<>();
|
ArrayList<Long> tsList = new ArrayList<>();
|
||||||
|
@ -576,7 +576,7 @@ public class ParameterBindingDemo {
|
||||||
|
|
||||||
ArrayList<String> f1List = new ArrayList<>();
|
ArrayList<String> f1List = new ArrayList<>();
|
||||||
for (int j = 0; j < numOfRow; j++) {
|
for (int j = 0; j < numOfRow; j++) {
|
||||||
f1List.add("Beijing-abc");
|
f1List.add("California-abc");
|
||||||
}
|
}
|
||||||
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
|
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
|
||||||
|
|
||||||
|
@ -635,7 +635,7 @@ public class SchemalessInsertTest {
|
||||||
private static final String host = "127.0.0.1";
|
private static final String host = "127.0.0.1";
|
||||||
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
|
||||||
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
|
||||||
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"Beijing\", \"id\": \"d1001\"}}";
|
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1346846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
|
||||||
|
|
||||||
public static void main(String[] args) throws SQLException {
|
public static void main(String[] args) throws SQLException {
|
||||||
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
|
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
|
||||||
|
|
|
@ -202,7 +202,7 @@ To handle the data insertion and data query from multiple timezones, Unix Timest
|
||||||
On Linux systems, TDengine clients automatically obtain the timezone from the host. Alternatively, the timezone can be configured explicitly in the configuration file `taos.cfg` as shown below.
|
On Linux systems, TDengine clients automatically obtain the timezone from the host. Alternatively, the timezone can be configured explicitly in the configuration file `taos.cfg` as shown below.
|
||||||
|
|
||||||
```
|
```
|
||||||
timezone UTC-8
|
timezone UTC-7
|
||||||
timezone GMT-8
|
timezone GMT-8
|
||||||
timezone Asia/Shanghai
|
timezone Asia/Shanghai
|
||||||
```
|
```
|
||||||
|
|
|
@ -194,10 +194,10 @@ If the above command is executed successfully, the output is as follows:
|
||||||
Prepare a text file as test data with the following content:
|
Prepare a text file as test data with the following content:
|
||||||
|
|
||||||
```txt title="test-data.txt"
|
```txt title="test-data.txt"
|
||||||
meters,location=Beijing.Haidian,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
|
meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
|
||||||
meters,location=Beijing.Haidian,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
|
meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
|
||||||
meters,location=Beijing.Haidian,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
|
meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
|
||||||
meters,location=Beijing.Haidian,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
|
meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0.35 1648432611250000000
|
||||||
```
|
```
|
||||||
|
|
||||||
Use kafka-console-producer to write test data to the topic `meters`.
|
Use kafka-console-producer to write test data to the topic `meters`.
|
||||||
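For example, assuming a broker listening on `localhost:9092` and the test data saved as `test-data.txt` (adjust both to your environment), the command would look like:

```bash
kafka-console-producer.sh --bootstrap-server localhost:9092 --topic meters < test-data.txt
```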
|
@ -221,10 +221,10 @@ Database changed.
|
||||||
taos> select * from meters;
|
taos> select * from meters;
|
||||||
ts | current | voltage | phase | groupid | location |
|
ts | current | voltage | phase | groupid | location |
|
||||||
===============================================================================================================================================================
|
===============================================================================================================================================================
|
||||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | Beijing.Haidian |
|
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | Beijing.Haidian |
|
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||||
2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | Beijing.Haidian |
|
2022-03-28 09:56:51.249000000 | 10.800000000 | 223.000000000 | 0.290000000 | 3 | California.LosAngeles |
|
||||||
2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | Beijing.Haidian |
|
2022-03-28 09:56:51.250000000 | 11.300000000 | 221.000000000 | 0.350000000 | 3 | California.LosAngeles |
|
||||||
Query OK, 4 row(s) in set (0.004208s)
|
Query OK, 4 row(s) in set (0.004208s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -273,7 +273,7 @@ DROP DATABASE IF EXISTS test;
|
||||||
CREATE DATABASE test;
|
CREATE DATABASE test;
|
||||||
USE test;
|
USE test;
|
||||||
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
|
||||||
INSERT INTO d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(Beijing.Chaoyang, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(Beijing.Chaoyang, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(Beijing.Haidian, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(Beijing.Haidian, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
INSERT INTO d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:15.000',12.60000,218,0.33000) d1001 USING meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:16.800',12.30000,221,0.31000) d1002 USING meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 14:38:16.650',10.30000,218,0.25000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:05.500',11.80000,221,0.28000) d1003 USING meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 14:38:16.600',13.40000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:05.000',10.80000,223,0.29000) d1004 USING meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 14:38:06.500',11.50000,221,0.35000);
|
||||||
```
|
```
|
||||||
|
|
||||||
Use TDengine CLI to execute SQL script
|
Use TDengine CLI to execute SQL script
|
||||||
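For example, assuming the statements above are saved to a hypothetical file named `create-and-insert.sql`:

```bash
taos -f create-and-insert.sql
```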
|
@ -300,8 +300,8 @@ output:
|
||||||
|
|
||||||
````
|
````
|
||||||
......
|
......
|
||||||
meters,location="beijing.chaoyang",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||||
meters,location="beijing.chaoyang",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||||
......
|
......
|
||||||
````
|
````
|
||||||
|
|
||||||
|
|
|
@ -265,7 +265,7 @@ Below is an example output:
|
||||||
$ taos> select groupid, location from test.d0;
|
$ taos> select groupid, location from test.d0;
|
||||||
groupid | location |
|
groupid | location |
|
||||||
=================================
|
=================================
|
||||||
0 | shanghai |
|
0 | California.SanDiego |
|
||||||
Query OK, 1 row(s) in set (0.003490s)
|
Query OK, 1 row(s) in set (0.003490s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -219,6 +219,16 @@ typedef struct {
|
||||||
|
|
||||||
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
|
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
|
||||||
|
|
||||||
|
#define SORT_QSORT_T 0x1
|
||||||
|
#define SORT_SPILLED_MERGE_SORT_T 0x2
|
||||||
|
typedef struct SSortExecInfo {
|
||||||
|
int32_t sortMethod;
|
||||||
|
int32_t sortBuffer;
|
||||||
|
int32_t loops; // loop count
|
||||||
|
int32_t writeBytes; // write io bytes
|
||||||
|
int32_t readBytes; // read io bytes
|
||||||
|
} SSortExecInfo;
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -198,7 +198,7 @@ void colDataTrim(SColumnInfoData* pColumnInfoData);
|
||||||
size_t blockDataGetNumOfCols(const SSDataBlock* pBlock);
|
size_t blockDataGetNumOfCols(const SSDataBlock* pBlock);
|
||||||
size_t blockDataGetNumOfRows(const SSDataBlock* pBlock);
|
size_t blockDataGetNumOfRows(const SSDataBlock* pBlock);
|
||||||
|
|
||||||
int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pIndexMap);
|
int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc);
|
||||||
int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex,
|
int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex,
|
||||||
int32_t pageSize);
|
int32_t pageSize);
|
||||||
int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock);
|
int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock);
|
||||||
|
|
|
@ -61,9 +61,10 @@ int32_t tTSRowBuilderGetRow(STSRowBuilder *pBuilder, const STSRow2 **ppRow);
|
||||||
// STag
|
// STag
|
||||||
int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag);
|
int32_t tTagNew(STagVal *pTagVals, int16_t nTag, STag **ppTag);
|
||||||
void tTagFree(STag *pTag);
|
void tTagFree(STag *pTag);
|
||||||
void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData);
|
int32_t tTagSet(STag *pTag, SSchema *pSchema, int32_t nCols, int iCol, uint8_t *pData, uint32_t nData, STag **ppTag);
|
||||||
int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag);
|
void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, uint32_t *nData);
|
||||||
int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag);
|
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
|
||||||
|
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
|
||||||
|
|
||||||
// STRUCT =================
|
// STRUCT =================
|
||||||
struct STColumn {
|
struct STColumn {
|
||||||
|
|
|
@ -660,8 +660,7 @@ typedef struct {
|
||||||
int32_t tz; // query client timezone
|
int32_t tz; // query client timezone
|
||||||
char intervalUnit;
|
char intervalUnit;
|
||||||
char slidingUnit;
|
char slidingUnit;
|
||||||
char
|
char offsetUnit;
|
||||||
offsetUnit; // TODO Remove it, the offset is the number of precision tickle, and it must be a immutable duration.
|
|
||||||
int8_t precision;
|
int8_t precision;
|
||||||
int64_t interval;
|
int64_t interval;
|
||||||
int64_t sliding;
|
int64_t sliding;
|
||||||
|
@ -950,6 +949,7 @@ typedef struct {
|
||||||
int32_t numOfCores;
|
int32_t numOfCores;
|
||||||
int32_t numOfSupportVnodes;
|
int32_t numOfSupportVnodes;
|
||||||
char dnodeEp[TSDB_EP_LEN];
|
char dnodeEp[TSDB_EP_LEN];
|
||||||
|
SMnodeLoad mload;
|
||||||
SClusterCfg clusterCfg;
|
SClusterCfg clusterCfg;
|
||||||
SArray* pVloads; // array of SVnodeLoad
|
SArray* pVloads; // array of SVnodeLoad
|
||||||
} SStatusReq;
|
} SStatusReq;
|
||||||
|
|
|
@ -29,6 +29,7 @@ extern "C" {
|
||||||
typedef struct SMnode SMnode;
|
typedef struct SMnode SMnode;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
int32_t dnodeId;
|
||||||
bool standby;
|
bool standby;
|
||||||
bool deploy;
|
bool deploy;
|
||||||
int8_t replica;
|
int8_t replica;
|
||||||
|
@ -54,15 +55,6 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption);
|
||||||
*/
|
*/
|
||||||
void mndClose(SMnode *pMnode);
|
void mndClose(SMnode *pMnode);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Close a mnode.
|
|
||||||
*
|
|
||||||
* @param pMnode The mnode object to close.
|
|
||||||
* @param pOption Options of the mnode.
|
|
||||||
* @return int32_t 0 for success, -1 for failure.
|
|
||||||
*/
|
|
||||||
int32_t mndAlter(SMnode *pMnode, const SMnodeOpt *pOption);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Start mnode
|
* @brief Start mnode
|
||||||
*
|
*
|
||||||
|
|
|
@ -107,7 +107,7 @@ static FORCE_INLINE void streamDataSubmitRefDec(SStreamDataSubmit* pDataSubmit)
|
||||||
if (ref == 0) {
|
if (ref == 0) {
|
||||||
taosMemoryFree(pDataSubmit->data);
|
taosMemoryFree(pDataSubmit->data);
|
||||||
taosMemoryFree(pDataSubmit->dataRef);
|
taosMemoryFree(pDataSubmit->dataRef);
|
||||||
// taosFreeQitem(pDataSubmit);
|
taosFreeQitem(pDataSubmit);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -98,8 +98,18 @@ typedef struct SSyncFSM {
|
||||||
|
|
||||||
void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm);
|
void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm);
|
||||||
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
|
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
|
||||||
void* (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* snapshot, void* iter, char** ppBuf, int32_t* len);
|
|
||||||
int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* snapshot, char* pBuf, int32_t len);
|
// if (*ppIter == NULL)
|
||||||
|
// *ppIter = new iter;
|
||||||
|
// else
|
||||||
|
// *ppIter.next();
|
||||||
|
//
|
||||||
|
// if success, return 0. else return error code
|
||||||
|
int32_t (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf,
|
||||||
|
int32_t* len);
|
||||||
|
|
||||||
|
// apply data into fsm
|
||||||
|
int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len);
|
||||||
|
|
||||||
void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);
|
void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);
|
||||||
|
|
||||||
|
|
|
@ -313,6 +313,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
|
#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
|
||||||
#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
|
#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
|
||||||
#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
|
#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
|
||||||
|
#define TSDB_CODE_VND_READ_END TAOS_DEF_ERROR_CODE(0, 0x051c)
|
||||||
|
|
||||||
// tsdb
|
// tsdb
|
||||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||||
|
|
|
@ -88,6 +88,7 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
|
||||||
#define uInfo(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLog("UTL ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
#define uInfo(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLog("UTL ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||||
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
||||||
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL ", DEBUG_TRACE, uDebugFlag, __VA_ARGS__); }}
|
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL ", DEBUG_TRACE, uDebugFlag, __VA_ARGS__); }}
|
||||||
|
#define uDebugL(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
||||||
|
|
||||||
#define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }
|
#define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }
|
||||||
#define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); }
|
#define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); }
|
||||||
|
|
|
@ -36,7 +36,6 @@ static const SSysDbTableSchema mnodesSchema[] = {
|
||||||
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||||
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||||
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||||
{.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
|
||||||
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -361,19 +361,13 @@ int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// if pIndexMap = NULL, merger one column by on column
|
int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) {
|
||||||
int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc, SArray* pIndexMap) {
|
|
||||||
assert(pSrc != NULL && pDest != NULL);
|
assert(pSrc != NULL && pDest != NULL);
|
||||||
int32_t capacity = pDest->info.capacity;
|
int32_t capacity = pDest->info.capacity;
|
||||||
|
|
||||||
for (int32_t i = 0; i < pDest->info.numOfCols; ++i) {
|
for (int32_t i = 0; i < pDest->info.numOfCols; ++i) {
|
||||||
int32_t mapIndex = i;
|
|
||||||
// if (pIndexMap) {
|
|
||||||
// mapIndex = *(int32_t*)taosArrayGet(pIndexMap, i);
|
|
||||||
// }
|
|
||||||
|
|
||||||
SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i);
|
SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i);
|
||||||
SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, mapIndex);
|
SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i);
|
||||||
|
|
||||||
capacity = pDest->info.capacity;
|
capacity = pDest->info.capacity;
|
||||||
colDataMergeCol(pCol2, pDest->info.rows, &capacity, pCol1, pSrc->info.rows);
|
colDataMergeCol(pCol2, pDest->info.rows, &capacity, pCol1, pSrc->info.rows);
|
||||||
|
|
|
@ -581,7 +581,52 @@ void tTagFree(STag *pTag) {
|
||||||
if (pTag) taosMemoryFree(pTag);
|
if (pTag) taosMemoryFree(pTag);
|
||||||
}
|
}
|
||||||
|
|
||||||
void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nData) {
|
int32_t tTagSet(STag *pTag, SSchema *pSchema, int32_t nCols, int iCol, uint8_t *pData, uint32_t nData, STag **ppTag) {
|
||||||
|
STagVal *pTagVals;
|
||||||
|
int16_t nTags = 0;
|
||||||
|
SSchema *pColumn;
|
||||||
|
uint8_t *p;
|
||||||
|
uint32_t n;
|
||||||
|
|
||||||
|
pTagVals = (STagVal *)taosMemoryMalloc(sizeof(*pTagVals) * nCols);
|
||||||
|
if (pTagVals == NULL) {
|
||||||
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int32_t i = 0; i < nCols; i++) {
|
||||||
|
pColumn = &pSchema[i];
|
||||||
|
|
||||||
|
if (i == iCol) {
|
||||||
|
p = pData;
|
||||||
|
n = nData;
|
||||||
|
} else {
|
||||||
|
tTagGet(pTag, pColumn->colId, pColumn->type, &p, &n);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (p == NULL) continue;
|
||||||
|
|
||||||
|
ASSERT(IS_VAR_DATA_TYPE(pColumn->type) || n == pColumn->bytes);
|
||||||
|
|
||||||
|
pTagVals[nTags].cid = pColumn->colId;
|
||||||
|
pTagVals[nTags].type = pColumn->type;
|
||||||
|
pTagVals[nTags].nData = n;
|
||||||
|
pTagVals[nTags].pData = p;
|
||||||
|
|
||||||
|
nTags++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create new tag
|
||||||
|
if (tTagNew(pTagVals, nTags, ppTag) < 0) {
|
||||||
|
taosMemoryFree(pTagVals);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
taosMemoryFree(pTagVals);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, uint32_t *nData) {
|
||||||
STagIdx *pTagIdx = bsearch(&((STagIdx){.cid = cid}), pTag->idx, pTag->nTag, sizeof(STagIdx), tTagIdxCmprFn);
|
STagIdx *pTagIdx = bsearch(&((STagIdx){.cid = cid}), pTag->idx, pTag->nTag, sizeof(STagIdx), tTagIdxCmprFn);
|
||||||
if (pTagIdx == NULL) {
|
if (pTagIdx == NULL) {
|
||||||
*ppData = NULL;
|
*ppData = NULL;
|
||||||
|
@ -597,18 +642,11 @@ void tTagGet(STag *pTag, int16_t cid, int8_t type, uint8_t **ppData, int32_t *nD
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tEncodeTag(SEncoder *pEncoder, STag *pTag) {
|
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag) {
|
||||||
// return tEncodeBinary(pEncoder, (uint8_t *)pTag, pTag->len);
|
return tEncodeBinary(pEncoder, (const uint8_t *)pTag, pTag->len);
|
||||||
ASSERT(0);
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tDecodeTag(SDecoder *pDecoder, const STag **ppTag) {
|
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag) { return tDecodeBinary(pDecoder, (uint8_t **)ppTag, NULL); }
|
||||||
// uint32_t n;
|
|
||||||
// return tDecodeBinary(pDecoder, (const uint8_t **)ppTag, &n);
|
|
||||||
ASSERT(0);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#if 1 // ===================================================================================================================
|
#if 1 // ===================================================================================================================
|
||||||
static void dataColSetNEleNull(SDataCol *pCol, int nEle);
|
static void dataColSetNEleNull(SDataCol *pCol, int nEle);
|
||||||
|
@ -1087,7 +1125,7 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
|
||||||
kvRowSetNCols(row, pBuilder->nCols);
|
kvRowSetNCols(row, pBuilder->nCols);
|
||||||
kvRowSetLen(row, tlen);
|
kvRowSetLen(row, tlen);
|
||||||
|
|
||||||
if(pBuilder->nCols > 0){
|
if (pBuilder->nCols > 0) {
|
||||||
memcpy(kvRowColIdx(row), pBuilder->pColIdx, sizeof(SColIdx) * pBuilder->nCols);
|
memcpy(kvRowColIdx(row), pBuilder->pColIdx, sizeof(SColIdx) * pBuilder->nCols);
|
||||||
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
|
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
|
||||||
}
|
}
|
||||||
|
|
|
@ -891,6 +891,9 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
if (tEncodeI64(&encoder, pload->pointsWritten) < 0) return -1;
|
if (tEncodeI64(&encoder, pload->pointsWritten) < 0) return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mnode loads
|
||||||
|
if (tEncodeI32(&encoder, pReq->mload.syncState) < 0) return -1;
|
||||||
|
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -946,6 +949,8 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (tDecodeI32(&decoder, &pReq->mload.syncState) < 0) return -1;
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -75,8 +75,9 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
||||||
(*pMgmt->getVnodeLoadsFp)(&vinfo);
|
(*pMgmt->getVnodeLoadsFp)(&vinfo);
|
||||||
req.pVloads = vinfo.pVloads;
|
req.pVloads = vinfo.pVloads;
|
||||||
|
|
||||||
SMonMloadInfo minfo = {0};
|
SMonMloadInfo minfo = {0};
|
||||||
(*pMgmt->getMnodeLoadsFp)(&minfo);
|
(*pMgmt->getMnodeLoadsFp)(&minfo);
|
||||||
|
req.mload = minfo.load;
|
||||||
|
|
||||||
int32_t contLen = tSerializeSStatusReq(NULL, 0, &req);
|
int32_t contLen = tSerializeSStatusReq(NULL, 0, &req);
|
||||||
void *pHead = rpcMallocCont(contLen);
|
void *pHead = rpcMallocCont(contLen);
|
||||||
|
|
|
@ -36,7 +36,6 @@ typedef struct SMnodeMgmt {
|
||||||
SSingleWorker monitorWorker;
|
SSingleWorker monitorWorker;
|
||||||
SReplica replicas[TSDB_MAX_REPLICA];
|
SReplica replicas[TSDB_MAX_REPLICA];
|
||||||
int8_t replica;
|
int8_t replica;
|
||||||
int8_t selfIndex;
|
|
||||||
bool stopped;
|
bool stopped;
|
||||||
int32_t refCount;
|
int32_t refCount;
|
||||||
TdThreadRwlock lock;
|
TdThreadRwlock lock;
|
||||||
|
@ -47,7 +46,6 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed);
|
||||||
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed);
|
int32_t mmWriteFile(SMnodeMgmt *pMgmt, SDCreateMnodeReq *pMsg, bool deployed);
|
||||||
|
|
||||||
// mmInt.c
|
// mmInt.c
|
||||||
int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg);
|
|
||||||
int32_t mmAcquire(SMnodeMgmt *pMgmt);
|
int32_t mmAcquire(SMnodeMgmt *pMgmt);
|
||||||
void mmRelease(SMnodeMgmt *pMgmt);
|
void mmRelease(SMnodeMgmt *pMgmt);
|
||||||
|
|
||||||
|
|
|
@ -124,22 +124,6 @@ int32_t mmProcessDropReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t mmProcessAlterReq(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
|
|
||||||
SDAlterMnodeReq alterReq = {0};
|
|
||||||
if (tDeserializeSDCreateMnodeReq(pMsg->pCont, pMsg->contLen, &alterReq) != 0) {
|
|
||||||
terrno = TSDB_CODE_INVALID_MSG;
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (pMgmt->pData->dnodeId != 0 && alterReq.dnodeId != pMgmt->pData->dnodeId) {
|
|
||||||
terrno = TSDB_CODE_INVALID_OPTION;
|
|
||||||
dError("failed to alter mnode since %s, input:%d cur:%d", terrstr(), alterReq.dnodeId, pMgmt->pData->dnodeId);
|
|
||||||
return -1;
|
|
||||||
} else {
|
|
||||||
return mmAlter(pMgmt, &alterReq);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
SArray *mmGetMsgHandles() {
|
SArray *mmGetMsgHandles() {
|
||||||
int32_t code = -1;
|
int32_t code = -1;
|
||||||
SArray *pArray = taosArrayInit(64, sizeof(SMgmtHandle));
|
SArray *pArray = taosArrayInit(64, sizeof(SMgmtHandle));
|
||||||
|
|
|
@ -42,6 +42,8 @@ static void mmBuildOptionForDeploy(SMnodeMgmt *pMgmt, const SMgmtInputOpt *pInpu
|
||||||
pOption->standby = false;
|
pOption->standby = false;
|
||||||
pOption->deploy = true;
|
pOption->deploy = true;
|
||||||
pOption->msgCb = pMgmt->msgCb;
|
pOption->msgCb = pMgmt->msgCb;
|
||||||
|
pOption->dnodeId = pMgmt->pData->dnodeId;
|
||||||
|
|
||||||
pOption->replica = 1;
|
pOption->replica = 1;
|
||||||
pOption->selfIndex = 0;
|
pOption->selfIndex = 0;
|
||||||
|
|
||||||
|
@ -52,9 +54,10 @@ static void mmBuildOptionForDeploy(SMnodeMgmt *pMgmt, const SMgmtInputOpt *pInpu
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mmBuildOptionForOpen(SMnodeMgmt *pMgmt, SMnodeOpt *pOption) {
|
static void mmBuildOptionForOpen(SMnodeMgmt *pMgmt, SMnodeOpt *pOption) {
|
||||||
pOption->msgCb = pMgmt->msgCb;
|
|
||||||
pOption->deploy = false;
|
pOption->deploy = false;
|
||||||
pOption->standby = false;
|
pOption->standby = false;
|
||||||
|
pOption->msgCb = pMgmt->msgCb;
|
||||||
|
pOption->dnodeId = pMgmt->pData->dnodeId;
|
||||||
|
|
||||||
if (pMgmt->replica > 0) {
|
if (pMgmt->replica > 0) {
|
||||||
pOption->standby = true;
|
pOption->standby = true;
|
||||||
|
@ -70,44 +73,6 @@ static void mmBuildOptionForOpen(SMnodeMgmt *pMgmt, SMnodeOpt *pOption) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t mmBuildOptionForAlter(SMnodeMgmt *pMgmt, SMnodeOpt *pOption, SDCreateMnodeReq *pCreate) {
|
|
||||||
pOption->msgCb = pMgmt->msgCb;
|
|
||||||
pOption->standby = false;
|
|
||||||
pOption->deploy = false;
|
|
||||||
pOption->replica = pCreate->replica;
|
|
||||||
pOption->selfIndex = -1;
|
|
||||||
|
|
||||||
for (int32_t i = 0; i < pCreate->replica; ++i) {
|
|
||||||
SReplica *pReplica = &pOption->replicas[i];
|
|
||||||
pReplica->id = pCreate->replicas[i].id;
|
|
||||||
pReplica->port = pCreate->replicas[i].port;
|
|
||||||
memcpy(pReplica->fqdn, pCreate->replicas[i].fqdn, TSDB_FQDN_LEN);
|
|
||||||
if (pReplica->id == pMgmt->pData->dnodeId) {
|
|
||||||
pOption->selfIndex = i;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (pOption->selfIndex == -1) {
|
|
||||||
dError("failed to build mnode options since %s", terrstr());
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t mmAlter(SMnodeMgmt *pMgmt, SDAlterMnodeReq *pMsg) {
|
|
||||||
SMnodeOpt option = {0};
|
|
||||||
if (mmBuildOptionForAlter(pMgmt, &option, pMsg) != 0) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mndAlter(pMgmt->pMnode, &option) != 0) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void mmClose(SMnodeMgmt *pMgmt) {
|
static void mmClose(SMnodeMgmt *pMgmt) {
|
||||||
if (pMgmt->pMnode != NULL) {
|
if (pMgmt->pMnode != NULL) {
|
||||||
mmStopWorker(pMgmt);
|
mmStopWorker(pMgmt);
|
||||||
|
|
|
@ -32,9 +32,6 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
|
||||||
dTrace("msg:%p, get from mnode queue", pMsg);
|
dTrace("msg:%p, get from mnode queue", pMsg);
|
||||||
|
|
||||||
switch (pMsg->msgType) {
|
switch (pMsg->msgType) {
|
||||||
case TDMT_DND_ALTER_MNODE:
|
|
||||||
code = mmProcessAlterReq(pMgmt, pMsg);
|
|
||||||
break;
|
|
||||||
case TDMT_MON_MM_INFO:
|
case TDMT_MON_MM_INFO:
|
||||||
code = mmProcessGetMonitorInfoReq(pMgmt, pMsg);
|
code = mmProcessGetMonitorInfoReq(pMgmt, pMsg);
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -90,8 +90,8 @@ typedef enum {
|
||||||
typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
|
typedef int32_t (*ProcessCreateNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
|
||||||
typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
|
typedef int32_t (*ProcessDropNodeFp)(EDndNodeType ntype, SRpcMsg *pMsg);
|
||||||
typedef void (*SendMonitorReportFp)();
|
typedef void (*SendMonitorReportFp)();
|
||||||
typedef void (*GetVnodeLoadsFp)();
|
typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
|
||||||
typedef void (*GetMnodeLoadsFp)();
|
typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
|
|
|
@ -67,30 +67,33 @@ typedef enum {
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TRN_TYPE_BASIC_SCOPE = 1000,
|
TRN_TYPE_BASIC_SCOPE = 1000,
|
||||||
TRN_TYPE_CREATE_USER = 1001,
|
TRN_TYPE_CREATE_ACCT = 1001,
|
||||||
TRN_TYPE_ALTER_USER = 1002,
|
TRN_TYPE_CREATE_CLUSTER = 1002,
|
||||||
TRN_TYPE_DROP_USER = 1003,
|
TRN_TYPE_CREATE_USER = 1003,
|
||||||
TRN_TYPE_CREATE_FUNC = 1004,
|
TRN_TYPE_ALTER_USER = 1004,
|
||||||
TRN_TYPE_DROP_FUNC = 1005,
|
TRN_TYPE_DROP_USER = 1005,
|
||||||
|
TRN_TYPE_CREATE_FUNC = 1006,
|
||||||
|
TRN_TYPE_DROP_FUNC = 1007,
|
||||||
|
|
||||||
TRN_TYPE_CREATE_SNODE = 1006,
|
TRN_TYPE_CREATE_SNODE = 1010,
|
||||||
TRN_TYPE_DROP_SNODE = 1007,
|
TRN_TYPE_DROP_SNODE = 1011,
|
||||||
TRN_TYPE_CREATE_QNODE = 1008,
|
TRN_TYPE_CREATE_QNODE = 1012,
|
||||||
TRN_TYPE_DROP_QNODE = 1009,
|
TRN_TYPE_DROP_QNODE = 10013,
|
||||||
TRN_TYPE_CREATE_BNODE = 1010,
|
TRN_TYPE_CREATE_BNODE = 1014,
|
||||||
TRN_TYPE_DROP_BNODE = 1011,
|
TRN_TYPE_DROP_BNODE = 1015,
|
||||||
TRN_TYPE_CREATE_MNODE = 1012,
|
TRN_TYPE_CREATE_MNODE = 1016,
|
||||||
TRN_TYPE_DROP_MNODE = 1013,
|
TRN_TYPE_DROP_MNODE = 1017,
|
||||||
TRN_TYPE_CREATE_TOPIC = 1014,
|
|
||||||
TRN_TYPE_DROP_TOPIC = 1015,
|
TRN_TYPE_CREATE_TOPIC = 1020,
|
||||||
TRN_TYPE_SUBSCRIBE = 1016,
|
TRN_TYPE_DROP_TOPIC = 1021,
|
||||||
TRN_TYPE_REBALANCE = 1017,
|
TRN_TYPE_SUBSCRIBE = 1022,
|
||||||
TRN_TYPE_COMMIT_OFFSET = 1018,
|
TRN_TYPE_REBALANCE = 1023,
|
||||||
TRN_TYPE_CREATE_STREAM = 1019,
|
TRN_TYPE_COMMIT_OFFSET = 1024,
|
||||||
TRN_TYPE_DROP_STREAM = 1020,
|
TRN_TYPE_CREATE_STREAM = 1025,
|
||||||
TRN_TYPE_ALTER_STREAM = 1021,
|
TRN_TYPE_DROP_STREAM = 1026,
|
||||||
TRN_TYPE_CONSUMER_LOST = 1022,
|
TRN_TYPE_ALTER_STREAM = 1027,
|
||||||
TRN_TYPE_CONSUMER_RECOVER = 1023,
|
TRN_TYPE_CONSUMER_LOST = 1028,
|
||||||
|
TRN_TYPE_CONSUMER_RECOVER = 1029,
|
||||||
TRN_TYPE_BASIC_SCOPE_END,
|
TRN_TYPE_BASIC_SCOPE_END,
|
||||||
|
|
||||||
TRN_TYPE_GLOBAL_SCOPE = 2000,
|
TRN_TYPE_GLOBAL_SCOPE = 2000,
|
||||||
|
@ -196,9 +199,8 @@ typedef struct {
|
||||||
int32_t id;
|
int32_t id;
|
||||||
int64_t createdTime;
|
int64_t createdTime;
|
||||||
int64_t updateTime;
|
int64_t updateTime;
|
||||||
ESyncState role;
|
ESyncState state;
|
||||||
int32_t roleTerm;
|
int64_t stateStartTime;
|
||||||
int64_t roleTime;
|
|
||||||
SDnodeObj* pDnode;
|
SDnodeObj* pDnode;
|
||||||
} SMnodeObj;
|
} SMnodeObj;
|
||||||
|
|
||||||
|
|
|
@ -78,7 +78,6 @@ typedef struct {
|
||||||
SWal *pWal;
|
SWal *pWal;
|
||||||
sem_t syncSem;
|
sem_t syncSem;
|
||||||
int64_t sync;
|
int64_t sync;
|
||||||
ESyncState state;
|
|
||||||
bool standby;
|
bool standby;
|
||||||
bool restored;
|
bool restored;
|
||||||
int32_t errCode;
|
int32_t errCode;
|
||||||
|
@ -90,9 +89,10 @@ typedef struct {
|
||||||
} SGrantInfo;
|
} SGrantInfo;
|
||||||
|
|
||||||
typedef struct SMnode {
|
typedef struct SMnode {
|
||||||
int32_t selfId;
|
int32_t selfDnodeId;
|
||||||
int64_t clusterId;
|
int64_t clusterId;
|
||||||
TdThread thread;
|
TdThread thread;
|
||||||
|
bool deploy;
|
||||||
bool stopped;
|
bool stopped;
|
||||||
int8_t replica;
|
int8_t replica;
|
||||||
int8_t selfIndex;
|
int8_t selfIndex;
|
||||||
|
|
|
@@ -28,7 +28,6 @@ SMnodeObj *mndAcquireMnode(SMnode *pMnode, int32_t mnodeId);
 void       mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj);
 bool       mndIsMnode(SMnode *pMnode, int32_t dnodeId);
 void       mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet);
-void       mndUpdateMnodeRole(SMnode *pMnode);

 #ifdef __cplusplus
 }
@@ -16,6 +16,7 @@
 #define _DEFAULT_SOURCE
 #include "mndAcct.h"
 #include "mndShow.h"
+#include "mndTrans.h"

 #define ACCT_VER_NUMBER   1
 #define ACCT_RESERVE_SIZE 128

@@ -31,14 +32,16 @@ static int32_t mndProcessAlterAcctReq(SRpcMsg *pReq);
 static int32_t mndProcessDropAcctReq(SRpcMsg *pReq);

 int32_t mndInitAcct(SMnode *pMnode) {
-  SSdbTable table = {.sdbType = SDB_ACCT,
-                     .keyType = SDB_KEY_BINARY,
-                     .deployFp = mndCreateDefaultAcct,
-                     .encodeFp = (SdbEncodeFp)mndAcctActionEncode,
-                     .decodeFp = (SdbDecodeFp)mndAcctActionDecode,
-                     .insertFp = (SdbInsertFp)mndAcctActionInsert,
-                     .updateFp = (SdbUpdateFp)mndAcctActionUpdate,
-                     .deleteFp = (SdbDeleteFp)mndAcctActionDelete};
+  SSdbTable table = {
+      .sdbType = SDB_ACCT,
+      .keyType = SDB_KEY_BINARY,
+      .deployFp = mndCreateDefaultAcct,
+      .encodeFp = (SdbEncodeFp)mndAcctActionEncode,
+      .decodeFp = (SdbDecodeFp)mndAcctActionDecode,
+      .insertFp = (SdbInsertFp)mndAcctActionInsert,
+      .updateFp = (SdbUpdateFp)mndAcctActionUpdate,
+      .deleteFp = (SdbDeleteFp)mndAcctActionDelete,
+  };

   mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ACCT, mndProcessCreateAcctReq);
   mndSetMsgHandle(pMnode, TDMT_MND_ALTER_ACCT, mndProcessAlterAcctReq);

@@ -56,25 +59,52 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
   acctObj.updateTime = acctObj.createdTime;
   acctObj.acctId = 1;
   acctObj.status = 0;
-  acctObj.cfg = (SAcctCfg){.maxUsers = INT32_MAX,
-                           .maxDbs = INT32_MAX,
-                           .maxStbs = INT32_MAX,
-                           .maxTbs = INT32_MAX,
-                           .maxTimeSeries = INT32_MAX,
-                           .maxStreams = INT32_MAX,
-                           .maxFuncs = INT32_MAX,
-                           .maxConsumers = INT32_MAX,
-                           .maxConns = INT32_MAX,
-                           .maxTopics = INT32_MAX,
-                           .maxStorage = INT64_MAX,
-                           .accessState = TSDB_VN_ALL_ACCCESS};
+  acctObj.cfg = (SAcctCfg){
+      .maxUsers = INT32_MAX,
+      .maxDbs = INT32_MAX,
+      .maxStbs = INT32_MAX,
+      .maxTbs = INT32_MAX,
+      .maxTimeSeries = INT32_MAX,
+      .maxStreams = INT32_MAX,
+      .maxFuncs = INT32_MAX,
+      .maxConsumers = INT32_MAX,
+      .maxConns = INT32_MAX,
+      .maxTopics = INT32_MAX,
+      .maxStorage = INT64_MAX,
+      .accessState = TSDB_VN_ALL_ACCCESS,
+  };

   SSdbRaw *pRaw = mndAcctActionEncode(&acctObj);
   if (pRaw == NULL) return -1;
   sdbSetRawStatus(pRaw, SDB_STATUS_READY);

   mDebug("acct:%s, will be created while deploy sdb, raw:%p", acctObj.acct, pRaw);
+#if 0
   return sdbWrite(pMnode->pSdb, pRaw);
+#else
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_ACCT, NULL);
+  if (pTrans == NULL) {
+    mError("acct:%s, failed to create since %s", acctObj.acct, terrstr());
+    return -1;
+  }
+  mDebug("trans:%d, used to create acct:%s", pTrans->id, acctObj.acct);
+
+  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+    mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  mndTransDrop(pTrans);
+  return 0;
+#endif
 }

 static SSdbRaw *mndAcctActionEncode(SAcctObj *pAcct) {
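Note: the same commit-log-only transaction skeleton reappears below for the default cluster, dnode, mnode, and user objects. The following is a minimal sketch of that shared shape, assuming the TDengine mnode headers (mndTrans.h, sdb.h); mndWriteDefaultRaw is a hypothetical helper name used for illustration only, not code from this commit.

static int32_t mndWriteDefaultRaw(SMnode *pMnode, ETrnType type, SSdbRaw *pRaw) {
  // 1. open a transaction that carries nothing but a commit log
  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, type, NULL);
  if (pTrans == NULL) return -1;

  // 2. attach the encoded default row; it is written when the trans commits
  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
    mndTransDrop(pTrans);
    return -1;
  }
  sdbSetRawStatus(pRaw, SDB_STATUS_READY);

  // 3. hand the transaction to the executor, then release the local handle
  if (mndTransPrepare(pMnode, pTrans) != 0) {
    mndTransDrop(pTrans);
    return -1;
  }
  mndTransDrop(pTrans);
  return 0;
}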
@@ -16,6 +16,7 @@
 #define _DEFAULT_SOURCE
 #include "mndCluster.h"
 #include "mndShow.h"
+#include "mndTrans.h"

 #define CLUSTER_VER_NUMBE    1
 #define CLUSTER_RESERVE_SIZE 64

@@ -177,7 +178,32 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) {
   sdbSetRawStatus(pRaw, SDB_STATUS_READY);

   mDebug("cluster:%" PRId64 ", will be created while deploy sdb, raw:%p", clusterObj.id, pRaw);
+#if 0
   return sdbWrite(pMnode->pSdb, pRaw);
+#else
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_CLUSTER, NULL);
+  if (pTrans == NULL) {
+    mError("cluster:%" PRId64 ", failed to create since %s", clusterObj.id, terrstr());
+    return -1;
+  }
+  mDebug("trans:%d, used to create cluster:%" PRId64, pTrans->id, clusterObj.id);
+
+  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+    mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  mndTransDrop(pTrans);
+  return 0;
+#endif
 }

 static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
@@ -58,14 +58,16 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
 static void    mndCancelGetNextDnode(SMnode *pMnode, void *pIter);

 int32_t mndInitDnode(SMnode *pMnode) {
-  SSdbTable table = {.sdbType = SDB_DNODE,
-                     .keyType = SDB_KEY_INT32,
-                     .deployFp = (SdbDeployFp)mndCreateDefaultDnode,
-                     .encodeFp = (SdbEncodeFp)mndDnodeActionEncode,
-                     .decodeFp = (SdbDecodeFp)mndDnodeActionDecode,
-                     .insertFp = (SdbInsertFp)mndDnodeActionInsert,
-                     .updateFp = (SdbUpdateFp)mndDnodeActionUpdate,
-                     .deleteFp = (SdbDeleteFp)mndDnodeActionDelete};
+  SSdbTable table = {
+      .sdbType = SDB_DNODE,
+      .keyType = SDB_KEY_INT32,
+      .deployFp = (SdbDeployFp)mndCreateDefaultDnode,
+      .encodeFp = (SdbEncodeFp)mndDnodeActionEncode,
+      .decodeFp = (SdbDecodeFp)mndDnodeActionDecode,
+      .insertFp = (SdbInsertFp)mndDnodeActionInsert,
+      .updateFp = (SdbUpdateFp)mndDnodeActionUpdate,
+      .deleteFp = (SdbDeleteFp)mndDnodeActionDelete,
+  };

   mndSetMsgHandle(pMnode, TDMT_MND_CREATE_DNODE, mndProcessCreateDnodeReq);
   mndSetMsgHandle(pMnode, TDMT_MND_DROP_DNODE, mndProcessDropDnodeReq);

@@ -90,13 +92,40 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) {
   dnodeObj.updateTime = dnodeObj.createdTime;
   dnodeObj.port = pMnode->replicas[0].port;
   memcpy(&dnodeObj.fqdn, pMnode->replicas[0].fqdn, TSDB_FQDN_LEN);
+  snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port);

   SSdbRaw *pRaw = mndDnodeActionEncode(&dnodeObj);
   if (pRaw == NULL) return -1;
   if (sdbSetRawStatus(pRaw, SDB_STATUS_READY) != 0) return -1;

   mDebug("dnode:%d, will be created while deploy sdb, raw:%p", dnodeObj.id, pRaw);
+
+#if 0
   return sdbWrite(pMnode->pSdb, pRaw);
+#else
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_DNODE, NULL);
+  if (pTrans == NULL) {
+    mError("dnode:%s, failed to create since %s", dnodeObj.ep, terrstr());
+    return -1;
+  }
+  mDebug("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
+
+  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+    mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  mndTransDrop(pTrans);
+  return 0;
+#endif
 }

 static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode) {

@@ -350,6 +379,15 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
     mndReleaseVgroup(pMnode, pVgroup);
   }

+  SMnodeObj *pObj = mndAcquireMnode(pMnode, pDnode->id);
+  if (pObj != NULL) {
+    if (pObj->state != statusReq.mload.syncState) {
+      pObj->state = statusReq.mload.syncState;
+      pObj->stateStartTime = taosGetTimestampMs();
+    }
+    mndReleaseMnode(pMnode, pObj);
+  }
+
   int64_t curMs = taosGetTimestampMs();
   bool    online = mndIsDnodeOnline(pMnode, pDnode, curMs);
   bool    dnodeChanged = (statusReq.dnodeVer != sdbGetTableVer(pMnode->pSdb, SDB_DNODE));

@@ -701,7 +739,7 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
     colDataAppend(pColInfo, numOfRows, (const char *)&pDnode->id, false);

     char buf[tListLen(pDnode->ep) + VARSTR_HEADER_SIZE] = {0};
     STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);

     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, buf, false);
@@ -31,6 +31,7 @@ static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj);
 static int32_t mndMnodeActionDelete(SSdb *pSdb, SMnodeObj *pObj);
 static int32_t mndMnodeActionUpdate(SSdb *pSdb, SMnodeObj *pOld, SMnodeObj *pNew);
 static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq);
+static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq);
 static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq);
 static int32_t mndProcessCreateMnodeRsp(SRpcMsg *pRsp);
 static int32_t mndProcessAlterMnodeRsp(SRpcMsg *pRsp);

@@ -51,6 +52,7 @@ int32_t mndInitMnode(SMnode *pMnode) {
   };

   mndSetMsgHandle(pMnode, TDMT_MND_CREATE_MNODE, mndProcessCreateMnodeReq);
+  mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE, mndProcessAlterMnodeReq);
   mndSetMsgHandle(pMnode, TDMT_MND_DROP_MNODE, mndProcessDropMnodeReq);
   mndSetMsgHandle(pMnode, TDMT_DND_CREATE_MNODE_RSP, mndProcessCreateMnodeRsp);
   mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE_RSP, mndProcessAlterMnodeRsp);

@@ -77,28 +79,6 @@ void mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj) {
   sdbRelease(pMnode->pSdb, pObj);
 }

-void mndUpdateMnodeRole(SMnode *pMnode) {
-  SSdb *pSdb = pMnode->pSdb;
-  void *pIter = NULL;
-  while (1) {
-    SMnodeObj *pObj = NULL;
-    pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
-    if (pIter == NULL) break;
-
-    ESyncState lastRole = pObj->role;
-    if (pObj->id == 1) {
-      pObj->role = TAOS_SYNC_STATE_LEADER;
-    } else {
-      pObj->role = TAOS_SYNC_STATE_CANDIDATE;
-    }
-    if (pObj->role != lastRole) {
-      pObj->roleTime = taosGetTimestampMs();
-    }
-
-    sdbRelease(pSdb, pObj);
-  }
-}
-
 static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
   SMnodeObj mnodeObj = {0};
   mnodeObj.id = 1;

@@ -110,7 +90,33 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
   sdbSetRawStatus(pRaw, SDB_STATUS_READY);

   mDebug("mnode:%d, will be created while deploy sdb, raw:%p", mnodeObj.id, pRaw);
+
+#if 0
   return sdbWrite(pMnode->pSdb, pRaw);
+#else
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_DNODE, NULL);
+  if (pTrans == NULL) {
+    mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr());
+    return -1;
+  }
+  mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
+
+  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+    mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  mndTransDrop(pTrans);
+  return 0;
+#endif
 }

 static SSdbRaw *mndMnodeActionEncode(SMnodeObj *pObj) {

@@ -183,7 +189,7 @@ static int32_t mndMnodeActionInsert(SSdb *pSdb, SMnodeObj *pObj) {
     return -1;
   }

-  pObj->role = TAOS_SYNC_STATE_FOLLOWER;
+  pObj->state = TAOS_SYNC_STATE_ERROR;
   return 0;
 }

@@ -227,7 +233,7 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
     if (pObj->pDnode == NULL) {
       mError("mnode:%d, no corresponding dnode exists", pObj->id);
     } else {
-      if (pObj->role == TAOS_SYNC_STATE_LEADER) {
+      if (pObj->state == TAOS_SYNC_STATE_LEADER) {
         pEpSet->inUse = pEpSet->numOfEps;
       }
       addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);

@@ -555,7 +561,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
     goto _OVER;
   }

-  if (pMnode->selfId == dropReq.dnodeId) {
+  if (pMnode->selfDnodeId == dropReq.dnodeId) {
     terrno = TSDB_CODE_MND_CANT_DROP_MASTER;
     goto _OVER;
   }

@@ -626,16 +632,18 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, b1, false);

-    const char *roles = syncStr(pObj->role);
-    char       *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE);
+    const char *roles = NULL;
+    if (pObj->id == pMnode->selfDnodeId) {
+      roles = syncStr(TAOS_SYNC_STATE_LEADER);
+    } else {
+      roles = syncStr(pObj->state);
+    }
+    char *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE);
     STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->pMeta->pSchemas[cols].bytes);

     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, (const char *)b2, false);

-    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
-    colDataAppend(pColInfo, numOfRows, (const char *)&pObj->roleTime, false);
-
     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, (const char *)&pObj->createdTime, false);

@@ -652,3 +660,52 @@ static void mndCancelGetNextMnode(SMnode *pMnode, void *pIter) {
   SSdb *pSdb = pMnode->pSdb;
   sdbCancelFetch(pSdb, pIter);
 }
+
+static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
+  SMnode         *pMnode = pReq->info.node;
+  SDAlterMnodeReq alterReq = {0};
+
+  if (tDeserializeSDCreateMnodeReq(pReq->pCont, pReq->contLen, &alterReq) != 0) {
+    terrno = TSDB_CODE_INVALID_MSG;
+    return -1;
+  }
+
+  if (alterReq.dnodeId != pMnode->selfDnodeId) {
+    terrno = TSDB_CODE_INVALID_OPTION;
+    mError("failed to alter mnode since %s, input:%d cur:%d", terrstr(), alterReq.dnodeId, pMnode->selfDnodeId);
+    return -1;
+  }
+
+  SSyncCfg cfg = {.replicaNum = alterReq.replica, .myIndex = -1};
+  for (int32_t i = 0; i < alterReq.replica; ++i) {
+    SNodeInfo *pNode = &cfg.nodeInfo[i];
+    tstrncpy(pNode->nodeFqdn, alterReq.replicas[i].fqdn, sizeof(pNode->nodeFqdn));
+    pNode->nodePort = alterReq.replicas[i].port;
+    if (alterReq.replicas[i].id == pMnode->selfDnodeId) cfg.myIndex = i;
+  }
+
+  if (cfg.myIndex == -1) {
+    mError("failed to alter mnode since myindex is -1");
+    return -1;
+  } else {
+    mInfo("start to alter mnode sync, replica:%d myindex:%d", cfg.replicaNum, cfg.myIndex);
+    for (int32_t i = 0; i < alterReq.replica; ++i) {
+      SNodeInfo *pNode = &cfg.nodeInfo[i];
+      mInfo("index:%d, fqdn:%s port:%d", i, pNode->nodeFqdn, pNode->nodePort);
+    }
+  }
+
+  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+  pMgmt->standby = 0;
+  int32_t code = syncReconfig(pMgmt->sync, &cfg);
+  if (code != 0) {
+    mError("failed to alter mnode sync since %s", terrstr());
+    return code;
+  } else {
+    pMgmt->errCode = 0;
+    tsem_wait(&pMgmt->syncSem);
+    mInfo("alter mnode sync result:%s", tstrerror(pMgmt->errCode));
+    terrno = pMgmt->errCode;
+    return pMgmt->errCode;
+  }
+}
@@ -49,29 +49,38 @@ int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) {

 void mndRestoreFinish(struct SSyncFSM *pFsm) {
   SMnode *pMnode = pFsm->data;
-  mndTransPullup(pMnode);
-  pMnode->syncMgmt.restored = true;
-}
-
-void *mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *snapshot, void *iter, char **ppBuf, int32_t *len) {
-  SMnode   *pMnode = pFsm->data;
-  SSdbIter *pIter = iter;
-
-  if (iter == NULL) {
-    pIter = sdbIterInit(pMnode->pSdb);
+  if (!pMnode->deploy) {
+    mndTransPullup(pMnode);
+    pMnode->syncMgmt.restored = true;
   }
-
-  return sdbIterRead(pMnode->pSdb, pIter, ppBuf, len);
 }

-int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* snapshot, char* pBuf, int32_t len) {
+int32_t mndSnapshotRead(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf, int32_t* len) {
+  /*
+  SMnode *pMnode = pFsm->data;
+  SSdbIter *pIter;
+  if (iter == NULL) {
+    pIter = sdbIterInit(pMnode->sdb)
+  } else {
+    pIter = iter;
+  }
+  */
+  return 0;
+}
+
+int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len) {
   SMnode *pMnode = pFsm->data;
   sdbWrite(pMnode->pSdb, (SSdbRaw*)pBuf);
   return 0;
 }

-void mndReConfig(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
+void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
+  mInfo("mndReConfig cbMeta.code:%d, cbMeta.currentTerm:%" PRId64 ", cbMeta.term:%" PRId64 ", cbMeta.index:%" PRId64,
+        cbMeta.code, cbMeta.currentTerm, cbMeta.term, cbMeta.index);
+  SMnode *pMnode = pFsm->data;
+  pMnode->syncMgmt.errCode = cbMeta.code;
+  tsem_post(&pMnode->syncMgmt.syncSem);
 }

 SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {

@@ -194,22 +203,6 @@ void mndSyncStop(SMnode *pMnode) {}

 bool mndIsMaster(SMnode *pMnode) {
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
-  pMgmt->state = syncGetMyRole(pMgmt->sync);
-  return (pMgmt->state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored);
+  ESyncState state = syncGetMyRole(pMgmt->sync);
+  return (state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored);
 }
-
-int32_t mndAlter(SMnode *pMnode, const SMnodeOpt *pOption) {
-  SSyncCfg cfg = {.replicaNum = pOption->replica, .myIndex = pOption->selfIndex};
-  mInfo("start to alter mnode sync, replica:%d myindex:%d standby:%d", cfg.replicaNum, cfg.myIndex, pOption->standby);
-  for (int32_t i = 0; i < pOption->replica; ++i) {
-    SNodeInfo *pNode = &cfg.nodeInfo[i];
-    tstrncpy(pNode->nodeFqdn, pOption->replicas[i].fqdn, sizeof(pNode->nodeFqdn));
-    pNode->nodePort = pOption->replicas[i].port;
-    mInfo("index:%d, fqdn:%s port:%d", i, pNode->nodeFqdn, pNode->nodePort);
-  }
-
-  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
-  pMgmt->standby = pOption->standby;
-  return syncReconfig(pMgmt->sync, &cfg);
-}
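The alter-mnode path above relies on a semaphore handshake: the request thread calls syncReconfig() and blocks on syncMgmt.syncSem, and the sync FSM's mndReConfig callback stores the result code and posts the semaphore. Below is a self-contained sketch of that handshake using plain POSIX primitives in place of the TDengine wrappers (tsem_*, SSyncMgmt); it is an illustration, not code from this commit.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

typedef struct {
  sem_t syncSem;  // signalled by the reconfig callback
  int   errCode;  // result propagated back to the waiting request thread
} SyncMgmt;

static SyncMgmt mgmt;

// plays the role of mndReConfig(): invoked by the consensus layer when done
static void reconfig_cb(int code) {
  mgmt.errCode = code;
  sem_post(&mgmt.syncSem);
}

static void *consensus_thread(void *arg) {
  (void)arg;
  reconfig_cb(0);  // pretend the new membership was applied successfully
  return NULL;
}

int main(void) {
  sem_init(&mgmt.syncSem, 0, 0);

  pthread_t tid;
  pthread_create(&tid, NULL, consensus_thread, NULL);  // stands in for syncReconfig()

  sem_wait(&mgmt.syncSem);  // the request thread blocks here
  printf("alter mnode sync result:%d\n", mgmt.errCode);

  pthread_join(tid, NULL);
  sem_destroy(&mgmt.syncSem);
  return 0;
}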
@@ -563,7 +563,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnType type, const S
   pTrans->policy = policy;
   pTrans->type = type;
   pTrans->createdTime = taosGetTimestampMs();
-  pTrans->rpcInfo = pReq->info;
+  if (pReq != NULL) pTrans->rpcInfo = pReq->info;
   pTrans->redoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *));
   pTrans->undoLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *));
   pTrans->commitLogs = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(void *));

@@ -1080,7 +1080,7 @@ static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans) {
 }

 static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
-  if (!mndIsMaster(pMnode)) return false;
+  if (!pMnode->deploy && !mndIsMaster(pMnode)) return false;

   bool    continueExec = true;
   int32_t code = mndTransExecuteRedoActions(pMnode, pTrans);

@@ -1171,7 +1171,7 @@ static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans) {
 }

 static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
-  if (!mndIsMaster(pMnode)) return false;
+  if (!pMnode->deploy && !mndIsMaster(pMnode)) return false;

   bool    continueExec = true;
   int32_t code = mndTransExecuteUndoActions(pMnode, pTrans);
@@ -78,7 +78,33 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char
   sdbSetRawStatus(pRaw, SDB_STATUS_READY);

   mDebug("user:%s, will be created while deploy sdb, raw:%p", userObj.user, pRaw);
+
+#if 0
   return sdbWrite(pMnode->pSdb, pRaw);
+#else
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_TYPE_CREATE_USER, NULL);
+  if (pTrans == NULL) {
+    mError("user:%s, failed to create since %s", userObj.user, terrstr());
+    return -1;
+  }
+  mDebug("trans:%d, used to create user:%s", pTrans->id, userObj.user);
+
+  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
+    mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pRaw, SDB_STATUS_READY);
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  mndTransDrop(pTrans);
+  return 0;
+#endif
 }

 static int32_t mndCreateDefaultUsers(SMnode *pMnode) {
@@ -153,8 +153,14 @@ static int32_t mndInitSdb(SMnode *pMnode) {
   return 0;
 }

-static int32_t mndDeploySdb(SMnode *pMnode) { return sdbDeploy(pMnode->pSdb); }
-static int32_t mndReadSdb(SMnode *pMnode) { return sdbReadFile(pMnode->pSdb); }
+static int32_t mndOpenSdb(SMnode *pMnode) {
+  if (!pMnode->deploy) {
+    return sdbReadFile(pMnode->pSdb);
+  } else {
+    // return sdbDeploy(pMnode->pSdb);;
+    return 0;
+  }
+}

 static void mndCleanupSdb(SMnode *pMnode) {
   if (pMnode->pSdb) {

@@ -176,7 +182,7 @@ static int32_t mndAllocStep(SMnode *pMnode, char *name, MndInitFp initFp, MndCle
   return 0;
 }

-static int32_t mndInitSteps(SMnode *pMnode, bool deploy) {
+static int32_t mndInitSteps(SMnode *pMnode) {
   if (mndAllocStep(pMnode, "mnode-sdb", mndInitSdb, mndCleanupSdb) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-trans", mndInitTrans, mndCleanupTrans) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-cluster", mndInitCluster, mndCleanupCluster) != 0) return -1;

@@ -201,11 +207,7 @@ static int32_t mndInitSteps(SMnode *pMnode, bool deploy) {
   if (mndAllocStep(pMnode, "mnode-perfs", mndInitPerfs, mndCleanupPerfs) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-db", mndInitDb, mndCleanupDb) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-func", mndInitFunc, mndCleanupFunc) != 0) return -1;
-  if (deploy) {
-    if (mndAllocStep(pMnode, "mnode-sdb-deploy", mndDeploySdb, NULL) != 0) return -1;
-  } else {
-    if (mndAllocStep(pMnode, "mnode-sdb-read", mndReadSdb, NULL) != 0) return -1;
-  }
+  if (mndAllocStep(pMnode, "mnode-sdb", mndOpenSdb, NULL) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-profile", mndInitProfile, mndCleanupProfile) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-show", mndInitShow, mndCleanupShow) != 0) return -1;
   if (mndAllocStep(pMnode, "mnode-query", mndInitQuery, mndCleanupQuery) != 0) return -1;

@@ -262,7 +264,7 @@ static void mndSetOptions(SMnode *pMnode, const SMnodeOpt *pOption) {
   pMnode->selfIndex = pOption->selfIndex;
   memcpy(&pMnode->replicas, pOption->replicas, sizeof(SReplica) * TSDB_MAX_REPLICA);
   pMnode->msgCb = pOption->msgCb;
-  pMnode->selfId = pOption->replicas[pOption->selfIndex].id;
+  pMnode->selfDnodeId = pOption->dnodeId;
   pMnode->syncMgmt.standby = pOption->standby;
 }

@@ -280,6 +282,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
   (void)taosParseTime(timestr, &pMnode->checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
   mndSetOptions(pMnode, pOption);

+  pMnode->deploy = pOption->deploy;
   pMnode->pSteps = taosArrayInit(24, sizeof(SMnodeStep));
   if (pMnode->pSteps == NULL) {
     taosMemoryFree(pMnode);

@@ -297,7 +300,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
     return NULL;
   }

-  code = mndInitSteps(pMnode, pOption->deploy);
+  code = mndInitSteps(pMnode);
   if (code != 0) {
     code = terrno;
     mError("failed to open mnode since %s", terrstr());

@@ -315,7 +318,6 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
     return NULL;
   }

-  mndUpdateMnodeRole(pMnode);
   mDebug("mnode open successfully ");
   return pMnode;
 }

@@ -332,6 +334,10 @@ void mndClose(SMnode *pMnode) {

 int32_t mndStart(SMnode *pMnode) {
   mndSyncStart(pMnode);
+  if (pMnode->deploy) {
+    if (sdbDeploy(pMnode->pSdb) != 0) return -1;
+    pMnode->syncMgmt.restored = true;
+  }
   return mndInitTimer(pMnode);
 }

@@ -408,8 +414,7 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) {
   mTrace("msg:%p, will be processed, type:%s app:%p", pMsg, TMSG_INFO(pMsg->msgType), ahandle);

   if (IsReq(pMsg)) {
-    if (!mndIsMaster(pMnode) && pMsg->msgType != TDMT_MND_TRANS_TIMER && pMsg->msgType != TDMT_MND_MQ_TIMER &&
-        pMsg->msgType != TDMT_MND_TELEM_TIMER) {
+    if (!mndIsMaster(pMnode)) {
       terrno = TSDB_CODE_APP_NOT_READY;
       mDebug("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
       return -1;

@@ -513,15 +518,17 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
     SMonMnodeDesc desc = {0};
     desc.mnode_id = pObj->id;
     tstrncpy(desc.mnode_ep, pObj->pDnode->ep, sizeof(desc.mnode_ep));
-    tstrncpy(desc.role, syncStr(pObj->role), sizeof(desc.role));
-    taosArrayPush(pClusterInfo->mnodes, &desc);
-    sdbRelease(pSdb, pObj);

-    if (pObj->role == TAOS_SYNC_STATE_LEADER) {
+    if (pObj->id == pMnode->selfDnodeId) {
       pClusterInfo->first_ep_dnode_id = pObj->id;
       tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep));
-      pClusterInfo->master_uptime = (ms - pObj->roleTime) / (86400000.0f);
+      pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
+      tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role));
+    } else {
+      tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role));
     }
+    taosArrayPush(pClusterInfo->mnodes, &desc);
+    sdbRelease(pSdb, pObj);
   }

   // vgroup info

@@ -574,6 +581,6 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
 }

 int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) {
-  pLoad->syncState = pMnode->syncMgmt.state;
+  pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync);
   return 0;
 }
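The deploy flag threads through the hunks above: mndOpen records it, mndOpenSdb only reads the existing sdb file when not deploying, and mndStart bootstraps the default objects and marks the node restored on a fresh deploy. A self-contained sketch of that boot decision follows, with stand-in functions; the names mirror the diff but this is an illustration under those assumptions, not the real code.

#include <stdbool.h>
#include <stdio.h>

static int sdb_read_file(void) { puts("read existing sdb file"); return 0; }
static int sdb_deploy(void)    { puts("write default acct/cluster/dnode/mnode/user"); return 0; }

typedef struct { bool deploy; bool restored; } Mnode;

static int mnode_open_sdb(Mnode *m) {  // mirrors mndOpenSdb(): read only when not deploying
  return m->deploy ? 0 : sdb_read_file();
}

static int mnode_start(Mnode *m) {     // mirrors mndStart(): bootstrap on first deploy
  if (m->deploy) {
    if (sdb_deploy() != 0) return -1;
    m->restored = true;
  }
  return 0;
}

int main(void) {
  Mnode m = {.deploy = true, .restored = false};
  if (mnode_open_sdb(&m) != 0 || mnode_start(&m) != 0) return 1;
  printf("restored:%d\n", m.restored);
  return 0;
}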
@@ -2,8 +2,7 @@ aux_source_directory(src MNODE_SRC)
 add_library(sdb STATIC ${MNODE_SRC})
 target_include_directories(
     sdb
-    PUBLIC "${TD_SOURCE_DIR}/include/dnode/mnode/sdb"
-    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
+    PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/inc"
 )
 target_link_libraries(
     sdb os common util wal
@@ -27,6 +27,15 @@
 extern "C" {
 #endif

+// clang-format off
+#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
+#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
+#define mWarn(...)  { if (mDebugFlag & DEBUG_WARN)  { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
+#define mInfo(...)  { if (mDebugFlag & DEBUG_INFO)  { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }}
+#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
+#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
+// clang-format on
+
 #define SDB_GET_VAL(pData, dataPos, val, pos, func, type) \
   { \
     if (func(pRaw, dataPos, val) != 0) { \

@@ -65,7 +74,7 @@ extern "C" {
 #define SDB_SET_INT64(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt64, int64_t)
 #define SDB_SET_INT32(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt32, int32_t)
 #define SDB_SET_INT16(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt16, int16_t)
 #define SDB_SET_INT8(pRaw, dataPos, val, pos) SDB_SET_VAL(pRaw, dataPos, val, pos, sdbSetRawInt8, int8_t)

 #define SDB_SET_BINARY(pRaw, dataPos, val, valLen, pos) \
   { \

@@ -89,8 +98,16 @@ extern "C" {
 }

 typedef struct SMnode  SMnode;
+typedef struct SSdb    SSdb;
 typedef struct SSdbRaw SSdbRaw;
 typedef struct SSdbRow SSdbRow;
+typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj);
+typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj);
+typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc);
+typedef int32_t (*SdbDeployFp)(SMnode *pMnode);
+typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw);
+typedef SSdbRaw *(*SdbEncodeFp)(void *pObj);
+typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3);

 typedef enum {
   SDB_KEY_BINARY = 1,

@@ -130,14 +147,47 @@ typedef enum {
   SDB_MAX = 20
 } ESdbType;

-typedef struct SSdb SSdb;
-typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj);
-typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj);
-typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc);
-typedef int32_t (*SdbDeployFp)(SMnode *pMnode);
-typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw);
-typedef SSdbRaw *(*SdbEncodeFp)(void *pObj);
-typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3);
+typedef struct SSdbRaw {
+  int8_t  type;
+  int8_t  status;
+  int8_t  sver;
+  int8_t  reserved;
+  int32_t dataLen;
+  char    pData[];
+} SSdbRaw;
+
+typedef struct SSdbRow {
+  ESdbType   type;
+  ESdbStatus status;
+  int32_t    refCount;
+  char       pObj[];
+} SSdbRow;
+
+typedef struct SSdb {
+  SMnode         *pMnode;
+  char           *currDir;
+  char           *syncDir;
+  char           *tmpDir;
+  int64_t         lastCommitVer;
+  int64_t         curVer;
+  int64_t         curTerm;
+  int64_t         tableVer[SDB_MAX];
+  int64_t         maxId[SDB_MAX];
+  EKeyType        keyTypes[SDB_MAX];
+  SHashObj       *hashObjs[SDB_MAX];
+  TdThreadRwlock  locks[SDB_MAX];
+  SdbInsertFp     insertFps[SDB_MAX];
+  SdbUpdateFp     updateFps[SDB_MAX];
+  SdbDeleteFp     deleteFps[SDB_MAX];
+  SdbDeployFp     deployFps[SDB_MAX];
+  SdbEncodeFp     encodeFps[SDB_MAX];
+  SdbDecodeFp     decodeFps[SDB_MAX];
+} SSdb;
+
+typedef struct SSdbIter {
+  TdFilePtr file;
+  int64_t   readlen;
+} SSdbIter;

 typedef struct {
   ESdbType sdbType;

@@ -328,36 +378,14 @@ int32_t sdbGetRawTotalSize(SSdbRaw *pRaw);

 SSdbRow *sdbAllocRow(int32_t objSize);
 void    *sdbGetRowObj(SSdbRow *pRow);
-
-typedef struct SSdb {
-  SMnode         *pMnode;
-  char           *currDir;
-  char           *syncDir;
-  char           *tmpDir;
-  int64_t         lastCommitVer;
-  int64_t         curVer;
-  int64_t         curTerm;
-  int64_t         tableVer[SDB_MAX];
-  int64_t         maxId[SDB_MAX];
-  EKeyType        keyTypes[SDB_MAX];
-  SHashObj       *hashObjs[SDB_MAX];
-  TdThreadRwlock  locks[SDB_MAX];
-  SdbInsertFp     insertFps[SDB_MAX];
-  SdbUpdateFp     updateFps[SDB_MAX];
-  SdbDeleteFp     deleteFps[SDB_MAX];
-  SdbDeployFp     deployFps[SDB_MAX];
-  SdbEncodeFp     encodeFps[SDB_MAX];
-  SdbDecodeFp     decodeFps[SDB_MAX];
-} SSdb;
-
-typedef struct SSdbIter {
-  TdFilePtr file;
-  int64_t   readlen;
-} SSdbIter;
+void     sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);

 SSdbIter *sdbIterInit(SSdb *pSdb);
 SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *iter, char **ppBuf, int32_t *len);

+const char *sdbTableName(ESdbType type);
+void        sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);
+
 #ifdef __cplusplus
 }
 #endif
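The SSdbRaw struct moved into this header uses a flexible array member, so a raw record is one allocation of header plus payload; the sdbAllocRaw() body further down sizes it as dataLen + sizeof(SSdbRaw). A self-contained sketch of that layout in standard C, copying the struct for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct SSdbRaw {
  int8_t  type;
  int8_t  status;
  int8_t  sver;
  int8_t  reserved;
  int32_t dataLen;
  char    pData[];  // row payload follows the fixed header in the same block
} SSdbRaw;

static SSdbRaw *raw_alloc(int8_t type, int8_t sver, int32_t dataLen) {
  // same sizing as sdbAllocRaw(): header plus payload in one calloc
  SSdbRaw *pRaw = calloc(1, dataLen + sizeof(SSdbRaw));
  if (pRaw == NULL) return NULL;
  pRaw->type = type;
  pRaw->sver = sver;
  pRaw->dataLen = dataLen;
  return pRaw;
}

int main(void) {
  SSdbRaw *pRaw = raw_alloc(1, 1, 16);
  if (pRaw == NULL) return 1;
  memset(pRaw->pData, 0xab, pRaw->dataLen);  // encoded fields would be written here
  printf("header:%zu payload:%d total:%zu\n", sizeof(SSdbRaw), pRaw->dataLen,
         sizeof(SSdbRaw) + (size_t)pRaw->dataLen);
  free(pRaw);
  return 0;
}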
@@ -14,7 +14,7 @@
  */

 #define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"

 static int32_t sdbCreateDir(SSdb *pSdb);

@@ -14,7 +14,7 @@
  */

 #define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"
 #include "tchecksum.h"
 #include "wal.h"

@@ -14,7 +14,7 @@
  */

 #define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"

 static void sdbCheckRow(SSdb *pSdb, SSdbRow *pRow);

@@ -14,7 +14,7 @@
  */

 #define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"

 SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) {
   SSdbRaw *pRaw = taosMemoryCalloc(1, dataLen + sizeof(SSdbRaw));

@@ -14,7 +14,7 @@
  */

 #define _DEFAULT_SOURCE
-#include "sdbInt.h"
+#include "sdb.h"

 SSdbRow *sdbAllocRow(int32_t objSize) {
   SSdbRow *pRow = taosMemoryCalloc(1, objSize + sizeof(SSdbRow));
@@ -13,6 +13,8 @@ target_sources(
         "src/vnd/vnodeModule.c"
         "src/vnd/vnodeSvr.c"
         "src/vnd/vnodeSync.c"
+        "src/vnd/vnodeSnapshot.c"
+        "src/vnd/vnodeUtil.c"

         # meta
         "src/meta/metaOpen.c"

@@ -22,6 +24,7 @@ target_sources(
         "src/meta/metaQuery.c"
        "src/meta/metaCommit.c"
         "src/meta/metaEntry.c"
+        "src/meta/metaSnapshot.c"

         # sma
         "src/sma/sma.c"

@@ -44,6 +47,7 @@ target_sources(
         "src/tsdb/tsdbReadImpl.c"
         # "src/tsdb/tsdbSma.c"
         "src/tsdb/tsdbWrite.c"
+        "src/tsdb/tsdbSnapshot.c"

         # tq
         "src/tq/tq.c"
@@ -39,9 +39,10 @@ extern "C" {
 #endif

 // vnode
 typedef struct SVnode    SVnode;
 typedef struct STsdbCfg  STsdbCfg;  // todo: remove
 typedef struct SVnodeCfg SVnodeCfg;
+typedef struct SVSnapshotReader SVSnapshotReader;

 extern const SVnodeCfg vnodeCfgDefault;

@@ -59,13 +60,14 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
 int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
 int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad);
 int32_t vnodeValidateTableHash(SVnode *pVnode, char *tableFName);

 int32_t vnodeStart(SVnode *pVnode);
 void    vnodeStop(SVnode *pVnode);

 int64_t vnodeGetSyncHandle(SVnode *pVnode);
 void    vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot);
 void    vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
+int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever);
+int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader);
+int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData);

 // meta
 typedef struct SMeta SMeta;  // todo: remove
@@ -47,15 +47,17 @@
 extern "C" {
 #endif

 typedef struct SVnodeInfo   SVnodeInfo;
 typedef struct SMeta        SMeta;
 typedef struct SSma         SSma;
 typedef struct STsdb        STsdb;
 typedef struct STQ          STQ;
 typedef struct SVState      SVState;
 typedef struct SVBufPool    SVBufPool;
 typedef struct SQWorker     SQHandle;
 typedef struct STsdbKeepCfg STsdbKeepCfg;
+typedef struct SMetaSnapshotReader SMetaSnapshotReader;
+typedef struct STsdbSnapshotReader STsdbSnapshotReader;

 #define VNODE_META_DIR "meta"
 #define VNODE_TSDB_DIR "tsdb"

@@ -67,8 +69,10 @@ typedef struct STsdbKeepCfg STsdbKeepCfg;
 #define VNODE_RSMA2_DIR "rsma2"

 // vnd.h
 void* vnodeBufPoolMalloc(SVBufPool* pPool, int size);
 void  vnodeBufPoolFree(SVBufPool* pPool, void* p);
+int32_t vnodeRealloc(void** pp, int32_t size);
+void    vnodeFree(void* p);

 // meta
 typedef struct SMCtbCursor SMCtbCursor;

@@ -95,6 +99,9 @@ STSma* metaGetSmaInfoByIndex(SMeta* pMeta, int64_t indexUid);
 STSmaWrapper* metaGetSmaInfoByTable(SMeta* pMeta, tb_uid_t uid, bool deepCopy);
 SArray*       metaGetSmaIdsByTable(SMeta* pMeta, tb_uid_t uid);
 SArray*       metaGetSmaTbUids(SMeta* pMeta);
+int32_t       metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever);
+int32_t       metaSnapshotReaderClose(SMetaSnapshotReader* pReader);
+int32_t       metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData);

 int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
 int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);

@@ -112,6 +119,9 @@ tsdbReaderT* tsdbQueryTables(SVnode* pVnode, SQueryTableDataCond* pCond, STableG
 tsdbReaderT  tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableGroupInfo* groupList, uint64_t qId,
                                  void* pMemRef);
 int32_t tsdbGetTableGroupFromIdListT(STsdb* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo);
+int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever);
+int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader);
+int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData);

 // tq
 STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal);
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "meta.h"
+
+struct SMetaSnapshotReader {
+  SMeta*  pMeta;
+  TBC*    pTbc;
+  int64_t sver;
+  int64_t ever;
+};
+
+int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever) {
+  int32_t              code = 0;
+  int32_t              c = 0;
+  SMetaSnapshotReader* pMetaReader = NULL;
+
+  pMetaReader = (SMetaSnapshotReader*)taosMemoryCalloc(1, sizeof(*pMetaReader));
+  if (pMetaReader == NULL) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto _err;
+  }
+  pMetaReader->pMeta = pMeta;
+  pMetaReader->sver = sver;
+  pMetaReader->ever = ever;
+  code = tdbTbcOpen(pMeta->pTbDb, &pMetaReader->pTbc, NULL);
+  if (code) {
+    goto _err;
+  }
+
+  code = tdbTbcMoveTo(pMetaReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
+  if (code) {
+    goto _err;
+  }
+
+  *ppReader = pMetaReader;
+  return code;
+
+_err:
+  *ppReader = NULL;
+  return code;
+}
+
+int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader) {
+  if (pReader) {
+    tdbTbcClose(pReader->pTbc);
+    taosMemoryFree(pReader);
+  }
+  return 0;
+}
+
+int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nDatap) {
+  const void* pKey = NULL;
+  const void* pData = NULL;
+  int32_t     nKey = 0;
+  int32_t     nData = 0;
+  int32_t     code = 0;
+
+  for (;;) {
+    code = tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData);
+    if (code || ((STbDbKey*)pData)->version > pReader->ever) {
+      return TSDB_CODE_VND_READ_END;
+    }
+
+    if (((STbDbKey*)pData)->version < pReader->sver) {
+      continue;
+    }
+
+    break;
+  }
+
+  // copy the data
+  if (vnodeRealloc(ppData, nData) < 0) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    return code;
+  }
+
+  memcpy(*ppData, pData, nData);
+  *nDatap = nData;
+  return code;
+}
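Usage sketch for the new reader above, assuming only the meta/vnd headers from this commit (metaSnapshotReaderOpen/Read/Close, vnodeFree, TSDB_CODE_VND_READ_END): iterate every entry whose version lies in [sver, ever] until the reader reports end-of-range. pSendBlock and metaSendSnapshot are hypothetical names standing in for whatever transports the buffer to a follower; they are not part of the codebase.

static int32_t metaSendSnapshot(SMeta *pMeta, int64_t sver, int64_t ever,
                                int32_t (*pSendBlock)(void *pData, uint32_t nData)) {
  SMetaSnapshotReader *pReader = NULL;
  void                *pData = NULL;  // grown by vnodeRealloc inside metaSnapshotRead
  uint32_t             nData = 0;
  int32_t              code = 0;

  if (metaSnapshotReaderOpen(pMeta, &pReader, sver, ever) != 0) return -1;

  for (;;) {
    code = metaSnapshotRead(pReader, &pData, &nData);
    if (code == TSDB_CODE_VND_READ_END) {  // no more entries in the version range
      code = 0;
      break;
    }
    if (code != 0) break;                  // allocation or cursor error
    if (pSendBlock(pData, nData) != 0) {   // hand the raw entry to the caller
      code = -1;
      break;
    }
  }

  vnodeFree(pData);
  metaSnapshotReaderClose(pReader);
  return code;
}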
@@ -23,6 +23,7 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME);
 static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME);
 static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME);
 static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry);
+static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type);

 int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
   SMetaEntry  me = {0};

@@ -71,64 +72,71 @@ _err:
 }

 int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
-  TBC        *pNameIdxc = NULL;
-  TBC        *pUidIdxc = NULL;
-  TBC        *pCtbIdxc = NULL;
-  SCtbIdxKey *pCtbIdxKey;
-  const void *pKey = NULL;
-  int         nKey;
-  const void *pData = NULL;
-  int         nData;
-  int         c, ret;
+  void *pKey = NULL;
+  int   nKey = 0;
+  void *pData = NULL;
+  int   nData = 0;
+  int   c = 0;
+  int   rc = 0;

-  // prepare uid idx cursor
-  tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
-  ret = tdbTbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c);
-  if (ret < 0 || c != 0) {
-    terrno = TSDB_CODE_VND_TB_NOT_EXIST;
-    tdbTbcClose(pUidIdxc);
-    goto _err;
+  // check if super table exists
+  rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
+  if (rc < 0 || *(tb_uid_t *)pData != pReq->suid) {
+    terrno = TSDB_CODE_VND_TABLE_NOT_EXIST;
+    return -1;
   }

-  // prepare name idx cursor
-  tdbTbcOpen(pMeta->pNameIdx, &pNameIdxc, &pMeta->txn);
-  ret = tdbTbcMoveTo(pNameIdxc, pReq->name, strlen(pReq->name) + 1, &c);
-  if (ret < 0 || c != 0) {
-    ASSERT(0);
-  }
-
-  tdbTbcDelete(pUidIdxc);
-  tdbTbcDelete(pNameIdxc);
-  tdbTbcClose(pUidIdxc);
-  tdbTbcClose(pNameIdxc);
-
-  // loop to drop each child table
+  // drop all child tables
+  TBC    *pCtbIdxc = NULL;
+  SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t));
+
   tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
-  ret = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c);
-  if (ret < 0 || (c < 0 && tdbTbcMoveToNext(pCtbIdxc) < 0)) {
+  rc = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = pReq->suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c);
+  if (rc < 0) {
     tdbTbcClose(pCtbIdxc);
-    goto _exit;
+    metaWLock(pMeta);
+    goto _drop_super_table;
   }

   for (;;) {
-    tdbTbcGet(pCtbIdxc, &pKey, &nKey, NULL, NULL);
-    pCtbIdxKey = (SCtbIdxKey *)pKey;
-
-    if (pCtbIdxKey->suid > pReq->suid) break;
-
-    // drop the child table (TODO)
-
-    if (tdbTbcMoveToNext(pCtbIdxc) < 0) break;
+    rc = tdbTbcNext(pCtbIdxc, &pKey, &nKey, NULL, NULL);
+    if (rc < 0) break;
+
+    if (((SCtbIdxKey *)pKey)->suid < pReq->suid) {
+      continue;
+    } else if (((SCtbIdxKey *)pKey)->suid > pReq->suid) {
+      break;
+    }
+
+    taosArrayPush(pArray, &(((SCtbIdxKey *)pKey)->uid));
   }
+
+  tdbTbcClose(pCtbIdxc);
+
+  metaWLock(pMeta);
for (int32_t iChild = 0; iChild < taosArrayGetSize(pArray); iChild++) {
|
||||||
|
tb_uid_t uid = *(tb_uid_t *)taosArrayGet(pArray, iChild);
|
||||||
|
metaDropTableByUid(pMeta, uid, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
taosArrayDestroy(pArray);
|
||||||
|
|
||||||
|
// drop super table
|
||||||
|
_drop_super_table:
|
||||||
|
tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData);
|
||||||
|
tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = *(int64_t *)pData, .uid = pReq->suid}, sizeof(STbDbKey),
|
||||||
|
&pMeta->txn);
|
||||||
|
tdbTbDelete(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pMeta->txn);
|
||||||
|
tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn);
|
||||||
|
|
||||||
|
metaULock(pMeta);
|
||||||
|
|
||||||
_exit:
|
_exit:
|
||||||
|
tdbFree(pKey);
|
||||||
|
tdbFree(pData);
|
||||||
metaDebug("vgId:%d super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
|
metaDebug("vgId:%d super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
_err:
|
|
||||||
metaError("vgId:%d failed to drop super table %s uid:%" PRId64 " since %s", TD_VID(pMeta->pVnode), pReq->name,
|
|
||||||
pReq->suid, tstrerror(terrno));
|
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
|
int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
|
||||||
|
@ -256,122 +264,63 @@ _err:
|
||||||
}
|
}
|
||||||
|
|
||||||
int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
|
int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
|
||||||
TBC *pTbDbc = NULL;
|
void *pData = NULL;
|
||||||
TBC *pUidIdxc = NULL;
|
int nData = 0;
|
||||||
TBC *pNameIdxc = NULL;
|
int rc = 0;
|
||||||
const void *pData;
|
tb_uid_t uid;
|
||||||
int nData;
|
int type;
|
||||||
tb_uid_t uid;
|
|
||||||
int64_t tver;
|
|
||||||
SMetaEntry me = {0};
|
|
||||||
SDecoder coder = {0};
|
|
||||||
int8_t type;
|
|
||||||
int64_t ctime;
|
|
||||||
tb_uid_t suid;
|
|
||||||
int c = 0, ret;
|
|
||||||
|
|
||||||
// search & delete the name idx
|
rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
|
||||||
tdbTbcOpen(pMeta->pNameIdx, &pNameIdxc, &pMeta->txn);
|
if (rc < 0) {
|
||||||
ret = tdbTbcMoveTo(pNameIdxc, pReq->name, strlen(pReq->name) + 1, &c);
|
|
||||||
if (ret < 0 || !tdbTbcIsValid(pNameIdxc) || c) {
|
|
||||||
tdbTbcClose(pNameIdxc);
|
|
||||||
terrno = TSDB_CODE_VND_TABLE_NOT_EXIST;
|
terrno = TSDB_CODE_VND_TABLE_NOT_EXIST;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = tdbTbcGet(pNameIdxc, NULL, NULL, &pData, &nData);
|
|
||||||
if (ret < 0) {
|
|
||||||
ASSERT(0);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
uid = *(tb_uid_t *)pData;
|
uid = *(tb_uid_t *)pData;
|
||||||
|
|
||||||
tdbTbcDelete(pNameIdxc);
|
metaWLock(pMeta);
|
||||||
tdbTbcClose(pNameIdxc);
|
metaDropTableByUid(pMeta, uid, &type);
|
||||||
|
metaULock(pMeta);
|
||||||
|
|
||||||
// search & delete uid idx
|
if (type == TSDB_CHILD_TABLE && tbUids) {
|
||||||
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
|
taosArrayPush(tbUids, &uid);
|
||||||
ret = tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
|
|
||||||
if (ret < 0 || c != 0) {
|
|
||||||
ASSERT(0);
|
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
|
tdbFree(pData);
|
||||||
if (ret < 0) {
|
return 0;
|
||||||
ASSERT(0);
|
}
|
||||||
return -1;
|
|
||||||
|
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
|
||||||
|
void *pData = NULL;
|
||||||
|
int nData = 0;
|
||||||
|
int rc = 0;
|
||||||
|
int64_t version;
|
||||||
|
SMetaEntry e = {0};
|
||||||
|
SDecoder dc = {0};
|
||||||
|
|
||||||
|
rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
|
||||||
|
version = *(int64_t *)pData;
|
||||||
|
|
||||||
|
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
|
||||||
|
|
||||||
|
tDecoderInit(&dc, pData, nData);
|
||||||
|
metaDecodeEntry(&dc, &e);
|
||||||
|
|
||||||
|
if (type) *type = e.type;
|
||||||
|
|
||||||
|
tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pMeta->txn);
|
||||||
|
tdbTbDelete(pMeta->pNameIdx, e.name, strlen(e.name) + 1, &pMeta->txn);
|
||||||
|
tdbTbDelete(pMeta->pUidIdx, &uid, sizeof(uid), &pMeta->txn);
|
||||||
|
if (e.type == TSDB_CHILD_TABLE) {
|
||||||
|
tdbTbDelete(pMeta->pCtbIdx, &(SCtbIdxKey){.suid = e.ctbEntry.suid, .uid = uid}, sizeof(SCtbIdxKey), &pMeta->txn);
|
||||||
|
} else if (e.type == TSDB_NORMAL_TABLE) {
|
||||||
|
// drop schema.db (todo)
|
||||||
|
// drop ttl.idx (todo)
|
||||||
|
} else if (e.type == TSDB_SUPER_TABLE) {
|
||||||
|
// drop schema.db (todo)
|
||||||
}
|
}
|
||||||
|
|
||||||
tver = *(int64_t *)pData;
|
tDecoderClear(&dc);
|
||||||
tdbTbcDelete(pUidIdxc);
|
tdbFree(pData);
|
||||||
tdbTbcClose(pUidIdxc);
|
|
||||||
|
|
||||||
// search and get meta entry
|
|
||||||
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn);
|
|
||||||
ret = tdbTbcMoveTo(pTbDbc, &(STbDbKey){.uid = uid, .version = tver}, sizeof(STbDbKey), &c);
|
|
||||||
if (ret < 0 || c != 0) {
|
|
||||||
ASSERT(0);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
|
|
||||||
if (ret < 0) {
|
|
||||||
ASSERT(0);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// decode entry
|
|
||||||
void *pDataCopy = taosMemoryMalloc(nData); // remove the copy (todo)
|
|
||||||
memcpy(pDataCopy, pData, nData);
|
|
||||||
tDecoderInit(&coder, pDataCopy, nData);
|
|
||||||
ret = metaDecodeEntry(&coder, &me);
|
|
||||||
if (ret < 0) {
|
|
||||||
ASSERT(0);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
type = me.type;
|
|
||||||
if (type == TSDB_CHILD_TABLE) {
|
|
||||||
ctime = me.ctbEntry.ctime;
|
|
||||||
suid = me.ctbEntry.suid;
|
|
||||||
taosArrayPush(tbUids, &me.uid);
|
|
||||||
} else if (type == TSDB_NORMAL_TABLE) {
|
|
||||||
ctime = me.ntbEntry.ctime;
|
|
||||||
suid = 0;
|
|
||||||
} else {
|
|
||||||
ASSERT(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
taosMemoryFree(pDataCopy);
|
|
||||||
tDecoderClear(&coder);
|
|
||||||
tdbTbcClose(pTbDbc);
|
|
||||||
|
|
||||||
if (type == TSDB_CHILD_TABLE) {
|
|
||||||
// remove the pCtbIdx
|
|
||||||
TBC *pCtbIdxc = NULL;
|
|
||||||
tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
|
|
||||||
|
|
||||||
ret = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = suid, .uid = uid}, sizeof(SCtbIdxKey), &c);
|
|
||||||
if (ret < 0 || c != 0) {
|
|
||||||
ASSERT(0);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
tdbTbcDelete(pCtbIdxc);
|
|
||||||
tdbTbcClose(pCtbIdxc);
|
|
||||||
|
|
||||||
// remove tags from pTagIdx (todo)
|
|
||||||
} else if (type == TSDB_NORMAL_TABLE) {
|
|
||||||
// remove from pSkmDb
|
|
||||||
} else {
|
|
||||||
ASSERT(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove from ttl (todo)
|
|
||||||
if (ctime > 0) {
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
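The drop path above walks the child-table index first, collecting matching uids into an array, and only then takes the write lock once to delete the collected entries. A self-contained sketch of that collect-then-delete idea in plain C (toy index and delete_row are illustrative, not TDengine API):

#include <stdint.h>
#include <stdio.h>

// Sketch: gather matching ids during an unlocked scan, then apply all
// deletions in one locked pass. The real code scans a tdb cursor and calls
// metaWLock()/metaDropTableByUid()/metaULock() instead.
static void delete_row(int64_t uid) { printf("drop uid %lld\n", (long long)uid); }

static void drop_children(const int64_t *index, int nPairs, int64_t suid) {
  int64_t victims[64];
  int     nVictims = 0;

  // phase 1: scan the (suid, uid) pairs without holding the write lock
  for (int i = 0; i < nPairs && nVictims < 64; i++) {
    if (index[2 * i] == suid) victims[nVictims++] = index[2 * i + 1];
  }

  // phase 2: apply all deletions together (the lock would be held here)
  for (int i = 0; i < nVictims; i++) delete_row(victims[i]);
}

int main(void) {
  int64_t index[] = {1, 100, 1, 101, 2, 200};  // (suid, uid) pairs
  drop_children(index, 3, 1);
  return 0;
}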
@ -608,14 +557,14 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
    // TODO : need to update tag index
  }
  ctbEntry.version = version;
  if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) {
    ctbEntry.ctbEntry.pTags = taosMemoryMalloc(pAlterTbReq->nTagVal);
    if (ctbEntry.ctbEntry.pTags == NULL) {
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      goto _err;
    }
    memcpy((void *)ctbEntry.ctbEntry.pTags, pAlterTbReq->pTagVal, pAlterTbReq->nTagVal);
  } else {
    SKVRowBuilder kvrb = {0};
    const SKVRow  pOldTag = (const SKVRow)ctbEntry.ctbEntry.pTags;
    SKVRow        pNewTag = NULL;

@ -649,7 +598,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA

  tDecoderClear(&dc1);
  tDecoderClear(&dc2);
  if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
  if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf);
  if (stbEntry.pBuf) tdbFree(stbEntry.pBuf);
  tdbTbcClose(pTbDbc);
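The JSON-tag branch above copies the raw tag value into a freshly allocated buffer owned by the entry, freeing it again on cleanup. A minimal standalone sketch of that own-the-copy pattern using libc only (the Entry type and entrySetTags are illustrative, not TDengine API):

#include <stdlib.h>
#include <string.h>

// Take ownership of a variable-length blob by copying it; mirrors the
// malloc + memcpy + free-on-cleanup flow in metaUpdateTableTagVal().
typedef struct {
  void  *pTags;
  size_t nTags;
} Entry;

static int entrySetTags(Entry *e, const void *src, size_t n) {
  void *copy = malloc(n);
  if (copy == NULL) return -1;   // caller maps this to TSDB_CODE_OUT_OF_MEMORY
  memcpy(copy, src, n);
  free(e->pTags);                // drop any previous value we owned
  e->pTags = copy;
  e->nTags = n;
  return 0;
}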
@ -51,6 +51,47 @@ int tqExecKeyCompare(const void* pKey1, int32_t kLen1, const void* pKey2, int32_
  return strcmp(pKey1, pKey2);
}

int32_t tqStoreExec(STQ* pTq, const char* key, const STqExec* pExec) {
  int32_t code;
  int32_t vlen;
  tEncodeSize(tEncodeSTqExec, pExec, vlen, code);
  ASSERT(code == 0);

  void* buf = taosMemoryCalloc(1, vlen);
  if (buf == NULL) {
    ASSERT(0);
  }

  SEncoder encoder;
  tEncoderInit(&encoder, buf, vlen);

  if (tEncodeSTqExec(&encoder, pExec) < 0) {
    ASSERT(0);
  }

  TXN txn;

  if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
    ASSERT(0);
  }

  if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
    ASSERT(0);
  }

  if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) {
    ASSERT(0);
  }

  if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
    ASSERT(0);
  }

  tEncoderClear(&encoder);
  taosMemoryFree(buf);
  return 0;
}

STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
  STQ* pTq = taosMemoryMalloc(sizeof(STQ));
  if (pTq == NULL) {

@ -96,8 +137,31 @@ STQ* tqOpen(const char* path, SVnode* pVnode, SWal* pWal) {
  int vLen;

  tdbTbcMoveToFirst(pCur);
  SDecoder decoder;
  while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
    STqExec exec;
    tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
    tDecodeSTqExec(&decoder, &exec);
    exec.pWalReader = walOpenReadHandle(pTq->pVnode->pWal);
    if (exec.subType == TOPIC_SUB_TYPE__TABLE) {
      for (int32_t i = 0; i < 5; i++) {
        exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);

        SReadHandle handle = {
            .reader = exec.pExecReader[i],
            .meta = pTq->pVnode->pMeta,
            .pMsgCb = &pTq->pVnode->msgCb,
        };
        exec.task[i] = qCreateStreamExecTaskInfo(exec.qmsg, &handle);
        ASSERT(exec.task[i]);
      }
    } else {
      for (int32_t i = 0; i < 5; i++) {
        exec.pExecReader[i] = tqInitSubmitMsgScanner(pTq->pVnode->pMeta);
      }
      exec.pDropTbUid = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
    }
    taosHashPut(pTq->execs, pKey, kLen, &exec, sizeof(STqExec));
  }

  if (tdbTxnClose(&txn) < 0) {

@ -604,7 +668,9 @@ int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
    ASSERT(0);
  }

  if (tdbTbDelete(pTq->pExecStore, pReq->subKey, (int)strlen(pReq->subKey), &txn) < 0) {
    /*ASSERT(0);*/
  }

  if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
    ASSERT(0);

@ -659,60 +725,21 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
    }
    taosHashPut(pTq->execs, req.subKey, strlen(req.subKey), pExec, sizeof(STqExec));

    if (tqStoreExec(pTq, req.subKey, pExec) < 0) {
      // TODO
    }
    return 0;
  } else {
    /*ASSERT(pExec->consumerId == req.oldConsumerId);*/
    // TODO handle qmsg and exec modification
    atomic_store_32(&pExec->epoch, -1);
    atomic_store_64(&pExec->consumerId, req.newConsumerId);
    atomic_add_fetch_32(&pExec->epoch, 1);

    if (tqStoreExec(pTq, req.subKey, pExec) < 0) {
      // TODO
    }
    return 0;
  }
}
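tqStoreExec serializes the exec object with a two-pass encoder: one pass to measure the encoded size, one pass to fill a buffer of exactly that size, and the blob is then written under a TDB transaction. A self-contained sketch of the measure-then-encode idea in plain C (toy record and encoder, not the TDengine SEncoder/tdb API):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

// One encode routine that either measures or writes, depending on whether a
// destination buffer is supplied; this mirrors how tEncodeSize() runs the
// encoder in "size only" mode before the real encode.
typedef struct { int32_t id; const char *name; } Rec;

static size_t encodeRec(const Rec *r, uint8_t *buf) {
  size_t n = 0, len = strlen(r->name);
  if (buf) memcpy(buf + n, &r->id, sizeof(r->id));
  n += sizeof(r->id);
  if (buf) memcpy(buf + n, &len, sizeof(len));
  n += sizeof(len);
  if (buf) memcpy(buf + n, r->name, len);
  n += len;
  return n;
}

int main(void) {
  Rec     r = {.id = 7, .name = "sub-key"};
  size_t  vlen = encodeRec(&r, NULL);   // pass 1: size only
  uint8_t *buf = calloc(1, vlen);
  if (buf == NULL) return 1;
  encodeRec(&r, buf);                   // pass 2: fill the buffer
  // ...the real code hands (buf, vlen) to tdbTbUpsert() inside a transaction...
  free(buf);
  return 0;
}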
@ -0,0 +1,36 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "tsdb.h"

struct STsdbSnapshotReader {
  STsdb* pTsdb;
  // TODO
};

int32_t tsdbSnapshotReaderOpen(STsdb* pTsdb, STsdbSnapshotReader** ppReader, int64_t sver, int64_t ever) {
  // TODO
  return 0;
}

int32_t tsdbSnapshotReaderClose(STsdbSnapshotReader* pReader) {
  // TODO
  return 0;
}

int32_t tsdbSnapshotRead(STsdbSnapshotReader* pReader, void** ppData, uint32_t* nData) {
  // TODO
  return 0;
}
@ -0,0 +1,109 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "vnodeInt.h"

struct SVSnapshotReader {
  SVnode              *pVnode;
  int64_t              sver;
  int64_t              ever;
  int8_t               isMetaEnd;
  int8_t               isTsdbEnd;
  SMetaSnapshotReader *pMetaReader;
  STsdbSnapshotReader *pTsdbReader;
  void                *pData;
  int32_t              nData;
};

int32_t vnodeSnapshotReaderOpen(SVnode *pVnode, SVSnapshotReader **ppReader, int64_t sver, int64_t ever) {
  SVSnapshotReader *pReader = NULL;

  pReader = (SVSnapshotReader *)taosMemoryCalloc(1, sizeof(*pReader));
  if (pReader == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    goto _err;
  }
  pReader->pVnode = pVnode;
  pReader->sver = sver;
  pReader->ever = ever;
  pReader->isMetaEnd = 0;
  pReader->isTsdbEnd = 0;

  if (metaSnapshotReaderOpen(pVnode->pMeta, &pReader->pMetaReader, sver, ever) < 0) {
    taosMemoryFree(pReader);
    goto _err;
  }

  if (tsdbSnapshotReaderOpen(pVnode->pTsdb, &pReader->pTsdbReader, sver, ever) < 0) {
    metaSnapshotReaderClose(pReader->pMetaReader);
    taosMemoryFree(pReader);
    goto _err;
  }

_exit:
  *ppReader = pReader;
  return 0;

_err:
  *ppReader = NULL;
  return -1;
}

int32_t vnodeSnapshotReaderClose(SVSnapshotReader *pReader) {
  if (pReader) {
    vnodeFree(pReader->pData);
    tsdbSnapshotReaderClose(pReader->pTsdbReader);
    metaSnapshotReaderClose(pReader->pMetaReader);
    taosMemoryFree(pReader);
  }
  return 0;
}

int32_t vnodeSnapshotRead(SVSnapshotReader *pReader, const void **ppData, uint32_t *nData) {
  int32_t code = 0;

  if (!pReader->isMetaEnd) {
    code = metaSnapshotRead(pReader->pMetaReader, &pReader->pData, &pReader->nData);
    if (code) {
      if (code == TSDB_CODE_VND_READ_END) {
        pReader->isMetaEnd = 1;
      } else {
        return code;
      }
    } else {
      *ppData = pReader->pData;
      *nData = pReader->nData;
      return code;
    }
  }

  if (!pReader->isTsdbEnd) {
    code = tsdbSnapshotRead(pReader->pTsdbReader, &pReader->pData, &pReader->nData);
    if (code) {
      if (code == TSDB_CODE_VND_READ_END) {
        pReader->isTsdbEnd = 1;
      } else {
        return code;
      }
    } else {
      *ppData = pReader->pData;
      *nData = pReader->nData;
      return code;
    }
  }

  code = TSDB_CODE_VND_READ_END;
  return code;
}
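vnodeSnapshotRead drains the meta reader first and only then switches to the tsdb reader, using TSDB_CODE_VND_READ_END as a per-stage end marker. A self-contained sketch of that staged-reader idea in plain C (toy stages, not the vnode API):

#include <stdio.h>

#define READ_END 1  /* stands in for TSDB_CODE_VND_READ_END */

// Two toy stages that each produce a few items and then report READ_END.
static int readStage(int *cursor, int limit, int *out) {
  if (*cursor >= limit) return READ_END;
  *out = (*cursor)++;
  return 0;
}

typedef struct { int metaCur, tsdbCur, metaEnd, tsdbEnd; } Snap;

static int snapRead(Snap *s, int *out) {
  if (!s->metaEnd) {
    int rc = readStage(&s->metaCur, 3, out);
    if (rc == 0) return 0;
    s->metaEnd = 1;          // stage exhausted, fall through to the next one
  }
  if (!s->tsdbEnd) {
    int rc = readStage(&s->tsdbCur, 2, out);
    if (rc == 0) return 0;
    s->tsdbEnd = 1;
  }
  return READ_END;           // both stages exhausted
}

int main(void) {
  Snap s = {0};
  int  v;
  while (snapRead(&s, &v) == 0) printf("got %d\n", v);
  return 0;
}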
@ -617,16 +617,18 @@ static int vnodeDebugPrintSingleSubmitMsg(SMeta *pMeta, SSubmitBlk *pBlock, SSub
  STSchema *pSchema = NULL;
  tb_uid_t  suid = 0;
  STSRow   *row = NULL;
  int32_t   rv = -1;

  tInitSubmitBlkIter(msgIter, pBlock, &blkIter);
  if (blkIter.row == NULL) return 0;
  if (!pSchema || (suid != msgIter->suid) || rv != TD_ROW_SVER(blkIter.row)) {
    if (pSchema) {
      taosMemoryFreeClear(pSchema);
    }
    pSchema = metaGetTbTSchema(pMeta, msgIter->suid, TD_ROW_SVER(blkIter.row));  // TODO: use the real schema
    if (pSchema) {
      suid = msgIter->suid;
      rv = TD_ROW_SVER(blkIter.row);
    }
  }
  if (!pSchema) {
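The block printer now re-fetches the table schema whenever either the super-table uid or the row's schema version changes, instead of caching on the uid alone. A self-contained sketch of caching keyed on two fields (plain C; fetchSchema stands in for metaGetTbTSchema):

#include <stdint.h>
#include <stdio.h>

// Toy stand-in for the schema lookup: "fetch" a schema for (suid, version).
static int fetchSchema(int64_t suid, int32_t sver) {
  printf("fetch schema suid=%lld sver=%d\n", (long long)suid, sver);
  return (int)(suid * 100 + sver);
}

int main(void) {
  int     schema = -1;
  int64_t cachedSuid = -1;
  int32_t cachedVer = -1;

  // (suid, row schema version) pairs as they might arrive block by block
  int64_t suids[] = {1, 1, 1, 2};
  int32_t vers[] = {1, 1, 2, 2};

  for (int i = 0; i < 4; i++) {
    // refresh only when either key component changed
    if (schema < 0 || cachedSuid != suids[i] || cachedVer != vers[i]) {
      schema = fetchSchema(suids[i], vers[i]);
      cachedSuid = suids[i];
      cachedVer = vers[i];
    }
    printf("row %d uses schema %d\n", i, schema);
  }
  return 0;
}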
@ -0,0 +1,45 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "vnd.h"

int32_t vnodeRealloc(void** pp, int32_t size) {
  uint8_t* p = NULL;
  int32_t  csize = 0;

  if (*pp) {
    p = (uint8_t*)(*pp) - sizeof(int32_t);
    csize = *(int32_t*)p;
  }

  if (csize >= size) {
    return 0;
  }

  p = (uint8_t*)taosMemoryRealloc(p, size);
  if (p == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  *(int32_t*)p = size;
  *pp = p + sizeof(int32_t);

  return 0;
}

void vnodeFree(void* p) {
  if (p) {
    taosMemoryFree(((uint8_t*)p) - sizeof(int32_t));
  }
}
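vnodeRealloc and vnodeFree keep a hidden int32_t capacity just before the pointer handed back to callers, so a later grow request can skip the realloc when the buffer is already big enough. A self-contained sketch of that length-prefixed buffer pattern using libc only (this variant explicitly reserves the header bytes; bufGrow/bufFree are illustrative names, not the vnode API):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

// The usable pointer sits just past an int32_t that records the capacity.
static int bufGrow(void **pp, int32_t size) {
  uint8_t *p = NULL;
  int32_t  cap = 0;

  if (*pp) {
    p = (uint8_t *)(*pp) - sizeof(int32_t);
    cap = *(int32_t *)p;
  }
  if (cap >= size) return 0;                       // already large enough

  p = realloc(p, (size_t)size + sizeof(int32_t));  // header + payload
  if (p == NULL) return -1;
  *(int32_t *)p = size;
  *pp = p + sizeof(int32_t);
  return 0;
}

static void bufFree(void *p) {
  if (p) free((uint8_t *)p - sizeof(int32_t));
}

int main(void) {
  void *buf = NULL;
  if (bufGrow(&buf, 16) == 0) memcpy(buf, "hello", 6);
  bufGrow(&buf, 8);   // no-op: capacity 16 already covers 8
  bufFree(buf);
  return 0;
}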
@ -16,6 +16,7 @@
#include "commandInt.h"
#include "plannodes.h"
#include "query.h"
#include "tcommon.h"

int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level);

@ -637,13 +638,48 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
        QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
        EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
      }

      SDataBlockDescNode* pDescNode = pSortNode->node.pOutputDataBlockDesc;
      EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, nodesGetOutputNumFromSlotList(pDescNode->pSlots));
      EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
      EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDescNode->totalRowSize);
      EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
      EXPLAIN_ROW_END();
      QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));

      if (EXPLAIN_MODE_ANALYZE == ctx->mode) {
        // sort key
        EXPLAIN_ROW_NEW(level, "Sort Key: ");
        if (pResNode->pExecInfo) {
          for (int32_t i = 0; i < LIST_LENGTH(pSortNode->pSortKeys); ++i) {
            SOrderByExprNode *ptn = nodesListGetNode(pSortNode->pSortKeys, i);
            EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr));
          }
        }

        EXPLAIN_ROW_END();
        QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));

        // sort method
        EXPLAIN_ROW_NEW(level, "Sort Method: ");

        int32_t           nodeNum = taosArrayGetSize(pResNode->pExecInfo);
        SExplainExecInfo *execInfo = taosArrayGet(pResNode->pExecInfo, 0);
        SSortExecInfo *   pExecInfo = (SSortExecInfo *)execInfo->verboseInfo;
        EXPLAIN_ROW_APPEND("%s", pExecInfo->sortMethod == SORT_QSORT_T ? "quicksort" : "merge sort");
        if (pExecInfo->sortBuffer > 1024 * 1024) {
          EXPLAIN_ROW_APPEND(" Buffers:%.2f Mb", pExecInfo->sortBuffer / (1024 * 1024.0));
        } else if (pExecInfo->sortBuffer > 1024) {
          EXPLAIN_ROW_APPEND(" Buffers:%.2f Kb", pExecInfo->sortBuffer / (1024.0));
        } else {
          EXPLAIN_ROW_APPEND(" Buffers:%d b", pExecInfo->sortBuffer);
        }

        EXPLAIN_ROW_APPEND(" loops:%d", pExecInfo->loops);
        EXPLAIN_ROW_END();
        QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
      }

      if (verbose) {
        EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
        EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,

@ -792,13 +828,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
        QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
        EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
      }
      EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->totalRowSize);
      EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
      EXPLAIN_ROW_END();
      QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
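The Sort Method row scales the sort buffer size into b/Kb/Mb before printing. A small standalone version of that formatting choice (plain C, illustrative only):

#include <stdio.h>

// Print a byte count the way the EXPLAIN ANALYZE output above does:
// Mb above 1 MiB, Kb above 1 KiB, plain bytes otherwise.
static void printBuffer(int bytes) {
  if (bytes > 1024 * 1024) {
    printf("Buffers:%.2f Mb\n", bytes / (1024 * 1024.0));
  } else if (bytes > 1024) {
    printf("Buffers:%.2f Kb\n", bytes / 1024.0);
  } else {
    printf("Buffers:%d b\n", bytes);
  }
}

int main(void) {
  printBuffer(512);        // Buffers:512 b
  printBuffer(8192);       // Buffers:8.00 Kb
  printBuffer(3 << 20);    // Buffers:3.00 Mb
  return 0;
}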
@ -622,18 +622,14 @@ typedef struct SSortedMergeOperatorInfo {
typedef struct SSortOperatorInfo {
  SOptrBasicInfo binfo;
  uint32_t       sortBufSize;    // max buffer size for in-memory sort
  SArray*        pSortInfo;
  SSortHandle*   pSortHandle;
  SArray*        pColMatchInfo;  // for index map from table scan output
  int32_t        bufPageSize;

  int64_t  startTs;      // sort start time
  uint64_t sortElapsed;  // sort elapsed time, time to flush to disk not included.
} SSortOperatorInfo;

typedef struct STagFilterOperatorInfo {
@ -137,6 +137,14 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colId);
 */
SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle);

/**
 * return the sort execution information.
 *
 * @param pHandle
 * @return
 */
SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);

#ifdef __cplusplus
}
#endif
@ -1747,8 +1747,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
  SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId,
                                            pTaskInfo, false, pSup);

  for (int32_t i = 0; i < numOfExprs; ++i) {
    struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset);
    cleanupResultRowEntry(pEntry);

@ -1756,7 +1755,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
    pCtx[i].scanFlag = stage;
  }

  initCtxOutputBuffer(pCtx, numOfExprs);
}

void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOfInputRows) {
@ -806,7 +806,7 @@ static SSDataBlock* getDataFromCatch(SStreamBlockScanInfo* pInfo) {
  SSDataBlock* pDB = createOneDataBlock(pInfo->pRes, false);
  blockDataFromBuf(pDB, buf);
  SSDataBlock* pSub = blockDataExtractBlock(pDB, pos->rowId, 1);
  blockDataMerge(pInfo->pRes, pSub);
  blockDataDestroy(pDB);
  blockDataDestroy(pSub);
}

@ -1046,8 +1046,9 @@ static void destroySysScanOperator(void* param, int32_t numOfOutput) {
  blockDataDestroy(pInfo->pRes);

  const char* name = tNameGetTableName(&pInfo->name);
  if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
    metaCloseTbCursor(pInfo->pCur);
    pInfo->pCur = NULL;
  }

  taosArrayDestroy(pInfo->scanCols);
@ -2,6 +2,9 @@
#include "executorimpl.h"

static SSDataBlock* doSort(SOperatorInfo* pOperator);
static int32_t      doOpenSortOperator(SOperatorInfo* pOperator);
static int32_t      getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);

static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);

SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,

@ -35,7 +38,7 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pR

  pOperator->pTaskInfo = pTaskInfo;
  pOperator->fpSet =
      createOperatorFpSet(doOpenSortOperator, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL, getExplainExecInfo);

  int32_t code = appendDownstream(pOperator, &downstream, 1);
  return pOperator;

@ -121,20 +124,17 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) {
  }
}

int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
  SSortOperatorInfo* pInfo = pOperator->info;
  SExecTaskInfo*     pTaskInfo = pOperator->pTaskInfo;

  if (OPTR_IS_OPENED(pOperator)) {
    return TSDB_CODE_SUCCESS;
  }

  pInfo->startTs = taosGetTimestampUs();

  // pInfo->binfo.pRes is not equalled to the input datablock.
  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_SINGLESOURCE_SORT,
                                             -1, -1, NULL, pTaskInfo->id.str);

@ -146,12 +146,39 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {

  int32_t code = tsortOpen(pInfo->pSortHandle);
  taosMemoryFreeClear(ps);

  if (code != TSDB_CODE_SUCCESS) {
    longjmp(pTaskInfo->env, terrno);
  }

  pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs)/1000.0;
  pOperator->status = OP_RES_TO_RETURN;

  OPTR_SET_OPENED(pOperator);
  return TSDB_CODE_SUCCESS;
}

SSDataBlock* doSort(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_EXEC_DONE) {
    return NULL;
  }

  SExecTaskInfo*     pTaskInfo = pOperator->pTaskInfo;
  SSortOperatorInfo* pInfo = pOperator->info;

  int32_t code = pOperator->fpSet._openFn(pOperator);
  if (code != TSDB_CODE_SUCCESS) {
    longjmp(pTaskInfo->env, code);
  }

  SSDataBlock* pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, pInfo->pColMatchInfo);

  if (pBlock != NULL) {
    pOperator->resultInfo.totalRows += pBlock->info.rows;
  } else {
    doSetOperatorCompleted(pOperator);
  }
  return pBlock;
}

void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {

@ -161,3 +188,15 @@ void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
  taosArrayDestroy(pInfo->pSortInfo);
  taosArrayDestroy(pInfo->pColMatchInfo);
}

int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
  ASSERT(pOptr != NULL);
  SSortExecInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortExecInfo));

  SSortOperatorInfo* pOperatorInfo = (SSortOperatorInfo*)pOptr->info;

  *pInfo = tsortGetSortExecInfo(pOperatorInfo->pSortHandle);
  *pOptrExplain = pInfo;
  *len = sizeof(SSortExecInfo);
  return TSDB_CODE_SUCCESS;
}
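The sort operator is now split into an open step (build and run the sort exactly once) and a next step (hand out sorted blocks), gated by an opened flag. A self-contained sketch of that two-phase operator pattern in plain C (toy operator, not the executor API):

#include <stdio.h>

// Toy operator: open() prepares the data exactly once, next() streams it out.
typedef struct {
  int opened;
  int pos;
  int data[5];
} Op;

static int opOpen(Op *op) {
  if (op->opened) return 0;              // OPTR_IS_OPENED equivalent
  for (int i = 0; i < 5; i++) op->data[i] = 5 - i;
  // ...the real operator runs tsortOpen() here and records openCost...
  op->opened = 1;                        // OPTR_SET_OPENED equivalent
  return 0;
}

static int opNext(Op *op, int *out) {
  if (opOpen(op) != 0) return -1;        // every next() call funnels through open()
  if (op->pos >= 5) return 1;            // done
  *out = op->data[op->pos++];
  return 0;
}

int main(void) {
  Op  op = {0};
  int v;
  while (opNext(&op, &v) == 0) printf("%d\n", v);
  return 0;
}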
@ -31,20 +31,16 @@ struct STupleHandle {
struct SSortHandle {
  int32_t type;

  int32_t        pageSize;
  int32_t        numOfPages;
  SDiskbasedBuf *pBuf;

  SArray *pSortInfo;
  SArray *pOrderedSource;

  int32_t  loops;
  uint64_t sortElapsed;
  int64_t  startTs;
  uint64_t totalElapsed;

  int32_t sourceId;

@ -53,13 +49,15 @@ struct SSortHandle {
  int32_t     numOfCompletedSources;
  bool        opened;
  const char *idStr;

  bool         inMemSort;
  bool         needAdjust;
  STupleHandle tupleHandle;

  void *param;
  void (*beforeFp)(SSDataBlock* pBlock, void* param);

  _sort_fetch_block_fn_t  fetchfp;
  _sort_merge_compar_fn_t comparFn;
  SMultiwayMergeTreeInfo *pMergeTree;
};

static int32_t msortComparFn(const void *pLeft, const void *pRight, void *param);

@ -80,7 +78,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, SArray* pIndexMap, int32_t
  pSortHandle->pageSize = pageSize;
  pSortHandle->numOfPages = numOfPages;
  pSortHandle->pSortInfo = pSortInfo;
  pSortHandle->loops = 0;

  if (pBlock != NULL) {
    pSortHandle->pDataBlock = createOneDataBlock(pBlock, false);

@ -415,6 +413,9 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
  int32_t numOfRows = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize);
  blockDataEnsureCapacity(pHandle->pDataBlock, numOfRows);

  // the initial pass + sortPass + final mergePass
  pHandle->loops = sortPass + 2;

  size_t numOfSorted = taosArrayGetSize(pHandle->pOrderedSource);
  for(int32_t t = 0; t < sortPass; ++t) {
    int64_t st = taosGetTimestampUs();

@ -502,12 +503,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
  return 0;
}

static int32_t createInitialSources(SSortHandle* pHandle) {
  size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;

  if (pHandle->type == SORT_SINGLESOURCE_SORT) {
    SSortSource* source = taosArrayGetP(pHandle->pOrderedSource, 0);
    taosArrayClear(pHandle->pOrderedSource);

    while (1) {
      SSDataBlock* pBlock = pHandle->fetchfp(source->param);
      if (pBlock == NULL) {

@ -524,6 +526,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
        } else {
          pHandle->pageSize = 4096;
        }

        // todo!!
        pHandle->numOfPages = 1024;
        sortBufSize = pHandle->numOfPages * pHandle->pageSize;

@ -535,7 +538,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
      }

      // todo relocate the columns
      int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock);
      if (code != 0) {
        return code;
      }

@ -569,6 +572,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
      pHandle->cmpParam.numOfSources = 1;
      pHandle->inMemSort = true;

      pHandle->loops = 1;
      pHandle->tupleHandle.rowIndex = -1;
      pHandle->tupleHandle.pBlock = pHandle->pDataBlock;
      return 0;

@ -592,7 +596,7 @@ int32_t tsortOpen(SSortHandle* pHandle) {

  pHandle->opened = true;

  int32_t code = createInitialSources(pHandle);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }

@ -692,3 +696,20 @@ void* tsortGetValue(STupleHandle* pVHandle, int32_t colIndex) {
  SColumnInfoData* pColInfo = TARRAY_GET_ELEM(pVHandle->pBlock->pDataBlock, colIndex);
  return colDataGetData(pColInfo, pVHandle->rowIndex);
}

SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle) {
  SSortExecInfo info = {0};

  info.sortBuffer = pHandle->pageSize * pHandle->numOfPages;
  info.sortMethod = pHandle->inMemSort? SORT_QSORT_T:SORT_SPILLED_MERGE_SORT_T;
  info.loops = pHandle->loops;

  if (pHandle->pBuf != NULL) {
    SDiskbasedBufStatis st = getDBufStatis(pHandle->pBuf);
    info.writeBytes = st.flushBytes;
    info.readBytes = st.loadBytes;
  }

  return info;
}
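The new loops counter records how many times the data set is walked: one loop for the in-memory case, and "initial pass + sortPass + final mergePass" for the spilled case, where the number of merge passes depends on how many sorted runs fit into one multiway merge. A standalone sketch of that accounting in plain C; the fan-in and run counts are made-up inputs, and the pass formula is a general external-merge-sort illustration rather than the exact upstream computation:

#include <stdio.h>

// Passes over the data for an external merge sort, in the spirit of the
// "initial pass + sortPass + final mergePass" comment in doInternalMergeSort().
static int countLoops(int numOfRuns, int fanIn) {
  if (numOfRuns <= 1) return 1;          // everything fit in memory: one pass
  int sortPass = 0;
  while (numOfRuns > fanIn) {            // each pass merges fanIn runs into one
    numOfRuns = (numOfRuns + fanIn - 1) / fanIn;
    sortPass++;
  }
  return sortPass + 2;                   // + initial write pass + final merge
}

int main(void) {
  printf("1 run   -> %d loop(s)\n", countLoops(1, 8));    // 1
  printf("8 runs  -> %d loop(s)\n", countLoops(8, 8));    // 2
  printf("64 runs -> %d loop(s)\n", countLoops(64, 8));   // 3
  return 0;
}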
@ -156,6 +156,14 @@ static int32_t translatePercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
|
||||||
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
|
||||||
|
|
||||||
|
if (pValue->datum.i < 0 || pValue->datum.i > 100) {
|
||||||
|
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
|
}
|
||||||
|
|
||||||
|
pValue->notReserved = true;
|
||||||
|
|
||||||
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
|
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
|
||||||
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
|
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
|
||||||
if (!IS_NUMERIC_TYPE(para1Type) || (!IS_SIGNED_NUMERIC_TYPE(para2Type) && !IS_UNSIGNED_NUMERIC_TYPE(para2Type))) {
|
if (!IS_NUMERIC_TYPE(para1Type) || (!IS_SIGNED_NUMERIC_TYPE(para2Type) && !IS_UNSIGNED_NUMERIC_TYPE(para2Type))) {
|
||||||
|
@ -175,8 +183,8 @@ static bool validAperventileAlgo(const SValueNode* pVal) {
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||||
int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
|
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
|
||||||
if (2 != paraNum && 3 != paraNum) {
|
if (2 != numOfParams && 3 != numOfParams) {
|
||||||
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -190,15 +198,15 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
|
||||||
if (nodeType(pParamNode) != QUERY_NODE_VALUE) {
|
if (nodeType(pParamNode) != QUERY_NODE_VALUE) {
|
||||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
SValueNode* pValue = (SValueNode*)pParamNode;
|
SValueNode* pValue = (SValueNode*)pParamNode;
|
||||||
if (pValue->datum.i < 0 || pValue->datum.i > 100) {
|
if (pValue->datum.i < 0 || pValue->datum.i > 100) {
|
||||||
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
pValue->notReserved = true;
|
pValue->notReserved = true;
|
||||||
|
|
||||||
if (3 == paraNum) {
|
if (3 == numOfParams) {
|
||||||
SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2);
|
SNode* pPara3 = nodesListGetNode(pFunc->pParameterList, 2);
|
||||||
if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) {
|
if (QUERY_NODE_VALUE != nodeType(pPara3) || !validAperventileAlgo((SValueNode*)pPara3)) {
|
||||||
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
||||||
|
@ -218,8 +226,8 @@ static int32_t translateTbnameColumn(SFunctionNode* pFunc, char* pErrBuf, int32_
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
static int32_t translateTop(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||||
int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
|
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
|
||||||
if (2 != paraNum) {
|
if (2 != numOfParams) {
|
||||||
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -263,15 +271,16 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||||
int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
|
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
|
||||||
if (1 != paraNum && 2 != paraNum) {
|
if (1 != numOfParams && 2 != numOfParams) {
|
||||||
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
|
//param0
|
||||||
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
|
SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
|
||||||
|
if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
|
||||||
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
||||||
"The input parameter of ELAPSED function can only be column");
|
"The first parameter of ELAPSED function can only be column");
|
||||||
}
|
}
|
||||||
|
|
||||||
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
|
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
|
||||||
|
@ -279,6 +288,23 @@ static int32_t translateElapsed(SFunctionNode* pFunc, char* pErrBuf, int32_t len
|
||||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//param1
|
||||||
|
if (2 == numOfParams) {
|
||||||
|
SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
|
||||||
|
if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
|
||||||
|
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
|
}
|
||||||
|
|
||||||
|
SValueNode* pValue = (SValueNode*)pParamNode1;
|
||||||
|
|
||||||
|
pValue->notReserved = true;
|
||||||
|
|
||||||
|
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
|
||||||
|
if (!IS_INTEGER_TYPE(paraType)) {
|
||||||
|
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
|
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
@@ -290,26 +316,58 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le
   }

   for (int32_t i = 0; i < numOfParams; ++i) {
+    SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+    if (i > 0) {  //param1 & param2
+      if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+        return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+      }
+
+      SValueNode* pValue = (SValueNode*)pParamNode;
+
+      pValue->notReserved = true;
+    }
+
     uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
     if (!IS_NUMERIC_TYPE(colType)) {
       return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
     }
   }

   pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY};
   return TSDB_CODE_SUCCESS;
 }

 static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
-  if (4 != LIST_LENGTH(pFunc->pParameterList)) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (4 != numOfParams) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param0
+  SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+  if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
+    return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                           "The first parameter of HISTOGRAM function can only be column");
+  }
+
   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
   if (!IS_NUMERIC_TYPE(colType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param1 ~ param3
+  for (int32_t i = 1; i < numOfParams; ++i) {
+    SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+    if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+    }
+
+    SValueNode* pValue = (SValueNode*)pParamNode;
+
+    pValue->notReserved = true;
+  }
+
   if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
       ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
       ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
@@ -336,46 +394,75 @@ static int32_t translateHLL(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
 }

 static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
-  if (3 != LIST_LENGTH(pFunc->pParameterList)) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (3 != numOfParams) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param0
   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
   if (!IS_NUMERIC_TYPE(colType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param1 & param2
+  for (int32_t i = 1; i < numOfParams; ++i) {
+    SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+    if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+    }
+
+    SValueNode* pValue = (SValueNode*)pParamNode;
+
+    pValue->notReserved = true;
+  }
+
   if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
       (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT &&
       ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //set result type
   pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
   return TSDB_CODE_SUCCESS;
 }

 static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
-  int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
-  if (3 != paraNum && 4 != paraNum) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (3 != numOfParams && 4 != numOfParams) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param0
   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
   if (!IS_NUMERIC_TYPE(colType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param1, param2 & param3
+  for (int32_t i = 1; i < numOfParams; ++i) {
+    SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+    if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+    }
+
+    SValueNode* pValue = (SValueNode*)pParamNode;
+
+    pValue->notReserved = true;
+  }
+
   if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
       (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BIGINT &&
       ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_DOUBLE)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

-  if (paraNum == 4 && ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
+  if (numOfParams == 4 && ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //set result type
   pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
   return TSDB_CODE_SUCCESS;
 }
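Note: both state functions above now walk the trailing literal parameters, mark them notReserved, and then check their expected types. A compact, table-driven sketch of that kind of per-position type check, assuming simplified stand-in type tags (not the actual TSDB_DATA_TYPE_* values):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical simplified type tags. */
    typedef enum { T_BINARY, T_BIGINT, T_DOUBLE, T_INT } EType;

    /* Expected types for STATEDURATION-like params:
     * param1 = comparison operator (string), param2 = threshold, param3 = optional unit. */
    static bool typeAllowed(int32_t idx, EType t) {
      switch (idx) {
        case 1:  return t == T_BINARY;
        case 2:  return t == T_BIGINT || t == T_DOUBLE;
        case 3:  return t == T_BIGINT;
        default: return false;
      }
    }

    int main(void) {
      EType args[4] = {T_DOUBLE, T_BINARY, T_DOUBLE, T_BIGINT};
      for (int32_t i = 1; i < 4; ++i) {
        printf("param%d %s\n", i, typeAllowed(i, args[i]) ? "ok" : "rejected");
      }
      return 0;
    }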
@@ -416,13 +503,28 @@ static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

-  SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
-  if (QUERY_NODE_COLUMN != nodeType(pPara)) {
+  //param0
+  SNode* pParaNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+  if (QUERY_NODE_COLUMN != nodeType(pParaNode0)) {
     return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
-                           "The input parameter of MAVG function can only be column");
+                           "The first parameter of MAVG function can only be column");
   }

   uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;

+  //param1
+  SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+  if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
+    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+  }
+
+  SValueNode* pValue = (SValueNode*)pParamNode1;
+  if (pValue->datum.i < 1 || pValue->datum.i > 1000) {
+    return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+  }
+
+  pValue->notReserved = true;
+
   uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
   if (!IS_NUMERIC_TYPE(colType) || !IS_INTEGER_TYPE(paraType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
@@ -437,24 +539,41 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

-  SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
-  if (QUERY_NODE_COLUMN != nodeType(pPara)) {
+  //param0
+  SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
+  if (QUERY_NODE_COLUMN != nodeType(pParamNode0)) {
     return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
-                           "The input parameter of SAMPLE function can only be column");
+                           "The first parameter of SAMPLE function can only be column");
   }

+  SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+  uint8_t colType = pCol->resType.type;
+
+  //param1
+  SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+  if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
+    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+  }
+
+  SValueNode* pValue = (SValueNode*)pParamNode1;
+  if (pValue->datum.i < 1 || pValue->datum.i > 1000) {
+    return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+  }
+
+  pValue->notReserved = true;
+
   uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
   if (!IS_INTEGER_TYPE(paraType)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }

-  SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
-  uint8_t colType = pCol->resType.type;
+  //set result type
   if (IS_VAR_DATA_TYPE(colType)) {
     pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
   } else {
     pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
   }

   return TSDB_CODE_SUCCESS;
 }
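Note: MAVG and SAMPLE above both reject a literal second argument outside [1, 1000] before accepting it. A minimal sketch of that range check on a 64-bit literal (the function name is illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Reject a window/sample size outside [1, 1000], mirroring the range
     * enforced in the validators above. */
    static int32_t checkSampleSize(int64_t k) {
      if (k < 1 || k > 1000) return -1; /* out of the accepted range */
      return 0;
    }

    int main(void) {
      printf("%d %d %d\n", checkSampleSize(0), checkSampleSize(100), checkSampleSize(1001));
      return 0;
    }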
@@ -464,21 +583,37 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

+  //param0
   SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
   if (QUERY_NODE_COLUMN != nodeType(pPara)) {
     return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
-                           "The input parameter of TAIL function can only be column");
+                           "The first parameter of TAIL function can only be column");
   }

+  SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+  uint8_t colType = pCol->resType.type;
+
+  //param1 & param2
   for (int32_t i = 1; i < numOfParams; ++i) {
+    SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
+    if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
+      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+    }
+
+    SValueNode* pValue = (SValueNode*)pParamNode;
+
+    if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 1000) {
+      return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+    }
+
+    pValue->notReserved = true;
+
     uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
     if (!IS_INTEGER_TYPE(paraType)) {
       return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
     }
   }

-  SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
-  uint8_t colType = pCol->resType.type;
+  //set result type
   if (IS_VAR_DATA_TYPE(colType)) {
     pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
   } else {
@@ -552,8 +687,8 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)

 static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, int32_t minParaNum,
                                    int32_t maxParaNum, bool hasSep) {
-  int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
-  if (paraNum < minParaNum || paraNum > maxParaNum) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (numOfParams < minParaNum || numOfParams > maxParaNum) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

@@ -562,7 +697,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
   int32_t sepBytes = 0;

   /* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */
-  for (int32_t i = 0; i < paraNum; ++i) {
+  for (int32_t i = 0; i < numOfParams; ++i) {
     SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
     uint8_t paraType = ((SExprNode*)pPara)->resType.type;
     if (!IS_VAR_DATA_TYPE(paraType)) {

@@ -573,7 +708,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
     }
   }

-  for (int32_t i = 0; i < paraNum; ++i) {
+  for (int32_t i = 0; i < numOfParams; ++i) {
     SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
     uint8_t paraType = ((SExprNode*)pPara)->resType.type;
     int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes;

@@ -589,7 +724,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
   }

   if (hasSep) {
-    resultBytes += sepBytes * (paraNum - 3);
+    resultBytes += sepBytes * (numOfParams - 3);
   }

   pFunc->node.resType = (SDataType){.bytes = resultBytes, .type = resultType};

@@ -605,8 +740,8 @@ static int32_t translateConcatWs(SFunctionNode* pFunc, char* pErrBuf, int32_t le
 }

 static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
-  int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
-  if (2 != paraNum && 3 != paraNum) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (2 != numOfParams && 3 != numOfParams) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

@@ -615,7 +750,7 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
   if (!IS_VAR_DATA_TYPE(pPara1->resType.type) || !IS_INTEGER_TYPE(para2Type)) {
     return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
   }
-  if (3 == paraNum) {
+  if (3 == numOfParams) {
     uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
     if (!IS_INTEGER_TYPE(para3Type)) {
       return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);

@@ -692,8 +827,8 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
 }

 static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
-  int32_t paraNum = LIST_LENGTH(pFunc->pParameterList);
-  if (2 != paraNum && 3 != paraNum) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (2 != numOfParams && 3 != numOfParams) {
     return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
   }

@@ -704,7 +839,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
     }
   }

-  if (3 == paraNum) {
+  if (3 == numOfParams) {
     if (!IS_INTEGER_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type)) {
       return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
     }
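Note: the concat hunks above only rename paraNum to numOfParams, but the size arithmetic around them is worth a sketch. Under the assumption that the earlier loop already summed the byte width of every parameter (the separator included), adding sepBytes * (numOfParams - 3) tops the estimate up to one separator between each of the numOfParams - 1 value arguments. A standalone illustration of that worst-case sizing, with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    /* Worst-case output width for a CONCAT_WS-style call:
     * widths[0] is the separator, widths[1..numOfParams-1] are the value arguments.
     * Assumes every parameter's width is summed once before the separator top-up. */
    static int32_t concatWsResultBytes(const int32_t* widths, int32_t numOfParams) {
      int32_t resultBytes = 0;
      for (int32_t i = 0; i < numOfParams; ++i) resultBytes += widths[i]; /* all params once   */
      resultBytes += widths[0] * (numOfParams - 3);                       /* remaining sep copies */
      return resultBytes;
    }

    int main(void) {
      int32_t widths[4] = {1, 10, 20, 30}; /* sep="," plus three VARCHAR args        */
      /* three value args need two separators: 60 + 1 (from the loop) + 1 extra = 62 */
      printf("%d\n", concatWsResultBytes(widths, 4));
      return 0;
    }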
@@ -530,7 +530,8 @@ void monSendReport() {
   monGenLogJson(pMonitor);

   char *pCont = tjsonToString(pMonitor->pJson);
+  // uDebugL("report cont:%s\n", pCont);
   if (pCont != NULL) {
     EHttpCompFlag flag = tsMonitor.cfg.comp ? HTTP_GZIP : HTTP_FLAT;
     if (taosSendHttpReport(tsMonitor.cfg.server, tsMonitor.cfg.port, pCont, strlen(pCont), flag) != 0) {
       uError("failed to send monitor msg");
@@ -189,6 +189,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
   const char* msg1 = "name too long";
   const char* msg2 = "invalid database name";
   const char* msg3 = "db is not specified";
+  const char* msg4 = "invalid table name";

   int32_t code = TSDB_CODE_SUCCESS;
   char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true);

@@ -207,6 +208,10 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
   }

   int32_t tbLen = pTableName->n - dbLen - 1;
+  if (tbLen <= 0) {
+    return buildInvalidOperationMsg(pMsgBuf, msg4);
+  }
+
   char tbname[TSDB_TABLE_FNAME_LEN] = {0};
   strncpy(tbname, p + 1, tbLen);
   /*tbLen = */ strdequote(tbname);
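Note: the added guard rejects qualified names like "db." where nothing follows the path delimiter. A small standalone sketch of that split-and-validate step, using plain C string handling rather than the parser's token types (all names here are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Split "db.table" on the first '.', rejecting an empty table part,
     * which is the case the new tbLen <= 0 check covers. */
    static int splitName(const char* name, char* db, size_t dbCap, char* tb, size_t tbCap) {
      const char* p = strchr(name, '.');
      if (p == NULL) return -1;                        /* no database prefix */
      size_t dbLen = (size_t)(p - name);
      size_t tbLen = strlen(p + 1);
      if (dbLen == 0 || tbLen == 0) return -2;         /* "db." or ".table"  */
      if (dbLen >= dbCap || tbLen >= tbCap) return -3; /* name too long      */
      memcpy(db, name, dbLen); db[dbLen] = '\0';
      memcpy(tb, p + 1, tbLen); tb[tbLen] = '\0';
      return 0;
    }

    int main(void) {
      char db[64], tb[192];
      printf("%d\n", splitName("dbcq.tcq1", db, sizeof(db), tb, sizeof(tb))); /* 0  */
      printf("%d\n", splitName("dbcq.", db, sizeof(db), tb, sizeof(tb)));     /* -2 */
      return 0;
    }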
@@ -166,6 +166,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
     streamDataSubmitRefDec((SStreamDataSubmit*)data);
   } else {
     taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
+    taosFreeQitem(data);
   }
   return 0;
 }

@@ -186,7 +187,7 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {

     streamTaskExecImpl(pTask, data, pRes);

-    taosFreeQitem(data);
+    /*taosFreeQitem(data);*/

     if (taosArrayGetSize(pRes) != 0) {
       SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);

@@ -206,7 +207,7 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {

     streamTaskExecImpl(pTask, data, pRes);

-    taosFreeQitem(data);
+    /*taosFreeQitem(data);*/

     if (taosArrayGetSize(pRes) != 0) {
       SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);

@@ -228,7 +229,7 @@ int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {

     streamTaskExecImpl(pTask, data, pRes);

-    taosFreeQitem(data);
+    /*taosFreeQitem(data);*/

     if (taosArrayGetSize(pRes) != 0) {
       SStreamDataBlock* resQ = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
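Note: the three streamExec hunks stop freeing the input item at the call site. On the reading that streamTaskExecImpl now releases block input itself (the taosFreeQitem added to its else branch), ownership of the item moves into the callee so it is released exactly once. A tiny sketch of that single-owner convention, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    /* The callee consumes and frees the item; callers must not free it again. */
    static void consumeItem(int* item) {
      printf("consumed %d\n", *item);
      free(item); /* single point of release */
    }

    int main(void) {
      int* item = malloc(sizeof(int));
      *item = 42;
      consumeItem(item);
      /* free(item);  <- would be a double free; the callee already released it */
      return 0;
    }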
@@ -357,6 +357,16 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
       } else {
         syncNodeBecomeFollower(ths);
       }
+
+      // maybe newSyncCfg.myIndex is updated in syncNodeUpdateConfig
+      if (ths->pFsm->FpReConfigCb != NULL) {
+        SReConfigCbMeta cbMeta = {0};
+        cbMeta.code = 0;
+        cbMeta.currentTerm = ths->pRaftStore->currentTerm;
+        cbMeta.index = pEntry->index;
+        cbMeta.term = pEntry->term;
+        ths->pFsm->FpReConfigCb(ths->pFsm, newSyncCfg, cbMeta);
+      }
     }

     // restore finish

@@ -134,6 +134,16 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
       } else {
         syncNodeBecomeFollower(pSyncNode);
       }
+
+      // maybe newSyncCfg.myIndex is updated in syncNodeUpdateConfig
+      if (pSyncNode->pFsm->FpReConfigCb != NULL) {
+        SReConfigCbMeta cbMeta = {0};
+        cbMeta.code = 0;
+        cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm;
+        cbMeta.index = pEntry->index;
+        cbMeta.term = pEntry->term;
+        pSyncNode->pFsm->FpReConfigCb(pSyncNode->pFsm, newSyncCfg, cbMeta);
+      }
     }

     // restore finish

@@ -349,7 +349,9 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) {
 }

 // open/close --------------
-SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
+SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
+  SSyncInfo* pSyncInfo = (SSyncInfo*)pOldSyncInfo;
+
   SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
   assert(pSyncNode != NULL);
   memset(pSyncNode, 0, sizeof(SSyncNode));

@@ -361,11 +363,25 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pSyncInfo) {
       sError("failed to create dir:%s since %s", pSyncInfo->path, terrstr());
       return NULL;
     }
+  }
+
+  snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path);
+  if (!taosCheckExistFile(pSyncNode->configPath)) {
     // create raft config file
-    snprintf(pSyncNode->configPath, sizeof(pSyncNode->configPath), "%s/raft_config.json", pSyncInfo->path);
     ret = syncCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), pSyncNode->configPath);
     assert(ret == 0);
+
+  } else {
+    // update syncCfg by raft_config.json
+    pSyncNode->pRaftCfg = raftCfgOpen(pSyncNode->configPath);
+    assert(pSyncNode->pRaftCfg != NULL);
+    pSyncInfo->syncCfg = pSyncNode->pRaftCfg->cfg;
+
+    char* seralized = raftCfg2Str(pSyncNode->pRaftCfg);
+    sInfo("syncNodeOpen update config :%s", seralized);
+    taosMemoryFree(seralized);
+
+    raftCfgClose(pSyncNode->pRaftCfg);
   }

   // init by SSyncInfo
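Note: syncNodeOpen now creates raft_config.json only when it is missing and otherwise reads the existing file back into the caller's SSyncInfo. A standalone sketch of that open-or-create pattern, using ordinary stdio in place of the raftCfg helpers (file name and format here are illustrative):

    #include <stdio.h>

    /* If the config file is missing, persist the caller's value into it;
     * otherwise load the stored value back, overriding the caller's. */
    static int openOrCreateConfig(const char* path, int* cfgValue) {
      FILE* f = fopen(path, "r");
      if (f == NULL) {
        f = fopen(path, "w"); /* first start: persist the given config */
        if (f == NULL) return -1;
        fprintf(f, "%d\n", *cfgValue);
      } else {
        if (fscanf(f, "%d", cfgValue) != 1) { /* restart: the file is authoritative */
          fclose(f);
          return -1;
        }
      }
      fclose(f);
      return 0;
    }

    int main(void) {
      int cfg = 3;
      if (openOrCreateConfig("raft_config_demo.txt", &cfg) == 0) printf("cfg=%d\n", cfg);
      return 0;
    }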
@@ -49,7 +49,7 @@ void test4() {
   logTest((char*)__FUNCTION__);
 }

-int main() {
+int main(int argc, char** argv) {
   // taosInitLog("tmp/syncTest.log", 100);
   tsAsyncLog = 0;

@@ -58,6 +58,14 @@ int main() {
   test3();
   test4();

+  if (argc == 2) {
+    bool bTaosDirExist = taosDirExist(argv[1]);
+    printf("%s bTaosDirExist:%d \n", argv[1], bTaosDirExist);
+
+    bool bTaosCheckExistFile = taosCheckExistFile(argv[1]);
+    printf("%s bTaosCheckExistFile:%d \n", argv[1], bTaosCheckExistFile);
+  }
+
   // taosCloseLog();
   return 0;
 }
@@ -913,12 +913,12 @@ uint32_t taosGetIpv4FromFqdn(const char *fqdn) {
   } else {
 #ifdef EAI_SYSTEM
     if (ret == EAI_SYSTEM) {
-      printf("failed to get the ip address, fqdn:%s, errno:%d, since:%s", fqdn, errno, strerror(errno));
+      // printf("failed to get the ip address, fqdn:%s, errno:%d, since:%s", fqdn, errno, strerror(errno));
     } else {
-      printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
+      // printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
     }
 #else
-    printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
+    // printf("failed to get the ip address, fqdn:%s, ret:%d, since:%s", fqdn, ret, gai_strerror(ret));
 #endif
     return 0xFFFFFFFF;
   }

@@ -315,6 +315,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_NOT_EXIST, "Table does not exists
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TABLE_ACTION, "Invalid table action")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_ALREADY_EXISTS, "Table column already exists")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_COL_NOT_EXISTS, "Table column not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_READ_END, "Read end")

 // tsdb

@@ -708,7 +708,7 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s
   pNewNode->removed = 0;
   pNewNode->next = NULL;

-  memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize);
+  if (pData) memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize);
   memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen);

   return pNewNode;

@@ -774,7 +774,7 @@ static void *taosHashReleaseNode(SHashObj *pHashObj, void *p, int *slot) {
     ASSERT(prevNode->next != prevNode);
   } else {
     pe->next = pOld->next;
-    SHashNode* x = pe->next;
+    SHashNode *x = pe->next;
     if (x != NULL) {
       ASSERT(x->next != x);
     }
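Note: the doCreateHashNode change lets a node be created with a NULL payload by skipping the data copy. A minimal sketch of guarding memcpy against a NULL source; the zero-fill branch here is an assumption added for the demo, the actual change simply skips the copy:

    #include <stdio.h>
    #include <string.h>

    /* Copy the payload only when one was supplied; a NULL pData means
     * "key-only node" instead of undefined behaviour in memcpy. */
    static void fillNode(char* dst, const void* pData, size_t dsize) {
      if (pData) memcpy(dst, pData, dsize);
      else memset(dst, 0, dsize); /* demo-only default */
    }

    int main(void) {
      char buf[8];
      fillNode(buf, "abc", 4);
      printf("%s\n", buf);
      fillNode(buf, NULL, sizeof(buf)); /* no payload: node keeps zeroed data */
      printf("len=%zu\n", strlen(buf));
      return 0;
    }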
@@ -549,11 +549,16 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
   // print the statistics information
   {
     SDiskbasedBufStatis* ps = &pBuf->statis;
-    uDebug(
-        "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f "
-        "Kb\n",
-        ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
-        ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+    if (ps->loadPages == 0) {
+      uDebug(
+          "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages)",
+          ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages);
+    } else {
+      uDebug(
+          "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb",
+          ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
+          ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+    }
   }

   taosRemoveFile(pBuf->path);
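Note: the destroyDiskbasedBuf change splits the debug line so the average page size is only computed when pages were actually loaded, avoiding a division by zero. A small sketch of the same guard:

    #include <stdio.h>

    /* Print an average only when the divisor is non-zero, as the split
     * uDebug calls above now do for loadPages. */
    static void reportLoad(double loadBytes, int loadPages) {
      if (loadPages == 0) {
        printf("loadFromDisk:%.2f Kb (%d Pages)\n", loadBytes / 1024.0, loadPages);
      } else {
        printf("loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
               loadBytes / 1024.0, loadPages, loadBytes / (1024.0 * loadPages));
      }
    }

    int main(void) {
      reportLoad(0.0, 0);
      reportLoad(65536.0, 16);
      return 0;
    }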
@@ -1,93 +0,0 @@
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-from util.log import *
-from util.cases import *
-from util.sql import *
-from util.dnodes import *
-
-
-class TDTestCase:
-    def init(self, conn, logSql):
-        tdLog.debug(f"start to execute {__file__}")
-        tdSql.init(conn.cursor(), logSql)
-
-    def insertnow(self):
-
-        # timestamp list:
-        # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00"
-        # -631180800000 -> "1950-01-01 00:00:00"
-
-        tsp1 = 0
-        tsp2 = -28800000
-        tsp3 = -946800000000
-        tsp4 = "1969-01-01 00:00:00.000"
-
-        tdSql.execute("insert into tcq1 values (now-11d, 5)")
-        tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
-        tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
-        tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
-        tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")
-
-    def waitedQuery(self, sql, expectRows, timeout):
-        tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
-        try:
-            for i in range(timeout):
-                tdSql.cursor.execute(sql)
-                self.queryResult = tdSql.cursor.fetchall()
-                self.queryRows = len(self.queryResult)
-                self.queryCols = len(tdSql.cursor.description)
-                # tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
-                if self.queryRows >= expectRows:
-                    return (self.queryRows, i)
-                time.sleep(1)
-        except Exception as e:
-            caller = inspect.getframeinfo(inspect.stack()[1][0])
-            tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
-            raise Exception(repr(e))
-        return (self.queryRows, timeout)
-
-    def cq(self):
-        tdSql.execute(
-            "create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)"
-        )
-        self.waitedQuery("select * from cq1", 1, 120)
-
-    def querycq(self):
-        tdSql.query("select * from cq1")
-        tdSql.checkData(0, 1, 1.0)
-        tdSql.checkData(10, 1, 2.0)
-
-    def run(self):
-        tdSql.execute("drop database if exists dbcq")
-        tdSql.execute("create database if not exists dbcq keep 36500")
-        tdSql.execute("use dbcq")
-
-        tdSql.execute("create table stbcq (ts timestamp, c1 int ) TAGS(t1 int)")
-        tdSql.execute("create table tcq1 using stbcq tags(1)")
-
-        self.insertnow()
-        self.cq()
-        self.querycq()
-
-        # after wal and sync, check again
-        tdSql.query("show dnodes")
-        index = tdSql.getData(0, 0)
-        tdDnodes.stop(index)
-        tdDnodes.start(index)
-
-        self.querycq()
-
-    def stop(self):
-        tdSql.close()
-        tdLog.success(f"{__file__} successfully executed")
-
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
Some files were not shown because too many files have changed in this diff.