other: merge 3.0

Haojun Liao 2022-05-30 23:32:00 +08:00
commit a7feca23fe
277 changed files with 18545 additions and 13869 deletions

View File

@ -269,7 +269,7 @@ pipeline {
}
}
stage('linux test') {
agent{label " slave3_0 || slave15 || slave16 || slave17 "}
agent{label " worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()
@ -287,9 +287,9 @@ pipeline {
'''
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=1
export DEFAULT_RETRY_TIME=2
date
timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
'''
}
}

View File

@ -49,7 +49,7 @@ IF(${TD_WINDOWS})
option(
BUILD_TEST
"If build unit tests using googletest"
OFF
ON
)
ELSE ()

View File

@ -243,7 +243,7 @@ void console(SRaftServer *pRaftServer) {
} else if (strcmp(cmd, "dropnode") == 0) {
char host[HOST_LEN];
char host[HOST_LEN] = {0};
uint32_t port;
parseAddr(param1, host, HOST_LEN, &port);
uint64_t rid = raftId(host, port);
@ -258,7 +258,7 @@ void console(SRaftServer *pRaftServer) {
} else if (strcmp(cmd, "put") == 0) {
char buf[256];
char buf[256] = {0};
snprintf(buf, sizeof(buf), "%s--%s", param1, param2);
putValue(&pRaftServer->raft, buf);

View File

@ -52,7 +52,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
:::info
- To improve write efficiency, write in batches. The more records in a batch, the higher the insert efficiency. However, a single record cannot exceed 16K, and the total length of one SQL statement cannot exceed 1M.
- To improve write efficiency, write in batches. The more records in a batch, the higher the insert efficiency. However, a single record cannot exceed 48K, and the total length of one SQL statement cannot exceed 1M.
- TDengine supports writing from multiple threads at the same time. To further improve write speed, a client should open 20 or more threads to write concurrently. However, beyond a certain number of threads the speed cannot be improved further and may even drop, because frequent thread switching brings extra overhead.
:::

View File

@ -145,7 +145,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
Its second parameter determines whether to keep the subscription progress on the client side. If this parameter is **false** (**0**), the subscription can only restart from the beginning no matter what the `restart` parameter is the next time `taos_subscribe` is called. In addition, the progress information is saved under the _{DataDir}/subscribe/_ directory; each subscription has a file named after its `topic`, and deleting that file also causes the corresponding subscription to restart from the beginning the next time it is created.
Its second parameter determines whether to keep the subscription progress on the client side. If this parameter is **false** (**0**), the subscription can only restart from the beginning no matter what the `restart` parameter is the next time `taos_subscribe` is called. In addition, the progress information is saved under the _{DataDir}/subscribe/_ directory (note: the default value of the `DataDir` parameter in the `taos.cfg` configuration file is **/var/lib/taos/**, but this directory does not exist on a Windows server, so on Windows you need to change the `DataDir` parameter to an existing directory); each subscription has a file named after its `topic`, and deleting that file also causes the corresponding subscription to restart from the beginning the next time it is created.
With the code covered, let's look at the actual effect of running it. Assume that:

View File

@ -12,7 +12,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
1. The first column of a table must be of TIMESTAMP type, and the system automatically sets it as the primary key;
2. The maximum length of a table name is 192;
3. Each row of a table cannot exceed 16k characters in length (note: each BINARY/NCHAR column also takes 2 extra bytes of storage);
3. Each row of a table cannot exceed 48KB in length (note: each BINARY/NCHAR column also takes 2 extra bytes of storage);
4. A subtable name may only consist of letters, digits and underscores, may not start with a digit, and is case insensitive;
5. When using the binary or nchar data types, the maximum number of bytes must be specified, e.g. binary(20) means 20 bytes;
6. To support more forms of table names, TDengine introduces the new escape character "\`", which keeps table names from conflicting with keywords and exempts them from the validity checks above, although the length limit still applies. Once the escape character is used, the content inside it is no longer case-normalized (see the sketch below).
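A minimal sketch of rule 6, assuming some database is already selected; the table name below is hypothetical:

```sql
-- Backquotes let an otherwise reserved word be used as a table name.
CREATE TABLE `select` (ts TIMESTAMP, speed INT);
```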

View File

@ -86,7 +86,7 @@ ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length);
ALTER STABLE stb_name ADD TAG new_tag_name tag_type;
```
Adds a new tag to the STable and specifies the new tag's type. The total number of tags cannot exceed 128, and their total length cannot exceed 16k characters.
Adds a new tag to the STable and specifies the new tag's type. The total number of tags cannot exceed 128, and their total length cannot exceed 16KB.
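A minimal sketch, assuming a super table named `meters` already exists (the tag name is hypothetical):

```sql
-- Adds one more tag; the totals must stay within the 128-tag and 16KB limits.
ALTER STABLE meters ADD TAG deviceid BINARY(16);
```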
### Drop a Tag

View File

@ -698,7 +698,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```
**Description**: Returns k consecutive records after skipping the last offset_value records, without ignoring NULL values. offset_val may be omitted, in which case the last k records are returned. When offset_val is supplied, the function is equivalent to `order by ts desc LIMIT k OFFSET offset_val`
**Description**: Returns k consecutive records after skipping the last offset_val records, without ignoring NULL values. offset_val may be omitted, in which case the last k records are returned. When offset_val is supplied, the function is equivalent to `order by ts desc LIMIT k OFFSET offset_val`
**Parameter range**: k: [1,100]; offset_val: [0,100].
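A minimal sketch of `TAIL`, assuming a table `d1001` with a `current` column (both names hypothetical):

```sql
-- Skip the last 2 records of d1001, then return the 5 records before them;
-- equivalent to: SELECT current FROM d1001 ORDER BY ts DESC LIMIT 5 OFFSET 2;
SELECT TAIL(current, 5, 2) FROM d1001;
```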
@ -1766,6 +1766,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
1u (microsecond), 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day).
- If the time unit time_unit is not specified, the precision of the returned time difference is consistent with the time precision configured for the current DATABASE.
**Applicable versions**: 2.6.0.0 and later.
**Examples**:
```sql

View File

@ -7,9 +7,9 @@ title: Boundary Limits
- The maximum length of a database name is 32.
- The maximum length of a table name is 192 (excluding the database name prefix and the separator).
- The maximum length of each data row is 16k characters; since version 2.1.7.0 the maximum length of each data row is 48k characters (note: each BINARY/NCHAR column in a data row also takes 2 extra bytes of storage).
- The maximum length of each data row is 48KB (note: each BINARY/NCHAR column in a data row also takes 2 extra bytes of storage).
- The maximum length of a column name is 64. At most 4096 columns are allowed; at least 2 columns are required, and the first column must be a timestamp. Note: versions before 2.1.7.0 (exclusive) allowed at most 1024 columns.
- The maximum length of a tag name is 64. At most 128 tags are allowed; at least 1 tag is required, and the total length of tag values in a table cannot exceed 16k characters.
- The maximum length of a tag name is 64. At most 128 tags are allowed; at least 1 tag is required, and the total length of tag values in a table cannot exceed 16KB.
- The maximum length of a SQL statement is 1048576 characters; it can be modified via the client configuration parameter maxSQLLength, with a range of 65480 ~ 1048576.
- A SELECT query result may contain at most 4096 columns (function calls in the statement may also take up some column space); when the limit is exceeded, explicitly specify fewer return columns to avoid execution errors. Note: versions before 2.1.7.0 (exclusive) allowed at most 1024 columns.
- The numbers of databases, STables and tables are not limited by the system and are restricted only by system resources.

View File

@ -23,17 +23,17 @@ title: TDengine Parameter Limits and Reserved Keywords
The characters `` ‘“`\ `` (single and double quotation marks, apostrophe, backslash, space) have been removed.
- Database name: cannot contain "." or special characters; cannot exceed 32 characters.
- Table name: cannot contain "." or special characters; together with the database name it belongs to, cannot exceed 192 characters; the maximum length of each data row is 16k characters.
- Column name of a table: cannot contain special characters; cannot exceed 64 characters.
- Table name: cannot contain "." or special characters; together with the database name it belongs to, cannot exceed 192 bytes; the maximum length of each data row is 48KB.
- Column name of a table: cannot contain special characters; cannot exceed 64 bytes.
- Database names, table names and column names cannot start with a digit; the legal character set is "English letters, digits and underscore".
- Number of columns in a table: cannot exceed 1024; at least 2 columns are required, and the first column must be a timestamp (since version 2.1.7.0, up to 4096 columns are supported).
- Maximum length of a record: including the 8-byte timestamp, cannot exceed 16KB (each BINARY/NCHAR column also takes 2 extra bytes of storage).
- Default maximum string length of a single SQL statement: 1048576 bytes; can be modified via the system configuration parameter maxSQLLength, range 65480 ~ 1048576 bytes.
- Maximum length of a record: including the 8-byte timestamp, cannot exceed 48KB (each BINARY/NCHAR column also takes 2 extra bytes of storage).
- Default maximum string length of a single SQL statement: 1048576 bytes; can be modified via the system configuration parameter maxSQLLength, range 65480 ~ 1048576 bytes.
- Number of database replicas: cannot exceed 3.
- User name: cannot exceed 23 bytes.
- User password: cannot exceed 15 bytes.
- User name: cannot exceed 23 bytes.
- User password: cannot exceed 15 bytes.
- Number of tags: cannot exceed 128; may be 0.
- Total length of tags: cannot exceed 16K bytes.
- Total length of tags: cannot exceed 16KB.
- Number of records: limited only by storage space.
- Number of tables: limited only by the number of nodes.
- Number of databases: limited only by the number of nodes.

View File

@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx";
`td2.0-connector` and `td2.0-rest-connector` are the official Node.js connectors for TDengine. Node.js developers can use them to develop applications that access TDengine cluster data.
@ -189,14 +188,8 @@ let cursor = conn.cursor();
### Query Data
#### Synchronous Query
<NodeQuery />
#### Asynchronous Query
<NodeAsyncQuery />
## More Sample Programs
| Sample Program | Description |

View File

@ -82,7 +82,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip
All processing logic of schemaless writes still follows TDengine's underlying restrictions on data structures, for example the total length of each row of data cannot exceed
16k bytes. For the specific restrictions in this area, see [TAOS SQL Boundary Limits](/taos-sql/limit)
48KB. For the specific restrictions in this area, see [TAOS SQL Boundary Limits](/taos-sql/limit)
:::

View File

@ -33,15 +33,15 @@ title: Frequently Asked Questions
### 2. The JDBC driver cannot find the dynamic link library on Windows. What should I do?
Please see the [technical blog post](https://www.taosdata.com/blog/2019/12/03/950.html) written for this issue.
Please see the [technical blog post](https://www.taosdata.com/blog/2019/12/03/950.html) written for this issue.
### 3. "more dnodes are needed" is reported when creating a table
Please see the [technical blog post](https://www.taosdata.com/blog/2019/12/03/965.html) written for this issue.
Please see the [technical blog post](https://www.taosdata.com/blog/2019/12/03/965.html) written for this issue.
### 4. How do I make TDengine generate a core file when it crashes?
Please see the [technical blog post](https://www.taosdata.com/blog/2019/12/06/974.html) written for this issue.
Please see the [technical blog post](https://www.taosdata.com/blog/2019/12/06/974.html) written for this issue.
### 5. What should I do about the error "Unable to establish connection"?
@ -128,19 +128,30 @@ properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
Connection = DriverManager.getConnection(url, properties);
```
### 13. JDBC error: the executed SQL is not a DML or a DDL
### 13. The client cannot display Chinese characters correctly on Windows?
Windows systems generally use GBK/GB18030 to store Chinese characters, while the default character set of TDengine is UTF-8. When the TDengine client is used on Windows, the client driver converts all characters to UTF-8 before sending them to the server for storage; therefore, during application development, correctly configuring the current Chinese character set when calling the interfaces is sufficient.
[Versions after v2.2.1.5] When running the TDengine CLI tool taos on Windows 10, if Chinese characters cannot be entered or displayed correctly, the following settings can be added to the client's taos.cfg:
```
locale C
charset UTF-8
```
### 14. JDBC error: the executed SQL is not a DML or a DDL
Please upgrade to the latest JDBC driver; see [Java Connector](/reference/connector/java)
### 14. taos connect failed, reason&#58; invalid timestamp
### 15. taos connect failed, reason&#58; invalid timestamp
A common cause is that the server and client clocks are not synchronized. This can be corrected by synchronizing with a time server (on Linux use the ntpdate command; on Windows select automatic synchronization in the system time settings).
### 15. Table names are not displayed in full
### 16. Table names are not displayed in full
Because the taos shell has limited display width in the terminal, longer table names may not be displayed in full, and operating on such a truncated table name causes a Table does not exist error. The workaround is to modify the maxBinaryDisplayWidth setting in taos.cfg, to enter the command set max_binary_display_width 100 directly, or to append \G at the end of a command to change how the result is displayed.
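A sketch of the two in-shell workarounds just mentioned (the table name is hypothetical):

```sql
-- Widen the display of binary columns for the current session.
SET MAX_BINARY_DISPLAY_WIDTH 100;
-- Or print each row vertically so long values are fully visible.
SELECT * FROM my_table_with_a_long_name\G;
```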
### 16. How do I migrate data?
### 17. How do I migrate data?
TDengine uniquely identifies a machine by its hostname. When moving data files from machine A to machine B, pay attention to the following two things:
@ -148,7 +159,7 @@ TDengine uniquely identifies a machine by its hostname. When moving data files
- For version 2.0.7.0 and later, go to /var/lib/taos/dnode, fix the FQDN corresponding to the dnodeId in dnodeEps.json, and restart. Make sure this file is identical on all machines.
- The storage structures of versions 1.x and 2.x are incompatible; use a migration tool, or develop your own application to export and import the data.
### 17. How do I temporarily adjust the log level in the CLI program taos
### 18. How do I temporarily adjust the log level in the CLI program taos
For easier debugging, since version 2.0.16 the CLI program taos has two new log-related commands:
@ -169,7 +180,7 @@ ALTER LOCAL RESETLOG;
<a class="anchor" id="timezone"></a>
### 18. How do I resolve compilation failures of the component written in Go?
### 19. How do I resolve compilation failures of the component written in Go?
TDengine 2.3.0.0 and later include taosAdapter, an independent component developed in Go that must be run separately. It replaces the httpd previously built into taosd and provides the original httpd functionality plus data ingestion for a variety of other software (Prometheus, Telegraf, collectd, StatsD, etc.).
When compiling the latest develop branch code, first run `git submodule update --init --recursive` to download the taosAdapter repository code, then compile.
@ -184,7 +195,7 @@ go env -w GOPROXY=https://goproxy.cn,direct
If you want to keep using the previously built-in httpd, you can disable the taosAdapter build and use
`cmake .. -DBUILD_HTTP=true` to build with the original built-in httpd.
### 19. How do I check the storage space occupied by data?
### 20. How do I check the storage space occupied by data?
By default, TDengine's data files are stored in /var/lib/taos and its log files in /var/log/taos.
@ -193,3 +204,38 @@ go env -w GOPROXY=https://goproxy.cn,direct
To check the size occupied by a single database, select the database in the CLI program taos and execute `show vgroups;`; then, using the VGroup ids obtained, check the sizes of the corresponding folders under /var/lib/taos/vnode.
If you only want to see the data block distribution and size of a given (super) table, see the [_block_dist function](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0)
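A sketch of the inspection flow just described, assuming a hypothetical database `power`:

```sql
-- List the vnode groups of the database; each VGroup id maps to a folder
-- under /var/lib/taos/vnode whose size can then be checked on disk.
USE power;
SHOW VGROUPS;
```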
### 21. How can high availability of the client connection string be ensured?
Please see the [technical blog post](https://www.taosdata.com/blog/2021/04/16/2287.html) written for this issue.
### 22. How is the timezone information of timestamps handled?
In TDengine, the timezone of timestamps is always handled by the client and has nothing to do with the server. Specifically, the client converts timestamps in SQL statements to the UTC timezone (i.e. Unix timestamps) before handing them to the server for writing and querying; when reading data, the server also delivers the raw data in UTC, and the client then converts the timestamps to the timezone required by the local system for display, according to its local settings.
When handling timestamp strings, the client applies the following logic:
1. Unless configured otherwise, the client defaults to the timezone setting of its operating system.
2. If the timezone parameter is set in taos.cfg, the client uses the setting from this configuration file.
3. If a timezone is explicitly specified when establishing the database connection in the Connector Driver of C/C++/Java/Python or other languages, that timezone setting prevails. For example, the JDBC URL of the Java Connector has a timezone parameter.
4. When writing SQL statements, you can also use Unix timestamps directly (e.g. `1554984068000`) or timestamp strings with a timezone, i.e. RFC 3339 format (e.g. `2013-04-12T15:52:01.123+08:00`) or ISO-8601 format (e.g. `2013-04-12T15:52:01.123+0800`); the values of such timestamps are then no longer affected by any other timezone settings (see the sketch below).
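A minimal sketch of point 4, assuming a hypothetical table `d1001(ts TIMESTAMP, current FLOAT)`; all three literals denote the same instant no matter which timezone the client is configured for:

```sql
INSERT INTO d1001 VALUES (1554984068000, 10.3);                  -- Unix timestamp in ms
SELECT * FROM d1001 WHERE ts = '2019-04-11T12:01:08.000+00:00';  -- RFC 3339
SELECT * FROM d1001 WHERE ts = '2019-04-11T20:01:08.000+0800';   -- ISO-8601
```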
### 23. Which network ports does TDengine 2.0 use?
For the network ports used, see the documentation: [serverport](/reference/config/#serverport)
Note that the ports listed in the documentation assume the default port 6030; if the settings in the configuration file are changed, the listed ports change accordingly. Administrators can adjust firewall settings based on the information above.
### 24. Why is the RESTful interface unresponsive, why can't Grafana add TDengine as a data source, and why can't TDengineGUI connect even with port 6041 selected?
Since TDengine 2.4.0.0, taosAdapter is part of the TDengine server software, acting as the bridge and adapter between TDengine clusters and applications. Before that, the RESTful interface and related functionality were provided by the HTTP service built into taosd; now, to get this functionality you need to run ```systemctl start taosadapter``` to start the taosAdapter service.
Note that the taosAdapter log path `path` must be configured separately; the default is /var/log/taos. The log level `logLevel` has 8 levels; the default is info, and setting it to panic turns off log output. Pay attention to the space available on the operating system's / directory. The configuration can be changed via command line parameters, environment variables or the configuration file; the default configuration file is /etc/taos/taosadapter.toml.
For a detailed introduction to the taosAdapter component, see the documentation: [taosAdapter](https://docs.taosdata.com/reference/taosadapter/)
### 25. What should I do when OOM happens?
OOM is a protection mechanism of the operating system: when the operating system runs out of memory (including SWAP), it kills some processes to keep itself running stably. Insufficient memory usually has two causes: either the remaining memory is less than vm.min_free_kbytes, or the memory requested by a program is larger than the remaining memory. There is also the case where memory is sufficient but a program occupies a special memory address; this also triggers OOM.
TDengine pre-allocates memory for each VNode; the number of VNodes per Database is governed by maxVgroupsPerDb, and the memory occupied by each VNode is governed by Blocks and Cache. To prevent OOM, plan memory properly at the start of a project and configure SWAP sensibly. In addition, querying excessive amounts of data may also cause memory usage to surge, depending on the specific query. TDengine Enterprise Edition uses an optimized, new memory allocator; users with higher stability requirements may consider the Enterprise Edition.

View File

@ -1,6 +1,6 @@
---
sidebar_label: Connection
title: Connect to TDengine
sidebar_label: Connect
title: Connect
description: "This document explains how to establish connections to TDengine, and briefly introduces how to install and use TDengine connectors."
---

View File

@ -1,5 +1,5 @@
---
sidebar_label: SQL
sidebar_label: Insert Using SQL
title: Insert Using SQL
---
@ -52,7 +52,7 @@ For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
:::info
- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 16K bytes and each SQL statement can't exceed 1MB.
- Inserting in batches can improve performance. Normally, the higher the batch size, the better the performance. Please note that a single row can't exceed 48K bytes and each SQL statement can't exceed 1MB.
- Inserting with multiple threads can also improve performance. However, depending on the system resources on the application side and the server side, when the number of inserting threads grows beyond a specific point the performance may drop instead of improving. The proper number of threads needs to be tested in a specific environment to find the best number.
:::
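A minimal sketch of batch inserting, reusing the `d1001` table from the documentation's earlier example; two rows go in with a single statement:

```sql
-- One statement, multiple rows: stays well under the 48K-per-row and 1MB-per-statement limits.
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33);
```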

View File

@ -1,5 +1,5 @@
---
title: Insert
title: Insert Data
---
TDengine supports multiple protocols of inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row, or in batches. Data from one or more collection points can be inserted simultaneously. Data can be inserted with multiple threads, and out of order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol and OpenTSDB JSON protocol are the 3 kinds of schemaless insert protocols supported by TDengine. It's not necessary to create STables and tables in advance if using schemaless protocols, and the schemas can be adjusted automatically based on the data being inserted.

View File

@ -1 +1 @@
label: Select Data
label: Query Data

View File

@ -1,6 +1,6 @@
---
Sidebar_label: Select
title: Select
Sidebar_label: Query data
title: Query data
description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
---

View File

@ -1,5 +1,5 @@
---
sidebar_label: Subscription
sidebar_label: Data Subscription
description: "Lightweight service for data subscription and publishing. Time series data inserted into TDengine continuously can be pushed automatically to subscribing clients."
title: Data Subscription
---
@ -151,7 +151,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
taos_unsubscribe(tsub, keep);
```
The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription; the subscription will be restarted from the beginning if the corresponding progress file is removed.
The second parameter `keep` is used to specify whether to keep the subscription progress on the client side. If it is **false**, i.e. **0**, then the subscription will be restarted from the beginning regardless of the `restart` parameter's value when `taos_subscribe` is invoked again. The subscription progress information is stored in _{DataDir}/subscribe/_ , under which there is a file with the same name as `topic` for each subscription (Note: The default value of `DataDir` in the `taos.cfg` file is **/var/lib/taos/**. However, **/var/lib/taos/** does not exist on the Windows server, so you need to change the `DataDir` value to an existing directory); the subscription will be restarted from the beginning if the corresponding progress file is removed.
Now let's see the effect of the above sample code, assuming below prerequisites have been done.

View File

@ -1,6 +1,6 @@
---
sidebar_label: UDF
title: User Defined Functions
title: User Defined Functions (UDF)
description: "Scalar functions and aggregate functions developed by users can be utilized by the query framework to expand query capability"
---

View File

@ -14,7 +14,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_nam
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
3. The maximum length of each row is 16k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive. Only ASCII visible characters can be used with escape character.

View File

@ -21,7 +21,7 @@ SELECT select_expr [, select_expr ...]
## Wildcard
Wilcard \* can be used to specify all columns. The result includes only data columns for normal tables.
Wildcard \* can be used to specify all columns. The result includes only data columns for normal tables.
```
taos> SELECT * FROM d1001;
@ -51,14 +51,14 @@ taos> SELECT * FROM meters;
Query OK, 9 row(s) in set (0.002022s)
```
Wildcard can be used with table name as prefix, both below SQL statements have same effects and return all columns.
Wildcard can be used with table name as prefix. Both SQL statements below have the same effect and return all columns.
```SQL
SELECT * FROM d1001;
SELECT d1001.* FROM d1001;
```
In JOIN query, however, with or without table name prefix will return different results. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
In a JOIN query, however, the results are different with or without a table name prefix. \* without table prefix will return all the columns of both tables, but \* with table name as prefix will return only the columns of that table.
```
taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
@ -76,7 +76,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
Query OK, 1 row(s) in set (0.020443s)
```
Wilcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
Wildcard \* can be used with some functions, but the result may be different depending on the function being used. For example, `count(*)` returns only one column, i.e. the number of rows; `first`, `last` and `last_row` return all columns of the selected row.
```
taos> SELECT COUNT(*) FROM d1001;
@ -96,7 +96,7 @@ Query OK, 1 row(s) in set (0.000849s)
## Tags
Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note that, however, wildcard \* doesn't represent any tag column, that means tag columns must be specified explicitly like the example below.
Starting from version 2.0.14, tag columns can be selected together with data columns when querying sub tables. Please note, however, that wildcard \* cannot be used to represent any tag column. This means that tag columns must be specified explicitly like the example below.
```
taos> SELECT location, groupid, current FROM d1001 LIMIT 2;
@ -109,7 +109,7 @@ Query OK, 2 row(s) in set (0.003112s)
## Get distinct values
`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table, it can also be used to get all the unique values of data columns from a table or subtable.
`DISTINCT` keyword can be used to get all the unique values of tag columns from a super table. It can also be used to get all the unique values of data columns from a table or subtable.
```sql
SELECT DISTINCT tag_name [, tag_name ...] FROM stb_name;
@ -118,15 +118,15 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
:::info
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision nature of floating numbers.
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
:::
## Columns Names of Result Set
When using `SELECT`, the column names in the result set will be same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example
When using `SELECT`, the column names in the result set will be the same as that in the select clause if `AS` is not used. `AS` can be used to rename the column names in the result set. For example
```
taos> SELECT ts, ts AS primary_key_ts FROM d1001;
@ -161,7 +161,7 @@ SELECT * FROM d1001;
## Special Query
Some special query functionalities can be performed without `FORM` sub-clause. For example, below statement can be used to get the current database in use.
Some special query functions can be invoked without `FROM` sub-clause. For example, the statement below can be used to get the current database in use.
```
taos> SELECT DATABASE();
@ -181,7 +181,7 @@ taos> SELECT DATABASE();
Query OK, 1 row(s) in set (0.000184s)
```
Below statement can be used to get the version of client or server.
The statement below can be used to get the version of client or server.
```
taos> SELECT CLIENT_VERSION();
@ -197,7 +197,7 @@ taos> SELECT SERVER_VERSION();
Query OK, 1 row(s) in set (0.000077s)
```
Below statement is used to check the server status. One integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement.
The statement below is used to check the server status. An integer, like `1`, is returned if the server status is OK, otherwise an error code is returned. This is compatible with the status check for TDengine from connection pool or 3rd party tools, and can avoid the problem of losing the connection from a connection pool when using the wrong heartbeat checking SQL statement.
```
taos> SELECT SERVER_STATUS();
@ -284,7 +284,7 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2;
Query OK, 1 row(s) in set (0.001091s)
```
- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of number types, columns can be renamed in the result set.
- Wildcard \* can be used to get all columns, or specific column names can be specified. Arithmetic operation can be performed on columns of numerical types, columns can be renamed in the result set.
- Arithmetic operation on columns can't be used in where clause. For example, `where a*2>6;` is not allowed but `where a>6/2;` can be used instead for the same purpose.
- Arithmetic operation on columns can't be used as the objectives of select statement. For example, `select min(2*a) from t;` is not allowed but `select 2*min(a) from t;` can be used instead.
- Logical operation can be used in `WHERE` clause to filter numeric values, wildcard can be used to filter string values.
@ -318,13 +318,13 @@ Logical operations in below table can be used in the `where` clause to filter th
- Operator `like` is used together with wildcards to match strings
- '%' matches 0 or any number of characters, '\_' matches any single ASCII character.
- `\_` is used to match the \_ in the string.
- The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. Too long wildcard string may slowdown the execution performance of `LIKE` operator.
- The maximum length of wildcard string is 100 bytes from version 2.1.6.1 (before that the maximum length is 20 bytes). `maxWildCardsLength` in `taos.cfg` can be used to control this threshold. A very long wildcard string may slow down the execution performance of the `LIKE` operator.
- `AND` keyword can be used to filter multiple columns simultaneously. AND/OR operation can be performed on single or multiple columns from version 2.3.0.0. However, before 2.3.0.0 `OR` can't be used on multiple columns.
- For timestamp column, only one condition can be used; for other columns or tags, `OR` keyword can be used to combine multiple logical operators. For example, `((value > 20 AND value < 30) OR (value < 12))`.
- From version 2.3.0.0, multiple conditions can be used on timestamp column, but the result set can only contain single time range.
- From version 2.0.17.0, operator `BETWEEN AND` can be used in where clause, for example `WHERE col2 BETWEEN 1.5 AND 3.25` means the filter condition is equal to "1.5 ≤ col2 ≤ 3.25".
- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating precision, only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`, the regular expression is case insensitive.
- From version 2.1.4.0, operator `IN` can be used in the where clause. For example, `WHERE city IN ('California.SanFrancisco', 'California.SanDiego')`. For bool type, both `{true, false}` and `{0, 1}` are allowed, but integers other than 0 or 1 are not allowed. FLOAT and DOUBLE types are impacted by floating point precision errors. Only values that match the condition within the tolerance will be selected. Non-primary key column of timestamp type can be used with `IN`.
- From version 2.3.0.0, regular expression is supported in the where clause with keyword `match` or `nmatch`. The regular expression is case insensitive. A combined sketch of these filter operators follows this list.
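A combined sketch of the operators above, assuming the documentation's `meters` STable with `current`, `groupid` and `location` columns:

```sql
SELECT * FROM meters WHERE current BETWEEN 1.5 AND 3.25;        -- 1.5 <= current <= 3.25 (since 2.0.17.0)
SELECT * FROM meters WHERE groupid IN (1, 3) AND current > 10;  -- IN combined with AND (IN since 2.1.4.0)
SELECT * FROM meters WHERE location MATCH '^California';        -- regular expression (since 2.3.0.0)
```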
## Regular Expression
@ -364,7 +364,7 @@ FROM temp_STable t1, temp_STable t2
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
```
Similary, join operation can be performed on the result set of multiple sub queries.
Similarly, join operations can be performed on the result set of multiple sub queries.
:::note
Restrictions on join operation:
@ -380,7 +380,7 @@ Restrictions on join operation:
## Nested Query
Nested query is also called sub query, that means in a single SQL statement the result of inner query can be used as the data source of the outer query.
Nested query is also called sub query. This means that in a single SQL statement the result of inner query can be used as the data source of the outer query.
From 2.2.0.0, unassociated sub query can be used in the `FROM` clause. Unassociated means the sub query doesn't use the parameters in the parent query. More specifically, in the `tb_name_list` of `SELECT` statement, an independent SELECT statement can be used. So a complete nested query looks like:
@ -390,14 +390,14 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
- Only one layer of nesting is allowed, that means no sub query is allowed in a sub query
- The result set returned by the inner query will be used as a "virtual table" by the outer query, the "virtual table" can be renamed using `AS` keyword for easy reference in the outer query.
- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query
- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query.
- Sub query is not allowed in continuous query.
- JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query.
- UNION operation is not allowed in either inner query or outer query.
- The functionalities that can be used in the inner query is same as non-nested query.
- `ORDER BY` inside the inner query doesn't make any sense but will slow down the query performance significantly, so please avoid such usage.
- Compared to the non-nested query, the functionalities that can be used in the outer query have such restrictions as:
- The functions that can be used in the inner query are the same as those that can be used in a non-nested query.
- `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query.
- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions:
- Functions
- If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`.
- Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`.
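A minimal sketch of a nested query that respects the restrictions above, assuming the `meters` STable; the inner query builds one last value per child table, and the outer query aggregates them:

```sql
-- The inner result set acts as a "virtual table"; AS renames it for the outer query.
SELECT AVG(last_current) FROM
  (SELECT LAST(current) AS last_current FROM meters GROUP BY tbname) AS t;
```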
@ -442,8 +442,8 @@ The sum of col1 and col2 for rows later than 2018-06-01 08:00:00.000 and whose c
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutpu.csv` with below SQL statement:
The rows in the past 10 minutes and whose col2 is bigger than 3.14 are selected and output to the result file `/home/testoutput.csv` with below SQL statement:
```SQL
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutput.csv;
```

View File

@ -22,8 +22,8 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
**More explanation**:
- Wildcard (\*) can be used to represent all columns, it's used to get the number of all rows
- The number of non-NULL values will be returned if this function is used on a specific column
- Wildcard (\*) is used to represent all columns. The `COUNT` function is used to get the total number of all rows.
- The number of non-NULL values will be returned if this function is used on a specific column.
**Examples**:
@ -87,7 +87,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
**More explanations**:
- From version 2.1.3.0, function TWA can be used on stable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
- Since version 2.1.3.0, function TWA can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
### IRATE
@ -105,7 +105,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
**More explanations**:
- From version 2.1.3.0, function IRATE can be used on stble with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
- Since version 2.1.3.0, function IRATE can be used on a STable with `GROUP BY`, i.e. timelines generated by `GROUP BY tbname` on a STable.
### SUM
@ -149,7 +149,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
**Applicable table types**: table, STable (starting from version 2.0.15.1)
**Applicable table types**: table, STable (since version 2.0.15.1)
**Examples**:
@ -193,13 +193,13 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
**Description**: The value which has the highest frequency of occurrence. NULL is returned if there are multiple values with the highest frequency of occurrence. It can't be used on timestamp columns or tags.
**Return value type**:Same as the data type of the column being operated
**Return value type**:Same as the data type of the column being operated upon
**Applicable column types**:Data types except for timestamp
**More explanations**: Since the size of the returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000; otherwise an error will be returned.
**Applicable version**:From version 2.6.0.0
**Applicable version**:Since version 2.6.0.0
**Examples**:
@ -234,7 +234,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
**More explanations**: The benefit of using the hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.
**Applicable versions**:From version 2.6.0.0
**Applicable versions**:Since version 2.6.0.0
**Examples**:
@ -271,7 +271,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**Description**: The minimum value of a specific column in a table or STable
**Return value type**: Same as the data type of the column being operated
**Return value type**: Same as the data type of the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@ -301,7 +301,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The maximum value of a specific column of a table or STable
**Return value type**: Same as the data type of the column being operated
**Return value type**: Same as the data type of the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@ -331,7 +331,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The first non-null value of a specific column in a table or STable
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type
@ -341,7 +341,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
- FIRST(\*) can be used to get the first non-null value of all columns
- NULL will be returned if all the values of the specified column are all NULL
- No result will NOT be returned if all the columns in the result set are all NULL
- A result will NOT be returned if the columns in the result set are all NULL
**Examples**:
@ -367,7 +367,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The last non-NULL value of a specific column in a table or STable
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type
@ -403,7 +403,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column and counting all of them would exceed the upper limit _k_, then a subset of them is returned randomly.
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@ -442,7 +442,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column and counting all of them would exceed the upper limit _k_, then a subset of them is returned randomly.
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@ -549,7 +549,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
**Description**: The last row of a table or STable
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type
@ -576,7 +576,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
Query OK, 1 row(s) in set (0.001042s)
```
### INTERP [From version 2.3.1]
### INTERP [Since version 2.3.1]
```
SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
@ -584,7 +584,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Numeric data types
@ -593,7 +593,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
**More explanations**
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column, `where` can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified. If `RANGE` is not specified, then the timestamp of the first row that matches the filter condition is treated as timestamp1, the timestamp of the last row that matches the filter condition is treated as timestamp2.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter. If the `EVERY` parameter is not used, the time window is considered to have no ending timestamp, i.e. there is only one time window starting from timestamp1.
- Interpolation is performed based on `FILL` parameter. No interpolation is performed if `FILL` is not used, that means either the original data that matches is returned or nothing is returned.
@ -632,7 +632,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
taos> SELECT INTERP(current) FROM t1 where ts >= '2017-07-14 17:00:00' and ts <= '2017-07-14 20:00:00' RANGE('2017-7-14 18:00:00','2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR);
```
### INTERP [Prior to version 2.3.1]
### INTERP [Since version 2.0.15.0]
```
SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
@ -640,7 +640,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
**Description**: The value of a specific column that matches the specified time slice
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Numeric data type
@ -648,7 +648,6 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL
**More explanations**:
- It can be used from version 2.0.15.0
- Time slice must be specified. If there is no data matching the specified time slice, interpolation is performed based on the `FILL` parameter. Conditions such as tags or `tbname` in the `WHERE` clause can be used to filter data.
- The timestamp specified must be within the time range of the data rows of the table or STable. If it is beyond the valid time range, nothing is returned even with `FILL` parameter.
- `INTERP` can be used to query only single time point once. `INTERP` can be used with `EVERY` to get the interpolation value every time interval.
@ -696,11 +695,11 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
**Parameter value range**: k: [1,100] offset_val: [0,100]
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Any data type except for timestamp, i.e. the primary key
**Applicable versions**: From version 2.6.0.0
**Applicable versions**: Since version 2.6.0.0
**Examples**:
@ -732,11 +731,11 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
**Description**: The values that occur for the first time in the specified column. The effect is similar to the `distinct` keyword, but it can also be used to match tags or timestamps.
**Return value type**: Same as the column or tag being operated
**Return value type**: Same as the column or tag being operated upon
**Applicable column types**: Any data types except for timestamp
**Applicable versions**: From version 2.6.0.0
**Applicable versions**: Since version 2.6.0.0
**More explanations**:
@ -780,7 +779,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**Description**: The difference of each row from its previous row for a specific column. `ignore_negative` can be specified as 0 or 1; the default value is 1 if it's not specified. `1` means negative values are ignored.
**Return value type**: Same as the column being operated
**Return value type**: Same as the column being operated upon
**Applicable column types**: Data types except for timestamp, binary, nchar and bool
@ -789,8 +788,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**More explanations**:
- The number of result rows is the number of input rows minus one; there is no output for the first row
- From version 2.1.30, `DIFF` can be used on STable with `GROUP by tbname`
- From version 2.6.0, `ignore_negative` parameter is supported
- Since version 2.1.3.0, `DIFF` can be used on STable with `GROUP by tbname`
- Since version 2.6.0, `ignore_negative` parameter is supported
**Examples**:
@ -874,7 +873,7 @@ Query OK, 1 row(s) in set (0.000836s)
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
**Description**: The round up value of a specific column
**Description**: The rounded up value of a specific column
**Return value type**: Same as the column being used
@ -896,9 +895,9 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
**Description**: The round down value of a specific column
**Description**: The rounded down value of a specific column
**More explanations**: The restrictions are same as `CEIL` function.
**More explanations**: The restrictions are same as those of the `CEIL` function.
### ROUND
@ -906,7 +905,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
**Description**: The round value of a specific column.
**Description**: The rounded value of a specific column.
**More explanations**: The restrictions are same as `CEIL` function.
@ -933,7 +932,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- Can only be used with aggregate functions
- `Group by tbname` must be used together on a STable to force the result on a single timeline
**Applicable versions**: From 2.3.0.x
**Applicable versions**: Since 2.3.0.x
### MAVG
@ -958,7 +957,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- Can't be used with aggregate functions.
- Must be used with `GROUP BY tbname` when it's used on a STable to force the result on each single timeline.
**Applicable versions**: From 2.3.0.x
**Applicable versions**: Since 2.3.0.x
### SAMPLE
@ -981,7 +980,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
- Arithmetic operations can't be performed on the result of the `SAMPLE` function
- Must be used with `Group by tbname` when it's used on a STable to force the result on each single timeline
**Applicable versions**: From 2.3.0.x
**Applicable versions**: Since 2.3.0.x
### ASIN
@ -1460,8 +1459,8 @@ SELECT field_name [+|-|*|/|%][Value|field_name] FROM { tb_name | stb_name } [WH
**More explanations**:
- Arithmetic operations can be performed on two or more columns, `()` can be used to control the precedence
- NULL doesn't participate the operation, if one of the operands is NULL then result is NULL
- Arithmetic operations can be performed on two or more columns. Parentheses `()` can be used to control the order of precedence.
- NULL doesn't participate in the operation, i.e. if one of the operands is NULL then the result is NULL.
**Examples**:
@ -1586,7 +1585,7 @@ Query OK, 6 row(s) in set (0.002613s)
## Time Functions
From version 2.6.0.0, below time related functions can be used in TDengine.
Since version 2.6.0.0, the time related functions below can be used in TDengine.
### NOW
@ -1840,6 +1839,8 @@ SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2
1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day).
- The precision of the returned timestamp is same as the precision set for the current database in use
**Applicable versions**:Since version 2.6.0.0
**Examples**:
```sql

View File

@ -3,36 +3,36 @@ sidebar_label: Interval
title: Aggregate by Time Window
---
Aggregate by time window is supported in TDengine. For example, each temperature sensor reports the temperature every second, the average temperature every 10 minutes can be retrieved by query with time window.
Window related clauses are used to divide the data set to be queried into subsets and then aggregate. There are three kinds of windows, time window, status window, and session window. There are two kinds of time windows, sliding window and flip time window.
Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window.
Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window.
## Time Window
`INTERVAL` clause is used to generate time windows of the same time interval, `SLIDING` is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining continuous query both the size of time window and the step of forward sliding time need to be specified. As shown in the figure blow, [t0s, t0e] [t1s , t1e] [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time window.
The `INTERVAL` clause is used to generate time windows of the same time interval. The `SLIDING` parameter is used to specify the time step for which the time window moves forward. The query is performed on one time window each time, and the time window moves forward with time. When defining a continuous query, both the size of the time window and the step of forward sliding time need to be specified. As shown in the figure below, [t0s, t0e] [t1s , t1e] [t2s, t2e] are respectively the time ranges of three time windows on which continuous queries are executed. The time step for which the time window moves forward is marked by `sliding time`. Query, filter and aggregate operations are executed on each time window respectively. When the time step specified by `SLIDING` is the same as the time interval specified by `INTERVAL`, the sliding time window is actually a flip time/tumbling window.
![TDengine Database Time Window](./timewindow-1.webp)
`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. Below SQL statement is illegal because no aggregate or selection function is used with `INTERVAL`.
`INTERVAL` and `SLIDING` should be used with aggregate functions and select functions. The SQL statement below is illegal because no aggregate or selection function is used with `INTERVAL`.
```
SELECT * FROM temp_tb_1 INTERVAL(1m);
```
The time step specified by `SLIDING` can't exceed the time interval specified by `INTERVAL`. Below SQL statement is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
The time step specified by `SLIDING` cannot exceed the time interval specified by `INTERVAL`. The SQL statement below is illegal because the time length specified by `SLIDING` exceeds that specified by `INTERVAL`.
```
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
```
When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. From version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
When the time length specified by `SLIDING` is the same as that specified by `INTERVAL`, the sliding window is actually a flip/tumbling window. The minimum time range specified by `INTERVAL` is 10 milliseconds (10a) prior to version 2.1.5.0. Since version 2.1.5.0, the minimum time range by `INTERVAL` can be 1 microsecond (1u). However, if the DB precision is millisecond, the minimum time range is 1 millisecond (1a). Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
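For contrast with the two illegal statements above, a legal sketch using the same hypothetical table: an aggregate function is present, and the `SLIDING` step does not exceed the `INTERVAL` length:

```sql
-- A 1-minute window sliding forward 30 seconds at a time.
SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(30s);
```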
## Status Window
In case of using integer, bool, or string to represent the device status at a moment, the continuous rows with same status belong to same status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:072019-04-28 14:22:10] and [2019-04-28 14:22:112019-04-28 14:22:12]. Status window is not applicable to STable for now.
In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:072019-04-28 14:22:10] and [2019-04-28 14:22:112019-04-28 14:22:12]. Status window is not applicable to STable for now.
![TDengine Database Status Window](./timewindow-3.webp)
`STATE_WINDOW` is used to specify the column based on which to define status window, for example
`STATE_WINDOW` is used to specify the column on which the status window will be based. For example
```
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
@ -44,7 +44,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
```
The primary key, i.e. timestamp, is used to determine which session window the row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different time windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:102019-04-28 14:22:30] and [2019-04-28 14:23:102019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. If the time interval between two adjacent rows is within the time range specified by `tol_val`, they belong to the same session window; otherwise they belong to two different session windows. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitute 2 time windows, [2019-04-28 14:22:10，2019-04-28 14:22:30] and [2019-04-28 14:23:10，2019-04-28 14:23:30], because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.
![TDengine Database Session Window](./timewindow-2.webp)
@ -73,7 +73,7 @@ SELECT function_list FROM stb_name
### Restrictions
- Aggregate functions and select functions can be used in `function_list`, with each function having only one output, for example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple output can't be used, for example DIFF or arithmetic operations.
- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations, can't be used.
- `LAST_ROW` can't be used together with window aggregate.
- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
- `WHERE` clause can be used to specify the starting and ending time and other filter conditions
@ -87,8 +87,8 @@ SELECT function_list FROM stb_name
:::info
1. Huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum interpolation values that can be returned in single query is 10,000,000.
2. The result set is in ascending order of timestamp in aggregate by time window aggregate.
1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
2. The result set is in ascending order of timestamp when you aggregate by time window.
3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.
:::
@ -97,13 +97,13 @@ Aggregate by time window is also used in continuous query, please refer to [Cont
## Examples
The table of intelligent meters can be created by the SQL statement below
A table of intelligent meters can be created by the SQL statement below
```sql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the below SQL statement, with missing values filled with the previous non-NULL values.
The average current, maximum current and median of current in every 10 minutes for the past 24 hours can be calculated using the SQL statement below, with missing values filled with the previous non-NULL values.
```
SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters

View File

@ -4,8 +4,8 @@ title: Limits & Restrictions
## Naming Rules
1. Only English characters, digits and underscore are allowed
2. Can't start with a digit
1. Only characters from the English alphabet, digits and underscore are allowed
2. Names cannot start with a digit
3. Case insensitive without escape character "\`"
4. Identifier with escape character "\`"
To support more flexible table or column names, a new escape character "\`" is introduced. For more details please refer to [escape](/taos-sql/escape).
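As a brief illustration, the hypothetical identifiers below would be rejected without the escape character, because one starts with a digit and the other relies on preserved case:

```sql
-- With backquotes, the identifiers bypass the naming rules above
-- and keep their original case.
CREATE TABLE `2022_sensors` (ts TIMESTAMP, `Value` INT);
```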
@ -16,38 +16,38 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_+={[}]:;@~#|<,>.?/]`.
## General Limits
- Maximum length of database name is 32 bytes
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator
- Maximum length of each data row is 48K bytes from version 2.1.7.0 , before which the limit is 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- Maximum of column name is 64.
- Maximum length of database name is 32 bytes.
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
- Maximum length of each data row is 48K bytes since version 2.1.7.0, before which the limit was 16K bytes. Please note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- Maximum length of column name is 64.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- Maximum length of tag name is 64.
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values should not exceed 16K bytes.
- Maximum length of singe SQL statement is 1048576, i.e. 1 MB bytes. It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`, functions in the query statement may constitute columns. Error will be returned if the limit is exceeded.
- Maximum numbers of databases, STables, tables are only depending on the system resources.
- Maximum length of a single SQL statement is 1048576, i.e. 1 MB. It can be configured in the parameter `maxSQLLength` on the client side, the applicable range is [65480, 1048576].
- At most 4096 columns (or 1024 prior to 2.1.7.0) can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- Maximum length of database name is 32 bytes, and it can't include "." or special characters.
- Maximum replica number of database is 3
- Maximum length of user name is 23 bytes
- Maximum length of password is 15 bytes
- Maximum number of rows depends on the storage space only.
- Maximum number of tables depends on the number of nodes only.
- Maximum number of databases depends on the number of nodes only.
- Maximum number of vnodes for single database is 64.
- Maximum number of replicas for a database is 3.
- Maximum length of user name is 23 bytes.
- Maximum length of password is 15 bytes.
- Maximum number of rows depends only on the storage space.
- Maximum number of tables depends only on the number of nodes.
- Maximum number of databases depends only on the number of nodes.
- Maximum number of vnodes for a single database is 64.
## Restrictions of `GROUP BY`
`GROUP BY` can be performed on tags and `TBNAME`. It can be performed on data columns too, with one restriction that only one column and the number of unique values on that column is lower than 100,000. Please note that `GROUP BY` can't be performed on float or double types.
`GROUP BY` can be performed on tags and `TBNAME`. It can also be performed on data columns, with the restriction that it can be performed on only one data column and that the number of unique values in that column must be lower than 100,000. Please note that `GROUP BY` cannot be performed on float or double types.
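For example, assuming the `meters` STable used elsewhere in this documentation (with a `location` tag and numeric data columns), legal usage might look like:

```sql
-- Grouping by a tag column and by table name is always allowed.
SELECT COUNT(*), MAX(current) FROM meters GROUP BY location;
SELECT AVG(voltage) FROM meters GROUP BY TBNAME;
```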
## Restrictions of `IS NOT NULL`
`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<\>""` can only be used on non-numeric data types.
`IS NOT NULL` can be used on any data type of columns. The non-empty string evaluation expression, i.e. `<>""`, can only be used on non-numeric data types.
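A short sketch, again assuming the `meters` STable with a numeric `current` column and a BINARY `location` tag:

```sql
-- IS NOT NULL is valid on any column type.
SELECT * FROM meters WHERE current IS NOT NULL;
-- The non-empty string test is only valid on non-numeric types.
SELECT * FROM meters WHERE location <> "";
```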
## Restrictions of `ORDER BY`
- Only one `order by` is allowed for normal table and subtable.
- At most two `order by` are allowed for STable, and the second one must be `ts`.
- `order by tag` must be used with `group by tag` on same tag, this rule is also applicable to `tbname`.
- `order by tag` must be used with `group by tag` on the same tag (see the example after this list). This rule is also applicable to `tbname`.
- `order by column` must be used with `group by column` or `top/bottom` on same column. This rule is applicable to table and STable.
- `order by ts` is applicable to table and STable.
- If `order by ts` is used with `group by`, the result set is sorted using `ts` in each group.
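A sketch of the tag rule above, assuming the `meters` STable with a `location` tag:

```sql
-- order by a tag is paired with group by on the same tag.
SELECT MAX(current) FROM meters GROUP BY location ORDER BY location;
```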
@ -56,7 +56,7 @@ The legal character set is `[a-zA-Z0-9!?$%^&*()_+={[}]:;@~#|<,>.?/]`.
### Name Restrictions of Table/Column
The name of a table or column can only be composed of ASCII characters, digits and underscore, while it can't start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
The name of a table or column can only be composed of ASCII characters, digits and underscore and it cannot start with a digit. The maximum length is 192 bytes. Names are case insensitive. The name mentioned in this rule doesn't include the database name prefix and the separator.
### Name Restrictions After Escaping

View File

@ -4,7 +4,7 @@ title: JSON Type
## Syntax
1. Tag of JSON type
1. Tag of type JSON
```sql
create STable s1 (ts timestamp, v1 int) tags (info json);
@ -12,7 +12,7 @@ title: JSON Type
create table s1_1 using s1 tags ('{"k1": "v1"}');
```
2. -> Operator of JSON
2. "->" Operator of JSON
```sql
select * from s1 where info->'k1' = 'v1';
@ -20,7 +20,7 @@ title: JSON Type
select info->'k1' from s1;
```
3. contains Operator of JSON
3. "contains" Operator of JSON
```sql
select * from s1 where info contains 'k2';
@ -30,7 +30,7 @@ title: JSON Type
## Applicable Operations
1. When JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is no null` can be used but `in` can't be used.
1. When a JSON data type is used in `where`, `match/nmatch/between and/like/and/or/is null/is not null` can be used but `in` can't be used.
```sql
select * from s1 where info->'k1' match 'v*';
@ -42,9 +42,9 @@ title: JSON Type
select * from s1 where info->'k1' is not null;
```
2. Tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query, for example `group by json->'key'`
2. A tag of JSON type can be used in `group by`, `order by`, `join`, `union all` and sub query; for example `group by json->'key'` (see the example below)
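For instance, reusing the `s1` STable defined above:

```sql
select count(*) from s1 group by info->'k1';
```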
3. `Distinct` can be used with tag of JSON type
3. `Distinct` can be used with a tag of type JSON
```sql
select distinct info->'k1' from s1;
@ -52,9 +52,9 @@ title: JSON Type
4. Tag Operations
The value of JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this.
The name of JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
The name of a JSON tag can be altered. A tag of JSON type can't be added or removed. The column length of a JSON tag can't be changed.
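A minimal sketch of both operations, reusing the `s1`/`s1_1` example above; the new value and the new tag name are hypothetical:

```sql
-- Override the full JSON value of the tag on a subtable.
alter table s1_1 set tag info = '{"k1": "v2"}';
-- Rename the JSON tag on the STable.
alter stable s1 change tag info info2;
```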
## Other Restrictions
@ -64,17 +64,17 @@ title: JSON Type
- JSON format
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but can't be non-NULL string, bool or array.
- object can be {}, and the whole JSON is empty if so. Key can be "", and it's ignored if so.
- value can be int, double, string, boll or NULL, can't be array. Nesting is not allowed, that means value can't be another JSON.
- The input string for JSON can be empty, i.e. "", "\t", or NULL, but it can't be non-NULL string, bool or array.
- object can be {}, and the entire JSON is empty if so. Key can be "", and it's ignored if so.
- value can be int, double, string, bool or NULL, and it can't be an array. Nesting is not allowed which means that the value of a key can't be JSON.
- If one key occurs twice in JSON, only the first one is valid.
- Escape characters are not allowed in JSON.
- NULL is returned if querying a key that doesn't exist in JSON.
- NULL is returned when querying a key that doesn't exist in JSON.
- If a tag of JSON is the result of inner query, it can't be parsed and queried in the outer query.
For example, the below SQL statements are not supported.
For example, the SQL statements below are not supported.
```sql
select jtag->'key' from (select jtag from STable);

View File

@ -7,8 +7,6 @@ This section explains the syntax of SQL to perform operations on databases, tabl
TDengine SQL is the major interface for users to write data into or query from TDengine. For ease of use, the syntax is similar to that of standard SQL. However, please note that TDengine SQL is not standard SQL. For instance, TDengine doesn't provide a delete function for time series data and so corresponding statements are not provided in TDengine SQL.
TDengine SQL doesn't support abbreviation for keywords. For example "SELECT" cannot be abbreviated as "SEL".
Syntax Specifications used in this chapter:
- The content inside <\> needs to be input by the user, excluding <\> itself.

View File

@ -6,7 +6,7 @@ description: Install, Uninstall, Start, Stop and Upgrade
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
TDengine community version provides dev and rpm packages for users to choose based on the system environment. deb supports Debian, Ubuntu and systems derived from them. rpm supports CentOS, RHEL, SUSE and systems derived from them. Furthermore, tar.gz package is provided for enterprise customers.
TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
## Install
@ -124,7 +124,7 @@ taoskeeper is installed, enable it by `systemctl enable taoskeeper`
```
:::info
Some configuration will be prompted for users to provide when install.sh is executing, the interactive mode can be disabled by executing `./install.sh -e no`. `./install -h` can show all parameters and detailed explanation.
Users will be prompted to enter some configuration information when install.sh is executing. The interactive mode can be disabled by executing `./install.sh -e no`. `./install.sh -h` can show all parameters with detailed explanation.
:::
@ -132,7 +132,7 @@ Some configuration will be prompted for users to provide when install.sh is exec
</Tabs>
:::note
When installing on the first node in the cluster, when "Enter FQDN:" is prompted, nothing needs to be provided. When installing on following nodes, when "Enter FQDN:" is prompted, the end point of the first dnode in the cluster can be input if it is already up; or just ignore it and configure later after installation is done.
When installing on the first node in the cluster, at the "Enter FQDN:" prompt, nothing needs to be provided. When installing on subsequent nodes, at the "Enter FQDN:" prompt, you must enter the end point of the first dnode in the cluster if it is already up. You can also just ignore it and configure it later after installation is finished.
:::
@ -181,14 +181,14 @@ taosKeeper is removed successfully!
:::note
- It's strongly suggested not to use multiple kinds of installation packages on a single host TDengine
- After deb package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
- We strongly recommend not to use multiple kinds of TDengine installation packages on a single host.
- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```
- After rpm package is installed, if the installation directory is removed manually so that uninstall or reinstall can't succeed, it can be resolved by cleaning up TDengine package information as in the command below and then reinstalling.
- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
```bash
$ sudo rpm -e --noscripts tdengine
@ -219,7 +219,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
During the installation process:
- Configuration directory, data directory, and log directory are created automatically if they don't exist
- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg if not existing
- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
- The executables at /usr/local/taos/bin are linked to /usr/bin
@ -228,7 +228,7 @@ During the installation process:
:::note
- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution because data can't be recovered
- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
## Start and Stop
@ -263,18 +263,19 @@ Active: inactive (dead)
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
Upgrading package should follow the steps mentioned previously to first uninstall the old version then install the new version.
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 section match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
- Stop inserting data
- Make sure all data are persisted into disk
- Make sure all data is persisted to disk
- Make some simple queries, such as total rows in STables and tables (an example is shown after this list). Note down the values. Follow best practices and relevant SOPs.
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
- Make some simple queries to make sure no data loss
- Make some simple data insertion to make sure the cluster works well
- Restore business data
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
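A sketch of the kind of verification query mentioned in the list above, assuming a hypothetical STable `meters`; run it before the upgrade, note the value, and compare after the upgrade:

```sql
SELECT COUNT(*) FROM meters;
```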
:::warning

View File

@ -2,17 +2,17 @@
title: Resource Planning
---
The computing and storage resources need to be planned if using TDengine to build an IoT platform. How to plan the CPU, memory and disk required will be described in this chapter.
It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required will be described in this chapter.
## Memory Requirement of Server Side
The number of vgroups created for each database is the same as the number of CPU cores by default and can be configured by parameter `maxVgroupsPerDb`, each vnode in a vgroup stores one replica. Each vnode consumes a fixed size of memory, i.e. `blocks` \* `cache`. Besides, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
```
Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```
For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
@ -22,10 +22,10 @@ In the real operation of TDengine, we are more concerned about the memory used b
In the above formula:
1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula mentioned previously then dividing by number of dnodes and multiplying the number of replicas.
1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas.
```
vnode_memory = sum(Database memory) / number_of_dnodes * replica
vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
```
2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
@ -56,8 +56,8 @@ So, at least 3GB needs to be reserved for such a client.
The CPU resources required depend on two aspects:
- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The computing resource consumed between inserting 1 row one time and inserting 10 rows one time is very small. So, the more the rows to insert one time, the higher the efficiency. Inserting in bach also exposes requirements for the client side which needs to cache rows and insert in batch once the cached rows reaches a threshold.
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, etc provided by user.
- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resources consumed between inserting 1 row at a time and inserting 10 rows at a time is very small. So, the more rows that can be inserted at one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side, which needs to cache rows and insert them in batch once the number of cached rows reaches a threshold.
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
@ -71,12 +71,12 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
For example, there are 10,000,000 meters, while each meter collects data every 15 minutes and the data size of each collection is 128 bytes, so the raw data size of one year is: 10000000 \* 128 \* 24 \* 60 / 15 \* 365 = 44.8512(TB). Assuming compression ratio is 5, the actual disk size is: 44.851 / 5 = 8.97024(TB).
Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device, and this is transparent to application programs.
Parameter `keep` can be used to set how long the data will be kept on disk. To further reduce storage cost, multiple storage levels can be enabled in TDengine, with the coldest data stored on the cheapest storage device. This is completely transparent to application programs.
To increase the performance, multiple disks can be setup for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
To increase performance, multiple disks can be set up for parallel data reading or data inserting. Please note that an expensive disk array is not necessary because replications are used in TDengine to provide high availability.
## Number of Hosts
A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulas mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).

View File

@ -7,26 +7,26 @@ title: Fault Tolerance & Disaster Recovery
TDengine uses **WAL**, i.e. Write Ahead Log, to achieve fault tolerance and high reliability.
When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally due to any reason and then restarted.
When a data block is received by TDengine, the original data block is first written into WAL. The log in WAL will be deleted only after the data has been written into data files in the database. Data can be recovered from WAL in case the server is stopped abnormally for any reason and then restarted.
There are 2 configuration parameters related to WAL:
- walLevel
- 0wal is disabled;
- 1wal is enabled without fsync;
- 2wal is enabled with fsync.
- fsynconly valid when walLevel is set to 2, it specifies the interval of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
- 0wal is disabled
- 1wal is enabled without fsync
- 2wal is enabled with fsync
- fsyncThis parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
To achieve absolutely no data loss, walLevel needs to be set to 2 and fsync needs to be set to 1. The penalty is the performance of data ingestion downgrades. However, if the concurrent threads of data insertion on the client side can reach a big enough number, for example 50, the data ingestion performance would be still good enough, our verification shows that the drop is only 30% compared to fsync is set to 3,000 milliseconds.
To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will still be good enough. Our verification shows that the drop is only 30% compared to when fsync is set to 3,000 milliseconds.
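As a sketch only, assuming your version supports the per-database `WAL` and `FSYNC` options (otherwise set `walLevel` and `fsync` in `taos.cfg`), the no-data-loss setting described above could be applied when creating a database:

```sql
-- Hypothetical database name; mirrors the walLevel=2, fsync=1 setting above.
CREATE DATABASE power_wal WAL 2 FSYNC 1;
```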
## Disaster Recovery
TDengine uses replications to provide high availability and disaster recovery capability.
TDengine uses replication to provide high availability and disaster recovery capability.
TDengine cluster is managed by mnode. To make sure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee the metadata consistency.
A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
The number of replicas for the time series data in TDengine is associated with each database, there can be a lot of databases in a cluster while each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
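For example, a sketch of creating a database with three replicas (the database name is hypothetical):

```sql
CREATE DATABASE power REPLICA 3;
```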
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is set to bigger than 1, high availability can be achieved without any other assistance. If dnodes of TDengine cluster are deployed in geographically different data centers, disaster recovery can be achieved too.
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.

View File

@ -2,11 +2,13 @@
title: Data Export
---
There are two ways of exporting data from a TDengine cluster, one is SQL statement in TDengine CLI, the other one is `taosdump`.
There are two ways of exporting data from a TDengine cluster:
- Using a SQL statement in TDengine CLI
- Using the `taosdump` tool
## Export Using SQL
If you want to export the data of a table or a STable, please execute below SQL statement in TDengine CLI.
If you want to export the data of a table or a STable, please execute the SQL statement below in the TDengine CLI.
```sql
select * from <tb_name> >> data.csv;
@ -16,4 +18,4 @@ The data of table or STable specified by `tb_name` will be exported into a file
## Export Using taosdump
With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable, you can also choose export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).
With `taosdump`, you can choose to export the data of all databases, a database, a table or a STable. You can also choose to export the data within a time range, or even only export the schema definition of a table. For the details of using `taosdump` please refer to [Tool for exporting and importing data: taosdump](/reference/taosdump).

View File

@ -3,7 +3,7 @@ sidebar_label: Connections & Tasks
title: Manage Connections and Query Tasks
---
A system operator can use TDengine CLI to show the connections, ongoing queries, stream computing, and can close connection or stop ongoing query task or stream computing.
A system operator can use the TDengine CLI to show connections, ongoing queries, stream computing, and can close connections or stop ongoing query tasks or stream computing.
## Show Connections
@ -13,7 +13,7 @@ SHOW CONNECTIONS;
One column of the output of the above SQL command is "ip:port", which is the end point of the client.
## Close Connections Forcedly
## Force Close Connections
```sql
KILL CONNECTION <connection-id>;
@ -27,9 +27,9 @@ In the above SQL command, `connection-id` is from the first column of the output
SHOW QUERIES;
```
The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection, in format of "connection-id:query-no".
The first column of the output is query ID, which is composed of the corresponding connection ID and the sequence number of the current query task started on this connection. The format is "connection-id:query-no".
## Close Queries Forcedly
## Force Close Queries
```sql
KILL QUERY <query-id>;
@ -43,9 +43,9 @@ In the above SQL command, `query-id` is from the first column of the output of `
SHOW STREAMS;
```
The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection, in the format of "connection-id:stream-no".
The first column of the output is stream ID, which is composed of the connection ID and the sequence number of the current stream started on this connection. The format is "connection-id:stream-no".
## Close Continuous Query Forcedly
## Force Close Continuous Query
```sql
KILL STREAM <stream-id>;

View File

@ -2,13 +2,13 @@
title: TDengine Monitoring
---
After TDengine is started, a database named `log` for monitoring is created automatically. The information about CPU, memory, disk, bandwidth, number of requests, disk I/O speed, slow query is written into `log` database on the basis of a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
After TDengine is started, a database named `log` is created automatically to help with monitoring. Information including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in the `log` database from TDengine CLI or from a web console.
The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file.
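For example, a quick way to see what is being collected, from the TDengine CLI (the table names inside `log` vary by version, so listing them first is the safe approach):

```sql
USE log;
SHOW TABLES;
```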
## TDinsight
TDinsight is a complete solution which uses the monitor database `log` mentioned previously and Grafana to monitor a TDengine cluster.
TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.

View File

@ -4,13 +4,13 @@ title: Problem Diagnostics
## Network Connection Diagnostics
When the client is unable to access the server, the network connection between the client side and the server side needs to be checked to find out the root cause and resolve problems.
When a TDengine client is unable to access a TDengine server, the network connection between the client side and the server side must be checked to find the root cause and resolve problems.
The diagnostic for network connection can be executed between Linux and Linux or between Linux and Windows.
Diagnostics for network connections can be executed between Linux and Linux or between Linux and Windows.
Diagnostic steps
1. If the port range to be diagnosed are being occupied by a `taosd` server process, please first stop `taosd.
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
2. On the server side, execute command `taos -n server -P <port> -l <pktlen>` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h <fqdn of server> -P <port> -l <pktlen>` to send a testing package to the specified server and port.
@ -65,13 +65,13 @@ Output of the client side for the example is below:
12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
```
The output needs to be checked carefully for the system operator to find out the root cause and solve the problem.
The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
## Startup Status and RPC Diagnostic
`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a comman task for a system operator to do to determine whether `taosd` has been started successfully, especially in case of cluster.
`taos -n startup -h <fqdn of server>` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully.
`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or `taosd` is abnormal.
`taos -n rpc -h <fqdn of server>` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal.
## Sync and Arbitrator Diagnostic
@ -80,13 +80,13 @@ taos -n sync -P 6040 -h <fqdn of server>
taos -n sync -P 6042 -h <fqdn of server>
```
The above commands can be executed on Linux Shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
## Network Speed Diagnostic
`taos -n speed -h <fqdn of server> -P 6030 -N 10 -l 10000000 -S TCP`
From version 2.2.0.0, the above command can be executed on Linux Shell to test the network speed, it sends uncompressed package to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below:
From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. The parameters that can be used when testing network speed are as below:
-nWhen set to "speed", it means testing network speed.
-hThe FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used.
@ -99,23 +99,23 @@ From version 2.2.0.0, the above command can be executed on Linux Shell to test t
`taos -n fqdn -h <fqdn of server>`
From version 2.2.0.0, the above command can be executed on Linux Shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
-nWhen set to "fqdn", it means testing the speed of resolving FQDN.
-hThe FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
## Server Log
The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131, for debug purpose it needs to be escalated to 135 or 143.
The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily, so on server side important information is stored at different place from other logs.
Once this parameter is set to 135 or 143, the log file grows very quickly, especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may easily be missed, so on the server side important information is stored in a different place from other logs.
- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information
- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog`
## Client Log
An independent log file, named as "taoslog+<seq num\>" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded, for debugging purposes it needs to be changed to 135 or 143 so that logs at DEBUG or TRACE level can be recorded.
An independent log file, named as "taoslog+<seq num\>" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process.

View File

@ -2,7 +2,7 @@
title: Administration
---
This chapter is mainly written for system administrators, covering download, install/uninstall, data import/export, system monitoring, user management, connection management, etc. Capacity planning and system optimization are also covered.
This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.
```mdx-code-block
import DocCardList from '@theme/DocCardList';

View File

@ -2,10 +2,10 @@
title: REST API
---
To support the development of various types of platforms, TDengine provides an API that conforms to the REST principle, namely REST API. To minimize the learning cost, different from the other database REST APIs, TDengine directly requests the SQL command contained in the request BODY through HTTP POST to operate the database and only requires a URL.
To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles; namely REST API. To minimize the learning cost, unlike REST APIs for other database engines, TDengine allows insertion of SQL commands in the BODY of an HTTP POST request, to operate the database.
:::note
One difference from the native connector is that the REST interface is stateless, so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name prefix. (Since version 2.2.0.0, it is supported to specify db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default. And it requires that the `db_name` must be specified in the URL.)
One difference from the native connector is that the REST interface is stateless and so the `USE db_name` command has no effect. All references to table names and super table names need to specify the database name in the prefix. (Since version 2.2.0.0, TDengine supports specification of the db_name in RESTful URL. If the database name prefix is not specified in the SQL command, the `db_name` specified in the URL will be used. Since version 2.4.0.0, REST service is provided by taosAdapter by default and it requires that the `db_name` must be specified in the URL.)
:::
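For example, because `USE db_name` has no effect over REST, a statement placed in the POST body should qualify every table name with its database; the names below are hypothetical:

```sql
SELECT * FROM test.meters LIMIT 10;
```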
## Installation
@ -16,9 +16,9 @@ The REST interface does not rely on any TDengine native library, so the client a
If the TDengine server is already installed, it can be verified as follows:
The following is an Ubuntu environment using the `curl` tool (to confirm that it is installed) to verify that the REST interface is working.
The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
The following example lists all databases, replacing `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```html
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
@ -89,7 +89,7 @@ For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:60
TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
- The custom authentication information is as follows (Let's introduce token later)
- The custom authentication information is as follows. More details about "token" later.
```
Authorization: Taosd <TOKEN>
@ -136,7 +136,7 @@ The return result is in JSON format, as follows:
Description:
- status: tell if the operation result is success or failure.
- status: tells you whether the operation result is success or failure.
- head: the definition of the table, or just one column "affected_rows" if no result set is returned. (As of version 2.0.17.0, it is recommended not to rely on the head return value to determine the data column type but rather use column_meta. In later versions, the head item may be removed from the return value.)
- column_meta: this item is added to the return value to indicate the data type of each column in the data with version 2.0.17.0 and later versions. Each column is described by three values: column name, column type, and type length. For example, `["current",6,4]` means that the column name is "current", the column type is 6, which is the float type, and the type length is 4, which is the float type with 4 bytes. If the column type is binary or nchar, the type length indicates the maximum length of content stored in the column, not the length of the specific data in this return value. When the column type is nchar, the type length indicates the number of Unicode characters that can be saved, not bytes.
- data: The exact data returned, presented row by row, or just [[affected_rows]] if no result set is returned. The order of the data columns in each row of data is the same as that of the data columns described in column_meta.

View File

@ -179,9 +179,9 @@ namespace TDengineExample
1. "Unable to establish connection", "Unable to resolve FQDN"
Usually, it cause by the FQDN configuration is incorrect, you can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to solve it. 2.
Usually, this is caused by an incorrect FQDN configuration. You can refer to [How to understand TDengine's FQDN (Chinese)](https://www.taosdata.com/blog/2021/07/29/2741.html) to resolve it.
Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found.
2. Unhandled exception. System.DllNotFoundException: Unable to load DLL 'taos' or one of its dependencies: The specified module cannot be found.
This is usually because the program did not find the dependent client driver. The solution is to copy `C:\TDengine\driver\taos.dll` to the `C:\Windows\System32\` directory on Windows, or to create the following soft link on Linux: `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`.

View File

@ -15,9 +15,9 @@ import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.md
import GoOpenTSDBJson from "../../07-develop/03-insert-data/_go_opts_json.mdx"
import GoQuery from "../../07-develop/04-query-data/_go.mdx"
`driver-go` is the official Go language connector for TDengine, which implements the interface to the Go language [database/sql](https://golang.org/pkg/database/sql/) package. Go developers can use it to develop applications that access TDengine cluster data.
`driver-go` is the official Go language connector for TDengine. It implements the [database/sql](https://golang.org/pkg/database/sql/) package, the generic Go language interface to SQL databases. Go developers can use it to develop applications that access TDengine cluster data.
`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from the native connection.
`driver-go` provides two ways to establish connections. One is **native connection**, which connects to TDengine instances natively through the TDengine client driver (taosc), supporting data writing, querying, subscriptions, schemaless writing, and bind interface. The other is the **REST connection**, which connects to TDengine instances via the REST interface provided by taosAdapter. The set of features implemented by the REST connection differs slightly from those implemented by the native connection.
This article describes how to install `driver-go` and connect to TDengine clusters and perform basic operations such as data query and data writing through `driver-go`.
@ -213,7 +213,7 @@ func main() {
Since the REST interface is stateless, the `use db` syntax will not work. You need to put the db name into the SQL command, e.g. change `create table if not exists tb1 (ts timestamp, a int)` to `create table if not exists test.tb1 (ts timestamp, a int)`; otherwise it will report the error `[0x217] Database not specified or available`.
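That is, the statement sent over the REST connection carries the database prefix, as in this sketch:

```sql
create table if not exists test.tb1 (ts timestamp, a int);
```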
You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter in TDengine 2.4.0.5. is supported since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error.
You can also put the db name in the DSN by changing `root:taosdata@http(localhost:6041)/` to `root:taosdata@http(localhost:6041)/test`. This method is supported by taosAdapter since TDengine 2.4.0.5. Executing the `create database` statement when the specified db does not exist will not report an error while executing other queries or writing against that db will report an error.
The complete example is as follows.
@ -289,7 +289,7 @@ func main() {
6. `readBufferSize` parameter has no significant effect after being increased
If you increase `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value according to the actual situation to achieve the best query result.
Increasing `readBufferSize` will reduce the number of `syscall` calls when fetching results. If the query result is smaller, modifying this parameter will not improve performance significantly. If you increase the parameter value too much, the bottleneck will be parsing JSON data. If you need to optimize the query speed, you must adjust the value based on the actual situation to achieve the best query performance.
7. `disableCompression` parameter is set to `false` when the query efficiency is reduced

View File

@ -9,19 +9,19 @@ description: TDengine Java based on JDBC API and provide both native and REST co
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. And the other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). REST connections implement has a slight differences to compare the set of features implemented and native connections.
'taos-jdbcdriver' is TDengine's official Java language connector, which allows Java developers to develop applications that access the TDengine database. 'taos-jdbcdriver' implements the interface of the JDBC driver standard and provides two forms of connectors. One is to connect to a TDengine instance natively through the TDengine client driver (taosc), which supports functions including data writing, querying, subscription, schemaless writing, and bind interface. The other is to connect to a TDengine instance through the REST interface provided by taosAdapter (2.4.0.0 and later). The implementations of the REST connection and the native connection have slight differences in features.
![TDengine Database tdengine-connector](tdengine-jdbc-connector.webp)
The preceding diagram shows two ways for a Java app to access TDengine via connector:
- JDBC native connection: Java applications use TSDBDriver on physical node 1 (pnode1) to call the client driver (`libtaos.so` or `taos.dll`) APIs directly, sending writing and query requests to taosd instances located on physical node 2 (pnode2).
- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server of physical node 2 (taosAdapter), requests TDengine server through the REST server, and returns the result.
- JDBC REST connection: The Java application encapsulates the SQL as a REST request via RestfulDriver, sends it to the REST server (taosAdapter) on physical node 2. taosAdapter forwards the request to TDengine server and returns the result.
Using REST connection, which does not rely on TDengine client drivers.It can be cross-platform more convenient and flexible but introduce about 30% lower performance than native connection.
The REST connection, which does not rely on TDengine client drivers, is more convenient and flexible, in addition to being cross-platform. However the performance is about 30% lower than that of the native connection.
:::info
TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases, so 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. You need to pay attention to the following points when using:
TDengine's JDBC driver implementation is as consistent as possible with the relational database driver. Still, there are differences in the use scenarios and technical characteristics of TDengine and relational object databases. So 'taos-jdbcdriver' also has some differences from traditional JDBC drivers. It is important to keep the following points in mind:
- TDengine does not currently support delete operations for individual data records.
- Transactional operations are not currently supported.
@ -88,7 +88,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
</TabItem>
<TabItem value="source" label="Build from source code">
You can build Java connector from source code after clone TDengine project:
You can build Java connector from source code after cloning the TDengine project:
```shell
git clone https://github.com/taosdata/TDengine.git
@ -96,7 +96,7 @@ cd TDengine/src/connector/jdbc
mvn clean install -Dmaven.test.skip=true
```
After compilation, a jar package of taos-jdbcdriver-2.0.XX-dist .jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.
After compilation, a jar package named taos-jdbcdriver-2.0.XX-dist.jar is generated in the target directory, and the compiled jar file is automatically placed in the local Maven repository.
</TabItem>
</Tabs>
@ -186,7 +186,7 @@ Connection conn = DriverManager.getConnection(jdbcUrl);
In the above example, a RestfulDriver with a JDBC REST connection is used to establish a connection to a database named `test` with hostname `taosdemo.com` on port `6041`. The URL specifies the user name as `root` and the password as `taosdata`.
There is no dependency on the client driver when Using a JDBC REST connection. Compared to a JDBC native connection, only the following are required: 1.
There is no dependency on the client driver when using a JDBC REST connection. Compared to a JDBC native connection, only the following are required:
1. driverClass specified as "com.taosdata.jdbc.rs.RestfulDriver".
2. jdbcUrl starting with "jdbc:TAOS-RS://".
@ -209,7 +209,7 @@ The configuration parameters in the URL are as follows.
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
- Starting from taos-jdbcdriver-2.0.36 and TDengine 2.2.0.0, if dbname is specified in the URL, JDBC REST connections will use `/rest/sql/dbname` as the URL for REST requests by default, and there is no need to specify dbname in SQL. For example, if the URL is `jdbc:TAOS-RS://127.0.0.1:6041/test`, then the SQL can be executed: insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
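A sketch of that behavior in Java, reusing the statement from the note above (connection values are illustrative):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DbNameInUrl {
    public static void main(String[] args) throws Exception {
        // dbname "test" is part of the REST URL, so the SQL below needs no database prefix.
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate(
                "insert into test using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6)");
        }
    }
}
```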
:::
@ -271,7 +271,7 @@ If the configuration parameters are duplicated in the URL, Properties, or client
2. Properties connProps
3. the configuration file taos.cfg of the TDengine client driver when using a native connection
For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously. In this case, JDBC will use the password in the URL to establish the connection.
For example, if you specify the password as `taosdata` in the URL and specify the password as `taosdemo` in the Properties simultaneously, JDBC will use the password in the URL to establish the connection.
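A brief sketch of this precedence (the values are hypothetical):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ConfigPrecedence {
    public static void main(String[] args) throws Exception {
        // The password appears both in the URL and in the Properties object.
        String url = "jdbc:TAOS://localhost:6030/test?user=root&password=taosdata";
        Properties props = new Properties();
        props.setProperty("user", "root");
        props.setProperty("password", "taosdemo"); // lower priority: the URL value wins
        try (Connection conn = DriverManager.getConnection(url, props)) {
            // The session is authenticated with "taosdata", taken from the URL.
        }
    }
}
```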
## Usage examples
@ -323,7 +323,7 @@ while(resultSet.next()){
}
```
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, starting from 1, it is recommended to use the field names to get them.
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
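For example, both access styles on a result set (a sketch; the table and column names follow the weather example used elsewhere in this document):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Timestamp;

public class ReadFields {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS-RS://127.0.0.1:6041/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select ts, temperature from weather")) {
            while (rs.next()) {
                Timestamp ts = rs.getTimestamp(1);              // subscripts start from 1
                float temperature = rs.getFloat("temperature"); // recommended: access by field name
                System.out.println(ts + " " + temperature);
            }
        }
    }
}
```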
### Handling exceptions
@ -623,7 +623,7 @@ public void setNString(int columnIndex, ArrayList<String> list, int size) throws
### Schemaless Writing
Starting with version 2.2.0.0, TDengine has added the ability to schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details.
Starting with version 2.2.0.0, TDengine has added the ability to perform schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. See [schemaless writing](/reference/schemaless/) for details.
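A minimal sketch of schemaless writing through the Java connector, assuming the `SchemalessWriter` helper shipped in recent taos-jdbcdriver releases (verify the class and enum names against your driver version):

```java
import java.sql.Connection;
import java.sql.DriverManager;
// Assumed helper classes from recent taos-jdbcdriver releases; check your driver version.
import com.taosdata.jdbc.SchemalessWriter;
import com.taosdata.jdbc.enums.SchemalessProtocolType;
import com.taosdata.jdbc.enums.SchemalessTimestampType;

public class SchemalessDemo {
    public static void main(String[] args) throws Exception {
        // InfluxDB line protocol with a nanosecond timestamp; schemaless writing
        // requires a native connection in the 2.x connector.
        String line = "meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219 1626006833639000000";
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata")) {
            SchemalessWriter writer = new SchemalessWriter(conn);
            writer.write(line, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
        }
    }
}
```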
**Note**.
@ -666,16 +666,16 @@ The TDengine Java Connector supports subscription functionality with the followi
#### Create subscriptions
```java
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topicname", "select * from meters", false);
```
The three parameters of the `subscribe()` method have the following meanings.
- topic: the subscribed topic (i.e., name). This parameter is the unique identifier of the subscription
- sql: the query statement of the subscription, this statement can only be `select` statement, only the original data should be queried, and you can query only the data in the positive time order
- topicname: the name of the subscribed topic. This parameter is the unique identifier of the subscription.
- sql: the query statement of the subscription. This statement can only be a `select` statement. Only original data can be queried, and you can query the data only in temporal order.
- restart: if the subscription already exists, whether to restart or continue the previous subscription
The above example will use the SQL command `select * from meters` to create a subscription named `topic`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.
The above example will use the SQL command `select * from meters` to create a subscription named `topicname`. If the subscription exists, it will continue the progress of the previous query instead of consuming all the data from the beginning.
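A minimal polling sketch built on that subscription, assuming the `consume()` and `close(boolean keepProgress)` methods of `TSDBSubscribe` and the example `meters` table (column names are illustrative; `conn` is an established native JDBC connection):

```java
TSDBSubscribe sub = ((TSDBConnection) conn).subscribe("topicname", "select * from meters", false);
try {
    for (int i = 0; i < 10; i++) {         // poll a few rounds, then stop
        TSDBResultSet rs = sub.consume();  // returns rows added since the previous consume
        while (rs.next()) {
            System.out.println(rs.getTimestamp(1) + " " + rs.getFloat("current"));
        }
        Thread.sleep(1000);                // arbitrary polling interval
    }
} finally {
    sub.close(true); // true: keep progress so the subscription can be resumed later
}
```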
#### Subscribe to consume data

View File

@ -14,7 +14,6 @@ import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx";
import NodeQuery from "../../07-develop/04-query-data/_js.mdx";
import NodeAsyncQuery from "../../07-develop/04-query-data/_js_async.mdx";
`td2.0-connector` and `td2.0-rest-connector` are the official Node.js language connectors for TDengine. Node.js developers can use them to develop applications that access TDengine instances.
@ -189,14 +188,8 @@ let cursor = conn.cursor();
### Query data
#### Synchronous queries
<NodeQuery />
#### asynchronous query
<NodeAsyncQuery />
## More Sample Programs
| Sample Programs | Sample Program Description |

View File

@ -11,18 +11,18 @@ import TabItem from "@theme/TabItem";
`taospy` is the official Python connector for TDengine. `taospy` provides a rich set of APIs that makes it easy for Python applications to access TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The connection to the server directly using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Supported Platforms
- The native connection [supported platforms](/reference/connector/#supported-platforms) is the same as the one supported by the TDengine client.
- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.
## Version selection
We recommend using the latest version of `taospy`, regardless what the version of TDengine is.
We recommend using the latest version of `taospy`, regardless of the version of TDengine.
## Supported features
@ -139,7 +139,7 @@ The FQDN above can be the FQDN of any dnode in the cluster, and the PORT is the
</TabItem>
<TabItem value="rest" label="REST connection" groupId="connect">
For REST connections and making sure the cluster is up, make sure the taosAdapter component is up. This can be tested using the following `curl ` command.
For REST connections, make sure both the cluster and the taosAdapter component are running. This can be tested using the following `curl` command.
```
curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
@ -312,7 +312,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
### Exception handling
All database operations will be thrown directly if an exception occurs. The application is responsible for exception handling. For example:
All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:
```python
{{#include docs-examples/python/handle_exception.py}}

View File

@ -30,7 +30,7 @@ REST connections are supported on all platforms that can run Rust.
Please refer to [version support list](/reference/connector#version-support).
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. Recommend to use TDengine version 2.4 or higher to avoid known issues.
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 2.4 or higher to avoid known issues.
## Installation
@ -206,7 +206,7 @@ let conn: Taos = cfg.connect();
### Connection pooling
In complex applications, recommand to enable connection pool. Connection pool for [libtaos] is implemented using [r2d2].
In complex applications, we recommend enabling connection pools. Connection pool for [libtaos] is implemented using [r2d2].
A connection pool with default parameters can be created as follows.
@ -269,7 +269,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai
Note that Rust asynchronous functions and an asynchronous runtime are required.
[Taos] provides partial Rust methodization of SQL to reduce the frequency of `format!` code blocks.
[Taos] provides a few Rust methods that encapsulate SQL to reduce the frequency of `format!` code blocks.
- `.describe(table: &str)`: Executes `DESCRIBE` and returns a Rust data structure.
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
@ -279,7 +279,7 @@ In addition, this structure is also the entry point for [Parameter Binding](#Par
### Bind Interface
Similar to the C interface, Rust provides the bind interface's wraping. First, create a bind object [Stmt] for a SQL command from the [Taos] object.
Similar to the C interface, Rust provides the bind interface's wrapping. First, create a bind object [Stmt] for a SQL command from the [Taos] object.
```rust
let mut stmt: Stmt = taos.stmt("insert into ? values(?, ?)")?;

View File

@ -30,7 +30,7 @@ taosAdapter provides the following features.
### Install taosAdapter
taosAdapter has been part of TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine official website](https://tdengine.com/all-downloads/) to download the TDengine server installation package (taosAdapter is included in v2.4.0.0 and later version). If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
taosAdapter has been part of the TDengine server software since TDengine v2.4.0.0. If you use the TDengine server, you don't need any additional steps to install taosAdapter. You can download the TDengine server installation package, which includes taosAdapter in v2.4.0.0 and later versions, from the [TDengine official website](https://tdengine.com/all-downloads/). If you need to deploy taosAdapter separately, on a server other than the one running the TDengine server, you should install the full TDengine server package on that server. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD.md) documentation.
### Start/Stop taosAdapter
@ -38,7 +38,7 @@ On Linux systems, the taosAdapter service is managed by `systemd` by default. Yo
### Remove taosAdapter
Use the command `rmtaos` to remove the TDengine server software if you use tar.gz package or use package management command like rpm or apt to remove the TDengine server, including taosAdapter.
Use the command `rmtaos` to remove the TDengine server software, including taosAdapter, if you installed it from a tar.gz package. If you installed it with a .deb or .rpm package, use the corresponding command for your package manager, such as apt or rpm, to remove the TDengine server, including taosAdapter.
### Upgrade taosAdapter
@ -240,7 +240,7 @@ node_export is an exporter of hardware and OS metrics exposed by the \*NIX kerne
## Memory usage optimization methods
taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values range from -1 to 100 integers in percent of the system's physical memory.
taosAdapter will monitor its memory usage during operation and adjust it with two thresholds. Valid values are integers from 1 to 100, representing a percentage of the system's physical memory.
- pauseQueryMemoryThreshold
- pauseAllMemoryThreshold
@ -276,7 +276,7 @@ Corresponding configuration parameter
monitor.pauseQueryMemoryThreshold memory threshold for no more queries Environment variable `TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD` (default 70)
```
You can adjust it according to the specific application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor system memory status timely. The load balancer can also check the taosAdapter running status through this interface.
You should adjust this parameter based on your specific application scenario and operation strategy. We recommend using monitoring software to monitor system memory status. The load balancer can also check the taosAdapter running status through this interface.
## taosAdapter Monitoring Metrics
@ -325,7 +325,7 @@ You can also adjust the level of the taosAdapter log output by setting the `--lo
## How to migrate from older TDengine versions to taosAdapter
In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its process ID. And there are some configuration parameters and behaviors that are different between the two. See the following table for details.
In TDengine server 2.2.x.x or earlier, the TDengine server process (taosd) contains an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed using `systemd` and has its own process ID. There are some configuration parameters and behaviors that are different between the two. See the following table for details.
| **#** | **embedded httpd** | **taosAdapter** | **comment** |
| ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ |

View File

@ -1,10 +1,10 @@
---
title: TDengine Command Line (CLI)
sidebar_label: TDengine CLI
title: TDengine Command Line Interface (CLI)
sidebar_label: Command Line Interface
description: Instructions and tips for using the TDengine CLI
---
The TDengine command-line application (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
## Installation

View File

@ -77,7 +77,7 @@ If the subtable obtained by the parse line protocol does not exist, Schemaless c
:::tip
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed
16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
48k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
## Time resolution recognition

View File

@ -25,7 +25,7 @@ The default database name written by taosAdapter is `collectd`. You can also mod
#collectd
collectd uses a plugin mechanism to write the collected monitoring data to different data storage software in various forms. TDengine supports both the direct collection plugin and the write_tsdb plugin.
#### is configured to receive data from the direct collection plugin
#### Configure the direct collection plugin
Modify the relevant configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
@ -62,7 +62,7 @@ LoadPlugin write_tsdb
</Plugin>
```
Where <taosAdapter's host\> fills in the server's domain name or IP address running taosAdapter. <port for collectd write_tsdb plugin\> Fill in the data that taosAdapter uses to receive the collectd write_tsdb plugin (default is 6047).
Where <taosAdapter's host\> is the domain name or IP address of the server running taosAdapter, and <port for collectd write_tsdb plugin\> is the port that taosAdapter uses to receive data from the collectd write_tsdb plugin (default is 6047).
```text
LoadPlugin write_tsdb

View File

@ -2,11 +2,11 @@
title: Reference
---
The reference guide is the detailed introduction to TDengine, various TDengine's connectors in different languages, and the tools that come with it.
The reference guide is a detailed introduction to TDengine, including the various TDengine connectors in different languages and the tools that come with TDengine.
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items}/>
```
```

View File

@ -6,7 +6,7 @@ title: collectd writing
import CollectD from "../14-reference/_collectd.mdx"
collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically counts system performance statistics number while the system is running and storing information. You can use this information to help identify current system performance bottlenecks and predict future system load.
collectd is a daemon used to collect system performance metric data. collectd provides various storage mechanisms to store different values. It periodically collects system performance statistics while the system is running and stores the information. You can use this information to help identify current system performance bottlenecks and predict future system load.
You can write the data collected by collectd to TDengine by simply pointing the collectd configuration to the domain name (or IP address) and the corresponding port of the server running taosAdapter. This takes full advantage of TDengine's efficient storage and query performance and its clustering capability for time-series data.

View File

@ -20,4 +20,4 @@ func main() {
// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
// if you want to connect to a default database.
// if you want to connect to a specified database named "dbName".

View File

@ -18,6 +18,6 @@ func main() {
defer taos.Close()
}
// use
// use
// var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
// if you want to connect to a default database.
// if you want to connect to a specified database named "dbName".

View File

@ -22,4 +22,4 @@ public class JNIConnectExample {
// use
// String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
// if you want to connect to a default database.
// if you want to connect to a specified database named "dbName".

View File

@ -81,7 +81,7 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad);
* @param pMsg The request msg.
* @return int32_t 0 for success, -1 for failure.
*/
int32_t mndProcessMsg(SRpcMsg *pMsg);
int32_t mndProcessRpcMsg(SRpcMsg *pMsg);
int32_t mndProcessSyncMsg(SRpcMsg *pMsg);
/**

View File

@ -26,14 +26,17 @@ extern "C" {
typedef struct SQnode SQnode;
typedef struct {
int64_t numOfStartTask;
int64_t numOfStopTask;
int64_t numOfRecvedFetch;
int64_t numOfSentHb;
int64_t numOfSentFetch;
int64_t numOfTaskInQueue;
int64_t numOfProcessedQuery;
int64_t numOfProcessedCQuery;
int64_t numOfProcessedFetch;
int64_t numOfProcessedDrop;
int64_t memSizeInCache;
int64_t dataSizeSend;
int64_t dataSizeRecv;
int64_t numOfQueryInQueue;
int64_t numOfFetchInQueue;
int64_t numOfErrors;
int64_t waitTimeInQueryQUeue;
int64_t waitTimeInFetchQUeue;
} SQnodeLoad;
typedef struct {
@ -71,10 +74,10 @@ int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad);
* @param pQnode The qnode object.
* @param pMsg The request message
*/
int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg);
int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg);
#ifdef __cplusplus
}
#endif
#endif /*_TD_QNODE_H_*/
#endif /*_TD_QNODE_H_*/

View File

@ -52,23 +52,31 @@ typedef struct SUserAuthInfo {
AUTH_TYPE type;
} SUserAuthInfo;
typedef struct SDbInfo {
int32_t vgVer;
int32_t tbNum;
int64_t dbId;
} SDbInfo;
typedef struct SCatalogReq {
SArray *pTableMeta; // element is SNAME
SArray *pDbVgroup; // element is db full name
SArray *pDbCfg; // element is db full name
SArray *pDbInfo; // element is db full name
SArray *pTableMeta; // element is SNAME
SArray *pTableHash; // element is SNAME
SArray *pUdf; // element is udf name
SArray *pDbCfg; // element is db full name
SArray *pIndex; // element is index name
SArray *pUser; // element is SUserAuthInfo
bool qNodeRequired; // valid qnode
} SCatalogReq;
typedef struct SMetaData {
SArray *pTableMeta; // SArray<STableMeta*>
SArray *pDbVgroup; // SArray<SArray<SVgroupInfo>*>
SArray *pDbCfg; // SArray<SDbCfgInfo>
SArray *pDbInfo; // SArray<SDbInfo>
SArray *pTableMeta; // SArray<STableMeta*>
SArray *pTableHash; // SArray<SVgroupInfo>
SArray *pUdfList; // SArray<SFuncInfo>
SArray *pDbCfg; // SArray<SDbCfgInfo>
SArray *pIndex; // SArray<SIndexInfo>
SArray *pUser; // SArray<bool>
SArray *pQnodeList; // SArray<SQueryNodeAddr>
@ -269,6 +277,8 @@ int32_t catalogChkAuth(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet);
int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, void *pTrans, const SEpSet* pMgmtEps, uint64_t reqId);

View File

@ -23,6 +23,9 @@ extern "C" {
#include "function.h"
#include "querynodes.h"
#define FUNC_AGGREGATE_UDF_ID 5001
#define FUNC_SCALAR_UDF_ID 5002
typedef enum EFunctionType {
// aggregate function
FUNCTION_TYPE_APERCENTILE = 1,
@ -126,21 +129,12 @@ typedef enum EFunctionType {
struct SqlFunctionCtx;
struct SResultRowEntryInfo;
struct STimeWindow;
struct SCatalog;
typedef struct SFmGetFuncInfoParam {
struct SCatalog* pCtg;
void* pRpc;
const SEpSet* pMgmtEps;
char* pErrBuf;
int32_t errBufLen;
} SFmGetFuncInfoParam;
int32_t fmFuncMgtInit();
void fmFuncMgtDestroy();
int32_t fmGetFuncInfo(SFmGetFuncInfoParam* pParam, SFunctionNode* pFunc);
int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
bool fmIsBuiltinFunc(const char* pFunc);

View File

@ -192,11 +192,16 @@ void indexTermDestroy(SIndexTerm* p);
void indexInit();
/* index filter */
typedef struct SIndexMetaArg {
void* metaHandle;
uint64_t suid;
} SIndexMetaArg;
typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus;
SIdxFltStatus idxGetFltStatus(SNode* pFilterNode);
int32_t doFilterTag(const SNode* pFilterNode, SArray* result);
int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result);
/*
* destory index env
*

View File

@ -56,6 +56,9 @@ typedef struct SScanLogicNode {
int8_t intervalUnit;
int8_t slidingUnit;
SNode* pTagCond;
int8_t triggerType;
int64_t watermark;
int16_t tsColId;
} SScanLogicNode;
typedef struct SJoinLogicNode {
@ -216,6 +219,9 @@ typedef struct STableScanPhysiNode {
int64_t sliding;
int8_t intervalUnit;
int8_t slidingUnit;
int8_t triggerType;
int64_t watermark;
int16_t tsColId;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;

View File

@ -55,9 +55,9 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
bool qIsInsertSql(const char* pStr, size_t length);
// for async mode
int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq);
int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery);
int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq);
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery);
void qDestroyQuery(SQuery* pQueryNode);

View File

@ -43,6 +43,12 @@ typedef enum {
TASK_TYPE_TEMP,
} ETaskType;
typedef enum {
TARGET_TYPE_MNODE = 1,
TARGET_TYPE_VNODE,
TARGET_TYPE_OTHER,
} ETargetType;
typedef struct STableComInfo {
uint8_t numOfTags; // the number of tags in schema
uint8_t precision; // the number of precision
@ -126,11 +132,18 @@ typedef struct SDataBuf {
void* handle;
} SDataBuf;
typedef struct STargetInfo {
ETargetType type;
char dbFName[TSDB_DB_FNAME_LEN]; // used to update db's vgroup epset
int32_t vgId;
} STargetInfo;
typedef int32_t (*__async_send_cb_fn_t)(void* param, const SDataBuf* pMsg, int32_t code);
typedef int32_t (*__async_exec_fn_t)(void* param);
typedef struct SMsgSendInfo {
__async_send_cb_fn_t fp; // async callback function
STargetInfo target; // for update epset
void* param;
uint64_t requestId;
uint64_t requestObjRefId;

View File

@ -52,22 +52,24 @@ typedef struct {
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb);
int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
void qWorkerDestroy(void **qWorkerMgmt);
int64_t qWorkerGetWaitTimeInQueue(void *qWorkerMgmt, EQueueType type);
#ifdef __cplusplus
}
#endif

View File

@ -66,12 +66,6 @@ typedef struct SSyncCfg {
SNodeInfo nodeInfo[TSDB_MAX_REPLICA];
} SSyncCfg;
typedef struct SSnapshot {
void* data;
SyncIndex lastApplyIndex;
SyncTerm lastApplyTerm;
} SSnapshot;
typedef struct SFsmCbMeta {
SyncIndex index;
bool isWeak;
@ -93,6 +87,12 @@ typedef struct SReConfigCbMeta {
uint64_t flag;
} SReConfigCbMeta;
typedef struct SSnapshot {
void *data;
SyncIndex lastApplyIndex;
SyncTerm lastApplyTerm;
} SSnapshot;
typedef struct SSyncFSM {
void* data;
@ -101,23 +101,17 @@ typedef struct SSyncFSM {
void (*FpRollBackCb)(struct SSyncFSM* pFsm, const SRpcMsg* pMsg, SFsmCbMeta cbMeta);
void (*FpRestoreFinishCb)(struct SSyncFSM* pFsm);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
// if (*ppIter == NULL)
// *ppIter = new iter;
// else
// *ppIter.next();
//
// if success, return 0. else return error code
int32_t (*FpSnapshotRead)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, void** ppIter, char** ppBuf,
int32_t* len);
// apply data into fsm
int32_t (*FpSnapshotApply)(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char* pBuf, int32_t len);
void (*FpReConfigCb)(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta);
// int32_t (*FpRestoreSnapshot)(struct SSyncFSM* pFsm, const SSnapshot* snapshot);
int32_t (*FpGetSnapshot)(struct SSyncFSM* pFsm, SSnapshot* pSnapshot);
int32_t (*FpSnapshotStartRead)(struct SSyncFSM* pFsm, void** ppReader);
int32_t (*FpSnapshotStopRead)(struct SSyncFSM* pFsm, void* pReader);
int32_t (*FpSnapshotDoRead)(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len);
int32_t (*FpSnapshotStartWrite)(struct SSyncFSM* pFsm, void** ppWriter);
int32_t (*FpSnapshotStopWrite)(struct SSyncFSM* pFsm, void* pWriter, bool isApply);
int32_t (*FpSnapshotDoWrite)(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len);
} SSyncFSM;

View File

@ -69,6 +69,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0027)
#define TSDB_CODE_NEED_RETRY TAOS_DEF_ERROR_CODE(0, 0x0028)
#define TSDB_CODE_OUT_OF_RPC_MEMORY_QUEUE TAOS_DEF_ERROR_CODE(0, 0x0029)
#define TSDB_CODE_INVALID_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x0030)
#define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040)
#define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041)
@ -655,7 +656,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_NUM TAOS_DEF_ERROR_CODE(0, 0x2801)
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
#define TSDB_CODE_FUNC_INVALID_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
//udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)

View File

@ -46,6 +46,7 @@ typedef struct {
void *ahandle;
int32_t workerId;
int32_t threadNum;
int64_t timestamp;
} SQueueInfo;
typedef enum {
@ -80,7 +81,7 @@ int32_t taosAddIntoQset(STaosQset *qset, STaosQueue *queue, void *ahandle);
void taosRemoveFromQset(STaosQset *qset, STaosQueue *queue);
int32_t taosGetQueueNumber(STaosQset *qset);
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, void **ahandle, FItem *itemFp);
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, int64_t *ts, void **ahandle, FItem *itemFp);
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, void **ahandle, FItems *itemsFp);
void taosResetQsetThread(STaosQset *qset, void *pItem);

View File

@ -729,23 +729,55 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
taosMemoryFreeClear(pMsgBody);
}
void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, SEpSet* pEpSet) {
if (NULL == pEpSet) {
return;
}
switch (pSendInfo->target.type) {
case TARGET_TYPE_MNODE:
if (NULL == pTscObj) {
tscError("mnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId);
return;
}
updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet);
break;
case TARGET_TYPE_VNODE: {
if (NULL == pTscObj) {
tscError("vnode epset changed but not able to update it, reqObjRefId:%" PRIx64, pSendInfo->requestObjRefId);
return;
}
SCatalog* pCatalog = NULL;
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId, tstrerror(code));
return;
}
catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet);
break;
}
default:
tscDebug("epset changed, not updated, msgType %s", TMSG_INFO(pMsg->msgType));
break;
}
}
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
assert(pMsg->info.ahandle != NULL);
SRequestObj* pRequest = NULL;
STscObj* pTscObj = NULL;
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
assert(pRequest->self == pSendInfo->requestObjRefId);
pRequest->metric.rsp = taosGetTimestampUs();
//STscObj* pTscObj = pRequest->pTscObj;
//if (pEpSet) {
// if (!isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, pEpSet)) {
// updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet);
// }
//}
pTscObj = pRequest->pTscObj;
/*
* There is not response callback function for submit response.
* The actual inserted number of points is the first number.
@ -762,6 +794,8 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId);
}
updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet);
SDataBuf buf = {.len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle};
if (pMsg->contLen > 0) {
@ -1215,6 +1249,8 @@ void resetConnectDB(STscObj* pTscObj) {
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) {
assert(pResultInfo != NULL && pRsp != NULL);
taosMemoryFreeClear(pResultInfo->pRspMsg);
pResultInfo->pRspMsg = (const char*)pRsp;
pResultInfo->pData = (void*)pRsp->data;
pResultInfo->numOfRows = htonl(pRsp->numOfRows);

View File

@ -1159,7 +1159,9 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumn->info.type)) {
pColumn->varmeta.length = 0;
} else {
memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows));
if (pColumn->nullbitmap != NULL) {
memset(pColumn->nullbitmap, 0, BitmapLen(numOfRows));
}
}
}
@ -1466,7 +1468,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
void blockDebugShowData(const SArray* dataBlocks) {
char pBuf[128];
char pBuf[128] = {0};
int32_t sz = taosArrayGetSize(dataBlocks);
for (int32_t i = 0; i < sz; i++) {
SSDataBlock* pDataBlock = taosArrayGet(dataBlocks, i);
@ -1838,6 +1840,7 @@ const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t
pStart += sizeof(int32_t) * numOfRows;
if (colLen[i] > 0) {
taosMemoryFreeClear(pColInfoData->pData);
pColInfoData->pData = taosMemoryMalloc(colLen[i]);
}
} else {

View File

@ -293,7 +293,7 @@ int32_t taosAddClientLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "jniDebugFlag", jniDebugFlag, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "simDebugFlag", 143, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "debugFlag", 0, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "idxDebugFlag", 0, 0, 255, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 1) != 0) return -1;
return 0;
}

View File

@ -605,6 +605,10 @@ static int32_t tdAppendKvRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols
* @param pCols
*/
int32_t tdAppendSTSRowToDataCol(STSRow *pRow, STSchema *pSchema, SDataCols *pCols, bool isMerge) {
#ifdef TD_DEBUG_PRINT_TSDB_LOAD_DCOLS
printf("%s:%d ts: %" PRIi64 " sver:%d maxCols:%" PRIi16 " nCols:%" PRIi16 ", nRows:%d\n", __func__, __LINE__,
TD_ROW_KEY(pRow), TD_ROW_SVER(pRow), pCols->maxCols, pCols->numOfCols, pCols->numOfRows);
#endif
if (TD_IS_TP_ROW(pRow)) {
return tdAppendTpRowToDataCol(pRow, pSchema, pCols, isMerge);
} else if (TD_IS_KV_ROW(pRow)) {

View File

@ -521,10 +521,10 @@ int32_t convertStringToTimestamp(int16_t type, char *inputData, int64_t timePrec
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY) {
newColData = taosMemoryCalloc(1, charLen + 1);
memcpy(newColData, varDataVal(inputData), charLen);
bool ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
int32_t ret = taosParseTime(newColData, timeVal, charLen, (int32_t)timePrec, tsDaylight);
if (ret != TSDB_CODE_SUCCESS) {
taosMemoryFree(newColData);
return ret;
return TSDB_CODE_INVALID_TIMESTAMP;
}
taosMemoryFree(newColData);
} else if (type == TSDB_DATA_TYPE_NCHAR) {
@ -783,7 +783,7 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
static char buf[96];
static char buf[96] = {0};
size_t pos = 0;
struct tm tm;

View File

@ -40,7 +40,7 @@ static void mmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
break;
default:
pMsg->info.node = pMgmt->pMnode;
code = mndProcessMsg(pMsg);
code = mndProcessRpcMsg(pMsg);
}
if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) {

View File

@ -16,7 +16,11 @@
#define _DEFAULT_SOURCE
#include "qmInt.h"
void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {}
void qmGetMonitorInfo(SQnodeMgmt *pMgmt, SMonQmInfo *qmInfo) {
SQnodeLoad qload = {0};
qndGetLoad(pMgmt->pQnode, &qload);
}
int32_t qmProcessGetMonitorInfoReq(SQnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SMonQmInfo qmInfo = {0};

View File

@ -36,7 +36,7 @@ static void qmProcessQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
code = qmProcessGetMonitorInfoReq(pMgmt, pMsg);
break;
default:
code = qndProcessQueryMsg(pMgmt->pQnode, pMsg);
code = qndProcessQueryMsg(pMgmt->pQnode, pInfo->timestamp, pMsg);
break;
}

View File

@ -62,7 +62,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
dmProcessNetTestReq(pDnode, pRpc);
return;
} else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
qWorkerProcessFetchRsp(NULL, NULL, pRpc);
qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0);
return;
} else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
dmSetMnodeEpSet(&pDnode->data, pEpSet);

View File

@ -53,6 +53,11 @@ typedef enum {
MND_AUTH_MAX
} EAuthOp;
typedef enum {
TRN_STEP_LOG = 1,
TRN_STEP_ACTION = 2,
} ETrnStep;
typedef enum {
TRN_STAGE_PREPARE = 0,
TRN_STAGE_REDO_LOG = 1,

View File

@ -75,13 +75,12 @@ typedef struct {
} STelemMgmt;
typedef struct {
SWal *pWal;
sem_t syncSem;
int64_t sync;
bool standby;
bool restored;
int32_t errCode;
int32_t transId;
SWal *pWal;
sem_t syncSem;
int64_t sync;
bool standby;
int32_t errCode;
int32_t transId;
} SSyncMgmt;
typedef struct {
@ -90,34 +89,45 @@ typedef struct {
} SGrantInfo;
typedef struct SMnode {
int32_t selfDnodeId;
int64_t clusterId;
TdThread thread;
bool deploy;
bool stopped;
int8_t replica;
int8_t selfIndex;
SReplica replicas[TSDB_MAX_REPLICA];
char *path;
int64_t checkTime;
SSdb *pSdb;
SMgmtWrapper *pWrapper;
SArray *pSteps;
SQHandle *pQuery;
SShowMgmt showMgmt;
SProfileMgmt profileMgmt;
STelemMgmt telemMgmt;
SSyncMgmt syncMgmt;
SHashObj *infosMeta;
SHashObj *perfsMeta;
SGrantInfo grant;
MndMsgFp msgFp[TDMT_MAX];
SMsgCb msgCb;
int32_t selfDnodeId;
int64_t clusterId;
TdThread thread;
TdThreadRwlock lock;
int32_t rpcRef;
int32_t syncRef;
bool stopped;
bool restored;
bool deploy;
int8_t replica;
int8_t selfIndex;
SReplica replicas[TSDB_MAX_REPLICA];
char *path;
int64_t checkTime;
SSdb *pSdb;
SArray *pSteps;
SQHandle *pQuery;
SHashObj *infosMeta;
SHashObj *perfsMeta;
SShowMgmt showMgmt;
SProfileMgmt profileMgmt;
STelemMgmt telemMgmt;
SSyncMgmt syncMgmt;
SGrantInfo grant;
MndMsgFp msgFp[TDMT_MAX];
SMsgCb msgCb;
} SMnode;
void mndSetMsgHandle(SMnode *pMnode, tmsg_t msgType, MndMsgFp fp);
int64_t mndGenerateUid(char *name, int32_t len);
int32_t mndAcquireRpcRef(SMnode *pMnode);
void mndReleaseRpcRef(SMnode *pMnode);
void mndSetRestore(SMnode *pMnode, bool restored);
void mndSetStop(SMnode *pMnode);
bool mndGetStop(SMnode *pMnode);
int32_t mndAcquireSyncRef(SMnode *pMnode);
void mndReleaseSyncRef(SMnode *pMnode);
#ifdef __cplusplus
}
#endif

View File

@ -22,6 +22,13 @@
extern "C" {
#endif
typedef enum {
TRANS_START_FUNC_TEST = 1,
TRANS_STOP_FUNC_TEST = 2,
TRANS_START_FUNC_MQ_REB = 3,
TRANS_STOP_FUNC_TEST_MQ_REB = 4,
} ETrnFunc;
typedef struct {
SEpSet epSet;
tmsg_t msgType;
@ -33,12 +40,17 @@ typedef struct {
void *pCont;
} STransAction;
typedef enum {
TEST_TRANS_START_FUNC = 1,
TEST_TRANS_STOP_FUNC = 2,
MQ_REB_TRANS_START_FUNC = 3,
MQ_REB_TRANS_STOP_FUNC = 4,
} ETrnFuncType;
typedef struct {
SSdbRaw *pRaw;
} STransLog;
typedef struct {
ETrnStep stepType;
STransAction redoAction;
STransAction undoAction;
STransLog redoLog;
STransLog undoLog;
} STransStep;
typedef void (*TransCbFp)(SMnode *pMnode, void *param, int32_t paramLen);
@ -55,7 +67,7 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen);
void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
void mndTransSetExecOneByOne(STrans *pTrans);

View File

@ -85,7 +85,7 @@ static void *mndThreadFp(void *param) {
while (1) {
lastTime++;
taosMsleep(100);
if (pMnode->stopped) break;
if (mndGetStop(pMnode)) break;
if (lastTime % (tsTransPullupInterval * 10) == 0) {
mndPullupTrans(pMnode);
@ -118,7 +118,6 @@ static int32_t mndInitTimer(SMnode *pMnode) {
}
static void mndCleanupTimer(SMnode *pMnode) {
pMnode->stopped = true;
if (taosCheckPthreadValid(pMnode->thread)) {
taosThreadJoin(pMnode->thread, NULL);
taosThreadClear(&pMnode->thread);
@ -335,15 +334,19 @@ void mndClose(SMnode *pMnode) {
int32_t mndStart(SMnode *pMnode) {
mndSyncStart(pMnode);
if (pMnode->deploy) {
if (sdbDeploy(pMnode->pSdb) != 0) return -1;
pMnode->syncMgmt.restored = true;
if (sdbDeploy(pMnode->pSdb) != 0) {
mError("failed to deploy sdb while start mnode");
return -1;
}
mndSetRestore(pMnode, true);
}
return mndInitTimer(pMnode);
}
void mndStop(SMnode *pMnode) {
mndSetStop(pMnode);
mndSyncStop(pMnode);
return mndCleanupTimer(pMnode);
mndCleanupTimer(pMnode);
}
int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
@ -362,7 +365,12 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
return TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
char logBuf[512];
if (mndAcquireSyncRef(pMnode) != 0) {
mError("failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
return TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pMgmt->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);
@ -405,59 +413,45 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
code = TAOS_SYNC_PROPOSE_OTHER_ERROR;
}
mndReleaseSyncRef(pMnode);
return code;
}
static int32_t mndCheckMnodeMaster(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (mndIsMaster(pMsg->info.node)) return 0;
static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;
if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
pMsg->msgType == TDMT_MND_TRANS_TIMER) {
return -1;
}
mError("msg:%p, failed to check master since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
TMSG_INFO(pMsg->msgType));
if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER &&
pMsg->msgType != TDMT_MND_TRANS_TIMER) {
mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
TMSG_INFO(pMsg->msgType));
SEpSet epSet = {0};
mndGetMnodeEpSet(pMsg->info.node, &epSet);
SEpSet epSet = {0};
mndGetMnodeEpSet(pMsg->info.node, &epSet);
#if 0
mTrace("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
mTrace("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
epSet.inUse = (i + 1) % epSet.numOfEps;
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
} else {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
}
#endif
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
} else {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
return -1;
}
static int32_t mndCheckRequestValid(SRpcMsg *pMsg) {
static int32_t mndCheckMsgContent(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0;
mError("msg:%p, failed to valid request, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
terrno = TSDB_CODE_INVALID_MSG_LEN;
return -1;
}
int32_t mndProcessMsg(SRpcMsg *pMsg) {
if (mndCheckMnodeMaster(pMsg) != 0) return -1;
if (mndCheckRequestValid(pMsg) != 0) return -1;
int32_t mndProcessRpcMsg(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)];
if (fp == NULL) {
@ -466,8 +460,13 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) {
return -1;
}
mTrace("msg:%p, will be processed in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
if (mndCheckMsgContent(pMsg) != 0) return -1;
if (mndCheckMnodeState(pMsg) != 0) return -1;
mTrace("msg:%p, start to process in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
int32_t code = (*fp)(pMsg);
mndReleaseRpcRef(pMnode);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
mTrace("msg:%p, won't response immediately since in progress", pMsg);
} else if (code == 0) {
@ -476,6 +475,7 @@ int32_t mndProcessMsg(SRpcMsg *pMsg) {
mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
TMSG_INFO(pMsg->msgType));
}
return code;
}
@ -502,7 +502,7 @@ int64_t mndGenerateUid(char *name, int32_t len) {
int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgroupInfo *pVgroupInfo,
SMonGrantInfo *pGrantInfo) {
if (!mndIsMaster(pMnode)) return -1;
if (mndAcquireRpcRef(pMnode) != 0) return -1;
SSdb *pSdb = pMnode->pSdb;
int64_t ms = taosGetTimestampMs();
@ -511,6 +511,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
pClusterInfo->mnodes = taosArrayInit(sdbGetSize(pSdb, SDB_MNODE), sizeof(SMonMnodeDesc));
pVgroupInfo->vgroups = taosArrayInit(sdbGetSize(pSdb, SDB_VGROUP), sizeof(SMonVgroupDesc));
if (pClusterInfo->dnodes == NULL || pClusterInfo->mnodes == NULL || pVgroupInfo->vgroups == NULL) {
mndReleaseRpcRef(pMnode);
return -1;
}
@ -605,6 +606,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
pGrantInfo->timeseries_total = INT32_MAX;
}
mndReleaseRpcRef(pMnode);
return 0;
}
@ -612,3 +614,76 @@ int32_t mndGetLoad(SMnode *pMnode, SMnodeLoad *pLoad) {
pLoad->syncState = syncGetMyRole(pMnode->syncMgmt.sync);
return 0;
}
int32_t mndAcquireRpcRef(SMnode *pMnode) {
int32_t code = 0;
taosThreadRwlockRdlock(&pMnode->lock);
if (pMnode->stopped) {
terrno = TSDB_CODE_APP_NOT_READY;
code = -1;
} else if (!mndIsMaster(pMnode)) {
code = -1;
} else {
int32_t ref = atomic_add_fetch_32(&pMnode->rpcRef, 1);
mTrace("mnode rpc is acquired, ref:%d", ref);
}
taosThreadRwlockUnlock(&pMnode->lock);
return code;
}
void mndReleaseRpcRef(SMnode *pMnode) {
taosThreadRwlockRdlock(&pMnode->lock);
int32_t ref = atomic_sub_fetch_32(&pMnode->rpcRef, 1);
mTrace("mnode rpc is released, ref:%d", ref);
taosThreadRwlockUnlock(&pMnode->lock);
}
void mndSetRestore(SMnode *pMnode, bool restored) {
if (restored) {
taosThreadRwlockWrlock(&pMnode->lock);
pMnode->restored = true;
taosThreadRwlockUnlock(&pMnode->lock);
mTrace("mnode set restored:%d", restored);
} else {
taosThreadRwlockWrlock(&pMnode->lock);
pMnode->restored = false;
taosThreadRwlockUnlock(&pMnode->lock);
mTrace("mnode set restored:%d", restored);
while (1) {
if (pMnode->rpcRef <= 0) break;
taosMsleep(3);
}
}
}
bool mndGetRestored(SMnode *pMnode) { return pMnode->restored; }
void mndSetStop(SMnode *pMnode) {
taosThreadRwlockWrlock(&pMnode->lock);
pMnode->stopped = true;
taosThreadRwlockUnlock(&pMnode->lock);
mTrace("mnode set stopped");
}
bool mndGetStop(SMnode *pMnode) { return pMnode->stopped; }
int32_t mndAcquireSyncRef(SMnode *pMnode) {
int32_t code = 0;
taosThreadRwlockRdlock(&pMnode->lock);
if (pMnode->stopped) {
terrno = TSDB_CODE_APP_NOT_READY;
code = -1;
} else {
int32_t ref = atomic_add_fetch_32(&pMnode->syncRef, 1);
mTrace("mnode sync is acquired, ref:%d", ref);
}
taosThreadRwlockUnlock(&pMnode->lock);
return code;
}
void mndReleaseSyncRef(SMnode *pMnode) {
taosThreadRwlockRdlock(&pMnode->lock);
int32_t ref = atomic_sub_fetch_32(&pMnode->syncRef, 1);
mTrace("mnode sync is released, ref:%d", ref);
taosThreadRwlockUnlock(&pMnode->lock);
}

View File

@ -26,19 +26,19 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
mTrace("msg:%p, in query queue is processing", pMsg);
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg);
code = qWorkerProcessQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_CONTINUE:
code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg);
code = qWorkerProcessCQueryMsg(&handle, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_FETCH:
code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg);
code = qWorkerProcessFetchMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_DROP_TASK:
code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg);
code = qWorkerProcessDropMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
case TDMT_VND_QUERY_HEARTBEAT:
code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg);
code = qWorkerProcessHbMsg(pMnode, pMnode->pQuery, pMsg, 0);
break;
default:
terrno = TSDB_CODE_VND_APP_ERROR;

View File

@ -493,7 +493,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
// 4. TODO commit log: modification log
// 5. set cb
mndTransSetCb(pTrans, MQ_REB_TRANS_START_FUNC, MQ_REB_TRANS_STOP_FUNC, NULL, 0);
mndTransSetCb(pTrans, TRANS_START_FUNC_MQ_REB, TRANS_STOP_FUNC_TEST_MQ_REB, NULL, 0);
// 6. execution
if (mndTransPrepare(pMnode, pTrans) != 0) goto REB_FAIL;
@ -940,7 +940,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
}
// do not show for cleared subscription
#if 0
#if 1
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);

View File

@ -48,6 +48,10 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
}
tsem_post(&pMgmt->syncSem);
} else {
if (cbMeta.index - sdbGetApplyIndex(pMnode->pSdb) > 100) {
sdbWriteFile(pMnode->pSdb);
}
}
}
@ -63,41 +67,10 @@ void mndRestoreFinish(struct SSyncFSM *pFsm) {
if (!pMnode->deploy) {
mInfo("mnode sync restore finished");
mndTransPullup(pMnode);
pMnode->syncMgmt.restored = true;
}
}
int32_t mndSnapshotRead(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, void **ppIter, char **ppBuf, int32_t *len) {
SMnode *pMnode = pFsm->data;
mInfo("start to read snapshot from sdb");
int32_t code = sdbReadSnapshot(pMnode->pSdb, (SSdbIter **)ppIter, ppBuf, len);
if (code != 0) {
mError("failed to read snapshot from sdb since %s", terrstr());
mndSetRestore(pMnode, true);
} else {
if (*ppIter == NULL) {
mInfo("successfully to read snapshot from sdb");
}
mInfo("mnode sync restore finished, and will set ready after first deploy");
}
return code;
}
int32_t mndSnapshotApply(struct SSyncFSM *pFsm, const SSnapshot *pSnapshot, char *pBuf, int32_t len) {
SMnode *pMnode = pFsm->data;
pMnode->syncMgmt.restored = false;
mInfo("start to apply snapshot to sdb, len:%d", len);
int32_t code = sdbApplySnapshot(pMnode->pSdb, pBuf, len);
if (code != 0) {
mError("failed to apply snapshot to sdb, len:%d", len);
} else {
mInfo("successfully to apply snapshot to sdb, len:%d", len);
pMnode->syncMgmt.restored = true;
}
// taosMemoryFree(pBuf);
return code;
}
void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
@ -116,20 +89,55 @@ void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta)
}
}
int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void **ppReader) {
mInfo("start to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader);
}
int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
mInfo("stop to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStopRead(pMnode->pSdb, pReader);
}
int32_t mndSnapshotDoRead(struct SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
SMnode *pMnode = pFsm->data;
return sdbDoRead(pMnode->pSdb, pReader, ppBuf, len);
}
int32_t mndSnapshotStartWrite(struct SSyncFSM *pFsm, void **ppWriter) {
mInfo("start to apply snapshot to sdb");
SMnode *pMnode = pFsm->data;
return sdbStartWrite(pMnode->pSdb, (SSdbIter **)ppWriter);
}
int32_t mndSnapshotStopWrite(struct SSyncFSM *pFsm, void *pWriter, bool isApply) {
mInfo("stop to apply snapshot to sdb, apply:%d", isApply);
SMnode *pMnode = pFsm->data;
return sdbStopWrite(pMnode->pSdb, pWriter, isApply);
}
int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
SMnode *pMnode = pFsm->data;
return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len);
}
SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
pFsm->data = pMnode;
pFsm->FpCommitCb = mndSyncCommitMsg;
pFsm->FpPreCommitCb = NULL;
pFsm->FpRollBackCb = NULL;
pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpRestoreFinishCb = mndRestoreFinish;
pFsm->FpSnapshotRead = mndSnapshotRead;
pFsm->FpSnapshotApply = mndSnapshotApply;
pFsm->FpReConfigCb = mndReConfig;
pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpSnapshotStartRead = mndSnapshotStartRead;
pFsm->FpSnapshotStopRead = mndSnapshotStopRead;
pFsm->FpSnapshotDoRead = mndSnapshotDoRead;
pFsm->FpSnapshotStartWrite = mndSnapshotStartWrite;
pFsm->FpSnapshotStopWrite = mndSnapshotStopWrite;
pFsm->FpSnapshotDoWrite = mndSnapshotDoWrite;
return pFsm;
}
@ -250,7 +258,7 @@ bool mndIsMaster(SMnode *pMnode) {
return false;
}
if (!pMgmt->restored) {
if (!pMnode->restored) {
terrno = TSDB_CODE_APP_NOT_READY;
return false;
}

View File

@ -464,15 +464,15 @@ static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen)
mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen);
}
static TransCbFp mndTransGetCbFp(ETrnFuncType ftype) {
static TransCbFp mndTransGetCbFp(ETrnFunc ftype) {
switch (ftype) {
case TEST_TRANS_START_FUNC:
case TRANS_START_FUNC_TEST:
return mndTransTestStartFunc;
case TEST_TRANS_STOP_FUNC:
case TRANS_STOP_FUNC_TEST:
return mndTransTestStopFunc;
case MQ_REB_TRANS_START_FUNC:
case TRANS_START_FUNC_MQ_REB:
return mndRebCntInc;
case MQ_REB_TRANS_STOP_FUNC:
case TRANS_STOP_FUNC_TEST_MQ_REB:
return mndRebCntDec;
default:
return NULL;
@ -657,7 +657,7 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen) {
pTrans->rpcRspLen = contLen;
}
void mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen) {
void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen) {
pTrans->startFunc = startFunc;
pTrans->stopFunc = stopFunc;
pTrans->param = param;

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME acctTest
COMMAND acctTest
)
if(NOT TD_WINDOWS)
add_test(
NAME acctTest
COMMAND acctTest
)
endif(NOT TD_WINDOWS)

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME funcTest
COMMAND funcTest
)
if(NOT TD_WINDOWS)
add_test(
NAME funcTest
COMMAND funcTest
)
endif(NOT TD_WINDOWS)

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME profileTest
COMMAND profileTest
)
if(NOT TD_WINDOWS)
add_test(
NAME profileTest
COMMAND profileTest
)
endif(NOT TD_WINDOWS)

View File

@ -492,7 +492,7 @@ TEST_F(MndTestSdb, 01_Write_Str) {
ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2);
ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1);
ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2 );
ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2);
sdbSetApplyIndex(pSdb, -1);
ASSERT_EQ(sdbGetApplyIndex(pSdb), -1);
ASSERT_EQ(mnode.insertTimes, 2);
@ -895,7 +895,35 @@ TEST_F(MndTestSdb, 01_Read_Str) {
ASSERT_EQ(code, TSDB_CODE_SDB_OBJ_CREATING);
}
{
SSdbIter *pReader = NULL;
SSdbIter *pWritter = NULL;
void *pBuf = NULL;
int32_t len = 0;
int32_t code = 0;
code = sdbStartRead(pSdb, &pReader);
ASSERT_EQ(code, 0);
code = sdbStartWrite(pSdb, &pWritter);
ASSERT_EQ(code, 0);
while (sdbDoRead(pSdb, pReader, &pBuf, &len) == 0) {
if (pBuf != NULL && len != 0) {
sdbDoWrite(pSdb, pWritter, pBuf, len);
taosMemoryFree(pBuf);
} else {
break;
}
}
sdbStopRead(pSdb, pReader);
sdbStopWrite(pSdb, pWritter, true);
}
ASSERT_EQ(sdbGetSize(pSdb, SDB_CONSUMER), 1);
ASSERT_EQ(sdbGetTableVer(pSdb, SDB_CONSUMER), 4);
sdbCleanup(pSdb);
ASSERT_EQ(mnode.insertTimes, 5);
ASSERT_EQ(mnode.deleteTimes, 5);
ASSERT_EQ(mnode.insertTimes, 9);
ASSERT_EQ(mnode.deleteTimes, 9);
}

View File

@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
add_test(
NAME showTest
COMMAND showTest
)
if(NOT TD_WINDOWS)
add_test(
NAME showTest
COMMAND showTest
)
endif(NOT TD_WINDOWS)

View File

@ -123,7 +123,7 @@ class MndTestTrans2 : public ::testing::Test {
sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);
char *param = strdup("====> test log <=====");
mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1);
mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);
if (pDb != NULL) {
mndTransSetDbInfo(pTrans, pDb);
@ -156,7 +156,7 @@ class MndTestTrans2 : public ::testing::Test {
sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);
char *param = strdup("====> test action <=====");
mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1);
mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);
{
STransAction action = {0};
@ -228,7 +228,7 @@ class MndTestTrans2 : public ::testing::Test {
sdbSetRawStatus(pUndoRaw, SDB_STATUS_DROPPED);
char *param = strdup("====> test log <=====");
mndTransSetCb(pTrans, TEST_TRANS_START_FUNC, TEST_TRANS_STOP_FUNC, param, strlen(param) + 1);
mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1);
int32_t code = mndTransPrepare(pMnode, pTrans);
mndTransDrop(pTrans);

View File

@ -166,7 +166,6 @@ typedef struct SSdbRow {
typedef struct SSdb {
SMnode *pMnode;
char *currDir;
char *syncDir;
char *tmpDir;
int64_t lastCommitVer;
int64_t curVer;
@ -182,11 +181,13 @@ typedef struct SSdb {
SdbDeployFp deployFps[SDB_MAX];
SdbEncodeFp encodeFps[SDB_MAX];
SdbDecodeFp decodeFps[SDB_MAX];
TdThreadMutex filelock;
} SSdb;
typedef struct SSdbIter {
TdFilePtr file;
int64_t total;
char *name;
} SSdbIter;
typedef struct {
@ -380,8 +381,13 @@ SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow);
void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);
int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len);
int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len);
int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter);
int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter);
int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len);
int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter);
int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply);
int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len);
const char *sdbTableName(ESdbType type);
void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);
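
These declarations replace sdbReadSnapshot/sdbApplySnapshot with explicit start/do/stop phases in both directions, which is what the mndSnapshot* wrappers above plug into. A minimal copy loop over the new API, essentially what the sdb unit test below exercises (a sketch, without production error handling):

// Stream one sdb's snapshot file into another sdb instance.
int32_t copySdbSnapshot(SSdb *pSrc, SSdb *pDst) {
  SSdbIter *pReader = NULL;
  SSdbIter *pWriter = NULL;
  if (sdbStartRead(pSrc, &pReader) != 0) return -1;
  if (sdbStartWrite(pDst, &pWriter) != 0) {
    sdbStopRead(pSrc, pReader);
    return -1;
  }
  void   *pBuf = NULL;
  int32_t len = 0;
  // sdbDoRead returns 0 with a NULL buffer and len == 0 at end of snapshot
  while (sdbDoRead(pSrc, pReader, &pBuf, &len) == 0 && pBuf != NULL && len > 0) {
    sdbDoWrite(pDst, pWriter, pBuf, len);
    taosMemoryFree(pBuf);
    pBuf = NULL;
  }
  sdbStopRead(pSrc, pReader);
  return sdbStopWrite(pDst, pWriter, true);  // isApply = true reloads the sdb from file
}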

View File

@ -56,6 +56,7 @@ SSdb *sdbInit(SSdbOpt *pOption) {
pSdb->curTerm = -1;
pSdb->lastCommitVer = -1;
pSdb->pMnode = pOption->pMnode;
taosThreadMutexInit(&pSdb->filelock, NULL);
mDebug("sdb init successfully");
return pSdb;
}
@ -69,11 +70,8 @@ void sdbCleanup(SSdb *pSdb) {
taosMemoryFreeClear(pSdb->currDir);
}
if (pSdb->syncDir != NULL) {
taosMemoryFreeClear(pSdb->syncDir);
}
if (pSdb->tmpDir != NULL) {
taosRemoveDir(pSdb->tmpDir);
taosMemoryFreeClear(pSdb->tmpDir);
}
@ -104,6 +102,7 @@ void sdbCleanup(SSdb *pSdb) {
mDebug("sdb table:%s is cleaned up", sdbTableName(i));
}
taosThreadMutexDestroy(&pSdb->filelock);
taosMemoryFree(pSdb);
mDebug("sdb is cleaned up");
}

View File

@ -22,13 +22,14 @@
#define SDB_RESERVE_SIZE 512
#define SDB_FILE_VER 1
static int32_t sdbRunDeployFp(SSdb *pSdb) {
static int32_t sdbDeployData(SSdb *pSdb) {
mDebug("start to deploy sdb");
for (int32_t i = SDB_MAX - 1; i >= 0; --i) {
SdbDeployFp fp = pSdb->deployFps[i];
if (fp == NULL) continue;
mDebug("start to deploy sdb:%s", sdbTableName(i));
if ((*fp)(pSdb->pMnode) != 0) {
mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr());
return -1;
@ -39,6 +40,39 @@ static int32_t sdbRunDeployFp(SSdb *pSdb) {
return 0;
}
static void sdbResetData(SSdb *pSdb) {
mDebug("start to reset sdb");
for (ESdbType i = 0; i < SDB_MAX; ++i) {
SHashObj *hash = pSdb->hashObjs[i];
if (hash == NULL) continue;
SSdbRow **ppRow = taosHashIterate(hash, NULL);
while (ppRow != NULL) {
SSdbRow *pRow = *ppRow;
if (pRow == NULL) continue;
sdbFreeRow(pSdb, pRow, true);
ppRow = taosHashIterate(hash, ppRow);
}
}
for (ESdbType i = 0; i < SDB_MAX; ++i) {
SHashObj *hash = pSdb->hashObjs[i];
if (hash == NULL) continue;
taosHashClear(pSdb->hashObjs[i]);
pSdb->tableVer[i] = 0;
pSdb->maxId[i] = 0;
mDebug("sdb:%s is reset", sdbTableName(i));
}
pSdb->curVer = -1;
pSdb->curTerm = -1;
pSdb->lastCommitVer = -1;
mDebug("sdb reset successfully");
}
static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
int64_t sver = 0;
int32_t ret = taosReadFile(pFile, &sver, sizeof(int64_t));
@ -169,11 +203,15 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) {
return 0;
}
int32_t sdbReadFile(SSdb *pSdb) {
static int32_t sdbReadFileImp(SSdb *pSdb) {
int64_t offset = 0;
int32_t code = 0;
int32_t readLen = 0;
int64_t ret = 0;
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
mDebug("start to read file:%s", file);
SSdbRaw *pRaw = taosMemoryMalloc(WAL_MAX_SIZE + 100);
if (pRaw == NULL) {
@ -182,10 +220,6 @@ int32_t sdbReadFile(SSdb *pSdb) {
return -1;
}
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
mDebug("start to read file:%s", file);
TdFilePtr pFile = taosOpenFile(file, TD_FILE_READ);
if (pFile == NULL) {
taosMemoryFree(pRaw);
@ -196,8 +230,6 @@ int32_t sdbReadFile(SSdb *pSdb) {
if (sdbReadFileHead(pSdb, pFile) != 0) {
mError("failed to read file:%s head since %s", file, terrstr());
pSdb->curVer = -1;
pSdb->curTerm = -1;
taosMemoryFree(pRaw);
taosCloseFile(&pFile);
return -1;
@ -264,6 +296,20 @@ _OVER:
return code;
}
int32_t sdbReadFile(SSdb *pSdb) {
taosThreadMutexLock(&pSdb->filelock);
sdbResetData(pSdb);
int32_t code = sdbReadFileImp(pSdb);
if (code != 0) {
mError("failed to read sdb since %s", terrstr());
sdbResetData(pSdb);
}
taosThreadMutexUnlock(&pSdb->filelock);
return code;
}
static int32_t sdbWriteFileImp(SSdb *pSdb) {
int32_t code = 0;
@ -378,44 +424,40 @@ int32_t sdbWriteFile(SSdb *pSdb) {
return 0;
}
return sdbWriteFileImp(pSdb);
taosThreadMutexLock(&pSdb->filelock);
int32_t code = sdbWriteFileImp(pSdb);
if (code != 0) {
mError("failed to write sdb since %s", terrstr());
}
taosThreadMutexUnlock(&pSdb->filelock);
return code;
}
int32_t sdbDeploy(SSdb *pSdb) {
if (sdbRunDeployFp(pSdb) != 0) {
if (sdbDeployData(pSdb) != 0) {
return -1;
}
if (sdbWriteFileImp(pSdb) != 0) {
if (sdbWriteFile(pSdb) != 0) {
return -1;
}
return 0;
}
static SSdbIter *sdbOpenIter(SSdb *pSdb) {
char datafile[PATH_MAX] = {0};
char tmpfile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
snprintf(tmpfile, sizeof(datafile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
if (taosCopyFile(datafile, tmpfile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to copy file %s to %s since %s", datafile, tmpfile, terrstr());
return NULL;
}
static SSdbIter *sdbCreateIter(SSdb *pSdb) {
SSdbIter *pIter = taosMemoryCalloc(1, sizeof(SSdbIter));
if (pIter == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pIter->file = taosOpenFile(tmpfile, TD_FILE_READ);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to read file:%s since %s", tmpfile, terrstr());
char name[PATH_MAX + 100] = {0};
snprintf(name, sizeof(name), "%s%ssdb.data.%" PRIu64, pSdb->tmpDir, TD_DIRSEP, (uint64_t)pIter);
pIter->name = strdup(name);
if (pIter->name == NULL) {
taosMemoryFree(pIter);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
@ -424,130 +466,145 @@ static SSdbIter *sdbOpenIter(SSdb *pSdb) {
static void sdbCloseIter(SSdbIter *pIter) {
if (pIter == NULL) return;
if (pIter->file != NULL) {
taosCloseFile(&pIter->file);
pIter->file = NULL;
}
if (pIter->name != NULL) {
taosRemoveFile(pIter->name);
taosMemoryFree(pIter->name);
pIter->name = NULL;
}
mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total);
taosMemoryFree(pIter);
mInfo("sdbiter:%p, is closed", pIter);
}
static SSdbIter *sdbGetIter(SSdb *pSdb, SSdbIter **ppIter) {
SSdbIter *pIter = NULL;
if (ppIter != NULL) pIter = *ppIter;
if (pIter == NULL) {
pIter = sdbOpenIter(pSdb);
if (pIter != NULL) {
mInfo("sdbiter:%p, is created to read snapshot", pIter);
*ppIter = pIter;
} else {
mError("failed to create sdbiter to read snapshot since %s", terrstr());
*ppIter = NULL;
return NULL;
}
} else {
mInfo("sdbiter:%p, continue to read snapshot, total:%" PRId64, pIter, pIter->total);
}
return pIter;
}
int32_t sdbReadSnapshot(SSdb *pSdb, SSdbIter **ppIter, char **ppBuf, int32_t *len) {
const int32_t maxlen = 100;
SSdbIter *pIter = sdbGetIter(pSdb, ppIter);
int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) {
SSdbIter *pIter = sdbCreateIter(pSdb);
if (pIter == NULL) return -1;
char *pBuf = taosMemoryCalloc(1, maxlen);
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
char datafile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
taosThreadMutexLock(&pSdb->filelock);
if (taosCopyFile(datafile, pIter->name) < 0) {
taosThreadMutexUnlock(&pSdb->filelock);
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to copy file %s to %s since %s", datafile, pIter->name, terrstr());
sdbCloseIter(pIter);
return -1;
}
taosThreadMutexUnlock(&pSdb->filelock);
pIter->file = taosOpenFile(pIter->name, TD_FILE_READ);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to open file:%s since %s", pIter->name, terrstr());
sdbCloseIter(pIter);
return -1;
}
*ppIter = pIter;
mInfo("sdbiter:%p, is created to read snapshot, file:%s", pIter, pIter->name);
return 0;
}
int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) {
sdbCloseIter(pIter);
return 0;
}
int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
int32_t maxlen = 100;
void *pBuf = taosMemoryCalloc(1, maxlen);
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
int32_t readlen = taosReadFile(pIter->file, pBuf, maxlen);
if (readlen < 0 || (readlen == 0 && errno != 0)) {
if (readlen < 0 || readlen > maxlen) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to read snapshot since %s, total:%" PRId64, pIter, terrstr(), pIter->total);
*ppBuf = NULL;
*len = 0;
*ppIter = NULL;
sdbCloseIter(pIter);
taosMemoryFree(pBuf);
return -1;
} else if (readlen == 0) {
mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
*ppBuf = NULL;
*len = 0;
*ppIter = NULL;
sdbCloseIter(pIter);
taosMemoryFree(pBuf);
return 0;
} else if ((readlen < maxlen && errno != 0) || readlen == maxlen) {
} else { // (readlen <= maxlen)
pIter->total += readlen;
mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
*ppBuf = pBuf;
*len = readlen;
return 0;
} else if (readlen < maxlen && errno == 0) {
mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
*ppBuf = pBuf;
*len = readlen;
*ppIter = NULL;
sdbCloseIter(pIter);
return 0;
} else {
// impossible
mError("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
*ppBuf = NULL;
*len = 0;
*ppIter = NULL;
sdbCloseIter(pIter);
taosMemoryFree(pBuf);
return -1;
}
}
int32_t sdbApplySnapshot(SSdb *pSdb, char *pBuf, int32_t len) {
char datafile[PATH_MAX] = {0};
char tmpfile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
snprintf(tmpfile, sizeof(datafile), "%s%ssdb.data", pSdb->tmpDir, TD_DIRSEP);
int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
SSdbIter *pIter = sdbCreateIter(pSdb);
if (pIter == NULL) return -1;
TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
pIter->file = taosOpenFile(pIter->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to write %s since %s", tmpfile, terrstr());
mError("failed to open %s since %s", pIter->name, terrstr());
return -1;
}
int32_t writelen = taosWriteFile(pFile, pBuf, len);
*ppIter = pIter;
mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name);
return 0;
}
int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply) {
int32_t code = 0;
if (!isApply) {
sdbCloseIter(pIter);
mInfo("sdbiter:%p, not apply to sdb", pIter);
return 0;
}
taosFsyncFile(pIter->file);
taosCloseFile(&pIter->file);
pIter->file = NULL;
char datafile[PATH_MAX] = {0};
snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
if (taosRenameFile(pIter->name, datafile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr());
sdbCloseIter(pIter);
return -1;
}
sdbCloseIter(pIter);
if (sdbReadFile(pSdb) != 0) {
mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
return -1;
}
mInfo("sdbiter:%p, successfully applyed to sdb", pIter);
return 0;
}
int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
int32_t writelen = taosWriteFile(pIter->file, pBuf, len);
if (writelen != len) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to write %s since %s", tmpfile, terrstr());
taosCloseFile(&pFile);
return -1;
}
if (taosFsyncFile(pFile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to fsync %s since %s", tmpfile, terrstr());
taosCloseFile(&pFile);
return -1;
}
(void)taosCloseFile(&pFile);
if (taosRenameFile(tmpfile, datafile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to rename file %s to %s since %s", tmpfile, datafile, terrstr());
return -1;
}
if (sdbReadFile(pSdb) != 0) {
mError("failed to read from %s since %s", datafile, terrstr());
mError("failed to write len:%d since %s, total:%" PRId64, len, terrstr(), pIter->total);
return -1;
}
pIter->total += writelen;
mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
return 0;
}
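
sdbStopWrite above finishes with the classic write-temp, fsync, rename-over-target, reload sequence, so a crash can never leave a half-written sdb.data visible. The same pattern in plain POSIX C, as a self-contained reference (a generic sketch, not TDengine code):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

// Atomically replace 'path' with 'buf': readers observe either the old file
// or the complete new one, never a partial write.
static int atomicReplace(const char *path, const void *buf, size_t len) {
  char tmp[1024];
  snprintf(tmp, sizeof(tmp), "%s.tmp", path);
  int fd = open(tmp, O_CREAT | O_WRONLY | O_TRUNC, 0644);
  if (fd < 0) return -1;
  if (write(fd, buf, len) != (ssize_t)len || fsync(fd) != 0) {
    close(fd);
    unlink(tmp);
    return -1;
  }
  close(fd);
  return rename(tmp, path);  // atomic on POSIX filesystems
}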

View File

@ -40,37 +40,46 @@ void qndClose(SQnode *pQnode) {
taosMemoryFree(pQnode);
}
int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) { return 0; }
int32_t qndGetLoad(SQnode *pQnode, SQnodeLoad *pLoad) {
SMsgCb* pCb = &pQnode->msgCb;
int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) {
pLoad->numOfQueryInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, QUERY_QUEUE);
pLoad->numOfFetchInQueue = pCb->qsizeFp(pCb->mgmt, pQnode->qndId, FETCH_QUEUE);
pLoad->waitTimeInQueryQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, QUERY_QUEUE);
pLoad->waitTimeInFetchQUeue = qWorkerGetWaitTimeInQueue(pQnode->pQuery, FETCH_QUEUE);
return 0;
}
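
qndGetLoad now reports real queue depths and accumulated wait times instead of returning zeros. A monitoring caller might use it like this (a sketch; report() and the exact SQnodeLoad field types are assumptions):

SQnodeLoad load = {0};
if (qndGetLoad(pQnode, &load) == 0) {
  // feed the counters into the dnode status/heartbeat report
  report(load.numOfQueryInQueue, load.numOfFetchInQueue,
         load.waitTimeInQueryQUeue, load.waitTimeInFetchQUeue);  // report() is hypothetical
}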
int32_t qndProcessQueryMsg(SQnode *pQnode, int64_t ts, SRpcMsg *pMsg) {
int32_t code = -1;
SReadHandle handle = {.pMsgCb = &pQnode->msgCb};
qTrace("message in qnode queue is processing");
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg);
code = qWorkerProcessQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_QUERY_CONTINUE:
code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg);
code = qWorkerProcessCQueryMsg(&handle, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH:
code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessFetchMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_FETCH_RSP:
code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CANCEL_TASK:
code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_DROP_TASK:
code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
case TDMT_VND_CONSUME:
// code = tqProcessConsumeReq(pQnode->pTq, pMsg);
// break;
case TDMT_VND_QUERY_HEARTBEAT:
code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg);
code = qWorkerProcessHbMsg(pQnode, pQnode->pQuery, pMsg, ts);
break;
default:
qError("unknown msg type:%d in qnode queue", pMsg->msgType);
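
Every qWorkerProcess*Msg call in this switch now carries a ts argument, the time the message entered the queue, which is what lets qWorkerGetWaitTimeInQueue above produce meaningful numbers. The producer side presumably stamps the message at enqueue time, along these lines (an assumption; the enqueue code is not part of this diff):

int64_t ts = taosGetTimestampUs();     // captured when the message is queued
// ... message waits in the query queue ...
qndProcessQueryMsg(pQnode, ts, pMsg);  // the worker can derive wait time as now - ts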

View File

@ -105,10 +105,12 @@ tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STab
void *pMemRef);
int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
bool isTsdbCacheLastRow(tsdbReaderT *pReader);
int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list);
int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list);
void * tsdbGetIdx(SMeta *pMeta);
int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle);
bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
int32_t tsdbRetrieveDataBlockStatisInfo(tsdbReaderT *pTsdbReadHandle, SColumnDataAgg ***pBlockStatis, bool *allHave);
SArray *tsdbRetrieveDataBlock(tsdbReaderT *pTsdbReadHandle, SArray *pColumnIdList);
void tsdbResetReadHandle(tsdbReaderT queryHandle, SQueryTableDataCond *pCond, int32_t tWinIdx);
@ -174,7 +176,7 @@ struct SMetaEntry {
int64_t version;
int8_t type;
tb_uid_t uid;
char *name;
char * name;
union {
struct {
SSchemaWrapper schemaRow;
@ -202,17 +204,17 @@ struct SMetaEntry {
struct SMetaReader {
int32_t flags;
SMeta *pMeta;
SMeta * pMeta;
SDecoder coder;
SMetaEntry me;
void *pBuf;
void * pBuf;
int32_t szBuf;
};
struct SMTbCursor {
TBC *pDbc;
void *pKey;
void *pVal;
TBC * pDbc;
void * pKey;
void * pVal;
int32_t kLen;
int32_t vLen;
SMetaReader mr;

View File

@ -79,13 +79,14 @@ struct STsdb {
struct STable {
uint64_t tid;
uint64_t uid;
STSchema *pSchema;
STSchema *pSchema; // latest schema
STSchema *pCacheSchema; // cached schema of the last requested version
};
#define TABLE_TID(t) (t)->tid
#define TABLE_UID(t) (t)->uid
int tsdbPrepareCommit(STsdb *pTsdb);
int tsdbPrepareCommit(STsdb *pTsdb);
typedef enum {
TSDB_FILE_HEAD = 0, // .head
TSDB_FILE_DATA, // .data
@ -181,13 +182,15 @@ int tsdbUnlockRepo(STsdb *pTsdb);
static FORCE_INLINE STSchema *tsdbGetTableSchemaImpl(STsdb *pTsdb, STable *pTable, bool lock, bool copy,
int32_t version) {
if ((version != -1) && (schemaVersion(pTable->pSchema) != version)) {
taosMemoryFreeClear(pTable->pSchema);
pTable->pSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version);
if ((version < 0) || (schemaVersion(pTable->pSchema) == version)) {
return pTable->pSchema;
}
return pTable->pSchema;
if (!pTable->pCacheSchema || (schemaVersion(pTable->pCacheSchema) != version)) {
taosMemoryFreeClear(pTable->pCacheSchema);
pTable->pCacheSchema = metaGetTbTSchema(REPO_META(pTsdb), pTable->uid, version);
}
return pTable->pCacheSchema;
}
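
After this change pSchema always holds the latest schema, while pCacheSchema memoizes the most recently requested historical version and is refetched only on a version miss. Passing -1 requests the latest schema, which is the convention the commit switches tdExecuteRSma and the tsdb commit code to below. A short usage sketch (the explicit version number 3 is hypothetical):

// Latest schema: served straight from pTable->pSchema, no meta lookup.
STSchema *pLatest = tsdbGetTableSchemaImpl(pTsdb, pTable, false, false, -1);

// Specific version: served from pCacheSchema, refetched only when the
// cached version does not match.
STSchema *pV3 = tsdbGetTableSchemaImpl(pTsdb, pTable, false, false, 3);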
// tsdbMemTable.h

View File

@ -103,6 +103,7 @@ SArray* metaGetSmaTbUids(SMeta* pMeta);
int32_t metaSnapshotReaderOpen(SMeta* pMeta, SMetaSnapshotReader** ppReader, int64_t sver, int64_t ever);
int32_t metaSnapshotReaderClose(SMetaSnapshotReader* pReader);
int32_t metaSnapshotRead(SMetaSnapshotReader* pReader, void** ppData, uint32_t* nData);
void* metaGetIdx(SMeta* pMeta);
int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);

View File

@ -155,44 +155,53 @@ int metaTbCursorNext(SMTbCursor *pTbCur) {
}
SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, bool isinline) {
void *pKey = NULL;
void *pVal = NULL;
int kLen = 0;
int vLen = 0;
int ret;
SSkmDbKey skmDbKey;
SSchemaWrapper *pSW = NULL;
SSchema *pSchema = NULL;
void *pBuf;
SDecoder coder = {0};
void *pData = NULL;
int nData = 0;
int64_t version;
SSchemaWrapper schema = {0};
SSchemaWrapper *pSchema = NULL;
SDecoder dc = {0};
// fetch
skmDbKey.uid = uid;
skmDbKey.sver = sver;
pKey = &skmDbKey;
kLen = sizeof(skmDbKey);
metaRLock(pMeta);
ret = tdbTbGet(pMeta->pSkmDb, pKey, kLen, &pVal, &vLen);
metaULock(pMeta);
if (ret < 0) {
return NULL;
if (sver < 0) {
if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) {
goto _err;
}
version = *(int64_t *)pData;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData);
SMetaEntry me = {0};
tDecoderInit(&dc, pData, nData);
metaDecodeEntry(&dc, &me);
if (me.type == TSDB_SUPER_TABLE) {
pSchema = tCloneSSchemaWrapper(&me.stbEntry.schemaRow);
} else if (me.type == TSDB_NORMAL_TABLE) {
pSchema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow);
} else {
ASSERT(0);
}
tDecoderClear(&dc);
} else {
if (tdbTbGet(pMeta->pSkmDb, &(SSkmDbKey){.uid = uid, .sver = sver}, sizeof(SSkmDbKey), &pData, &nData) < 0) {
goto _err;
}
tDecoderInit(&dc, pData, nData);
tDecodeSSchemaWrapper(&dc, &schema);
pSchema = tCloneSSchemaWrapper(&schema);
tDecoderClear(&dc);
}
// decode
pBuf = pVal;
pSW = taosMemoryMalloc(sizeof(SSchemaWrapper));
metaULock(pMeta);
tdbFree(pData);
return pSchema;
tDecoderInit(&coder, pVal, vLen);
tDecodeSSchemaWrapper(&coder, pSW);
pSchema = taosMemoryMalloc(sizeof(SSchema) * pSW->nCols);
memcpy(pSchema, pSW->pSchema, sizeof(SSchema) * pSW->nCols);
tDecoderClear(&coder);
pSW->pSchema = pSchema;
tdbFree(pVal);
return pSW;
_err:
metaULock(pMeta);
tdbFree(pData);
return NULL;
}
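
metaGetTableSchema now treats a negative sver as "latest": it resolves the current version through pUidIdx, decodes the table entry from pTbDb, and clones the row schema, instead of requiring an exact (uid, sver) key in pSkmDb. Either way the caller receives a clone it owns (a sketch; useSchema() and the release step are assumptions):

// Latest schema of a table by uid; pass an explicit sver for older versions.
SSchemaWrapper *pSW = metaGetTableSchema(pMeta, uid, -1, 0);
if (pSW != NULL) {
  useSchema(pSW);  // useSchema() is hypothetical
  // pSW is a caller-owned clone; free it with the matching schema-wrapper
  // destroy helper (not shown in this diff).
}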
struct SMCtbCursor {
@ -291,7 +300,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
pSW = metaGetTableSchema(pMeta, quid, sver, 0);
if (!pSW) return NULL;
tdInitTSchemaBuilder(&sb, sver);
tdInitTSchemaBuilder(&sb, pSW->version);
for (int i = 0; i < pSW->nCols; i++) {
pSchema = pSW->pSchema + i;
tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);

View File

@ -31,9 +31,9 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
int vLen = 0;
const void *pKey = NULL;
const void *pVal = NULL;
void *pBuf = NULL;
void * pBuf = NULL;
int32_t szBuf = 0;
void *p = NULL;
void * p = NULL;
SMetaReader mr = {0};
// validate req
@ -87,7 +87,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq) {
}
// drop all child tables
TBC *pCtbIdxc = NULL;
TBC * pCtbIdxc = NULL;
SArray *pArray = taosArrayInit(8, sizeof(tb_uid_t));
tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, &pMeta->txn);
@ -142,8 +142,8 @@ _exit:
int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
SMetaEntry oStbEntry = {0};
SMetaEntry nStbEntry = {0};
TBC *pUidIdxc = NULL;
TBC *pTbDbc = NULL;
TBC * pUidIdxc = NULL;
TBC * pTbDbc = NULL;
const void *pData;
int nData;
int64_t oversion;
@ -262,7 +262,7 @@ _err:
}
int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
void *pData = NULL;
void * pData = NULL;
int nData = 0;
int rc = 0;
tb_uid_t uid;
@ -288,7 +288,7 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
}
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
void *pData = NULL;
void * pData = NULL;
int nData = 0;
int rc = 0;
int64_t version;
@ -324,14 +324,14 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
}
static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
void *pVal = NULL;
void * pVal = NULL;
int nVal = 0;
const void *pData = NULL;
const void * pData = NULL;
int nData = 0;
int ret = 0;
tb_uid_t uid;
int64_t oversion;
SSchema *pColumn = NULL;
SSchema * pColumn = NULL;
SMetaEntry entry = {0};
SSchemaWrapper *pSchema;
int c;
@ -479,7 +479,7 @@ _err:
static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
SMetaEntry ctbEntry = {0};
SMetaEntry stbEntry = {0};
void *pVal = NULL;
void * pVal = NULL;
int nVal = 0;
int ret;
int c;
@ -510,7 +510,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
oversion = *(int64_t *)pData;
// search table.db
TBC *pTbDbc = NULL;
TBC * pTbDbc = NULL;
SDecoder dc1 = {0};
SDecoder dc2 = {0};
@ -534,7 +534,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
metaDecodeEntry(&dc2, &stbEntry);
SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag;
SSchema *pColumn = NULL;
SSchema * pColumn = NULL;
int32_t iCol = 0;
for (;;) {
pColumn = NULL;
@ -639,8 +639,8 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq) {
static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) {
STbDbKey tbDbKey;
void *pKey = NULL;
void *pVal = NULL;
void * pKey = NULL;
void * pVal = NULL;
int kLen = 0;
int vLen = 0;
SEncoder coder = {0};
@ -755,14 +755,14 @@ static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey) {
}
static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
void *pData = NULL;
void * pData = NULL;
int nData = 0;
STbDbKey tbDbKey = {0};
SMetaEntry stbEntry = {0};
STagIdxKey *pTagIdxKey = NULL;
STagIdxKey * pTagIdxKey = NULL;
int32_t nTagIdxKey;
const SSchema *pTagColumn; // = &stbEntry.stbEntry.schema.pSchema[0];
const void *pTagData = NULL; //
const void * pTagData = NULL; //
SDecoder dc = {0};
// get super table
@ -804,7 +804,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
SEncoder coder = {0};
void *pVal = NULL;
void * pVal = NULL;
int vLen = 0;
int rcode = 0;
SSkmDbKey skmDbKey = {0};
@ -880,3 +880,11 @@ _err:
metaULock(pMeta);
return -1;
}
// refactor later
void *metaGetIdx(SMeta *pMeta) {
#ifdef USE_INVERTED_INDEX
return pMeta->pTagIvtIdx;
#else
return pMeta->pTagIdx;
#endif
}

View File

@ -441,7 +441,7 @@ static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb
if (inputType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
// TODO: use the proper schema instead of 0, and cache STSchema in cache
STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, 1);
STSchema *pTSchema = metaGetTbTSchema(SMA_META(pSma), suid, -1);
if (!pTSchema) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return TSDB_CODE_FAILED;

View File

@ -235,6 +235,15 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
}
}
}
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;
SStreamTask* pTask = (SStreamTask*)pIter;
if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd);
ASSERT(code == 0);
}
}
return 0;
}
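
The added loop follows the standard taosHashIterate idiom: pass NULL to begin, pass the previous slot to advance, and stop when the iterator returns NULL. The same shape works for any SHashObj walk (a generic sketch):

void *pIter = NULL;
while ((pIter = taosHashIterate(pHash, pIter)) != NULL) {
  SStreamTask *pTask = (SStreamTask *)pIter;
  // ... inspect or update pTask ...
}
// Nothing to release once taosHashIterate has returned NULL; if you break out
// early, call taosHashCancelIterate(pHash, pIter) instead (assumption based on
// the usual pairing of these two helpers).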

View File

@ -84,8 +84,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols
static void tsdbResetCommitTable(SCommitH *pCommith);
static void tsdbCloseCommitFile(SCommitH *pCommith, bool hasError);
static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *pInfo);
static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget,
TSKEY maxKey, int maxRows, int8_t update);
static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter,
SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update);
int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf);
int tsdbApplyRtnOnFSet(STsdb *pRepo, SDFileSet *pSet, SRtn *pRtn) {
@ -466,7 +466,7 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) {
pTbData = (STbData *)pNode->pData;
pCommitIter = pCommith->iters + i;
pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 1); // TODO: schema version
pTSchema = metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, -1);
if (pTSchema) {
pCommitIter->pIter = tSkipListCreateIter(pTbData->pData);
@ -475,7 +475,8 @@ static int tsdbCreateCommitIters(SCommitH *pCommith) {
pCommitIter->pTable = (STable *)taosMemoryMalloc(sizeof(STable));
pCommitIter->pTable->uid = pTbData->uid;
pCommitIter->pTable->tid = pTbData->uid;
pCommitIter->pTable->pSchema = pTSchema; // metaGetTbTSchema(REPO_META(pRepo), pTbData->uid, 0);
pCommitIter->pTable->pSchema = pTSchema;
pCommitIter->pTable->pCacheSchema = NULL;
}
}
tSkipListDestroyIter(pSlIter);
@ -490,6 +491,7 @@ static void tsdbDestroyCommitIters(SCommitH *pCommith) {
tSkipListDestroyIter(pCommith->iters[i].pIter);
if (pCommith->iters[i].pTable) {
tdFreeSchema(pCommith->iters[i].pTable->pSchema);
tdFreeSchema(pCommith->iters[i].pTable->pCacheSchema);
taosMemoryFreeClear(pCommith->iters[i].pTable);
}
}
@ -914,7 +916,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
while (bidx < nBlocks) {
if (!pTSchema && !tsdbCommitIsSameFile(pCommith, bidx)) {
// Set commit table
pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, 1); // TODO: schema version
pTSchema = metaGetTbTSchema(REPO_META(pTsdb), pIdx->uid, -1); // TODO: schema version
if (!pTSchema) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -948,7 +950,7 @@ static int tsdbMoveBlkIdx(SCommitH *pCommith, SBlockIdx *pIdx) {
}
static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable) {
STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith),pTable, false, false, -1);
STSchema *pSchema = tsdbGetTableSchemaImpl(TSDB_COMMIT_REPO(pCommith), pTable, false, false, -1);
pCommith->pTable = pTable;
@ -1422,8 +1424,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols
int biter = 0;
while (true) {
tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols, keyLimit, defaultRows,
pCfg->update);
tsdbLoadAndMergeFromCache(TSDB_COMMIT_REPO(pCommith), pCommith->readh.pDCols[0], &biter, pIter, pCommith->pDataCols,
keyLimit, defaultRows, pCfg->update);
if (pCommith->pDataCols->numOfRows == 0) break;
@ -1447,8 +1449,8 @@ static int tsdbMergeBlockData(SCommitH *pCommith, SCommitIter *pIter, SDataCols
return 0;
}
static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget,
TSKEY maxKey, int maxRows, int8_t update) {
static void tsdbLoadAndMergeFromCache(STsdb *pTsdb, SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter,
SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update) {
TSKEY key1 = INT64_MAX;
TSKEY key2 = INT64_MAX;
TSKEY lastKey = TSKEY_INITIAL_VAL;

View File

@ -248,8 +248,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbReadHandle* pTsdbReadHandle, S
}
taosArrayPush(pTableCheckInfo, &info);
tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId,
info.lastKey, pTsdbReadHandle->idStr);
tsdbDebug("%p check table uid:%" PRId64 " from lastKey:%" PRId64 " %s", pTsdbReadHandle, info.tableId, info.lastKey,
pTsdbReadHandle->idStr);
}
// TODO group table according to the tag value.
@ -352,13 +352,16 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, STsdbReadHandle* pReadHandle,
}
if (level == TSDB_RETENTION_L0) {
tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L0);
tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
TSDB_RETENTION_L0);
return VND_RSMA0(pVnode);
} else if (level == TSDB_RETENTION_L1) {
tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L1);
tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
TSDB_RETENTION_L1);
return VND_RSMA1(pVnode);
} else {
tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle, TSDB_RETENTION_L2);
tsdbDebug("vgId:%d read handle %p rsma level %d is selected to query", TD_VID(pVnode), pReadHandle,
TSDB_RETENTION_L2);
return VND_RSMA2(pVnode);
}
}
@ -401,7 +404,7 @@ static STsdbReadHandle* tsdbQueryTablesImpl(SVnode* pVnode, SQueryTableDataCond*
if (pCond->numOfCols > 0) {
int32_t rowLen = 0;
for(int32_t i = 0; i < pCond->numOfCols; ++i) {
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
rowLen += pCond->colList[i].bytes;
}
@ -685,7 +688,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) {
}
// leave only one table for each group
//static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
// static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGroupList) {
// assert(pGroupList);
// size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
//
@ -717,7 +720,7 @@ SArray* tsdbGetQueriedTableList(tsdbReaderT* pHandle) {
// return pNew;
//}
//tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
// tsdbReaderT tsdbQueryRowsInExternalWindow(SVnode* pVnode, SQueryTableDataCond* pCond, STableGroupInfo* groupList,
// uint64_t qId, uint64_t taskId) {
// STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList);
//
@ -1324,7 +1327,6 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
if ((ascScan && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
bool cacheDataInFileBlockHole = (ascScan && (key != TSKEY_INITIAL_VAL && key < binfo.window.skey)) ||
(!ascScan && (key != TSKEY_INITIAL_VAL && key > binfo.window.ekey));
if (cacheDataInFileBlockHole) {
@ -1367,7 +1369,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
pTsdbReadHandle->realNumOfRows = binfo.rows;
cur->rows = binfo.rows;
cur->win = binfo.window;
cur->win = binfo.window;
cur->mixBlock = false;
cur->blockCompleted = true;
@ -1378,9 +1380,9 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
cur->lastKey = binfo.window.skey - 1;
cur->pos = -1;
}
} else { // partially copy to dest buffer
} else { // partially copy to dest buffer
// make sure to only load once
bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows -1 && (!ascScan)));
bool firstTimeExtract = ((cur->pos == 0 && ascScan) || (cur->pos == binfo.rows - 1 && (!ascScan)));
if (pTsdbReadHandle->outputCapacity < binfo.rows && firstTimeExtract) {
code = doLoadFileDataBlock(pTsdbReadHandle, pBlock, pCheckInfo, cur->slot);
if (code != TSDB_CODE_SUCCESS) {
@ -1393,7 +1395,7 @@ static int32_t handleDataMergeIfNeeded(STsdbReadHandle* pTsdbReadHandle, SBlock*
}
if (pTsdbReadHandle->outputCapacity >= binfo.rows) {
ASSERT(cur->blockCompleted);
ASSERT(cur->blockCompleted || cur->mixBlock);
}
if (cur->rows == binfo.rows) {
@ -1889,7 +1891,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t step = ascScan? 1 : -1;
int32_t step = ascScan ? 1 : -1;
int32_t start = cur->pos;
int32_t end = endPos;
@ -1904,8 +1906,8 @@ static void copyAllRemainRowsFromFileBlock(STsdbReadHandle* pTsdbReadHandle, STa
// the time window should always be ascending order: skey <= ekey
cur->win = (STimeWindow){.skey = tsArray[start], .ekey = tsArray[end]};
cur->mixBlock = (numOfRows != pBlockInfo->rows);
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = (ascScan? (endPos == pBlockInfo->rows - 1):(endPos == 0));
cur->lastKey = tsArray[endPos] + step;
cur->blockCompleted = (ascScan ? (endPos == pBlockInfo->rows - 1) : (endPos == 0));
// The value of pos may be -1 or pBlockInfo->rows, and it is invalid in both cases.
int32_t pos = endPos + step;
@ -1921,7 +1923,7 @@ int32_t getEndPosInDataBlock(STsdbReadHandle* pTsdbReadHandle, SDataBlockInfo* p
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t order = ascScan? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
int32_t order = ascScan ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
SQueryFilePos* cur = &pTsdbReadHandle->cur;
SDataCols* pCols = pTsdbReadHandle->rhelper.pDCols[0];
@ -1981,7 +1983,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst &&
tsArray[pBlock->numOfRows - 1] == pBlock->keyLast);
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
bool ascScan = ASCENDING_TRAVERSE(pTsdbReadHandle->order);
int32_t step = ascScan ? 1 : -1;
// for search the endPos, so the order needs to reverse
@ -1992,8 +1994,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
STimeWindow* pWin = &blockInfo.window;
tsdbDebug("%p uid:%" PRIu64 " start merge data block, file block range:%" PRIu64 "-%" PRIu64
" rows:%d, start:%d, end:%d, %s", pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows,
cur->pos, endPos, pTsdbReadHandle->idStr);
" rows:%d, start:%d, end:%d, %s",
pTsdbReadHandle, pCheckInfo->tableId, pWin->skey, pWin->ekey, blockInfo.rows, cur->pos, endPos,
pTsdbReadHandle->idStr);
// compared with the data from in-memory buffer, to generate the correct timestamp array list
int32_t numOfRows = 0;
@ -2112,8 +2115,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
// still assign data into current row
numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
numOfRows +=
mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols,
pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend);
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
@ -2178,8 +2182,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
* if cache is empty, load remain file block data. In contrast, if there are remain data in cache, do NOT
* copy them all to result buffer, since it may be overlapped with file data block.
*/
if (node == NULL ||
((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
if (node == NULL || ((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) > pTsdbReadHandle->window.ekey) && ascScan) ||
((TD_ROW_KEY((STSRow*)SL_GET_NODE_DATA(node)) < pTsdbReadHandle->window.ekey) && !ascScan)) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
@ -2200,7 +2203,7 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf
}
cur->blockCompleted = (((pos > endPos || cur->lastKey > pTsdbReadHandle->window.ekey) && ascScan) ||
((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
((pos < endPos || cur->lastKey < pTsdbReadHandle->window.ekey) && !ascScan));
if (!ascScan) {
TSWAP(cur->win.skey, cur->win.ekey);
@ -2819,6 +2822,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
return numOfRows;
}
void* tsdbGetIdx(SMeta* pMeta) {
if (pMeta == NULL) {
return NULL;
}
return metaGetIdx(pMeta);
}
int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) {
SMCtbCursor* pCur = metaOpenCtbCursor(pMeta, uid);
@ -3407,65 +3416,65 @@ int32_t checkForCachedLast(STsdbReadHandle* pTsdbReadHandle) {
STimeWindow updateLastrowForEachGroup(STableListInfo* pList) {
STimeWindow window = {INT64_MAX, INT64_MIN};
// int32_t totalNumOfTable = 0;
// SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t));
//
// // NOTE: starts from the buffer in case of descending timestamp order check data blocks
// size_t numOfGroups = taosArrayGetSize(groupList->pGroupList);
// for (int32_t j = 0; j < numOfGroups; ++j) {
// SArray* pGroup = taosArrayGetP(groupList->pGroupList, j);
// TSKEY key = TSKEY_INITIAL_VAL;
//
// STableKeyInfo keyInfo = {0};
//
// size_t numOfTables = taosArrayGetSize(pGroup);
// for (int32_t i = 0; i < numOfTables; ++i) {
// STableKeyInfo* pInfo = (STableKeyInfo*)taosArrayGet(pGroup, i);
//
// // if the lastKey equals to INT64_MIN, there is no data in this table
// TSKEY lastKey = 0; //((STable*)(pInfo->pTable))->lastKey;
// if (key < lastKey) {
// key = lastKey;
//
// // keyInfo.pTable = pInfo->pTable;
// keyInfo.lastKey = key;
// pInfo->lastKey = key;
//
// if (key < window.skey) {
// window.skey = key;
// }
//
// if (key > window.ekey) {
// window.ekey = key;
// }
// }
// }
//
// // more than one table in each group, only one table left for each group
// // if (keyInfo.pTable != NULL) {
// // totalNumOfTable++;
// // if (taosArrayGetSize(pGroup) == 1) {
// // // do nothing
// // } else {
// // taosArrayClear(pGroup);
// // taosArrayPush(pGroup, &keyInfo);
// // }
// // } else { // mark all the empty groups, and remove it later
// // taosArrayDestroy(pGroup);
// // taosArrayPush(emptyGroup, &j);
// // }
// }
//
// // window does not being updated, so set the original
// if (window.skey == INT64_MAX && window.ekey == INT64_MIN) {
// window = TSWINDOW_INITIALIZER;
// assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups);
// }
//
// taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t)taosArrayGetSize(emptyGroup));
// taosArrayDestroy(emptyGroup);
//
// groupList->numOfTables = totalNumOfTable;
return window;
}

View File

@ -191,9 +191,9 @@ int vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
switch (pMsg->msgType) {
case TDMT_VND_QUERY:
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg);
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_VND_QUERY_CONTINUE:
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg);
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@ -206,13 +206,16 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
switch (pMsg->msgType) {
case TDMT_VND_FETCH:
return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_FETCH_RSP:
return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_CANCEL_TASK:
return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_DROP_TASK:
return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg);
return qWorkerProcessDropMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
@ -231,9 +234,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_VND_TASK_RECOVER_RSP:
return tqProcessTaskRecoverRsp(pVnode->pTq, pMsg);
case TDMT_VND_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg);
default:
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@ -260,7 +260,7 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
SMsgHead *pHead = pMsg->pCont;
char logBuf[512];
char logBuf[512] = {0};
char *syncNodeStr = sync2SimpleStr(pVnode->sync);
snprintf(logBuf, sizeof(logBuf), "==vnodeProcessSyncReq== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr);
syncRpcMsgLog2(logBuf, pMsg);
@ -678,6 +678,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
int32_t nRows;
int32_t tsize, ret;
SEncoder encoder = {0};
SArray *newTbUids = NULL;
terrno = TSDB_CODE_SUCCESS;
pRsp->code = 0;
@ -698,6 +699,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
}
submitRsp.pArray = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(SSubmitBlkRsp));
newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t));
if (!submitRsp.pArray) {
pRsp->code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@ -727,6 +729,7 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
goto _exit;
}
}
taosArrayPush(newTbUids, &createTbReq.uid);
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
@ -754,8 +757,10 @@ static int vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, in
submitRsp.affectedRows += submitBlkRsp.affectedRows;
taosArrayPush(submitRsp.pArray, &submitBlkRsp);
}
tqUpdateTbUidList(pVnode->pTq, newTbUids, true);
_exit:
taosArrayDestroy(newTbUids);
tEncodeSize(tEncodeSSubmitRsp, &submitRsp, tsize, ret);
pRsp->pCont = rpcMallocCont(tsize);
pRsp->contLen = tsize;
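
The submit path now collects the uids of tables auto-created during the batch and notifies the stream tasks once, after the loop, via tqUpdateTbUidList. Reduced to its essentials, the pattern is (a restatement of the code above, not an addition to it):

SArray *newTbUids = taosArrayInit(pSubmitReq->numOfBlocks, sizeof(int64_t));
// per block, only when a table was auto-created:
//   taosArrayPush(newTbUids, &createTbReq.uid);
// after the loop, one pass over all stream tasks instead of one per table:
tqUpdateTbUidList(pVnode->pTq, newTbUids, true);
taosArrayDestroy(newTbUids);

Batching the notification means the stream-task hash is walked once per submit request rather than once per created table, which presumably matters for bulk auto-create workloads.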

Some files were not shown because too many files have changed in this diff.