Merge branch '3.0' into merge/mainto3.0
Commit 34120a38c5
@ -53,6 +53,8 @@ It is not necessary to configure your cluster specifically for active-active mod

- The sink endpoint is the FQDN of TDengine on the secondary node.
- You can use the native connection (port 6030) or the WebSocket connection (port 6041).
- You can specify one or more databases to replicate only the data contained in those databases. If you do not specify a database, all databases on the node are replicated except for `information_schema`, `performance_schema`, `log`, and `audit`.
- New databases on both sides are detected periodically and replication for them starts automatically; use the optional `--new-database-checking-interval <SECONDS>` argument to adjust the detection interval.
- New-database detection can be disabled with `--no-new-databases`.

When the command is successful, the replica ID is displayed. You can use this ID to add other databases to the replication task if necessary.
@ -97,7 +99,6 @@ You can manage your active-active deployment with the following commands:

:::note

- This command cannot create duplicate tasks. It only adds the specified databases to the specified task.
- The replica ID is globally unique within a taosX instance and is independent of the source/sink combination.

:::

2. Check the status of a task:
@ -124,6 +125,8 @@ You can manage your active-active deployment with the following commands:

If you specify a database, replication for that database is stopped. If you do not specify a database, all replication tasks for that replica ID are stopped. If you do not specify an ID, all replication tasks on the instance are stopped.

Use `--no-new-databases` to leave new-database detection running when you stop replication.

4. Restart a replication task:

```shell
@ -132,6 +135,14 @@ You can manage your active-active deployment with the following commands:

If you specify a database, replication for that database is restarted. If you do not specify a database, all replication tasks for that replica ID are restarted. If you do not specify an ID, all replication tasks on the instance are restarted.

5. Update the new-database detection interval:

```shell
taosx replica update id --new-database-checking-interval <SECONDS>
```

This command only updates the detection interval for new databases.

6. Check the progress of a replication task:

```shell
@ -537,8 +537,10 @@ This document details the server error codes that may be encountered when using

| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
| ---------- | --------------------- | ------------------------------------------------------------ | -------------------------------------------- |
| 0x800003E6 | Consumer not exist | The consumer timed out and went offline | Rebuild the consumer and subscribe again |
| 0x800003EA | Consumer not ready | The consumer is rebalancing | Retry after 2s |
| 0x80004000 | Invalid message | The subscribed data is illegal; generally does not occur | Check the client-side error logs for details |
| 0x80004001 | Consumer mismatch | The vnode requested for subscription and the reassigned vnode are inconsistent, usually occurs when new consumers join the same consumer group | Internal error, not exposed to users |
| 0x80004001 | Consumer mismatch | The vnode requested for subscription and the reassigned vnode are inconsistent, usually occurs when new consumers join the same consumer group | Internal error |
| 0x80004002 | Consumer closed | The consumer no longer exists | Check if it has already been closed |
| 0x80004017 | Invalid status, please subscribe topic first | Data was polled without first calling subscribe (invalid tmq status) | Call subscribe before polling data |
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |
@ -81,7 +81,12 @@ taosx replica start -f source_endpoint -t sink_endpoint [database...]
|
|||
taosx replica start -f td1:6030 -t td2:6030
|
||||
```
|
||||
|
||||
该示例命令会自动创建除 information_schema、performance_schema、log、audit 库之外的同步任务。可以使用 `http://td2:6041` 指定该 endpoint 使用 websocket 接口(默认是原生接口)。也可以指定数据库同步:taosx replica start -f td1:6030 -t td2:6030 db1 仅创建指定的数据库同步任务。
|
||||
该示例命令会自动创建除 information_schema、performance_schema、log、audit 库之外的同步任务,并持续监听新增的数据库,当 td1 和 td2 中新增同名数据库时可自动启动新增数据库的数据复制任务。需要说明的是:
|
||||
|
||||
- 可以使用 `http://td2:6041` 指定该 endpoint 使用 websocket 接口(默认是原生接口)。
|
||||
- 可以使用 `--new-database-checking-interval <SECONDS>` 指定新增数据库的检查间隔,默认为 30 分钟。
|
||||
- 可以使用 `--no-new-databases` 禁用监听行为。
|
||||
- 也可以指定数据库同步:`taosx replica start -f td1:6030 -t td2:6030 db1` 仅创建指定的数据库同步任务。此时相当于配置了 `--no-new-databases`,不会开启新增数据库自动同步。
|
||||
|
||||
2. 方法二
|
||||
|
||||
|
@ -121,6 +126,7 @@ taosx replica stop id [db...]
|
|||
该命令作用如下:
|
||||
1. 停止指定 Replica ID 下所有或指定数据库的双副本同步任务。
|
||||
2. 使用 `taosx replica stop id1 db1` 表示停止 id1 replica 下 db1 的同步任务。
|
||||
3. `--no-new-databases` 选项启用时,不停止新增数据库监听任务,仅停止当前同步中的数据库。
|
||||
|
||||
### 重启双活任务
|
||||
|
||||
|
@ -145,7 +151,7 @@ taosx replica diff id [db....]
|
|||
| replica | database | source | sink | vgroup_id | current | latest | diff |
|
||||
+---------+----------+----------+----------+-----------+---------+---------+------+
|
||||
| a | opc | td1:6030 | td2:6030 | 2 | 17600 | 17600 | 0 |
|
||||
| ad | opc | td2:6030 | td2:6030 | 3 | 17600 | 17600 | 0 |
|
||||
| a | opc | td2:6030 | td2:6030 | 3 | 17600 | 17600 | 0 |
|
||||
```
|
||||
|
||||
### 删除双活任务
|
||||
|
@ -156,6 +162,16 @@ taosx replica remove id [--force]
|
|||
|
||||
删除当前所有双活同步任务。正常情况下要想删除同步任务,需要先 stop 该任务;但当 --force 启用时,会强制停止并清除任务。
|
||||
|
||||
`--no-new-databases` 选项启用时,不会删除新增数据库同步任务,仅删除当前数据库的同步任务。当 taosx 重启后,如果删除的数据库任务对应的数据库仍然存在,则会继续创建同步任务;不重启 taosx 或者不更新双活监听任务时,也不会再新建这些数据库的同步任务。
|
||||
|
||||
### 更新双活新增数据库检查间隔
|
||||
|
||||
```shell
|
||||
taosx replica update id --new-database-checking-interval <SECONDS>
|
||||
```
|
||||
|
||||
更新双活新增数据库的检查间隔,单位为秒。
|
||||
|
||||
### 推荐使用步骤
|
||||
|
||||
1. 假定在机器 A 上运行,需要首先使用 `taosx replica start` 来配置 taosX,其输入参数是待同步的源端和目标端服务器地址,在完成配置后会自动启动同步服务和任务。此处假定 taosx 服务使用标准端口,同步任务使用原生连接。
|
||||
|
|
|
@ -57,7 +57,7 @@ LIKE 条件使用通配符字符串进行匹配检查,规则如下:
|
|||
MATCH/REGEXP 条件和 NMATCH/NOT REGEXP 条件使用正则表达式进行匹配,规则如下:
|
||||
|
||||
- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。
|
||||
- MATCH 和正则表达式匹配时, 返回 TURE. NMATCH 和正则表达式不匹配时, 返回 TRUE.
|
||||
- MATCH 和正则表达式匹配时,返回 TRUE。NMATCH 和正则表达式不匹配时,返回 TRUE。
|
||||
- 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。
|
||||
- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串长度,该参数是客户端配置参数,需要重启客户端才能生效。
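下面是一个按子表名做正则过滤的示意查询(其中超级表名 meters 仅为假设的示例):

```sql
-- 匹配子表名以 d 开头、后跟数字的子表
SELECT COUNT(*) FROM meters WHERE tbname MATCH '^d[0-9]+$';
-- 统计不匹配该模式的子表数据
SELECT COUNT(*) FROM meters WHERE tbname NMATCH '^d[0-9]+$';
```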
|
||||
|
||||
|
@ -65,7 +65,7 @@ MATCH/REGEXP 条件和 NMATCH/NOT REGEXP 条件使用正则表达式进行匹配
|
|||
|
||||
| # | **运算符** | **支持的类型** | **说明** |
|
||||
| --- | :--------: | -------------- | --------------------------------------------------------------------------- |
|
||||
| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE, 则返回 TRUE。如果任一为 FALSE,则返回 FALSE |
|
||||
| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE, 则返回 TRUE。如果两者都是 FALSE,则返回 FALSE |
|
||||
| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE,则返回 TRUE。如果任一为 FALSE,则返回 FALSE |
|
||||
| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE,则返回 TRUE。如果两者都是 FALSE,则返回 FALSE |
|
||||
|
||||
TDengine 在计算逻辑条件时,会进行短路径优化,即对于 AND,第一个条件为 FALSE,则不再计算第二个条件,直接返回 FALSE;对于 OR,第一个条件为 TRUE,则不再计算第二个条件,直接返回 TRUE。
|
||||
|
|
|
@ -33,7 +33,7 @@ description: 对 JSON 类型如何使用的详细说明
|
|||
|
||||
## 支持的操作
|
||||
|
||||
1. 在 where 条件中时,支持函数 match/nmatch/between and/like/and/or/is null/is not null,不支持 in
|
||||
1. 在 where 条件中时,支持函数 `match`、`nmatch`、`between and`、`like`、`and`、`or`、`is null`、`is not null`,不支持 `in`
|
||||
|
||||
```
|
||||
select * from s1 where info->'k1' match 'v*';
|
||||
|
@ -47,7 +47,7 @@ description: 对 JSON 类型如何使用的详细说明
|
|||
|
||||
2. 支持 json tag 放在 group by、order by、join 子句、union all 以及子查询中,比如 group by json->'key'
|
||||
|
||||
3. 支持 distinct 操作.
|
||||
3. 支持 distinct 操作
|
||||
|
||||
```
|
||||
select distinct info->'k1' from s1
|
||||
|
@ -69,8 +69,8 @@ description: 对 JSON 类型如何使用的详细说明
|
|||
|
||||
3. json 格式限制:
|
||||
|
||||
1. json 输入字符串可以为空("","\t"," "或 null)或 object,不能为非空的字符串,布尔型和数组。
|
||||
2. object 可为{},如果 object 为{},则整个 json 串记为空。key 可为"",若 key 为"",则 json 串中忽略该 k-v 对。
|
||||
1. json 输入字符串可以为空(""、"\t"、" " 或 null)或 object,不能为非空的字符串,布尔型和数组。
|
||||
2. object 可为 {},如果 object 为 {},则整个 json 串记为空。key 可为 "",若 key 为 "",则 json 串中忽略该 k-v 对。
|
||||
3. value 可以为数字(int/double)或字符串或 bool 或 null,暂不可以为数组。不允许嵌套。
|
||||
4. 若 json 字符串中出现两个相同的 key,则第一个生效。
|
||||
5. json 字符串里暂不支持转义。
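结合上述格式限制,下面给出一个创建并写入 JSON 标签的示意(沿用本文示例中的超级表 s1 与标签 info,列定义为假设):

```sql
-- JSON 标签值须为 null 或 object,value 不能为数组且不可嵌套
CREATE STABLE s1 (ts TIMESTAMP, v1 INT) TAGS (info JSON);
CREATE TABLE s1_1 USING s1 TAGS ('{"k1": "v1", "k2": 7}');
INSERT INTO s1_1 VALUES (NOW, 1);
```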
|
||||
|
|
|
@ -21,8 +21,8 @@ description: TDengine 中使用转义字符的详细规则
|
|||
## 转义字符使用规则
|
||||
|
||||
1. 标识符里有转义字符(数据库名、表名、列名、别名)
|
||||
1. 普通标识符: 直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。
|
||||
2. 反引号``标识符: 保持原样,不转义
|
||||
1. 普通标识符:直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。
|
||||
2. 反引号 `` 标识符:保持原样,不转义
|
||||
2. 数据里有转义字符
|
||||
1. 遇到上面定义的转义字符会转义(`%`和`_`见下面说明),如果没有匹配的转义字符会忽略掉转义符`\ `(`\x`保持原样)。
|
||||
2. 对于`%`和`_`,因为在`like`里这两个字符是通配符,所以在模式匹配`like`里用`\%`和`\_`表示字符里本身的`%`和`_`,如果在`like`模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是`%`和`_`。
|
||||
1. 遇到上面定义的转义字符会转义(`%`和`_`见下面说明),如果没有匹配的转义字符会忽略掉转义符 `\ `(`\x`保持原样)。
|
||||
2. 对于 `%` 和 `_`,因为在 `like` 里这两个字符是通配符,所以在模式匹配 `like` 里用 `\%` 和 `\_` 表示字符里本身的 `%` 和 `_`,如果在 `like` 模式匹配上下文之外使用 `\%` 或 `\_`,则它们的计算结果为字符串 `\%` 和 `\_`,而不是 `%` 和 `_`。
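例如,下面的示意查询匹配列值中真实包含 `_` 的记录(表名 t1 与列名 name 仅为假设):

```sql
-- like 模式中 \_ 匹配字符 '_' 本身,而不是任意单个字符
SELECT * FROM t1 WHERE name LIKE 'a\_%';
```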
|
||||
|
|
|
@ -9,13 +9,13 @@ description: 合法字符集和命名中的限制规则
|
|||
1. 合法字符:英文字符、数字和下划线。
|
||||
1. 允许英文字符或下划线开头,不允许以数字开头。
|
||||
1. 不区分大小写。
|
||||
1. 不能是[保留关键字](./20-keywords.md)。
|
||||
1. 不能是 [保留关键字](./20-keywords.md)。
|
||||
1. 转义后表(列)名规则:
|
||||
为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。使用转义字符以后:
|
||||
- 不再对转义字符中的内容进行大小写统一,即可以保留用户指定表名中的大小写属性,例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。
|
||||
- 不再对转义字符中的内容进行大小写统一,即可以保留用户指定表名中的大小写属性,例如 \`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。
|
||||
- 可以创建包含字母、数字和下划线以外字符的表(列)名,例如:\`abc@TD\`,但是转义后名称中仍然不能包含`.`,否则会提示`The table name cannot contain '.'`。
|
||||
- 可以创建以数字开头的表(列)名,例如\`1970\`。
|
||||
- 可以创建以[保留关键字](./20-keywords.md)命名的表(列)名,例如\`select\`。
|
||||
- 可以创建以数字开头的表(列)名,例如 \`1970\`。
|
||||
- 可以创建以 [保留关键字](./20-keywords.md) 命名的表(列)名,例如 \`select\`。
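下面是转义标识符用法的示意(表名仅为示例):

```sql
-- 使用反引号后,可以创建以数字开头、与保留关键字同名的表,并保留大小写
CREATE TABLE `1970` (ts TIMESTAMP, val INT);
CREATE TABLE `select` (ts TIMESTAMP, val INT);
CREATE TABLE `aBc` (ts TIMESTAMP, val INT);   -- 与 `abc` 是不同的表
```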
|
||||
|
||||
## 密码合法字符集
|
||||
|
||||
|
@ -27,7 +27,7 @@ description: 合法字符集和命名中的限制规则
|
|||
|
||||
- 数据库名最大长度为 64 字节
|
||||
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
|
||||
- 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB) (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 每行数据最大长度 48KB(从 v3.0.5.0 开始为 64KB)(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 列名最大长度为 64 字节
|
||||
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
|
||||
- 标签名最大长度为 64 字节
|
||||
|
|
|
@ -32,9 +32,9 @@ DROP DNODE dnode_id [force] [unsafe]
|
|||
|
||||
注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。
|
||||
|
||||
只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作, 即指定force选项。
|
||||
只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作, 即指定 force 选项。
|
||||
|
||||
当节点上存在单副本,并且节点处于离线,如果要强制删除该节点,需要执行非安全删除,即制定unsafe,并且数据不可再恢复。
|
||||
当节点上存在单副本,并且节点处于离线状态时,如果要强制删除该节点,需要执行非安全删除,即指定 unsafe,并且数据不可再恢复。
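示意用法如下(dnode_id 取 3 仅为假设):

```sql
-- 强制删除离线的 dnode
DROP DNODE 3 FORCE;
-- 节点离线且其上存在单副本数据时的非安全删除,数据不可再恢复
DROP DNODE 3 UNSAFE;
```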
|
||||
|
||||
## 修改数据节点配置
|
||||
|
||||
|
@ -44,27 +44,27 @@ ALTER DNODE dnode_id dnode_option
|
|||
ALTER ALL DNODES dnode_option
|
||||
```
|
||||
|
||||
对于支持动态修改的配置参数,您可以使用 ALTER DNODE 或 ALTER ALL DNODES 语法修改 dnode 中配置参数的值,自 3.3.4.0 后,修改的配置参数将自动持久化,即便数据库服务重启后仍然生效。
|
||||
对于支持动态修改的配置参数,您可以使用 ALTER DNODE 或 ALTER ALL DNODES 语法修改 dnode 中配置参数的值,自 v3.3.4.0 后,修改的配置参数将自动持久化,即便数据库服务重启后仍然生效。
|
||||
|
||||
对于一个配置参数是否支持动态修改,请您参考以下页面:[taosd 参考手册](../01-components/01-taosd.md)
|
||||
对于一个配置参数是否支持动态修改,请您参考 [taosd 参考手册](../01-components/01-taosd.md)
|
||||
|
||||
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug:
|
||||
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug。
|
||||
|
||||
```sql
|
||||
ALTER DNODE 1 'debugFlag' '143';
|
||||
```
|
||||
|
||||
### 补充说明
|
||||
配置参数在 dnode 中被分为全局配置参数与局部配置参数,您可以查看 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 中的 category 字段来确认配置参数属于全局配置参数还是局部配置参数:
|
||||
配置参数在 dnode 中被分为全局配置参数与局部配置参数,您可以查看 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 中的 category 字段来确认配置参数属于全局配置参数还是局部配置参数。
|
||||
1. 局部配置参数:您可以使用 ALTER DNODE 或 ALTER ALL DNODES 来更新某一个 dnode 或全部 dnodes 的局部配置参数。
|
||||
2. 全局配置参数:全局配置参数要求各个 dnode 保持一致,所以您只可以使用 ALTER ALL DNODES 来更新全部 dnodes 的全局配置参数。
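两种语法的示意如下(以局部配置参数 debugFlag 为例):

```sql
-- 修改单个 dnode 的局部配置参数
ALTER DNODE 1 'debugFlag' '135';
-- 对全部 dnode 统一修改;全局配置参数只能使用这种形式
ALTER ALL DNODES 'debugFlag' '135';
```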
|
||||
|
||||
配置参数是否可以动态修改,有以下三种情况:
|
||||
1. 支持动态修改 立即生效
|
||||
2. 支持动态修改 重启生效
|
||||
1. 支持动态修改,立即生效
|
||||
2. 支持动态修改,重启生效
|
||||
3. 不支持动态修改
|
||||
|
||||
对于重启后生效的配置参数,您可以通过 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 看到修改后的值,但是需要重启数据库服务才使其生效。
|
||||
对于重启后生效的配置参数,您可以通过 `SHOW VARIABLES` 或 `SHOW DNODE dnode_id VARIABLE` 看到修改后的值,但是需要重启数据库服务才使其生效。
|
||||
|
||||
## 添加管理节点
|
||||
|
||||
|
@ -96,7 +96,7 @@ DROP MNODE ON DNODE dnode_id;
|
|||
CREATE QNODE ON DNODE dnode_id;
|
||||
```
|
||||
|
||||
系统启动默认没有 QNODE,用户可以创建 QNODE 来实现计算和存储的分离。一个 DNODE 上只能创建一个 QNODE。一个 DNODE 的 `supportVnodes` 参数如果不为 0,同时又在其上创建上 QNODE,则在该 dnode 中既有负责存储管理的 vnode 又有负责查询计算的 qnode,如果还在该 dnode 上创建了 mnode,则一个 dnode 上最多三种逻辑节点都可以存在。但通过配置也可以使其彻底分离。将一个 dnode 的`supportVnodes`配置为 0,可以选择在其上创建 mnode 或者 qnode 中的一种,这样可以实现三种逻辑节点在物理上的彻底分离。
|
||||
系统启动默认没有 QNODE,用户可以创建 QNODE 来实现计算和存储的分离。一个 dnode 上只能创建一个 QNODE。一个 dnode 的 `supportVnodes` 参数如果不为 0,同时又在其上创建了 QNODE,则在该 dnode 中既有负责存储管理的 vnode,又有负责查询计算的 qnode;如果还在该 dnode 上创建了 mnode,则一个 dnode 上最多三种逻辑节点都可以存在。但通过配置也可以使其彻底分离:将一个 dnode 的 `supportVnodes` 配置为 0,可以选择在其上创建 mnode 或者 qnode 中的一种,这样可以实现三种逻辑节点在物理上的彻底分离。
|
||||
|
||||
## 查看查询节点
|
||||
|
||||
|
@ -104,7 +104,7 @@ CREATE QNODE ON DNODE dnode_id;
|
|||
SHOW QNODES;
|
||||
```
|
||||
|
||||
列出集群中所有查询节点,包括 ID,及所在 DNODE。
|
||||
列出集群中所有查询节点,包括 ID 及所在 dnode。
|
||||
|
||||
## 删除查询节点
|
||||
|
||||
|
@ -112,7 +112,7 @@ SHOW QNODES;
|
|||
DROP QNODE ON DNODE dnode_id;
|
||||
```
|
||||
|
||||
删除 ID 为 dnode_id 的 DNODE 上的 QNODE,但并不会影响该 dnode 的状态。
|
||||
删除 ID 为 dnode_id 的 dnode 上的 qnode,但并不会影响该 dnode 的状态。
|
||||
|
||||
## 查询集群状态
|
||||
|
||||
|
@ -120,7 +120,10 @@ DROP QNODE ON DNODE dnode_id;
|
|||
SHOW CLUSTER ALIVE;
|
||||
```
|
||||
|
||||
查询当前集群的状态是否可用,返回值: 0:不可用 1:完全可用 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
查询当前集群的状态是否可用,返回值如下:
|
||||
- 0:不可用
|
||||
- 1:完全可用
|
||||
- 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
|
||||
## 修改客户端配置
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ title: 元数据
|
|||
description: Information_Schema 数据库中存储了系统中所有的元数据信息
|
||||
---
|
||||
|
||||
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点:
|
||||
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 `SELECT ... FROM INFORMATION_SCHEMA.tablename` 具有以下优点。
|
||||
|
||||
1. 可以使用 USE 语句将 INFORMATION_SCHEMA 设为默认数据库
|
||||
2. 可以使用 SELECT 语句熟悉的语法,只需要学习一些表名和列名
|
||||
|
@ -15,7 +15,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
:::info
|
||||
|
||||
- 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。
|
||||
- 系统表中的一些列可能是关键字,在查询时需要使用转义符'\`',例如查询数据库 test 有几个 VGROUP:
|
||||
- 系统表中的一些列可能是关键字,在查询时需要使用转义符 '\`',例如查询数据库 test 有几个 VGROUP。
|
||||
```sql
|
||||
select `vgroups` from ins_databases where name = 'test';
|
||||
```
|
||||
|
@ -26,11 +26,11 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_DNODES
|
||||
|
||||
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。
|
||||
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。SYSINFO 为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ----------------------------------------------------------------------------------------------------- |
|
||||
| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数。需要注意,`vnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数。需要注意,`vnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 2 | support_vnodes | SMALLINT | 最多支持的 vnode 个数 |
|
||||
| 3 | status | BINARY(10) | 当前状态 |
|
||||
| 4 | note | BINARY(256) | 离线原因等信息 |
|
||||
|
@ -40,7 +40,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_MNODES
|
||||
|
||||
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。
|
||||
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。SYSINFO 为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------ |
|
||||
|
@ -73,7 +73,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_CLUSTER
|
||||
|
||||
存储集群相关信息。 SYSINFO 属性为 0 的用户不能查看此表。
|
||||
存储集群相关信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ---------- |
|
||||
|
@ -111,10 +111,10 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 24 | wal_retention_period | INT | WAL 的保存时长,单位为秒。需要注意,`wal_retention_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 25 | wal_retention_size | INT | WAL 的保存上限。需要注意,`wal_retention_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 26 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 27 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 28 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 29 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 26 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 27 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 28 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 29 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
|
||||
## INS_FUNCTIONS
|
||||
|
||||
|
@ -124,7 +124,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :-----------: | ------------- | --------------------------------------------------------------------------------------------- |
|
||||
| 1 | name | VARCHAR(64) | 函数名 |
|
||||
| 2 | comment | VARCHAR(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 4 | output_type | VARCHAR(31) | 输出类型 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | code_len | INT | 代码长度 |
|
||||
|
@ -145,7 +145,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 3 | index_name | VARCHAR(192) | 索引名 |
|
||||
| 4 | column_name | VARCHAR(64) | 建索引的列的列名 |
|
||||
| 5 | index_type | VARCHAR(10) | 目前有 SMA 和 tag |
|
||||
| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 |
|
||||
| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。|
|
||||
|
||||
## INS_STABLES
|
||||
|
||||
|
@ -161,7 +161,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 6 | last_update | TIMESTAMP | 最后更新时间 |
|
||||
| 7 | table_comment | VARCHAR(1024) | 表注释 |
|
||||
| 8 | watermark | VARCHAR(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 10 | rollup | VARCHAR(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_TABLES
|
||||
|
@ -177,7 +177,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | stable_name | VARCHAR(192) | 所属的超级表表名 |
|
||||
| 6 | uid | BIGINT | 表 id |
|
||||
| 7 | vgroup_id | INT | vgroup id |
|
||||
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | ttl | INT | 表的生命周期。需要注意 `ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 9 | table_comment | VARCHAR(1024) | 表注释 |
|
||||
| 10 | type | VARCHAR(21) | 表类型 |
|
||||
|
||||
|
@ -215,7 +215,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 1 | name | VARCHAR(24) | 用户名 |
|
||||
| 2 | super | TINYINT | 用户是否为超级用户,1:是,0:否 |
|
||||
| 3 | enable | TINYINT | 用户是否启用,1:是,0:否 |
|
||||
| 4 | sysinfo | TINYINT | 用户是否可查看系统信息,1:是, 0:否 |
|
||||
| 4 | sysinfo | TINYINT | 用户是否可查看系统信息,1:是,0:否 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | allowed_host | VARCHAR(49152)| IP 白名单 |
|
||||
|
||||
|
@ -232,7 +232,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | users | VARCHAR(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | accounts | VARCHAR(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | storage | VARCHAR(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 9 | databases | VARCHAR(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | speed | VARCHAR(9) | 授权使用的数据点每秒写入数量 |
|
||||
| 11 | querytime | VARCHAR(9) | 授权使用的查询总时长 |
|
||||
|
@ -248,7 +248,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :-------: | ------------ | ------------------------------------------------------------------------------------------------ |
|
||||
| 1 | vgroup_id | INT | vgroup id |
|
||||
| 2 | db_name | VARCHAR(32) | 数据库名 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 4 | status | VARCHAR(10) | 此 vgroup 的状态 |
|
||||
| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id |
|
||||
| 6 | v1_status | VARCHAR(10) | 第一个成员的状态 |
|
||||
|
@ -258,7 +258,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 10 | v3_status | VARCHAR(10) | 第三个成员的状态 |
|
||||
| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 |
|
||||
| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 |
|
||||
| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 |
|
||||
| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是,0: 否 |
|
||||
|
||||
## INS_CONFIGS
|
||||
|
||||
|
@ -267,7 +267,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
|
||||
| 1 | name | VARCHAR(32) | 配置项名称 |
|
||||
| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
|
||||
## INS_DNODE_VARIABLES
|
||||
|
||||
|
@ -277,7 +277,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
|
||||
| 1 | dnode_id | INT | dnode 的 ID |
|
||||
| 2 | name | VARCHAR(32) | 配置项名称 |
|
||||
| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
|
||||
## INS_TOPICS
|
||||
|
||||
|
@ -333,14 +333,14 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|:----|:-----------|:------------|:--------|
|
||||
| 1 | db_name | VARCHAR(32) | 数据库名称 |
|
||||
| 2 | vgroup_id | INT | vgroup 的 ID |
|
||||
| 3 | wal | BIGINT | wal 文件大小, 单位为 K |
|
||||
| 4 | data1 | BIGINT | 一级存储上数据文件的大小,单位为KB |
|
||||
| 5 | data2 | BIGINT | 二级存储上数据文件的大小,单位为 KB |
|
||||
| 6 | data3 | BIGINT | 三级存储上数据文件的大小, 单位为KB |
|
||||
| 7 | cache_rdb | BIGINT | last/last_row 文件的大小,单位为KB |
|
||||
| 8 | table_meta | BIGINT | meta 文件的大小, 单位为KB |
|
||||
| 9 | s3 | BIGINT | s3 上占用的大小, 单位为KB |
|
||||
| 10 | raw_data | BIGINT | 预估的原始数据的大小, 单位为KB |
|
||||
| 3 | wal | BIGINT | wal 文件大小,单位为 KB |
|
||||
| 4 | data1 | BIGINT | 一级存储上数据文件的大小,单位为 KB |
|
||||
| 5 | data2 | BIGINT | 二级存储上数据文件的大小,单位为 KB |
|
||||
| 6 | data3 | BIGINT | 三级存储上数据文件的大小,单位为 KB |
|
||||
| 7 | cache_rdb | BIGINT | last/last_row 文件的大小,单位为 KB |
|
||||
| 8 | table_meta | BIGINT | meta 文件的大小,单位为 KB |
|
||||
| 9 | s3 | BIGINT | s3 上占用的大小,单位为 KB |
|
||||
| 10 | raw_data | BIGINT | 预估的原始数据的大小,单位为 KB |
|
||||
|
||||
|
||||
## INS_FILESETS
|
||||
|
|
|
@ -14,7 +14,7 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
|
|||
| --- | :----------: | ------------ | ------------------------------- |
|
||||
| 1 | app_id | UBIGINT | 客户端 ID |
|
||||
| 2 | ip | BINARY(16) | 客户端地址 |
|
||||
| 3 | pid | INT | 客户端进程 号 |
|
||||
| 3 | pid | INT | 客户端进程号 |
|
||||
| 4 | name | BINARY(24) | 客户端名称 |
|
||||
| 5 | start_time | TIMESTAMP | 客户端启动时间 |
|
||||
| 6 | insert_req | UBIGINT | insert 请求次数 |
|
||||
|
@ -69,7 +69,7 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
|
|||
| 1 | consumer_id | BIGINT | 消费者的唯一 ID |
|
||||
| 2 | consumer_group | BINARY(192) | 消费者组 |
|
||||
| 3 | client_id | BINARY(192) | 用户自定义字符串,通过创建 consumer 时指定 client_id 来展示 |
|
||||
| 4 | status | BINARY(20) | 消费者当前状态。消费者状态包括:ready(正常可用)、 lost(连接已丢失)、 rebalancing(消费者所属 vgroup 正在分配中)、unknown(未知状态)|
|
||||
| 4 | status | BINARY(20) | 消费者当前状态。消费者状态包括:ready(正常可用)、lost(连接已丢失)、rebalancing(消费者所属 vgroup 正在分配中)、unknown(未知状态)|
|
||||
| 5 | topics | BINARY(204) | 被订阅的 topic。若订阅多个 topic,则展示为多行 |
|
||||
| 6 | up_time | TIMESTAMP | 第一次连接 taosd 的时间 |
|
||||
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
|
||||
|
|
|
@ -20,7 +20,7 @@ SHOW APPS;
|
|||
SHOW CLUSTER;
|
||||
```
|
||||
|
||||
显示当前集群的信息
|
||||
显示当前集群的信息。
|
||||
|
||||
## SHOW CLUSTER ALIVE
|
||||
|
||||
|
@ -28,17 +28,22 @@ SHOW CLUSTER;
|
|||
SHOW CLUSTER ALIVE;
|
||||
```
|
||||
|
||||
查询当前集群的状态是否可用,返回值: 0:不可用 1:完全可用 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
查询当前集群的状态是否可用,返回值如下
|
||||
- 0:不可用
|
||||
- 1:完全可用
|
||||
- 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
|
||||
## SHOW CLUSTER MACHINES
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER MACHINES; // 从 TDengine 3.2.3.0 版本开始支持
|
||||
SHOW CLUSTER MACHINES;
|
||||
```
|
||||
|
||||
显示集群的机器码等信息。
|
||||
|
||||
注:企业版独有
|
||||
备注
|
||||
- 企业版功能
|
||||
- v3.2.3.0 开始支持
|
||||
|
||||
## SHOW CONNECTIONS
|
||||
|
||||
|
@ -70,7 +75,7 @@ SHOW CREATE DATABASE db_name;
|
|||
SHOW CREATE STABLE [db_name.]stb_name;
|
||||
```
|
||||
|
||||
显示 tb_name 指定的超级表的创建语句
|
||||
显示 tb_name 指定的超级表的创建语句。
|
||||
|
||||
## SHOW CREATE TABLE
|
||||
|
||||
|
@ -114,7 +119,8 @@ SHOW GRANTS FULL; // 从 TDengine 3.2.3.0 版本开始支持
|
|||
|
||||
显示企业版许可授权的信息。
|
||||
|
||||
注:企业版独有
|
||||
备注
|
||||
- 企业版功能
|
||||
|
||||
## SHOW INDEXES
|
||||
|
||||
|
@ -147,7 +153,7 @@ SHOW MNODES;
|
|||
SHOW QNODES;
|
||||
```
|
||||
|
||||
显示当前系统中 QNODE (查询节点)的信息。
|
||||
显示当前系统中 QNODE(查询节点)的信息。
|
||||
|
||||
## SHOW QUERIES
|
||||
|
||||
|
@ -155,7 +161,7 @@ SHOW QNODES;
|
|||
SHOW QUERIES;
|
||||
```
|
||||
|
||||
显示当前系统中正在进行的写入(更新)/查询/删除。(由于内部 API 命名原因,所以统称 QUERIES)
|
||||
显示当前系统中正在进行的写入(更新)、查询、删除。(由于内部 API 命名原因,所以统称 QUERIES)
|
||||
|
||||
## SHOW SCORES
|
||||
|
||||
|
@ -165,7 +171,8 @@ SHOW SCORES;
|
|||
|
||||
显示系统被许可授权的容量的信息。
|
||||
|
||||
注:企业版独有。
|
||||
备注
|
||||
- 企业版功能
|
||||
|
||||
## SHOW STABLES
|
||||
|
||||
|
@ -219,11 +226,11 @@ SHOW TABLE DISTRIBUTED table_name;
|
|||
|
||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
|
||||
|
||||
Total_Blocks: 表 d0 占用的 block 个数为 5 个
|
||||
Total_Blocks:表 d0 占用的 block 个数为 5 个
|
||||
|
||||
Total_Size: 表 d0 所有 block 在文件中占用的大小为 93.65 KB
|
||||
Total_Size:表 d0 所有 block 在文件中占用的大小为 93.65 KB
|
||||
|
||||
Average_size: 平均每个 block 在文件中占用的空间大小为 18.73 KB
|
||||
Average_size:平均每个 block 在文件中占用的空间大小为 18.73 KB
|
||||
|
||||
Compression_Ratio: 数据压缩率 23.98%
|
||||
|
||||
|
@ -232,7 +239,7 @@ Compression_Ratio: 数据压缩率 23.98%
|
|||
|
||||
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
||||
|
||||
Total_Rows: 统计表 d0 的存储在磁盘上行数 20000 行(该数值仅供参考,不是精确的行数。获得精确的行数需要使用 count 函数)
|
||||
Total_Rows:统计表 d0 的存储在磁盘上行数 20000 行(该数值仅供参考,不是精确的行数。获得精确的行数需要使用 count 函数)
|
||||
|
||||
Inmem_Rows: 存储在写缓存中的数据行数(没有落盘),0 行表示内存缓存中没有数据
|
||||
|
||||
|
@ -247,7 +254,7 @@ Average_Rows: 每个 BLOCK 中的平均行数,此时为 4000 行
|
|||
|
||||
_block_dist: Total_Tables=[1] Total_Files=[2] Total_Vgroups=[1]
|
||||
|
||||
Total_Tables: 子表的个数,这里为 1
|
||||
Total_Tables:子表的个数,这里为 1
|
||||
|
||||
Total_Files: 表数据被分别保存的数据文件数量,这里是 2 个文件
|
||||
|
||||
|
@ -281,7 +288,7 @@ Query OK, 24 row(s) in set (0.002444s)
|
|||
</code></pre>
|
||||
</details>
|
||||
|
||||
上面是块中包含数据行数的块儿分布情况图,这里的 0100 0299 0498 … 表示的是每个块中包含的数据行数,上面的意思就是这个表的 5 个块,分布在 3483 ~3681 行的块有 1 个,占整个块的 20%,分布在 3881 ~ 4096(最大行数)的块数为 4 个,占整个块的 80%, 其它区域内分布块数为 0。
|
||||
上面是块中包含数据行数的块儿分布情况图,这里的 `0100 0299 0498 …` 表示的是每个块中包含的数据行数,上面的意思就是这个表的 5 个块,分布在 `3483 ~ 3681` 行的块有 1 个,占整个块的 20%,分布在 `3881 ~ 4096`(最大行数)的块数为 4 个,占整个块的 80%, 其它区域内分布块数为 0。
|
||||
|
||||
需要注意,这里只会显示 data 文件中数据块的信息,stt 文件中的数据的信息不会被显示。
|
||||
|
||||
|
@ -309,7 +316,7 @@ SHOW TRANSACTIONS;
|
|||
SHOW TRANSACTION [tranaction_id];
|
||||
```
|
||||
|
||||
显示当前系统中正在执行的所有或者某一个事务的信息(该事务仅针对除普通表以外的元数据级别)
|
||||
显示当前系统中正在执行的所有或者某一个事务的信息(该事务仅针对除普通表以外的元数据级别)。
|
||||
|
||||
## SHOW USERS
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
|||
|
||||
`SYSINFO` 表示该用户是否能够查看系统信息。`1` 表示可以查看,`0` 表示无权查看。系统信息包括服务配置、dnode、vnode、存储等信息。缺省值为 `1`。
|
||||
|
||||
`CREATEDB` 表示该用户是否能够创建数据库。`1` 表示可以创建,`0` 表示无权创建。缺省值为 `0`。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
`CREATEDB` 表示该用户是否能够创建数据库。`1` 表示可以创建,`0` 表示无权创建。缺省值为 `0`。从企业版 v3.3.2.0 开始支持。
|
||||
|
||||
在下面的示例中,我们创建一个密码为 `abc123!@#` 且可以查看系统信息的用户。
|
||||
|
||||
|
@ -76,12 +76,12 @@ alter_user_clause: {
|
|||
}
|
||||
```
|
||||
|
||||
- PASS: 修改密码,后跟新密码
|
||||
- ENABLE: 启用或禁用该用户,`1` 表示启用,`0` 表示禁用
|
||||
- SYSINFO: 允许或禁止查看系统信息,`1` 表示允许,`0` 表示禁止
|
||||
- CREATEDB: 允许或禁止创建数据库,`1` 表示允许,`0` 表示禁止。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
- PASS:修改密码,后跟新密码
|
||||
- ENABLE:启用或禁用该用户,`1` 表示启用,`0` 表示禁用
|
||||
- SYSINFO:允许或禁止查看系统信息,`1` 表示允许,`0` 表示禁止
|
||||
- CREATEDB:允许或禁止创建数据库,`1` 表示允许,`0` 表示禁止。从企业版 v3.3.2.0 开始支持。
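作为补充示意(假设用户 test 已存在),同样的语法也可用于其他属性,例如禁止该用户查看系统信息:

```sql
ALTER USER test SYSINFO 0;
```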
|
||||
|
||||
下面的示例禁用了名为 `test` 的用户:
|
||||
下面的示例禁用了名为 `test` 的用户。
|
||||
|
||||
```sql
|
||||
taos> alter user test enable 0;
|
||||
|
|
|
@ -3,7 +3,7 @@ toc_max_heading_level: 4
|
|||
title: 权限管理
|
||||
---
|
||||
|
||||
TDengine 中的权限管理分为[用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。
|
||||
TDengine 中的权限管理分为 [用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。
|
||||
授权管理仅在 TDengine 企业版中可用,请联系 TDengine 销售团队。授权语法在社区版可用,但不起作用。
|
||||
|
||||
## 数据库访问授权
|
||||
|
@ -33,8 +33,7 @@ priv_level : {
|
|||
对数据库的访问权限包含读和写两种权限,它们可以被分别授予,也可以被同时授予。
|
||||
|
||||
说明
|
||||
|
||||
- priv_level 格式中 "." 之前为数据库名称, "." 之后为表名称,意思为表级别的授权控制。如果 "." 之后为 "\*" ,意为 "." 前所指定的数据库中的所有表
|
||||
- priv_level 格式中 "." 之前为数据库名称,"." 之后为表名称,意思为表级别的授权控制。如果 "." 之后为 "\*",意为 "." 前所指定的数据库中的所有表
|
||||
- "dbname.\*" 意思是名为 "dbname" 的数据库中的所有表
|
||||
- "\*.\*" 意思是所有数据库名中的所有表
|
||||
|
||||
|
@ -43,9 +42,9 @@ priv_level : {
|
|||
对 root 用户和普通用户的权限的说明如下表
|
||||
|
||||
| 用户 | 描述 | 权限说明 |
|
||||
| -------- | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 超级用户 | 只有 root 是超级用户 | DB 外部 所有操作权限,例如user、dnode、udf、qnode等的CRUD DB 权限,包括 创建 删除 更新,例如修改 Option,移动 Vgruop等 读 写 Enable/Disable 用户 |
|
||||
| 普通用户 | 除 root 以外的其它用户均为普通用户 | 在可读的 DB 中,普通用户可以进行读操作 select describe show subscribe 在可写 DB 的内部,用户可以进行写操作: 创建、删除、修改 超级表 创建、删除、修改 子表 创建、删除、修改 topic 写入数据 被限制系统信息时,不可进行如下操作 show dnode、mnode、vgroups、qnode、snode 修改用户包括自身密码 show db时只能看到自己的db,并且不能看到vgroups、副本、cache等信息 无论是否被限制系统信息,都可以 管理 udf 可以创建 DB 自己创建的 DB 具备所有权限 非自己创建的 DB ,参照读、写列表中的权限 |
|
||||
| -------- | --------------------------------- | -- |
|
||||
| 超级用户 | 只有 root 是超级用户 |<br/>DB 外部:所有操作权限,例如 user、dnode、udf、qnode 等的 CRUD <br/>DB 权限:包括创建、删除、修改 Option、移动 Vgruop、读、写、Enable/Disable 用户 |
|
||||
| 普通用户 | 除 root 以外的其它用户均为普通用户 | <br/>在可读的 DB 中:普通用户可以进行读操作 select、describe、show、subscribe <br/>在可写 DB 的内部,用户可以进行写操作,创建、删除、修改超级表,创建、删除、修改子表,创建、删除、修改 topic。写入数据 <br/>被限制系统信息时,不可进行如下操作 show dnode、mnode、vgroups、qnode、snode、修改用户包括自身密码、`show db` 时只能看到自己的 db,并且不能看到 vgroups、副本、cache等信息 <br/>无论是否被限制系统信息,都可以管理 udf,可以创建 DB、自己创建的 DB 具备所有权限、非自己创建的 DB,参照读、写列表中的权限 |
|
||||
|
||||
## 消息订阅授权
|
||||
|
||||
|
@ -61,7 +60,7 @@ REVOKE SUBSCRIBE ON topic_name FROM user_name
|
|||
|
||||
## 基于标签的授权(表级授权)
|
||||
|
||||
从 TDengine 3.0.5.0 开始,我们支持按标签授权某个超级表中部分特定的子表。具体的 SQL 语法如下。
|
||||
从 v3.0.5.0 开始,我们支持按标签授权某个超级表中部分特定的子表。具体的 SQL 语法如下。
|
||||
|
||||
```sql
|
||||
GRANT privileges ON priv_level [WITH tag_condition] TO user_name
|
||||
|
@ -111,7 +110,7 @@ priv_level : {
|
|||
下表列出了在不同的数据库授权和表级授权的组合下产生的实际权限。
|
||||
|
||||
| | **表无授权** | **表读授权** | **表读授权有标签条件** | **表写授权** | **表写授权有标签条件** |
|
||||
| ---------------- | ---------------- | ---------------------------------------- | ------------------------------------------------------------ | ---------------------------------------- | ---------------------------------------------------------- |
|
||||
| -------------- | ------------- | --------------------------------- | ------------------------------------------------- | ---------------------------------- | -------------------- |
|
||||
| **数据库无授权** | 无授权 | 对此表有读权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表无权限 | 对此表有写权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有写权限,对数据库下的其他表无权限 |
|
||||
| **数据库读授权** | 对所有表有读权限 | 对所有表有读权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表有读权限 | 对此表有写权限,对所有表有读权限 | 对此表符合标签权限的子表有写权限,所有表有读权限 |
|
||||
| **数据库写授权** | 对所有表有写权限 | 对此表有读权限,对所有表有写权限 | 对此表符合标签权限的子表有读权限,对所有表有写权限 | 对所有表有写权限 | 对此表符合标签权限的子表有写权限,数据库下的其他表有写权限 |
|
||||
|
|
|
@ -4,10 +4,10 @@ title: 自定义函数
|
|||
description: 使用 UDF 的详细指南
|
||||
---
|
||||
|
||||
除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。
|
||||
除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入 TDengine 系统中。
|
||||
## 创建 UDF
|
||||
|
||||
用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。
|
||||
用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 mnode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。
|
||||
|
||||
在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。
|
||||
|
||||
|
@ -15,11 +15,11 @@ description: 使用 UDF 的详细指南
|
|||
```sql
|
||||
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
|
||||
```
|
||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:标量函数未来在 SQL 中被调用时的函数名;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言。 如果这个从句忽略,编程语言是C语言
|
||||
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
||||
- output_type:此函数计算结果的数据类型名称;
|
||||
- OR REPLACE:如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:标量函数未来在 SQL 中被调用时的函数名。
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言。如果这个从句忽略,编程语言是 C 语言。
|
||||
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来。
|
||||
- output_type:此函数计算结果的数据类型名称。
|
||||
|
||||
例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF:
|
||||
|
||||
|
@ -36,19 +36,19 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ
|
|||
```sql
|
||||
CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ] [LANGUAGE 'C|Python'];
|
||||
```
|
||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||
- OR REPLACE:如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言(v3.7+)。
|
||||
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言(v3.7+)。
|
||||
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 `.so` 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
||||
- output_type:此函数计算结果的数据类型名称;
|
||||
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
||||
|
||||
例如,如下语句可以把 libl2norm.so 创建为系统中可用的 UDF:
|
||||
例如,如下语句可以把 libl2norm.so 创建为系统中可用的 UDF。
|
||||
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
|
||||
```
|
||||
例如,使用以下语句可以修改已经定义的 l2norm 函数的缓冲区大小为64。
|
||||
例如,使用以下语句可以修改已经定义的 l2norm 函数的缓冲区大小为 64。
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
|
||||
```
|
||||
|
@ -57,25 +57,26 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
|
|||
|
||||
## 管理 UDF
|
||||
|
||||
- 删除指定名称的用户定义函数:
|
||||
- 删除指定名称的用户定义函数。
|
||||
```
|
||||
DROP FUNCTION function_name;
|
||||
```
|
||||
|
||||
- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如bit_and, l2norm
|
||||
- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如 bit_and、l2norm
|
||||
```sql
|
||||
DROP FUNCTION bit_and;
|
||||
```
|
||||
- 显示系统中当前可用的所有 UDF:
|
||||
|
||||
- 显示系统中当前可用的所有 UDF。
|
||||
```sql
|
||||
SHOW FUNCTIONS;
|
||||
```
|
||||
|
||||
## 调用 UDF
|
||||
|
||||
在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如:
|
||||
在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。
|
||||
```sql
|
||||
SELECT bit_and(c1,c2) FROM table;
|
||||
```
|
||||
|
||||
表示对表 table 上名为 c1, c2 的数据列调用名为 bit_and 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
|
||||
表示对表 table 上名为 c1、c2 的数据列调用名为 bit_and 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
|
||||
|
|
|
@ -4,36 +4,38 @@ title: 窗口预聚集
|
|||
description: 窗口预聚集使用说明
|
||||
---
|
||||
|
||||
在大数据量场景下, 经常需要查询某段时间内的汇总结果, 当历史数据变多或者时间范围变大时, 查询时间也会相应增加. 通过预聚集的方式可以将计算结果提前存储下来, 后续查询可以直接读取聚集结果, 而不需要扫描原始数据, 如当前Block内的SMA (Small Materialized Aggregates)信息.
|
||||
Block内的SMA信息粒度较小, 若查询时间范围是日,月甚至年时, Block的数量将会很多, 因此TSMA (Time-Range Small Materialized Aggregates)支持用户指定时间窗口进行预聚集. 通过对固定时间窗口内的数据进行预计算, 并将计算结果存储下来, 查询时通过查询预计算结果以提高查询性能。
|
||||
在大数据量场景下,经常需要查询某段时间内的汇总结果,当历史数据变多或者时间范围变大时,查询时间也会相应增加。通过预聚集的方式可以将计算结果提前存储下来,后续查询可以直接读取聚集结果,而不需要扫描原始数据,如当前 Block 内的 SMA (Small Materialized Aggregates)信息。
|
||||
|
||||
Block 内的 SMA 信息粒度较小,若查询时间范围是日,月甚至年时,Block 的数量将会很多,因此 TSMA (Time-Range Small Materialized Aggregates)支持用户指定时间窗口进行预聚集。通过对固定时间窗口内的数据进行预计算,并将计算结果存储下来,查询时通过查询预计算结果以提高查询性能。
|
||||
|
||||

|
||||
|
||||
## 创建TSMA
|
||||
## 创建 TSMA
|
||||
|
||||
```sql
|
||||
-- 创建基于超级表或普通表的tsma
|
||||
-- 创建基于超级表或普通表的 tsma
|
||||
CREATE TSMA tsma_name ON [dbname.]table_name FUNCTION (func_name(func_param) [, ...] ) INTERVAL(time_duration);
|
||||
-- 创建基于小窗口tsma的大窗口tsma
|
||||
|
||||
-- 创建基于小窗口 tsma 的大窗口 tsma
|
||||
CREATE RECURSIVE TSMA tsma_name ON [db_name.]tsma_name1 INTERVAL(time_duration);
|
||||
|
||||
time_duration:
|
||||
number unit
|
||||
```
|
||||
|
||||
创建 TSMA 时需要指定 TSMA 名字, 表名字, 函数列表以及窗口大小. 当基于一个已经存在的 TSMA 创建新的 TSMA 时, 需要使用 `RECURSIVE` 关键字但不能指定 `FUNCTION()`, 新创建的 TSMA 已有 TSMA 拥有相同的函数列表, 且此种情况下所指定的 INTERVAL 必须至少为所基于的 TSMA 窗口长度的整数倍, 并且天不能基于2h或3h建立, 只能基于1h建立, 月也只能基于1d而非2d,3d建立。
|
||||
创建 TSMA 时需要指定 TSMA 名字、表名字、函数列表以及窗口大小。当基于一个已经存在的 TSMA 创建新的 TSMA 时,需要使用 `RECURSIVE` 关键字但不能指定 `FUNCTION()`,新创建的 TSMA 与已有 TSMA 拥有相同的函数列表,且此种情况下所指定的 INTERVAL 必须至少为所基于的 TSMA 窗口长度的整数倍,并且天不能基于 2h 或 3h 建立,只能基于 1h 建立,月也只能基于 1d 而非 2d、3d 建立。
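按照上述整数倍规则的一个示意(库名、表名与列名均为假设):

```sql
-- 先创建 1h 窗口的 TSMA,再基于它创建 1d 窗口的递归 TSMA(1d 为 1h 的整数倍)
CREATE TSMA tsma_1h ON test.meters FUNCTION(COUNT(ts), AVG(current)) INTERVAL(1h);
CREATE RECURSIVE TSMA tsma_1d ON test.tsma_1h INTERVAL(1d);
```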
|
||||
|
||||
其中 TSMA 命名规则与表名字类似, 长度最大限制为表名长度限制减去输出表后缀长度, 表名长度限制为193, 输出表后缀为`_tsma_res_stb_`, TSMA 名字最大长度为178.
|
||||
其中 TSMA 命名规则与表名字类似,长度最大限制为表名长度限制减去输出表后缀长度,表名长度限制为 193,输出表后缀为`_tsma_res_stb_`,TSMA 名字最大长度为 178。
|
||||
|
||||
TSMA只能基于超级表和普通表创建, 不能基于子表创建.
|
||||
TSMA 只能基于超级表和普通表创建,不能基于子表创建。
|
||||
|
||||
函数列表中只能指定支持的聚集函数(见下文), 并且函数参数必须为1个, 即使当前函数支持多个参数, 函数参数内必须为普通列名, 不能为标签列. 函数列表中完全相同的函数和列会被去重, 如同时创建两个avg(c1), 则只会计算一个输出. TSMA 计算时将会把所有`函数中间结果`都输出到另一张超级表中, 输出超级表还包含了原始表的所有tag列. 函数列表中函数个数最多支持创建表最大列个数(包括tag列)减去 TSMA 计算附加的四列, 分别为`_wstart`, `_wend`, `_wduration`, 以及一个新增tag列 `tbname`, 再减去原始表的tag列数. 若列个数超出限制, 会报`Too many columns`错误.
|
||||
函数列表中只能指定支持的聚集函数(见下文),并且函数参数必须为 1 个,即使当前函数支持多个参数,函数参数内必须为普通列名,不能为标签列。函数列表中完全相同的函数和列会被去重,如同时创建两个 avg(c1),则只会计算一个输出。TSMA 计算时将会把所有 `函数中间结果` 都输出到另一张超级表中,输出超级表还包含了原始表的所有 tag 列。函数列表中函数个数最多支持创建表最大列个数(包括 tag 列)减去 TSMA 计算附加的四列,分别为 `_wstart`、`_wend`、`_wduration`,以及一个新增 tag 列 `tbname`,再减去原始表的 tag 列数。若列个数超出限制,会报 `Too many columns` 错误。
|
||||
|
||||
由于TSMA输出为一张超级表, 因此输出表的行长度受最大行长度限制, 不同函数的`中间结果`大小各异, 一般都大于原始数据大小, 若输出表的行长度大于最大行长度限制, 将会报`Row length exceeds max length`错误. 此时需要减少函数个数或者将常用的函数进行分组拆分到多个TSMA中.
|
||||
由于 TSMA 输出为一张超级表,因此输出表的行长度受最大行长度限制,不同函数的 `中间结果` 大小各异,一般都大于原始数据大小,若输出表的行长度大于最大行长度限制,将会报 `Row length exceeds max length` 错误。此时需要减少函数个数或者将常用的函数进行分组拆分到多个 TSMA 中。
|
||||
|
||||
窗口大小的限制为[1m ~ 1y/12n]. INTERVAL 的单位与查询中INTERVAL子句相同, 如 a (毫秒), b (纳秒), h (小时), m (分钟), s (秒), u (微秒), d (天), w(周), n(月), y(年).
|
||||
窗口大小的限制为 [1m ~ 1y/12n]。INTERVAL 的单位与查询中 INTERVAL 子句相同,如 a(毫秒)、b(纳秒)、h(小时)、m(分钟)、s(秒)、u(微秒)、d(天)、w(周)、n(月)、y(年)。
|
||||
|
||||
TSMA为库内对象, 但名字全局唯一. 集群内一共可创建TSMA个数受参数`maxTsmaNum`限制, 参数默认值为3, 范围: [0-3]. 注意, 由于TSMA后台计算使用流计算, 因此每创建一条TSMA, 将会创建一条流, 因此能够创建的TSMA条数也受当前已经存在的流条数和最大可创建流条数限制.
|
||||
TSMA 为库内对象,但名字全局唯一。集群内一共可创建 TSMA 个数受参数 `maxTsmaNum` 限制,参数默认值为 3,范围:[0-3]。注意,由于 TSMA 后台计算使用流计算,因此每创建一条 TSMA,将会创建一条流,因此能够创建的 TSMA 条数也受当前已经存在的流条数和最大可创建流条数限制。
|
||||
|
||||
## 支持的函数列表
|
||||
| 函数| 备注 |
|
||||
|
@ -44,65 +46,64 @@ TSMA为库内对象, 但名字全局唯一. 集群内一共可创建TSMA个数
|
|||
|first||
|
||||
|last||
|
||||
|avg||
|
||||
|count| 若想使用count(*), 则应创建count(ts)函数|
|
||||
|count| 若想使用 count(*),则应创建 count(ts) 函数|
|
||||
|spread||
|
||||
|stddev||
|
||||
|||
|
||||
|
||||
## 删除TSMA
|
||||
## 删除 TSMA
|
||||
```sql
|
||||
DROP TSMA [db_name.]tsma_name;
|
||||
```
|
||||
若存在其他TSMA基于当前被删除TSMA创建, 则删除操作报`Invalid drop base tsma, drop recursive tsma first`错误. 因此需先删除 所有Recursive TSMA.
|
||||
若存在其他 TSMA 基于当前被删除 TSMA 创建,则删除操作报 `Invalid drop base tsma, drop recursive tsma first` 错误。因此需先删除所有 Recursive TSMA。
|
||||
|
||||
## TSMA的计算
|
||||
TSMA的计算结果为与原始表相同库下的一张超级表, 此表用户不可见. 不可删除, 在`DROP TSMA`时自动删除. TSMA的计算是通过流计算完成的, 此过程为后台异步过程, TSMA的计算结果不保证实时性, 但可以保证最终正确性.
|
||||
## TSMA 的计算
|
||||
TSMA 的计算结果为与原始表相同库下的一张超级表,此表用户不可见。不可删除,在 `DROP TSMA` 时自动删除。TSMA 的计算是通过流计算完成的,此过程为后台异步过程,TSMA 的计算结果不保证实时性,但可以保证最终正确性。
|
||||
|
||||
TSMA计算时若原始子表内没有数据, 则可能不会创建对应的输出子表, 因此在count查询中, 即使配置了`countAlwaysReturnValue`, 也不会返回该表的结果.
|
||||
TSMA 计算时若原始子表内没有数据,则可能不会创建对应的输出子表,因此在 count 查询中,即使配置了 `countAlwaysReturnValue`,也不会返回该表的结果。
|
||||
|
||||
当存在大量历史数据时, 创建TSMA之后, 流计算将会首先计算历史数据, 此期间新创建的TSMA不会被使用. 数据更新删除或者过期数据到来时自动重新计算影响部分数据。 在重新计算期间 TSMA 查询结果不保证实时性。若希望查询实时数据, 可以通过在 SQL 中添加 hint `/*+ skip_tsma() */` 或者关闭参数`querySmaOptimize`从原始数据查询。
|
||||
当存在大量历史数据时,创建 TSMA 之后,流计算将会首先计算历史数据,此期间新创建的 TSMA 不会被使用。数据更新删除或者过期数据到来时自动重新计算影响部分数据。在重新计算期间 TSMA 查询结果不保证实时性。若希望查询实时数据,可以通过在 SQL 中添加 hint `/*+ skip_tsma() */` 或者关闭参数 `querySmaOptimize` 从原始数据查询。
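例如,希望绕过 TSMA、直接从原始数据计算时,可以加上该 hint(示意,表名与列名为假设):

```sql
SELECT /*+ skip_tsma() */ COUNT(*), AVG(current) FROM meters;
```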
|
||||
|
||||
## TSMA的使用与限制
|
||||
## TSMA 的使用与限制
|
||||
|
||||
客户端配置参数: `querySmaOptimize`, 用于控制查询时是否使用TSMA, `True`为使用, `False`为不使用即从原始数据查询.
|
||||
- 客户端配置参数:`querySmaOptimize`,用于控制查询时是否使用 TSMA,`True` 为使用,`False` 为不使用,即从原始数据查询。
|
||||
- 客户端配置参数:`maxTsmaCalcDelay`,单位为秒,用于控制用户可以接受的 TSMA 计算延迟,若 TSMA 的计算进度与最新时间差距在此范围内,则该 TSMA 将会被使用,若超出该范围,则不使用,默认值:600(10 分钟),最小值:600(10 分钟),最大值:86400(1 天)。
|
||||
- 客户端配置参数:`tsmaDataDeleteMark`,单位毫秒,与流计算参数 `deleteMark` 一致,用于控制流计算中间结果的保存时间,默认值为 1d,最小值为 1h。因此那些距最后一条数据时间大于配置参数的历史数据将不保存流计算中间结果,因此若修改这些时间窗口内的数据,TSMA 的计算结果中将不包含更新的结果。即与查询原始数据结果将不一致。
|
||||
|
||||
客户端配置参数:`maxTsmaCalcDelay`,单位 s,用于控制用户可以接受的 TSMA 计算延迟,若 TSMA 的计算进度与最新时间差距在此范围内, 则该 TSMA 将会被使用, 若超出该范围, 则不使用, 默认值: 600(10 分钟), 最小值: 600(10 分钟), 最大值: 86400(1 天).
|
||||
### 查询时使用 TSMA
|
||||
|
||||
客户端配置参数: `tsmaDataDeleteMark`, 单位毫秒, 与流计算参数`deleteMark`一致, 用于控制流计算中间结果的保存时间, 默认值为: 1d, 最小值为1h. 因此那些距最后一条数据时间大于配置参数的历史数据将不保存流计算中间结果, 因此若修改这些时间窗口内的数据, TSMA的计算结果中将不包含更新的结果. 即与查询原始数据结果将不一致.
|
||||
已在 TSMA 中定义的 agg 函数在大部分查询场景下都可直接使用,若存在多个可用的 TSMA,优先使用大窗口的 TSMA,未闭合窗口通过查询小窗口 TSMA 或者原始数据计算。同时也有某些场景不能使用 TSMA(见下文)。不可用时整个查询将使用原始数据进行计算。
|
||||
|
||||
### 查询时使用TSMA
|
||||
未指定窗口大小的查询语句默认优先使用包含所有查询聚合函数的最大窗口 TSMA 进行数据的计算。如 `SELECT COUNT(*) FROM stable GROUP BY tbname` 将会使用包含 count(ts) 且窗口最大的 TSMA。因此若使用聚合查询频率高时,应当尽可能创建大窗口的 TSMA。
|
||||
|
||||
已在 TSMA 中定义的 agg 函数在大部分查询场景下都可直接使用, 若存在多个可用的 TSMA, 优先使用大窗口的 TSMA, 未闭合窗口通过查询小窗口TSMA或者原始数据计算。 同时也有某些场景不能使用 TSMA(见下文)。 不可用时整个查询将使用原始数据进行计算。
|
||||
指定窗口大小时即 `INTERVAL` 语句,使用最大的可整除窗口 TSMA。窗口查询中,`INTERVAL` 的窗口大小、`OFFSET` 以及 `SLIDING` 都影响能使用的 TSMA 窗口大小。因此若使用窗口查询较多时,需要考虑经常查询的窗口大小,以及 offset、sliding 大小来创建 TSMA。
|
||||
|
||||
未指定窗口大小的查询语句默认优先使用包含所有查询聚合函数的最大窗口 TSMA 进行数据的计算。 如`SELECT COUNT(*) FROM stable GROUP BY tbname`将会使用包含count(ts)且窗口最大的TSMA。因此若使用聚合查询频率高时, 应当尽可能创建大窗口的TSMA.
|
||||
|
||||
指定窗口大小时即 `INTERVAL` 语句,使用最大的可整除窗口 TSMA。 窗口查询中, `INTERVAL` 的窗口大小, `OFFSET` 以及 `SLIDING` 都影响能使用的 TSMA 窗口大小, 可整 除窗口 TSMA 即 TSMA 窗口大小可被查询语句的 `INTERVAL, OFFSET, SLIDING` 整除的窗口。因此若使用窗口查询较多时, 需要考虑经常查询的窗口大小, 以及 offset, sliding大小来创建TSMA.
|
||||
|
||||
例 1. 如 创建 TSMA 窗口大小 `5m` 一条, `10m` 一条, 查询时 `INTERVAL(30m)`, 那么优先使用 `10m` 的 TSMA, 若查询为 `INTERVAL(30m, 10m) SLIDING(5m)`, 那么仅可使用 `5m` 的 TSMA 查询。
|
||||
例如,创建 TSMA 窗口大小 `5m` 一条,`10m` 一条,查询时 `INTERVAL(30m)`,那么优先使用 `10m` 的 TSMA,若查询为 `INTERVAL(30m, 10m) SLIDING(5m)`,那么仅可使用 `5m` 的 TSMA 查询。
|
||||
|
||||
|
||||
### 查询限制
|
||||
|
||||
在开启了参数`querySmaOptimize`并且无`skip_tsma()` hint时, 以下查询场景无法使用TSMA:
|
||||
在开启了参数 `querySmaOptimize` 并且无 `skip_tsma()` hint 时,以下查询场景无法使用 TSMA。
|
||||
|
||||
- 某个TSMA 中定义的 agg 函数不能覆盖当前查询的函数列表时
|
||||
- 某个 TSMA 中定义的 agg 函数不能覆盖当前查询的函数列表时
|
||||
- 非 `INTERVAL` 的其他窗口,或者 `INTERVAL` 查询窗口大小(包括 `INTERVAL,SLIDING,OFFSET`)不是定义窗口的整数倍,如定义窗口为 2m,查询使用 5 分钟窗口,但若存在 1m 的窗口,则可以使用。
|
||||
- 查询 `WHERE` 条件中包含任意普通列(非主键时间列)的过滤。
|
||||
- `PARTITION` 或者 `GROUY BY` 包含任意普通列或其表达式时
|
||||
- 可以使用其他更快的优化逻辑时, 如last cache优化, 若符合last优化的条件, 则先走last 优化, 无法走last时, 再判断是否可以走tsma优化
|
||||
- 可以使用其他更快的优化逻辑时,如 last cache 优化,若符合 last 优化的条件,则先走 last 优化,无法走 last 时,再判断是否可以走 tsma 优化
|
||||
- 当前 TSMA 计算进度延迟大于配置参数 `maxTsmaCalcDelay`时
|
||||
|
||||
下面是一些例子:
|
||||
下面是一些例子:
|
||||
|
||||
```sql
|
||||
SELECT agg_func_list [, pesudo_col_list] FROM stable WHERE exprs [GROUP/PARTITION BY [tbname] [, tag_list]] [HAVING ...] [INTERVAL(time_duration, offset) SLIDING(duration)]...;
|
||||
|
||||
-- 创建
|
||||
CREATE TSMA tsma1 ON stable FUNCTION(COUNT(ts), SUM(c1), SUM(c3), MIN(c1), MIN(c3), AVG(c1)) INTERVAL(1m);
|
||||
|
||||
-- 查询
|
||||
SELECT COUNT(*), SUM(c1) + SUM(c3) FROM stable; ---- use tsma1
|
||||
SELECT COUNT(*), AVG(c1) FROM stable GROUP/PARTITION BY tbname, tag1, tag2; --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h); ---use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h); --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1), SPREAD(c1) FROM stable INTERVAL(1h); ----- can't use, spread func not defined, although SPREAD can be calculated by MIN and MAX which are defined.
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(30s); ----- can't use tsma1, time_duration not fit. Normally, query_time_duration should be multple of create_duration.
|
||||
SELECT COUNT(*), MIN(c1) FROM stable where c2 > 0; ---- can't use tsma1, can't do c2 filtering
|
||||
|
@ -113,10 +114,10 @@ SELECT MIN(c3), MIN(c2) FROM stable INTERVAL(1m); ---- can't use tsma1, c2 is no
|
|||
CREATE RECURSIVE TSMA tsma2 on tsma1 INTERVAL(1h);
|
||||
SELECT COUNT(*), SUM(c1) FROM stable; ---- use tsma2
|
||||
SELECT COUNT(*), AVG(c1) FROM stable GROUP/PARTITION BY tbname, tag1, tag2; --- use tsma2
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(2h); ---use tsma2
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(2h); --- use tsma2
|
||||
SELECT COUNT(*), MIN(c1) FROM stable WHERE ts < '2023-01-01 10:10:10' INTERVAL(30m); --use tsma1
|
||||
SELECT COUNT(*), MIN(c1) + MIN(c3) FROM stable INTERVAL(30m); ---use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h) SLIDING(30m); ---use tsma1
|
||||
SELECT COUNT(*), MIN(c1) + MIN(c3) FROM stable INTERVAL(30m); --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h) SLIDING(30m); --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1), SPREAD(c1) FROM stable INTERVAL(1h); ----- can't use tsma1 or tsma2, spread func not defined
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(30s); ----- can't use tsma1 or tsma2, time_duration not fit. Normally, query_time_duration should be multple of create_duration.
|
||||
SELECT COUNT(*), MIN(c1) FROM stable where c2 > 0; ---- can't use tsma1 or tsam2, can't do c2 filtering
|
||||
|
@ -124,15 +125,15 @@ SELECT COUNT(*), MIN(c1) FROM stable where c2 > 0; ---- can't use tsma1 or tsam2
|
|||
|
||||
### 使用限制
|
||||
|
||||
创建TSMA之后, 对原始超级表的操作有以下限制:
|
||||
创建 TSMA 之后,对原始超级表的操作有以下限制:
|
||||
|
||||
- 必须删除该表上的所有TSMA才能删除该表.
|
||||
- 原始表所有tag列不能删除, 也不能修改tag列名或子表的tag值, 必须先删除TSMA, 才能删除tag列.
|
||||
- 若某些列被TSMA使用了, 则这些列不能被删除, 必须先删除TSMA. 添加列不受影响, 但是新添加的列不在任何TSMA中, 因此若要计算新增列, 需要新创建其他的TSMA.
|
||||
- 必须删除该表上的所有 TSMA 才能删除该表。
|
||||
- 原始表所有 tag 列不能删除,也不能修改 tag 列名或子表的 tag 值,必须先删除 TSMA,才能删除 tag 列。
|
||||
- 若某些列被 TSMA 使用了,则这些列不能被删除,必须先删除 TSMA。添加列不受影响,但是新添加的列不在任何 TSMA 中,因此若要计算新增列,需要新创建其他的 TSMA。
|
||||
|
||||
## 查看 TSMA
|
||||
```sql
|
||||
SHOW [db_name.]TSMAS;
|
||||
SELECT * FROM information_schema.ins_tsma;
|
||||
```
|
||||
若创建时指定的较多的函数, 且列名较长, 在显示函数列表时可能会被截断(目前最大支持输出256KB).
|
||||
若创建时指定的较多的函数,且列名较长,在显示函数列表时可能会被截断(目前最大支持输出 256KB)。
|
||||
|
|
|
@ -9,15 +9,15 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| # | **元素** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | VARCHAR | 新增 | BINARY类型的别名。
|
||||
| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
|
||||
| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
|
||||
| 4 | _IROWTS伪列 | 新增 | 用于返回 interp 函数插值结果对应的时间戳列。
|
||||
| 5 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
|
||||
| 2 | TIMESTAMP 字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
|
||||
| 3 | _ROWTS 伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
|
||||
| 4 | _IROWTS 伪列 | 新增 | 用于返回 interp 函数插值结果对应的时间戳列。
|
||||
| 5 | INFORMATION_SCHEMA | 新增 | 包含各种 SCHEMA 定义的系统数据库。
|
||||
| 6 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
|
||||
| 7 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
|
||||
| 8 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
|
||||
| 8 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT 的各个子句均全面支持符合语法语义的混合运算。
|
||||
| 9 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
|
||||
| 10 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 10 | 时间线子句和时间函数用于超级表查询 | 增强 |没有 PARTITION BY 时,超级表的数据会被合并成一条时间线。
|
||||
| 11 | GEOMETRY | 新增 | 几何类型。
|
||||
|
||||
## SQL 语句变更
|
||||
|
@ -26,15 +26,15 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
|
||||
| # | **语句** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
|
||||
| 3 | ALTER DATABASE | 调整 | <p>废除</p><ul><li>QUORUM:写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>COMP:3.0版本暂不支持修改。</li></ul><p>新增</p><ul><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 4 | ALTER STABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG:代替原CHANGE TAG子句。</li><li>COMMENT:修改超级表的注释。</li></ul>
|
||||
| 5 | ALTER TABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG:代替原CHANGE TAG子句。</li><li>COMMENT:修改表的注释。</li><li>TTL:修改表的生命周期。</li></ul>
|
||||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
|
||||
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 1 | ALTER ACCOUNT | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 2 | ALTER ALL DNODES | 新增 | 修改所有 DNODE 的参数。
|
||||
| 3 | ALTER DATABASE | 调整 | <p>废除</p><ul><li>QUORUM:写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKS:VNODE使用的内存块数。3.0 版本使用 BUFFER 来表示 VNODE 写入内存池的大小。</li><li>UPDATE:更新操作的支持模式。3.0 版本所有数据库都支持部分列更新。</li><li>CACHELAST:缓存最新一行数据的模式。3.0 版本用 CACHEMODEL 代替。</li><li>COMP:3.0 版本暂不支持修改。</li></ul><p>新增</p><ul><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD:代替原 FSYNC 参数。</li><li>WAL_LEVEL:代替原 WAL 参数。</li><li>WAL_RETENTION_PERIOD:v3.0.4.0 新增,WAL 文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:v3.0.4.0 新增,WAL 文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0 版本新增支持带单位的设置方式。</li></ul>
|
||||
| 4 | ALTER STABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0 版本使用 RENAME TAG 代替。<br/>新增</li><li>RENAME TAG:代替原 CHANGE TAG 子句。</li><li>COMMENT:修改超级表的注释。</li></ul>
|
||||
| 5 | ALTER TABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0 版本使用 RENAME TAG 代替。<br/>新增</li><li>RENAME TAG:代替原 CHANGE TAG 子句。</li><li>COMMENT:修改表的注释。</li><li>TTL:修改表的生命周期。</li></ul>
|
||||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0 版本使用 GRANT 和 REVOKE 来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定 VNODE 的数据。
|
||||
| 8 | CREATE ACCOUNT | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE 使用的内存块数。3.0 版本使用 BUFFER 来表示 VNODE 写入内存池的大小。</li><li>CACHE:VNODE 使用的内存块的大小。3.0 版本使用 BUFFER 来表示 VNODE 写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0 版本用 CACHEMODEL 代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0 版本使用 DURATION 代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0 版本使用 WAL_FSYNC_PERIOD 代替。</li><li>QUORUM:写入需要的副本确认数。3.0 版本使用 STRICT 来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0 版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0 版本使用 WAL_LEVEL 代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原 DAYS 参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始 VGROUP 的数目。</li><li>WAL_FSYNC_PERIOD:代替原 FSYNC 参数。</li><li>WAL_LEVEL:代替原 WAL 参数。</li><li>WAL_RETENTION_PERIOD:WAL 文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:WAL 文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0 版本新增支持带单位的设置方式。</li></ul>
|
||||
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||
| 11 | CREATE INDEX | 新增 | 创建 SMA 索引。
|
||||
| 12 | CREATE MNODE | 新增 | 创建管理节点。
|
||||
|
@ -43,7 +43,7 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 15 | CREATE STREAM | 新增 | 创建流。
|
||||
| 16 | CREATE TABLE | 调整 | 新增表参数语法<ul><li>COMMENT:表注释。</li><li>WATERMARK:指定窗口的关闭时间。</li><li>MAX_DELAY:用于控制推送计算结果的最大延迟。</li><li>ROLLUP:指定的聚合函数,提供基于多层级的降采样聚合结果。</li><li>SMA:提供基于数据块的自定义预计算功能。</li><li>TTL:用来指定表的生命周期的参数。</li></ul>
|
||||
| 17 | CREATE TOPIC | 新增 | 创建订阅主题。
|
||||
| 18 | DROP ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 18 | DROP ACCOUNT | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 19 | DROP CONSUMER GROUP | 新增 | 删除消费组。
|
||||
| 20 | DROP INDEX | 新增 | 删除索引。
|
||||
| 21 | DROP MNODE | 新增 | 删除管理节点。
|
||||
|
@ -54,52 +54,52 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 26 | EXPLAIN | 新增 | 查看查询语句的执行计划。
|
||||
| 27 | GRANT | 新增 | 授予用户权限。
|
||||
| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0 版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 31 | REVOKE | 新增 | 回收用户权限。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。</li><li>DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。</li><li>JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。</li><li>FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE后可以使用任意的标量表达式。</li><li>GROUP BY功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。</li><li>新增PARTITION BY语法。替代原来的GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT 关闭隐式结果列,输出列均需要由 SELECT 子句来指定。</li><li>DISTINCT 功能全面支持。2.x 版本只支持对标签列去重,并且不可以和 JOIN、GROUP BY 等子句混用。</li><li>JOIN 功能增强。增加支持:JOIN 后 WHERE 条件中有 OR 条件;JOIN 后的多表运算;JOIN 后的多表 GROUP BY。</li><li>FROM 后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和 UNION ALL 混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE 后可以使用任意的标量表达式。</li><li>GROUP BY 功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION 可以用于超级表。之前版本,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW 可以用于超级表。之前版本,超级表的数据会被合并成一条时间线。</li><li>ORDER BY 功能大幅增强。不再必须和 GROUP BY 子句一起使用;不再有排序表达式个数的限制;增加支持 NULLS FIRST/LAST 语法功能;支持符合语法语义的任意表达式。</li><li>新增 PARTITION BY 语法。替代原来的 GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 34 | SHOW APPS | 新增 | 显示接入集群的应用(客户端)信息。
|
||||
| 35 | SHOW CONSUMERS | 新增 | 显示当前数据库下所有活跃的消费者的信息。
|
||||
| 36 | SHOW DATABASES | 调整 | 3.0版本只显示数据库名。
|
||||
| 37 | SHOW FUNCTIONS | 调整 | 3.0版本只显示自定义函数名。
|
||||
| 38 | SHOW LICENCE | 新增 | 和SHOW GRANTS 命令等效。
|
||||
| 36 | SHOW DATABASES | 调整 | 3.0 版本只显示数据库名。
|
||||
| 37 | SHOW FUNCTIONS | 调整 | 3.0 版本只显示自定义函数名。
|
||||
| 38 | SHOW LICENCE | 新增 | 和 SHOW GRANTS 命令等效。
|
||||
| 39 | SHOW INDEXES | 新增 | 显示已创建的索引。
|
||||
| 40 | SHOW LOCAL VARIABLES | 新增 | 显示当前客户端配置参数的运行值。
|
||||
| 41 | SHOW MODULES | 废除 | 显示当前系统中所安装的组件的信息。
|
||||
| 42 | SHOW QNODES | 新增 | 显示当前系统中 QNODE 的信息。
|
||||
| 43 | SHOW STABLES | 调整 | 3.0版本只显示超级表名。
|
||||
| 44 | SHOW STREAMS | 调整 | 2.x版本此命令显示系统中已创建的连续查询的信息。3.0版本废除了连续查询,用流代替。此命令显示已创建的流。
|
||||
| 43 | SHOW STABLES | 调整 | 3.0 版本只显示超级表名。
|
||||
| 44 | SHOW STREAMS | 调整 | 2.x 版本此命令显示系统中已创建的连续查询的信息。3.0 版本废除了连续查询,用流代替。此命令显示已创建的流。
|
||||
| 45 | SHOW SUBSCRIPTIONS | 新增 | 显示当前数据库下的所有订阅关系。
|
||||
| 46 | SHOW TABLES | 调整 | 3.0版本只显示表名。
|
||||
| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替2.x版本中的SELECT _block_dist() FROM \{ tb_name | stb_name }方式。
|
||||
| 46 | SHOW TABLES | 调整 | 3.0 版本只显示表名。
|
||||
| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替 2.x 版本中的 `SELECT _block_dist() FROM tb_name` 方式。
|
||||
| 48 | SHOW TOPICS | 新增 | 显示当前数据库下的所有订阅主题。
|
||||
| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 | 显示指定 DNODE 的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中 VNODE 的信息。
|
||||
| 52 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整VGROUP中VNODE的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整VGROUP中VNODE的分布。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整 VGROUP 中 VNODE 的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整 VGROUP 中 VNODE 的分布。
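
为了更直观地理解上表中 CREATE DATABASE 的参数变化,下面给出一个示意性的建库语句(库名 power 与各参数取值均为假设,仅用于演示 3.0 新参数的写法,实际可用取值以 3.0 官方语法文档为准):

```sql
-- 示意:使用 3.0 新参数名(BUFFER、CACHEMODEL、DURATION、WAL_LEVEL 等)建库
-- 库名 power 及各取值为假设,仅演示参数写法
CREATE DATABASE power
  BUFFER 96                -- 代替 2.x 的 BLOCKS/CACHE,表示 VNODE 写入内存池大小
  CACHEMODEL 'last_row'    -- 代替 2.x 的 CACHELAST
  DURATION 10d             -- 代替 2.x 的 DAYS,支持带单位
  KEEP 365d                -- 3.0 新增支持带单位的写法
  WAL_LEVEL 1              -- 代替 2.x 的 WAL
  WAL_FSYNC_PERIOD 3000    -- 代替 2.x 的 FSYNC
  VGROUPS 4;
```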
|
||||
|
||||
## SQL 函数变更
|
||||
|
||||
| # | **函数** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | TWA | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 2 | IRATE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 3 | LEASTSQUARES | 增强 | 可以用于超级表了。
|
||||
| 4 | ELAPSED | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 5 | DIFF | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 6 | DERIVATIVE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 7 | CSUM | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 8 | MAVG | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 12 | TIMETRUNCATE | 增强 | 增加ignore_timezone参数,可选是否使用,默认值为1.
|
||||
| 1 | TWA | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 2 | IRATE | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 3 | LEASTSQUARES | 增强 | 可以用于超级表。
|
||||
| 4 | ELAPSED | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 5 | DIFF | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 6 | DERIVATIVE | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 7 | CSUM | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 8 | MAVG | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 9 | SAMPLE | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 10 | STATECOUNT | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 11 | STATEDURATION | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 12 | TIMETRUNCATE | 增强 | 增加 ignore_timezone 参数,可选是否使用,默认值为 1。
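
下面给出一个示意性的查询,演示上表中“可直接用于超级表”的增强以及 TIMETRUNCATE 的新参数(超级表 meters 及其列 ts、current 为假设的示例对象,写法仅供参考):

```sql
-- 示意:在超级表上直接使用 TWA,并用 PARTITION BY 按子表保留各自时间线
SELECT tbname, _wstart, TWA(current)
FROM meters
PARTITION BY tbname
INTERVAL(10m);

-- 示意:TIMETRUNCATE 新增的 ignore_timezone 参数(默认值为 1)
SELECT TIMETRUNCATE(ts, 1d, 0) FROM meters LIMIT 1;
```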
|
||||
|
||||
|
||||
## SCHEMALESS 变更
|
||||
|
||||
| # | **元素** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | 主键ts 变更为 _ts | 变更 | schemaless自动建的列名用 _ 开头,不同于2.x。
|
||||
| 1 | 主键 ts 变更为 _ts | 变更 | schemaless 自动建的列名用 `_` 开头,不同于 2.x。
|
||||
|
|
|
@ -14,9 +14,9 @@ description: 关联查询详细描述
|
|||
|
||||
连接条件是指进行表关联所指定的条件,TDengine 支持的所有关联查询都需要指定连接条件,连接条件通常(Inner Join 和 Window Join 例外)只出现在 `ON` 之后。根据语义,Inner Join 中出现在 `WHERE` 之后的条件也可以视作连接条件,而 Window Join 是通过 `WINDOW_OFFSET` 来指定连接条件。
|
||||
|
||||
除 ASOF Join 外,TDengine 支持的所有 Join 类型都必须显式指定连接条件,ASOF Join 因为默认定义有隐式的连接条件,所以(在默认条件可以满足需求的情况下)可以不必显式指定连接条件。
|
||||
除 ASOF Join 外,TDengine 支持的所有 Join 类型都必须显式指定连接条件,ASOF Join 因为默认定义有隐式的连接条件,所以(在默认条件可以满足需求的情况下)可以不必显式指定连接条件。
|
||||
|
||||
除 ASOF/Window Join 外,连接条件中除了包含主连接条件外,还可以包含任意多条其他连接条件,主连接条件与其他连接条件间必须是 `AND` 关系,而其他连接条件之间则没有这个限制。其他连接条件中可以包含主键列、Tag 、普通列、常量及其标量函数或运算的任意逻辑运算组合。
|
||||
除 ASOF/Window Join 外,连接条件中除了包含主连接条件外,还可以包含任意多条其他连接条件,主连接条件与其他连接条件间必须是 `AND` 关系,而其他连接条件之间则没有这个限制。其他连接条件中可以包含主键列、Tag、普通列、常量及其标量函数或运算的任意逻辑运算组合。
|
||||
|
||||
以智能电表为例,下面这几条 SQL 都包含合法的连接条件:
|
||||
|
||||
|
@ -196,7 +196,7 @@ SELECT ... FROM table_name1 LEFT|RIGHT ASOF JOIN table_name2 [ON ...] [JLIMIT jl
|
|||
- 如果不含 `ON` 子句或 `ON` 子句中未指定主键列的匹配规则,则默认主键匹配规则运算符是 “>=”,即(对 Left ASOF Join 来说)右表中主键时戳小于等于左表主键时戳的行数据。不支持多个主连接条件。
|
||||
- `ON` 子句中还可以指定除主键列外的 Tag、普通列(不支持标量函数及运算)之间的等值条件用于分组计算,除此之外不支持其他类型的条件。
|
||||
- 所有 ON 条件间只支持 `AND` 运算。
|
||||
- `JLIMIT` 用于指定单行匹配结果的最大行数,可选,未指定时默认值为1,即左/右表每行数据最多从右/左表中获得一行匹配结果。`JLIMIT` 取值范围为 [0, 1024]。符合匹配条件的 `jlimit_num` 条数据不要求时间戳相同,当右/左表中不存在满足条件的 `jlimit_num` 条数据时,返回的结果行数可能小于 `jlimit_num`;当右/左表中存在符合条件的多于 `jlimit_num` 条数据时,如果时间戳相同将随机返回 `jlimit_num` 条数据。
|
||||
- `JLIMIT` 用于指定单行匹配结果的最大行数,可选,未指定时默认值为 1,即左/右表每行数据最多从右/左表中获得一行匹配结果。`JLIMIT` 取值范围为 [0, 1024]。符合匹配条件的 `jlimit_num` 条数据不要求时间戳相同,当右/左表中不存在满足条件的 `jlimit_num` 条数据时,返回的结果行数可能小于 `jlimit_num`;当右/左表中存在符合条件的多于 `jlimit_num` 条数据时,如果时间戳相同将随机返回 `jlimit_num` 条数据。
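
在进入下面的示例小节之前,先给出一个最小化的示意查询(子表名 d1001、d1002 与列名均为假设,仅用于说明默认 “>=” 匹配与 JLIMIT 的写法):

```sql
-- 示意:Left ASOF Join,未指定主键匹配规则时默认按 “>=” 匹配;
-- JLIMIT 2 表示左表每行最多从右表取 2 行匹配结果
SELECT a.ts, a.current, b.voltage
FROM d1001 a
LEFT ASOF JOIN d1002 b
JLIMIT 2;
```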
|
||||
|
||||
#### 示例
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: 可配置压缩
|
|||
description: 可配置压缩算法
|
||||
---
|
||||
|
||||
从 TDengine 3.3.0.0 版本开始,TDengine 提供了更高级的压缩功能,用户可以在建表时针对每一列配置是否进行压缩、以及使用的压缩算法和压缩级别。
|
||||
从 v3.3.0.0 开始,TDengine 提供了更高级的压缩功能,用户可以在建表时针对每一列配置是否进行压缩、以及使用的压缩算法和压缩级别。
|
||||
|
||||
## 压缩术语定义
|
||||
|
||||
|
@ -15,7 +15,7 @@ description: 可配置压缩算法
|
|||
|
||||
### 压缩级别
|
||||
|
||||
在本文中特指二级压缩算法内部的级别,比如zstd,至少8个level可选,每个level 下都有不同表现,本质是压缩率、压缩速度、解压速度之间的 tradeoff,为了避免选择困难,特简化定义为如下三种级别:
|
||||
在本文中特指二级压缩算法内部的级别,比如 zstd,至少 8 个 level 可选,每个 level 下都有不同表现,本质是压缩率、压缩速度、解压速度之间的 tradeoff,为了避免选择困难,特简化定义为如下三种级别:
|
||||
|
||||
- high:压缩率最高,压缩速度和解压速度相对最差。
|
||||
- low:压缩速度和解压速度最好,压缩率相对最低。
|
||||
|
@ -23,9 +23,9 @@ description: 可配置压缩算法
|
|||
|
||||
### 压缩算法列表
|
||||
|
||||
- 编码算法列表(一级压缩):simple8b, bit-packing,delta-i, delta-d, disabled
|
||||
- 编码算法列表(一级压缩):simple8b、bit-packing、delta-i、delta-d、disabled
|
||||
|
||||
- 压缩算法列表(二级压缩): lz4、zlib、zstd、tsz、xz、disabled
|
||||
- 压缩算法列表(二级压缩):lz4、zlib、zstd、tsz、xz、disabled
|
||||
|
||||
- 各个数据类型的默认压缩算法列表和适用范围
|
||||
|
||||
|
@ -50,9 +50,9 @@ CREATE [dbname.]tabname (colName colType [ENCODE 'encode_type'] [COMPRESS 'compr
|
|||
**参数说明**
|
||||
|
||||
- tabname:超级表或者普通表名称
|
||||
- encode_type: 一级压缩,具体参数见上面列表
|
||||
- compress_type: 二级压缩,具体参数见上面列表
|
||||
- level: 特指二级压缩的级别,默认值为medium, 支持简写为 'h'/'l'/'m'
|
||||
- encode_type:一级压缩,具体参数见上面列表
|
||||
- compress_type:二级压缩,具体参数见上面列表
|
||||
- level:特指二级压缩的级别,默认值为 medium,支持简写为 'h'、'l'、'm'
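
按照上面的语法和参数说明,下面给出一个示意性的建表语句(表名与列名为假设,编码/压缩算法的组合仅供参考,实际可用组合以各数据类型的默认压缩算法列表和适用范围为准):

```sql
-- 示意:建表时为不同列分别指定一级编码、二级压缩算法及压缩级别
CREATE TABLE sensors (
  ts      TIMESTAMP   ENCODE 'delta-i'  COMPRESS 'lz4'  LEVEL 'medium',
  current FLOAT       ENCODE 'delta-d'  COMPRESS 'zstd' LEVEL 'high',
  voltage INT         ENCODE 'simple8b' COMPRESS 'zlib' LEVEL 'low',
  remark  VARCHAR(64) COMPRESS 'zstd'
);
```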
|
||||
|
||||
**功能说明**
|
||||
|
||||
|
@ -67,8 +67,8 @@ ALTER TABLE [db_name.]tabName MODIFY COLUMN colName [ENCODE 'ecode_type'] [COMPR
|
|||
|
||||
**参数说明**
|
||||
|
||||
- tabName: 表名,可以为超级表、普通表
|
||||
- colName: 待更改压缩算法的列, 只能为普通列
|
||||
- tabName:表名,可以为超级表、普通表
|
||||
- colName:待更改压缩算法的列,只能为普通列
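
对应上面的语法,下面是一个示意性的修改语句(表名 sensors、列名 current 沿用前文的假设示例):

```sql
-- 示意:将普通列 current 的二级压缩算法改为 zstd,压缩级别改为 high
ALTER TABLE sensors MODIFY COLUMN current ENCODE 'delta-d' COMPRESS 'zstd' LEVEL 'high';
```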
|
||||
|
||||
**功能说明**
|
||||
|
||||
|
@ -87,4 +87,4 @@ DESCRIBE [dbname.]tabName
|
|||
## 兼容性
|
||||
|
||||
- 完全兼容已经存在的数据
|
||||
- 从更低版本升级到 3.3.0.0 后不能回退
|
||||
- 从更低版本升级到 v3.3.0.0 后不能回退
|
||||
|
|
|
@ -4,14 +4,13 @@ title: "视图"
|
|||
sidebar_label: "视图"
|
||||
---
|
||||
|
||||
从 TDengine 3.2.1.0 开始,TDengine 企业版提供视图功能,便于用户简化操作,提升用户间的分享能力。
|
||||
从 v3.2.1.0 开始,TDengine 企业版提供视图功能,便于用户简化操作,提升用户间的分享能力。
|
||||
|
||||
视图(View)本质上是一个存储在数据库中的查询语句。视图(非物化视图)本身不包含数据,只有在从视图读取数据时才动态执行视图所指定的查询语句。我们在创建视图时指定一个名称,然后可以像使用普通表一样对其进行查询等操作。视图的使用需遵循以下规则:
|
||||
- 视图可以嵌套定义和使用,视图与创建时指定的或当前数据库绑定使用。
|
||||
- 在同一个数据库内,视图名称不允许重名,视图名跟表名也推荐不重名(不强制)。当出现视图与表名重名时,写入、查询、授权、回收权限等操作优先使用同名表。
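
在进入下面的语法小节之前,先给出一个直观的示意(库名 power、超级表 meters 及其列名均为假设的示例对象,准确语法以下文为准):

```sql
-- 示意:创建(或覆盖)一个视图,并像普通表一样查询它
CREATE OR REPLACE VIEW power.view_high_voltage AS
  SELECT ts, tbname, voltage FROM power.meters WHERE voltage > 220;

SELECT * FROM power.view_high_voltage LIMIT 10;

-- 示意:删除视图
DROP VIEW IF EXISTS power.view_high_voltage;
```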
|
||||
|
||||
|
||||
|
||||
## 语法
|
||||
|
||||
### 创建(更新)视图
|
||||
|
@ -58,7 +57,7 @@ DROP VIEW [IF EXISTS] [db_name.]view_name;
|
|||
### 规则
|
||||
- 视图的创建者和 root 用户默认具备所有权限。
|
||||
- 对其他用户进行授权与回收权限可以通过 GRANT 和 REVOKE 语句进行,该操作只能由 root 用户进行。
|
||||
- 视图权限需单独授权与回收,通过db.*进行的授权与回收不含视图权限。
|
||||
- 视图权限需单独授权与回收,通过 db.* 进行的授权与回收不含视图权限。
|
||||
- 视图可以嵌套定义与使用,同理对视图权限的校验也是递归进行的。
|
||||
- 为了方便视图的分享与使用,引入视图有效用户(即视图的创建用户)的概念,被授权用户可以使用视图有效用户的库、表及嵌套视图的读写权限。注:视图被 REPLACE 后有效用户也会被更新。
|
||||
|
||||
|
@ -67,7 +66,7 @@ DROP VIEW [IF EXISTS] [db_name.]view_name;
|
|||
| 序号 | 操作 | 权限要求 |
|
||||
| ---- | --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | CREATE VIEW <br/>(创建新视图) | 用户对视图所属数据库有 WRITE 权限<br/>且<br/> 用户对视图的目标库、表、视图有查询权限,若查询中的对象是视图需满足当前表中第 8 条规则 |
|
||||
| 2 | CREATE OR REPLACE VIEW <br/>(覆盖旧视图) | 用户对视图所属数据库有 WRITE 权限 且 对旧有视图有 ALTER 权限 <br/>且<br/> 用户对视图的目标库、表、视图有查询权限,若查询中的对象是视图需满足当前表中第8条规则 |
|
||||
| 2 | CREATE OR REPLACE VIEW <br/>(覆盖旧视图) | 用户对视图所属数据库有 WRITE 权限 且 对旧有视图有 ALTER 权限 <br/>且<br/> 用户对视图的目标库、表、视图有查询权限,若查询中的对象是视图需满足当前表中第 8 条规则 |
|
||||
| 3 | DROP VIEW | 用户对视图有 ALTER 权限 |
|
||||
| 4 | SHOW VIEWS | 无 |
|
||||
| 5 | SHOW CREATE VIEW | 无 |
|
||||
|
|
|
@ -557,8 +557,10 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
| ---------- | --------------------- | -------------------------------------------------------------------------------- | ------------------------------ |
|
||||
| 0x800003E6 | Consumer not exist | Consumer 超时下线 | 重新建 consumer 订阅数据 |
|
||||
| 0x800003EA | Consumer not ready | Consumer 正在平衡中 | 等待 2 秒后重试 |
|
||||
| 0x80004000 | Invalid message | 订阅到的数据非法,一般不会出现 | 具体查看 client 端的错误日志提示 |
|
||||
| 0x80004001 | Consumer mismatch | 订阅请求的 vnode 和重新分配的 vnode不一致,一般存在于有新消费者加入相同消费者组里时 | 内部错误,不暴露给用户 |
|
||||
| 0x80004001 | Consumer mismatch | 订阅请求的 vnode 和重新分配的 vnode 不一致,一般存在于有新消费者加入相同消费者组里时 | 内部错误 |
|
||||
| 0x80004002 | Consumer closed | 消费者已经不存在了 | 查看是否已经 close 掉了 |
|
||||
| 0x80004017 | Invalid status, please subscribe topic first | 数据订阅状态不对 | 没有调用 subscribe,直接 poll 数据 |
|
||||
| 0x80004100 | Stream task not exist | 流计算任务不存在 | 具体查看 server 端的错误日志 |
|
||||
|
|
|
@ -348,9 +348,9 @@ typedef int32_t (*TScriptCloseFunc)();
|
|||
#define udfFatal(...) { if (udfDebugFlag & 1) { taosPrintLog("UDF FATAL ", 1, 255, __VA_ARGS__); }}
|
||||
#define udfError(...) { if (udfDebugFlag & 1) { taosPrintLog("UDF ERROR ", 1, 255, __VA_ARGS__); }}
|
||||
#define udfWarn(...) { if (udfDebugFlag & 2) { taosPrintLog("UDF WARN ", 2, 255, __VA_ARGS__); }}
|
||||
#define udfInfo(...) { if (udfDebugFlag & 2) { taosPrintLog("UDF ", 2, 255, __VA_ARGS__); }}
|
||||
#define udfDebug(...) { if (udfDebugFlag & 4) { taosPrintLog("UDF ", 4, udfDebugFlag, __VA_ARGS__); }}
|
||||
#define udfTrace(...) { if (udfDebugFlag & 8) { taosPrintLog("UDF ", 8, udfDebugFlag, __VA_ARGS__); }}
|
||||
#define udfInfo(...) { if (udfDebugFlag & 2) { taosPrintLog("UDF INFO ", 2, 255, __VA_ARGS__); }}
|
||||
#define udfDebug(...) { if (udfDebugFlag & 4) { taosPrintLog("UDF DEBUG ", 4, udfDebugFlag, __VA_ARGS__); }}
|
||||
#define udfTrace(...) { if (udfDebugFlag & 8) { taosPrintLog("UDF TRACE ", 8, udfDebugFlag, __VA_ARGS__); }}
|
||||
#endif
|
||||
// clang-format on
|
||||
|
||||
|
|
|
@ -409,48 +409,16 @@ void* getTaskPoolWorkerCb();
|
|||
#define IS_AUDIT_CTB_NAME(_ctbname) \
|
||||
((*(_ctbname) == 't') && (0 == strncmp(_ctbname, TSDB_AUDIT_CTB_OPERATION, TSDB_AUDIT_CTB_OPERATION_LEN)))
|
||||
|
||||
#define qFatal(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_FATAL) { \
|
||||
taosPrintLog("QRY FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qError(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_ERROR) { \
|
||||
taosPrintLog("QRY ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qWarn(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_WARN) { \
|
||||
taosPrintLog("QRY WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qInfo(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_INFO) { \
|
||||
taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qDebug(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qTrace(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_TRACE) { \
|
||||
taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
#define qDebugL(...) \
|
||||
do { \
|
||||
if (qDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
} while (0)
|
||||
// clang-format off
|
||||
#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY INFO ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY DEBUG ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY TRACE ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qDebugL(...)do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLongString("QRY DEBUG ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define qInfoL(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLongString("QRY INFO ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
#define QRY_ERR_RET(c) \
|
||||
do { \
|
||||
|
|
|
@ -128,18 +128,19 @@ bool reportThreadSetQuit();
|
|||
void writeCrashLogToFile(int signum, void *sigInfo, char *nodeType, int64_t clusterId, int64_t startTime);
|
||||
|
||||
// clang-format off
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uWarn(...) { if (uDebugFlag & DEBUG_WARN) { taosPrintLog("UTL WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uInfo(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLog("UTL ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
||||
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL ", DEBUG_TRACE, uDebugFlag, __VA_ARGS__); }}
|
||||
#define uDebugL(...){ if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
||||
#define uInfoL(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLongString("UTL ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uInfo(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLog("UTL INFO ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL DEBUG ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
||||
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL TRACE ", DEBUG_TRACE, uDebugFlag, __VA_ARGS__); }}
|
||||
#define uDebugL(...){ if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL DEBUG ", DEBUG_DEBUG, uDebugFlag, __VA_ARGS__); }}
|
||||
#define uInfoL(...) { if (uDebugFlag & DEBUG_INFO) { taosPrintLongString("UTL INFO ", DEBUG_INFO, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
|
||||
#define pError(...) { taosPrintLog("APP ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }
|
||||
#define pPrint(...) { taosPrintLog("APP ", DEBUG_INFO, 255, __VA_ARGS__); }
|
||||
#define pPrint(...) { taosPrintLog("APP INFO ", DEBUG_INFO, 255, __VA_ARGS__); }
|
||||
// clang-format on
|
||||
|
||||
// #define BUF_PAGE_DEBUG
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -25,14 +25,14 @@ extern "C" {
|
|||
// clang-format off
|
||||
#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", DEBUG_FATAL, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscError(...) do { if (cDebugFlag & DEBUG_ERROR) { taosPrintLog("TSC ERROR ", DEBUG_ERROR, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscErrorL(...) do { if (cDebugFlag & DEBUG_ERROR) { taosPrintLongString("TSC ERROR ", DEBUG_ERROR, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscWarn(...) do { if (cDebugFlag & DEBUG_WARN) { taosPrintLog("TSC WARN ", DEBUG_WARN, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscInfo(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC INFO ", DEBUG_INFO, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC DEBUG ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC TRACE ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC PERF ", 0, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscErrorL(...) do { if (cDebugFlag & DEBUG_ERROR) { taosPrintLongString("TSC ERROR ", DEBUG_ERROR, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscWarnL(...) do { if (cDebugFlag & DEBUG_WARN) { taosPrintLongString("TSC WARN ", DEBUG_WARN, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscInfo(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", DEBUG_INFO, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscPerf(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tscLog(...) do { taosPrintLog("TSC ", 0, DEBUG_FILE, __VA_ARGS__); } while(0)
|
||||
#define tscLogL(...) do { taosPrintLongString("TSC ", 0, DEBUG_FILE, __VA_ARGS__); } while(0)
|
||||
// clang-format on
|
||||
|
|
|
@ -205,12 +205,12 @@ extern char *gStmtStatusStr[];
|
|||
} \
|
||||
} while (0)
|
||||
|
||||
#define STMT_FLOG(param, ...) qFatal("stmt:%p " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_ELOG(param, ...) qError("stmt:%p " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_DLOG(param, ...) qDebug("stmt:%p " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_FLOG(param, ...) qFatal("stmt:%p, " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_ELOG(param, ...) qError("stmt:%p, " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_DLOG(param, ...) qDebug("stmt:%p, " param, pStmt, __VA_ARGS__)
|
||||
|
||||
#define STMT_ELOG_E(param) qError("stmt:%p " param, pStmt)
|
||||
#define STMT_DLOG_E(param) qDebug("stmt:%p " param, pStmt)
|
||||
#define STMT_ELOG_E(param) qError("stmt:%p, " param, pStmt)
|
||||
#define STMT_DLOG_E(param) qDebug("stmt:%p, " param, pStmt)
|
||||
|
||||
TAOS_STMT *stmtInit(STscObj* taos, int64_t reqid, TAOS_STMT_OPTIONS* pOptions);
|
||||
int stmtClose(TAOS_STMT *stmt);
|
||||
|
|
|
@ -222,12 +222,12 @@ do { \
|
|||
} while (0)
|
||||
|
||||
|
||||
#define STMT_FLOG(param, ...) qFatal("stmt:%p " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_ELOG(param, ...) qError("stmt:%p " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_DLOG(param, ...) qDebug("stmt:%p " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_FLOG(param, ...) qFatal("stmt:%p, " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_ELOG(param, ...) qError("stmt:%p, " param, pStmt, __VA_ARGS__)
|
||||
#define STMT_DLOG(param, ...) qDebug("stmt:%p, " param, pStmt, __VA_ARGS__)
|
||||
|
||||
#define STMT_ELOG_E(param) qError("stmt:%p " param, pStmt)
|
||||
#define STMT_DLOG_E(param) qDebug("stmt:%p " param, pStmt)
|
||||
#define STMT_ELOG_E(param) qError("stmt:%p, " param, pStmt)
|
||||
#define STMT_DLOG_E(param) qDebug("stmt:%p, " param, pStmt)
|
||||
*/
|
||||
TAOS_STMT2 *stmtInit2(STscObj *taos, TAOS_STMT2_OPTION *pOptions);
|
||||
int stmtClose2(TAOS_STMT2 *stmt);
|
||||
|
|
|
@ -22,42 +22,14 @@
|
|||
#ifndef TDENGINE_JNICOMMON_H
|
||||
#define TDENGINE_JNICOMMON_H
|
||||
|
||||
#define jniFatal(...) \
|
||||
{ \
|
||||
if (jniDebugFlag & DEBUG_FATAL) { \
|
||||
taosPrintLog("JNI FATAL ", DEBUG_FATAL, jniDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
#define jniError(...) \
|
||||
{ \
|
||||
if (jniDebugFlag & DEBUG_ERROR) { \
|
||||
taosPrintLog("JNI ERROR ", DEBUG_ERROR, jniDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
#define jniWarn(...) \
|
||||
{ \
|
||||
if (jniDebugFlag & DEBUG_WARN) { \
|
||||
taosPrintLog("JNI WARN ", DEBUG_WARN, jniDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
#define jniInfo(...) \
|
||||
{ \
|
||||
if (jniDebugFlag & DEBUG_INFO) { \
|
||||
taosPrintLog("JNI ", DEBUG_INFO, jniDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
#define jniDebug(...) \
|
||||
{ \
|
||||
if (jniDebugFlag & DEBUG_DEBUG) { \
|
||||
taosPrintLog("JNI ", DEBUG_DEBUG, jniDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
#define jniTrace(...) \
|
||||
{ \
|
||||
if (jniDebugFlag & DEBUG_TRACE) { \
|
||||
taosPrintLog("JNI ", DEBUG_TRACE, jniDebugFlag, __VA_ARGS__); \
|
||||
} \
|
||||
}
|
||||
// clang-format off
|
||||
#define jniFatal(...) do { if (jniDebugFlag & DEBUG_FATAL) { taosPrintLog("JNI FATAL ", DEBUG_FATAL, jniDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define jniError(...) do { if (jniDebugFlag & DEBUG_ERROR) { taosPrintLog("JNI ERROR ", DEBUG_ERROR, jniDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define jniWarn(...) do { if (jniDebugFlag & DEBUG_WARN) { taosPrintLog("JNI WARN ", DEBUG_WARN, jniDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define jniInfo(...) do { if (jniDebugFlag & DEBUG_INFO) { taosPrintLog("JNI INFO ", DEBUG_INFO, jniDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define jniDebug(...) do { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI DEBUG ", DEBUG_DEBUG, jniDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define jniTrace(...) do { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI TRACE ", DEBUG_TRACE, jniDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
extern jclass g_arrayListClass;
|
||||
extern jmethodID g_arrayListConstructFp;
|
||||
|
|
|
@ -93,8 +93,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {
|
|||
|
||||
int32_t total = atomic_add_fetch_64((int64_t *)&pSummary->totalRequests, 1);
|
||||
int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1);
|
||||
tscDebug("0x%" PRIx64 " new Request from connObj:0x%" PRIx64
|
||||
", current:%d, app current:%d, total:%d,QID:0x%" PRIx64,
|
||||
tscDebug("req:0x%" PRIx64 ", new from connObj:0x%" PRIx64 ", current:%d, app current:%d, total:%d, QID:0x%" PRIx64,
|
||||
pRequest->self, pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
|
||||
}
|
||||
|
||||
|
@ -134,7 +133,7 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int
|
|||
cJSON *json = cJSON_CreateObject();
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (json == NULL) {
|
||||
tscError("[monitor] cJSON_CreateObject failed");
|
||||
tscError("failed to create monitor json");
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
char clusterId[32] = {0};
|
||||
|
@ -255,26 +254,25 @@ static void deregisterRequest(SRequestObj *pRequest) {
|
|||
int32_t reqType = SLOW_LOG_TYPE_OTHERS;
|
||||
|
||||
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
|
||||
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ",QID:0x%" PRIx64
|
||||
" elapsed:%.2f ms, "
|
||||
"current:%d, app current:%d",
|
||||
tscDebug("req:0x%" PRIx64 ", free from connObj:0x%" PRIx64 ", QID:0x%" PRIx64
|
||||
", elapsed:%.2f ms, current:%d, app current:%d",
|
||||
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
|
||||
|
||||
if (TSDB_CODE_SUCCESS == nodesSimAcquireAllocator(pRequest->allocatorRefId)) {
|
||||
if ((pRequest->pQuery && pRequest->pQuery->pRoot && QUERY_NODE_VNODE_MODIFY_STMT == pRequest->pQuery->pRoot->type &&
|
||||
(0 == ((SVnodeModifyOpStmt *)pRequest->pQuery->pRoot)->sqlNodeType)) ||
|
||||
QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
|
||||
tscDebug("insert duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64
|
||||
"us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
|
||||
duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs,
|
||||
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
|
||||
tscDebug("req:0x%" PRIx64 ", insert duration:%" PRId64 "us, parseCost:%" PRId64 "us, ctgCost:%" PRId64
|
||||
"us, analyseCost:%" PRId64 "us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
|
||||
pRequest->self, duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs,
|
||||
pRequest->metric.analyseCostUs, pRequest->metric.planCostUs, pRequest->metric.execCostUs);
|
||||
(void)atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
|
||||
reqType = SLOW_LOG_TYPE_INSERT;
|
||||
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
|
||||
tscDebug("query duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64
|
||||
"us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
|
||||
duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs,
|
||||
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
|
||||
tscDebug("req:0x%" PRIx64 ", query duration:%" PRId64 "us, parseCost:%" PRId64 "us, ctgCost:%" PRId64
|
||||
"us, analyseCost:%" PRId64 "us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
|
||||
pRequest->self, duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs,
|
||||
pRequest->metric.analyseCostUs, pRequest->metric.planCostUs, pRequest->metric.execCostUs);
|
||||
|
||||
(void)atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
|
||||
reqType = SLOW_LOG_TYPE_QUERY;
|
||||
|
@ -299,7 +297,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
|
|||
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
|
||||
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
|
||||
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
|
||||
taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s",
|
||||
taosPrintSlowLog("PID:%d, connId:%u, QID:0x%" PRIx64 ", Start:%" PRId64 "us, Duration:%" PRId64 "us, SQL:%s",
|
||||
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration,
|
||||
pRequest->sqlstr);
|
||||
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) {
|
||||
|
@ -460,7 +458,7 @@ void destroyTscObj(void *pObj) {
|
|||
|
||||
STscObj *pTscObj = pObj;
|
||||
int64_t tscId = pTscObj->id;
|
||||
tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj);
|
||||
tscTrace("connObj:%" PRIx64 ", begin destroy, p:%p", tscId, pTscObj);
|
||||
|
||||
SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
|
||||
hbDeregisterConn(pTscObj, connKey);
|
||||
|
@ -469,7 +467,7 @@ void destroyTscObj(void *pObj) {
|
|||
taosHashCleanup(pTscObj->pRequests);
|
||||
|
||||
schedulerStopQueryHb(pTscObj->pAppInfo->pTransporter);
|
||||
tscDebug("connObj 0x%" PRIx64 " p:%p destroyed, remain inst totalConn:%" PRId64, pTscObj->id, pTscObj,
|
||||
tscDebug("connObj:0x%" PRIx64 ", p:%p destroyed, remain inst totalConn:%" PRId64, pTscObj->id, pTscObj,
|
||||
pTscObj->pAppInfo->numOfConns);
|
||||
|
||||
// In any cases, we should not free app inst here. Or an race condition rises.
|
||||
|
@ -478,7 +476,7 @@ void destroyTscObj(void *pObj) {
|
|||
(void)taosThreadMutexDestroy(&pTscObj->mutex);
|
||||
taosMemoryFree(pTscObj);
|
||||
|
||||
tscTrace("end to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj);
|
||||
tscTrace("connObj:0x%" PRIx64 ", end destroy, p:%p", tscId, pTscObj);
|
||||
}
|
||||
|
||||
int32_t createTscObj(const char *user, const char *auth, const char *db, int32_t connType, SAppInstInfo *pAppInfo,
|
||||
|
@ -518,7 +516,7 @@ int32_t createTscObj(const char *user, const char *auth, const char *db, int32_t
|
|||
|
||||
(void)atomic_add_fetch_64(&(*pObj)->pAppInfo->numOfConns, 1);
|
||||
|
||||
tscDebug("connObj created, 0x%" PRIx64 ",p:%p", (*pObj)->id, *pObj);
|
||||
tscInfo("connObj:0x%" PRIx64 ", created, p:%p", (*pObj)->id, *pObj);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -684,13 +682,13 @@ void doDestroyRequest(void *p) {
|
|||
SRequestObj *pRequest = (SRequestObj *)p;
|
||||
|
||||
uint64_t reqId = pRequest->requestId;
|
||||
tscDebug("begin to destroy request 0x%" PRIx64 " p:%p", reqId, pRequest);
|
||||
tscDebug("QID:0x%" PRIx64 ", begin destroy request, res:%p", reqId, pRequest);
|
||||
|
||||
int64_t nextReqRefId = pRequest->relation.nextRefId;
|
||||
|
||||
int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("failed to remove request from hash, code:%s", tstrerror(code));
|
||||
tscDebug("failed to remove request from hash since %s", tstrerror(code));
|
||||
}
|
||||
schedulerFreeJob(&pRequest->body.queryJob, 0);
|
||||
|
||||
|
@ -726,14 +724,12 @@ void doDestroyRequest(void *p) {
|
|||
taosMemoryFreeClear(pRequest->effectiveUser);
|
||||
taosMemoryFreeClear(pRequest->sqlstr);
|
||||
taosMemoryFree(pRequest);
|
||||
tscDebug("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
|
||||
tscDebug("QID:0x%" PRIx64 ", end destroy request, res:%p", reqId, pRequest);
|
||||
destroyNextReq(nextReqRefId);
|
||||
}
|
||||
|
||||
void destroyRequest(SRequestObj *pRequest) {
|
||||
if (pRequest == NULL) {
|
||||
return;
|
||||
}
|
||||
if (pRequest == NULL) return;
|
||||
|
||||
taos_stop_query(pRequest);
|
||||
(void)removeFromMostPrevReq(pRequest);
|
||||
|
@ -744,12 +740,12 @@ void taosStopQueryImpl(SRequestObj *pRequest) {
|
|||
|
||||
// It is not a query, no need to stop.
|
||||
if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
|
||||
tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
|
||||
tscDebug("QID:0x%" PRIx64 ", no need to be killed since not query", pRequest->requestId);
|
||||
return;
|
||||
}
|
||||
|
||||
schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
|
||||
tscDebug("request %" PRIx64 " killed", pRequest->requestId);
|
||||
tscDebug("QID:0x%" PRIx64 ", killed", pRequest->requestId);
|
||||
}
|
||||
|
||||
void stopAllQueries(SRequestObj *pRequest) {
|
||||
|
@ -857,7 +853,7 @@ static void *tscCrashReportThreadFp(void *param) {
|
|||
truncateFile = true;
|
||||
}
|
||||
} else {
|
||||
tscDebug("no crash info");
|
||||
tscInfo("no crash info was found");
|
||||
}
|
||||
|
||||
taosMemoryFree(pMsg);
|
||||
|
@ -985,13 +981,13 @@ void taos_init_imp(void) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscInfo("starting to initialize TAOS driver");
|
||||
|
||||
SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100};
|
||||
ENV_ERR_RET(catalogInit(&cfg), "failed to init catalog");
|
||||
ENV_ERR_RET(schedulerInit(), "failed to init scheduler");
|
||||
ENV_ERR_RET(initClientId(), "failed to init clientId");
|
||||
|
||||
tscDebug("starting to initialize TAOS driver");
|
||||
|
||||
ENV_ERR_RET(initTaskQueue(), "failed to init task queue");
|
||||
ENV_ERR_RET(fmFuncMgtInit(), "failed to init funcMgt");
|
||||
ENV_ERR_RET(nodesInitAllocatorSet(), "failed to init allocator set");
|
||||
|
@ -1004,7 +1000,7 @@ void taos_init_imp(void) {
|
|||
ENV_ERR_RET(tscCrashReportInit(), "failed to init crash report");
|
||||
ENV_ERR_RET(qInitKeywordsTable(), "failed to init parser keywords table");
|
||||
|
||||
tscDebug("client is initialized successfully");
|
||||
tscInfo("TAOS driver is initialized successfully");
|
||||
}
|
||||
|
||||
int taos_init() {
|
||||
|
|
|
@ -121,7 +121,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
|
|||
pTscObj->authVer = pRsp->version;
|
||||
|
||||
if (pTscObj->sysInfo != pRsp->sysInfo) {
|
||||
tscDebug("update sysInfo of user %s from %" PRIi8 " to %" PRIi8 ", tscRid:%" PRIi64, pRsp->user,
|
||||
tscDebug("update sysInfo of user %s from %" PRIi8 " to %" PRIi8 ", connObj:%" PRIi64, pRsp->user,
|
||||
pTscObj->sysInfo, pRsp->sysInfo, pTscObj->id);
|
||||
pTscObj->sysInfo = pRsp->sysInfo;
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
|
|||
if (passInfo->fp) {
|
||||
(*passInfo->fp)(passInfo->param, &pRsp->passVer, TAOS_NOTIFY_PASSVER);
|
||||
}
|
||||
tscDebug("update passVer of user %s from %d to %d, tscRid:%" PRIi64, pRsp->user, oldVer,
|
||||
tscDebug("update passVer of user %s from %d to %d, connObj:%" PRIi64, pRsp->user, oldVer,
|
||||
atomic_load_32(&passInfo->ver), pTscObj->id);
|
||||
}
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
|
|||
if (whiteListInfo->fp) {
|
||||
(*whiteListInfo->fp)(whiteListInfo->param, &pRsp->whiteListVer, TAOS_NOTIFY_WHITELIST_VER);
|
||||
}
|
||||
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", tscRid:%" PRIi64, pRsp->user,
|
||||
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", connObj:%" PRIi64, pRsp->user,
|
||||
oldVer, atomic_load_64(&whiteListInfo->ver), pTscObj->id);
|
||||
}
|
||||
} else {
|
||||
|
@ -156,7 +156,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
|
|||
SWhiteListInfo *whiteListInfo = &pTscObj->whiteListInfo;
|
||||
int64_t oldVer = atomic_load_64(&whiteListInfo->ver);
|
||||
atomic_store_64(&whiteListInfo->ver, pRsp->whiteListVer);
|
||||
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", tscRid:%" PRIi64, pRsp->user,
|
||||
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", connObj:%" PRIi64, pRsp->user,
|
||||
oldVer, atomic_load_64(&whiteListInfo->ver), pTscObj->id);
|
||||
}
|
||||
releaseTscObj(pReq->connKey.tscRid);
|
||||
|
@ -388,11 +388,11 @@ static int32_t hbprocessTSMARsp(void *value, int32_t valueLen, struct SCatalog *
|
|||
STableTSMAInfo *pTsmaInfo = taosArrayGetP(hbRsp.pTsmas, i);
|
||||
|
||||
if (!pTsmaInfo->pFuncs) {
|
||||
tscDebug("hb to remove tsma: %s.%s", pTsmaInfo->dbFName, pTsmaInfo->name);
|
||||
tscDebug("hb to remove tsma:%s.%s", pTsmaInfo->dbFName, pTsmaInfo->name);
|
||||
code = catalogRemoveTSMA(pCatalog, pTsmaInfo);
|
||||
tFreeAndClearTableTSMAInfo(pTsmaInfo);
|
||||
} else {
|
||||
tscDebug("hb to update tsma: %s.%s", pTsmaInfo->dbFName, pTsmaInfo->name);
|
||||
tscDebug("hb to update tsma:%s.%s", pTsmaInfo->dbFName, pTsmaInfo->name);
|
||||
code = catalogUpdateTSMA(pCatalog, &pTsmaInfo);
|
||||
tFreeAndClearTableTSMAInfo(pTsmaInfo);
|
||||
}
|
||||
|
@ -452,7 +452,7 @@ static void hbProcessQueryRspKvs(int32_t kvNum, SArray *pKvs, struct SCatalog *p
|
|||
break;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != hbProcessDynViewRsp(kv->value, kv->valueLen, pCatalog)) {
|
||||
tscError("Process dyn view response failed, len: %d, value: %p", kv->valueLen, kv->value);
|
||||
tscError("Process dyn view response failed, len:%d, value:%p", kv->valueLen, kv->value);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
@ -463,7 +463,7 @@ static void hbProcessQueryRspKvs(int32_t kvNum, SArray *pKvs, struct SCatalog *p
|
|||
break;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != hbProcessViewInfoRsp(kv->value, kv->valueLen, pCatalog)) {
|
||||
tscError("Process view info response failed, len: %d, value: %p", kv->valueLen, kv->value);
|
||||
tscError("Process view info response failed, len:%d, value:%p", kv->valueLen, kv->value);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
@ -471,10 +471,10 @@ static void hbProcessQueryRspKvs(int32_t kvNum, SArray *pKvs, struct SCatalog *p
|
|||
#endif
|
||||
case HEARTBEAT_KEY_TSMA: {
|
||||
if (kv->valueLen <= 0 || !kv->value) {
|
||||
tscError("Invalid tsma info, len: %d, value: %p", kv->valueLen, kv->value);
|
||||
tscError("Invalid tsma info, len:%d, value:%p", kv->valueLen, kv->value);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != hbprocessTSMARsp(kv->value, kv->valueLen, pCatalog)) {
|
||||
tscError("Process tsma info response failed, len: %d, value: %p", kv->valueLen, kv->value);
|
||||
tscError("Process tsma info response failed, len:%d, value:%p", kv->valueLen, kv->value);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -512,14 +512,14 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
|
|||
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
|
||||
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
|
||||
pTscObj->connId = pRsp->query->connId;
|
||||
tscTrace("conn %u hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes,
|
||||
tscTrace("connId:%u, hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes,
|
||||
pTscObj->pAppInfo->totalDnodes);
|
||||
|
||||
if (pRsp->query->killRid) {
|
||||
tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
|
||||
tscDebug("QID:%" PRIx64 ", need to be killed now", pRsp->query->killRid);
|
||||
SRequestObj *pRequest = acquireRequest(pRsp->query->killRid);
|
||||
if (NULL == pRequest) {
|
||||
tscDebug("request 0x%" PRIx64 " not exist to kill", pRsp->query->killRid);
|
||||
tscDebug("QID:0x%" PRIx64 ", not exist to kill", pRsp->query->killRid);
|
||||
} else {
|
||||
taos_stop_query((TAOS_RES *)pRequest);
|
||||
(void)releaseRequest(pRsp->query->killRid);
|
||||
|
@ -548,7 +548,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
|
|||
struct SCatalog *pCatalog = NULL;
|
||||
int32_t code = catalogGetHandle(pReq->clusterId, &pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pReq->clusterId, tstrerror(code));
|
||||
tscWarn("catalogGetHandle failed, clusterId:0x%" PRIx64 ", error:%s", pReq->clusterId, tstrerror(code));
|
||||
} else {
|
||||
hbProcessQueryRspKvs(kvNum, pRsp->info, pCatalog, pAppHbMgr);
|
||||
}
|
||||
|
@ -576,7 +576,7 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
|
|||
int32_t delta = abs(now - pRsp.svrTimestamp);
|
||||
if (delta > timestampDeltaLimit) {
|
||||
code = TSDB_CODE_TIME_UNSYNCED;
|
||||
tscError("time diff: %ds is too big", delta);
|
||||
tscError("time diff:%ds is too big", delta);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -608,7 +608,7 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
|
|||
|
||||
pInst->serverCfg.monitorParas = pRsp.monitorParas;
|
||||
pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete;
|
||||
tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId,
|
||||
tscDebug("monitor paras from hb, clusterId:0x%" PRIx64 ", threshold:%d scope:%d", pInst->clusterId,
|
||||
pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope);
|
||||
|
||||
if (rspNum) {
|
||||
|
@ -703,7 +703,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
|
|||
int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
|
||||
STscObj *pTscObj = (STscObj *)acquireTscObj(connKey->tscRid);
|
||||
if (NULL == pTscObj) {
|
||||
tscWarn("tscObj rid %" PRIx64 " not exist", connKey->tscRid);
|
||||
tscWarn("tscObj rid 0x%" PRIx64 " not exist", connKey->tscRid);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
|
@ -751,7 +751,7 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
|
|||
static int32_t hbGetUserAuthInfo(SClientHbKey *connKey, SHbParam *param, SClientHbReq *req) {
|
||||
STscObj *pTscObj = (STscObj *)acquireTscObj(connKey->tscRid);
|
||||
if (!pTscObj) {
|
||||
tscWarn("tscObj rid %" PRIx64 " not exist", connKey->tscRid);
|
||||
tscWarn("tscObj rid 0x%" PRIx64 " not exist", connKey->tscRid);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
|
@ -1041,7 +1041,7 @@ int32_t hbGetExpiredTSMAInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, S
|
|||
tsma->version = htonl(tsma->version);
|
||||
}
|
||||
|
||||
tscDebug("hb got %d expred tsmas, valueLen: %lu", tsmaNum, sizeof(STSMAVersion) * tsmaNum);
|
||||
tscDebug("hb got %d expred tsmas, valueLen:%lu", tsmaNum, sizeof(STSMAVersion) * tsmaNum);
|
||||
|
||||
if (!pReq->info) {
|
||||
pReq->info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);
|
||||
|
@ -1081,33 +1081,33 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
|
|||
|
||||
code = hbGetQueryBasicInfo(connKey, req);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscWarn("hbGetQueryBasicInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetQueryBasicInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
if (hbParam->reqCnt == 0) {
|
||||
code = catalogGetHandle(hbParam->clusterId, &pCatalog);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("catalogGetHandle failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = hbGetAppInfo(hbParam->clusterId, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("getAppInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("getAppInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
if (!taosHashGet(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(hbParam->clusterId))) {
|
||||
code = hbGetExpiredUserInfo(connKey, pCatalog, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetExpiredUserInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetExpiredUserInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
if (clientHbMgr.appHbHash) {
|
||||
code = taosHashPut(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(uint64_t), NULL, 0);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbQueryHbReqHandle put clusterId failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId,
|
||||
tscWarn("hbQueryHbReqHandle put clusterId failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
@ -1118,7 +1118,7 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
|
|||
if (2 != atomic_load_8(&hbParam->pAppHbMgr->connHbFlag)) {
|
||||
code = hbGetUserAuthInfo(connKey, hbParam, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetUserAuthInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetUserAuthInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
atomic_store_8(&hbParam->pAppHbMgr->connHbFlag, 1);
|
||||
|
@ -1126,32 +1126,32 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
|
|||
|
||||
code = hbGetExpiredDBInfo(connKey, pCatalog, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetExpiredDBInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetExpiredDBInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = hbGetExpiredStbInfo(connKey, pCatalog, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetExpiredStbInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetExpiredStbInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
#ifdef TD_ENTERPRISE
|
||||
code = hbGetExpiredViewInfo(connKey, pCatalog, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetExpiredViewInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetExpiredViewInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
#endif
|
||||
code = hbGetExpiredTSMAInfo(connKey, pCatalog, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetExpiredTSMAInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetExpiredTSMAInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
code = hbGetAppInfo(hbParam->clusterId, req);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
tscWarn("hbGetAppInfo failed, clusterId:%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
tscWarn("hbGetAppInfo failed, clusterId:0x%" PRIx64 ", error:%s", hbParam->clusterId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
@ -1537,7 +1537,7 @@ int32_t hbMgrInit() {
|
|||
if (old == 1) return 0;
|
||||
|
||||
clientHbMgr.appId = tGenIdPI64();
|
||||
tscDebug("app %" PRIx64 " initialized", clientHbMgr.appId);
|
||||
tscInfo("app initialized, appId:0x%" PRIx64, clientHbMgr.appId);
|
||||
|
||||
clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
|
||||
if (NULL == clientHbMgr.appSummary) {
|
||||
|
|
|
@ -171,7 +171,7 @@ int32_t taos_connect_internal(const char* ip, const char* user, const char* pass
|
|||
}
|
||||
p->instKey = key;
|
||||
key = NULL;
|
||||
tscDebug("new app inst mgr %p, user:%s, ip:%s, port:%d", p, user, epSet.epSet.eps[0].fqdn, epSet.epSet.eps[0].port);
|
||||
tscInfo("new app inst mgr:%p, user:%s, ip:%s, port:%d", p, user, epSet.epSet.eps[0].fqdn, epSet.epSet.eps[0].port);
|
||||
|
||||
pInst = &p;
|
||||
} else {
|
||||
|
@ -227,7 +227,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
|
|||
|
||||
(*pRequest)->sqlstr = taosMemoryMalloc(sqlLen + 1);
|
||||
if ((*pRequest)->sqlstr == NULL) {
|
||||
tscError("0x%" PRIx64 " failed to prepare sql string buffer, %s", (*pRequest)->self, sql);
|
||||
tscError("req:0x%" PRIx64 ", failed to prepare sql string buffer, %s", (*pRequest)->self, sql);
|
||||
destroyRequest(*pRequest);
|
||||
*pRequest = NULL;
|
||||
return terrno;
|
||||
|
@ -245,7 +245,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
|
|||
int32_t err = taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self,
|
||||
sizeof((*pRequest)->self));
|
||||
if (err) {
|
||||
tscError("%" PRId64 " failed to add to request container,QID:0x%" PRIx64 ", conn:%" PRId64 ", %s",
|
||||
tscError("req:0x%" PRId64 ", failed to add to request container, QID:0x%" PRIx64 ", connObj:%" PRId64 ", %s",
|
||||
(*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
|
||||
destroyRequest(*pRequest);
|
||||
*pRequest = NULL;
|
||||
|
@ -256,7 +256,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
|
|||
if (tsQueryUseNodeAllocator && !qIsInsertValuesSql((*pRequest)->sqlstr, (*pRequest)->sqlLen)) {
|
||||
if (TSDB_CODE_SUCCESS !=
|
||||
nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
|
||||
tscError("%" PRId64 " failed to create node allocator,QID:0x%" PRIx64 ", conn:%" PRId64 ", %s", (*pRequest)->self,
|
||||
tscError("req:0x%" PRId64 ", failed to create node allocator, QID:0x%" PRIx64 ", connObj:%" PRId64 ", %s", (*pRequest)->self,
|
||||
(*pRequest)->requestId, pTscObj->id, sql);
|
||||
destroyRequest(*pRequest);
|
||||
*pRequest = NULL;
|
||||
|
@ -264,7 +264,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
|
|||
}
|
||||
}
|
||||
|
||||
tscDebugL("0x%" PRIx64 " SQL: %s,QID:0x%" PRIx64, (*pRequest)->self, (*pRequest)->sqlstr, (*pRequest)->requestId);
|
||||
tscDebugL("req:0x%" PRIx64 ", QID:0x%" PRIx64 ", build request", (*pRequest)->self, (*pRequest)->requestId);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -381,10 +381,10 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
|
|||
|
||||
if (pRequest->code != TSDB_CODE_SUCCESS) {
|
||||
pResultInfo->numOfRows = 0;
|
||||
tscError("0x%" PRIx64 " fetch results failed, code:%s,QID:0x%" PRIx64, pRequest->self, tstrerror(code),
|
||||
tscError("req:0x%" PRIx64 ", fetch results failed, code:%s, QID:0x%" PRIx64, pRequest->self, tstrerror(code),
|
||||
pRequest->requestId);
|
||||
} else {
|
||||
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d,QID:0x%" PRIx64,
|
||||
tscDebug("req:0x%" PRIx64 ", fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, QID:0x%" PRIx64,
|
||||
pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
|
||||
pRequest->requestId);
|
||||
}
|
||||
|
@ -1030,7 +1030,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
|||
break;
|
||||
}
|
||||
default:
|
||||
tscError("0x%" PRIx64 ", invalid exec result for request type %d,QID:0x%" PRIx64, pRequest->self, pRequest->type,
|
||||
tscError("req:0x%" PRIx64 ", invalid exec result for request type:%d, QID:0x%" PRIx64, pRequest->self, pRequest->type,
|
||||
pRequest->requestId);
|
||||
code = TSDB_CODE_APP_ERROR;
|
||||
}
|
||||
|
@ -1075,7 +1075,7 @@ void returnToUser(SRequestObj* pRequest) {
|
|||
(void)releaseRequest(pRequest->relation.userRefId);
|
||||
return;
|
||||
} else {
|
||||
tscError("0x%" PRIx64 ", user ref 0x%" PRIx64 " is not there,QID:0x%" PRIx64, pRequest->self,
|
||||
tscError("req:0x%" PRIx64 ", user ref 0x%" PRIx64 " is not there, QID:0x%" PRIx64, pRequest->self,
|
||||
pRequest->relation.userRefId, pRequest->requestId);
|
||||
}
|
||||
}
|
||||
|
@ -1146,7 +1146,7 @@ void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
|
|||
SSDataBlock* pBlock = NULL;
|
||||
pRequest->code = createResultBlock(res, rowNum, &pBlock);
|
||||
if (TSDB_CODE_SUCCESS != pRequest->code) {
|
||||
tscError("0x%" PRIx64 ", create result block failed,QID:0x%" PRIx64 " %s", pRequest->self, pRequest->requestId,
|
||||
tscError("req:0x%" PRIx64 ", create result block failed, QID:0x%" PRIx64 " %s", pRequest->self, pRequest->requestId,
|
||||
tstrerror(pRequest->code));
|
||||
returnToUser(pRequest);
|
||||
return;
|
||||
|
@ -1157,7 +1157,7 @@ void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
|
|||
continuePostSubQuery(pNextReq, pBlock);
|
||||
(void)releaseRequest(pRequest->relation.nextRefId);
|
||||
} else {
|
||||
tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there,QID:0x%" PRIx64, pRequest->self,
|
||||
tscError("req:0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, QID:0x%" PRIx64, pRequest->self,
|
||||
pRequest->relation.nextRefId, pRequest->requestId);
|
||||
}
|
||||
|
||||
|
@ -1176,7 +1176,7 @@ void handlePostSubQuery(SSqlCallbackWrapper* pWrapper) {
|
|||
continuePostSubQuery(pNextReq, NULL);
|
||||
(void)releaseRequest(pRequest->relation.nextRefId);
|
||||
} else {
|
||||
tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there,QID:0x%" PRIx64, pRequest->self,
|
||||
tscError("req:0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, QID:0x%" PRIx64, pRequest->self,
|
||||
pRequest->relation.nextRefId, pRequest->requestId);
|
||||
}
|
||||
}
|
||||
|
@@ -1208,23 +1208,23 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
}
taosMemoryFree(pResult);
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%s,QID:0x%" PRIx64, pRequest->self, tstrerror(code),
tscDebug("req:0x%" PRIx64 ", enter scheduler exec cb, code:%s, QID:0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d,QID:0x%" PRIx64, pRequest->self,
tscDebug("req:0x%" PRIx64 ", client retry to handle the error, code:%s, tryCount:%d, QID:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->retry, pRequest->requestId);
if (TSDB_CODE_SUCCESS != removeMeta(pTscObj, pRequest->targetTableList, IS_VIEW_REQUEST(pRequest->type))) {
tscError("0x%" PRIx64 " remove meta failed,QID:0x%" PRIx64, pRequest->self, pRequest->requestId);
tscError("req:0x%" PRIx64 ", remove meta failed, QID:0x%" PRIx64, pRequest->self, pRequest->requestId);
}
restartAsyncQuery(pRequest, code);
return;
}
tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type));
tscTrace("req:0x%" PRIx64 ", scheduler exec cb, request type:%s", pRequest->self, TMSG_INFO(pRequest->type));
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
if (TSDB_CODE_SUCCESS != removeMeta(pTscObj, pRequest->targetTableList, IS_VIEW_REQUEST(pRequest->type))) {
tscError("0x%" PRIx64 " remove meta failed,QID:0x%" PRIx64, pRequest->self, pRequest->requestId);
tscError("req:0x%" PRIx64 ", remove meta failed, QID:0x%" PRIx64, pRequest->self, pRequest->requestId);
}
}
@@ -1322,7 +1322,7 @@ void launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
int ret = removeMeta(pRequest->pTscObj, pRequest->targetTableList, IS_VIEW_REQUEST(pRequest->type));
if (TSDB_CODE_SUCCESS != ret) {
tscError("0x%" PRIx64 " remove meta failed,code:%d,QID:0x%" PRIx64, pRequest->self, ret, pRequest->requestId);
tscError("req:0x%" PRIx64 ", remove meta failed,code:%d, QID:0x%" PRIx64, pRequest->self, ret, pRequest->requestId);
}
}
@@ -1370,7 +1370,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
}
if (code) {
tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
tscError("req:0x%" PRIx64 ", failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
} else {
pRequest->body.subplanNum = pDag->numOfSubplans;
@@ -1414,7 +1414,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
taosArrayDestroy(pNodeList);
} else {
qDestroyQueryPlan(pDag);
tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
tscDebug("req:0x%" PRIx64 ", plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
@@ -1472,7 +1472,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
doRequestCallback(pRequest, 0);
break;
default:
tscError("0x%" PRIx64 " invalid execMode %d", pRequest->self, pQuery->execMode);
tscError("req:0x%" PRIx64 ", invalid execMode %d", pRequest->self, pQuery->execMode);
doRequestCallback(pRequest, -1);
break;
}
@@ -1659,7 +1659,7 @@ int32_t taosConnectImpl(const char* user, const char* auth, const char* db, __ta
*pTscObj = NULL;
return terrno;
} else {
tscDebug("0x%" PRIx64 " connection is opening, connId:%u, dnodeConn:%p,QID:0x%" PRIx64, (*pTscObj)->id,
tscInfo("connObj:0x%" PRIx64 ", connection is opening, connId:%u, dnodeConn:%p, QID:0x%" PRIx64, (*pTscObj)->id,
(*pTscObj)->connId, (*pTscObj)->pAppInfo->pTransporter, pRequest->requestId);
destroyRequest(pRequest);
}
@@ -1754,14 +1754,14 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
SCatalog* pCatalog = NULL;
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId,
tscError("fail to get catalog handle, clusterId:0x%" PRIx64 ", error:%s", pTscObj->pAppInfo->clusterId,
tstrerror(code));
return;
}
code = catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet);
if (code != TSDB_CODE_SUCCESS) {
tscError("fail to update catalog vg epset, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId,
tscError("fail to update catalog vg epset, clusterId:0x%" PRIx64 ", error:%s", pTscObj->pAppInfo->clusterId,
tstrerror(code));
return;
}
@@ -1789,14 +1789,14 @@ int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) {
char tbuf[40] = {0};
TRACE_TO_STR(trace, tbuf);
tscDebug("processMsgFromServer handle %p, message: %s, size:%d, code: %s,QID:%s", pMsg->info.handle,
TMSG_INFO(pMsg->msgType), pMsg->contLen, tstrerror(pMsg->code), tbuf);
tscDebug("QID:%s, process message from server, handle:%p, message:%s, size:%d, code:%s", tbuf, pMsg->info.handle,
TMSG_INFO(pMsg->msgType), pMsg->contLen, tstrerror(pMsg->code));
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
if (pRequest) {
if (pRequest->self != pSendInfo->requestObjRefId) {
tscError("doProcessMsgFromServer pRequest->self:%" PRId64 " != pSendInfo->requestObjRefId:%" PRId64,
tscError("doProcessMsgFromServer req:0x%" PRId64 " != pSendInfo->requestObjRefId:0x%" PRId64,
pRequest->self, pSendInfo->requestObjRefId);
if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) {
@@ -1904,7 +1904,7 @@ _exit:
}
TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) {
tscDebug("try to connect to %s:%u by auth, user:%s db:%s", ip, port, user, db);
tscInfo("try to connect to %s:%u by auth, user:%s db:%s", ip, port, user, db);
if (user == NULL) {
user = TSDB_DEFAULT_USER;
}
@@ -2000,7 +2000,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
return NULL;
}
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d,QID:0x%" PRIx64,
tscDebug("req:0x%" PRIx64 ", fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, QID:0x%" PRIx64,
pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId);
STscObj* pTscObj = pRequest->pTscObj;
@@ -2953,7 +2953,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s
return NULL;
}
tscDebug("taos_query start with sql:%s", sql);
tscDebug("connObj:0x%" PRIx64 ", taos_query start with sql:%s", *(int64_t*)taos, sql);
SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (NULL == param) {
@@ -2984,7 +2984,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s
}
taosMemoryFree(param);
tscDebug("taos_query end with sql:%s", sql);
tscDebug("connObj:0x%" PRIx64 ", res:%p created, taos_query end", *(int64_t*)taos, pRequest);
return pRequest;
}
@@ -3025,7 +3025,7 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s,QID:0x%" PRIx64, pRequest->self, code, tstrerror(code),
tscDebug("req:0x%" PRIx64 ", enter scheduler fetch cb, code:%d - %s, QID:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
pResultInfo->pData = pResult;
@@ -3048,10 +3048,10 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp*)pResultInfo->pData, pResultInfo->convertUcs4);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
tscError("0x%" PRIx64 " fetch results failed, code:%s,QID:0x%" PRIx64, pRequest->self, tstrerror(pRequest->code),
tscError("req:0x%" PRIx64 ", fetch results failed, code:%s, QID:0x%" PRIx64, pRequest->self, tstrerror(pRequest->code),
pRequest->requestId);
} else {
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d,QID:0x%" PRIx64,
tscDebug("req:0x%" PRIx64 ", fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, QID:0x%" PRIx64,
pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
pRequest->requestId);
@@ -154,7 +154,7 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *e
}
jniGetGlobalMethod(env);
jniDebug("jni initialized successfully, config directory: %s", configDir);
jniDebug("jni initialized successfully, config directory:%s", configDir);
}
JNIEXPORT jobject createTSDBException(JNIEnv *env, int code, char *msg) {
@@ -453,7 +453,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
}
jint ret = taos_affected_rows((TAOS_RES *)res);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res,
jniDebug("jobj:%p, conn:%p, sql:%p, res:%p, affect rows:%d", jobj, tscon, (TAOS *)con, (TAOS_RES *)res,
(int32_t)ret);
return ret;
@@ -279,7 +279,7 @@ setConfRet taos_set_config(const char *config) {
}
TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
tscDebug("try to connect to %s:%u, user:%s db:%s", ip, port, user, db);
tscInfo("try to connect to %s:%u, user:%s db:%s", ip, port, user, db);
if (user == NULL) {
user = TSDB_DEFAULT_USER;
}
@@ -478,10 +478,10 @@ void taos_close_internal(void *taos) {
}
STscObj *pTscObj = (STscObj *)taos;
tscDebug("0x%" PRIx64 " try to close connection, numOfReq:%d", pTscObj->id, pTscObj->numOfReqs);
tscDebug("connObj:0x%" PRIx64 ", try to close connection, numOfReq:%d", pTscObj->id, pTscObj->numOfReqs);
if (TSDB_CODE_SUCCESS != taosRemoveRef(clientConnRefPool, pTscObj->id)) {
tscError("0x%" PRIx64 " failed to remove ref from conn pool", pTscObj->id);
tscError("connObj:0x%" PRIx64 ", failed to remove ref from conn pool", pTscObj->id);
}
}
@@ -535,14 +535,15 @@ void taos_free_result(TAOS_RES *res) {
return;
}
tscDebug("taos free res %p", res);
tscTrace("res:%p, will be freed", res);
if (TD_RES_QUERY(res)) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
tscDebug("QID:0x%" PRIx64 ", call taos_free_result to free query", pRequest->requestId);
destroyRequest(pRequest);
return;
}
SMqRspObj *pRsp = (SMqRspObj *)res;
if (TD_RES_TMQ(res)) {
tDeleteMqDataRsp(&pRsp->dataRsp);
@@ -1158,7 +1159,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
qDebug("0x%" PRIx64 " start to semantic analysis,QID:0x%" PRIx64, pRequest->self, pRequest->requestId);
qDebug("req:0x%" PRIx64 ", start to semantic analysis, QID:0x%" PRIx64, pRequest->self, pRequest->requestId);
int64_t analyseStart = taosGetTimestampUs();
pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
@@ -1277,14 +1278,14 @@ void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d,QID:0x%" PRIx64,
tscDebug("req:0x%" PRIx64 ", client retry to handle the error, code:%d - %s, tryCount:%d, QID:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
restartAsyncQuery(pRequest, code);
return;
}
// return to app directly
tscError("0x%" PRIx64 " error occurs, code:%s, return to user app,QID:0x%" PRIx64, pRequest->self, tstrerror(code),
tscError("req:0x%" PRIx64 ", error occurs, code:%s, return to user app, QID:0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
pRequest->code = code;
returnToUser(pRequest);
@@ -1334,7 +1335,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
SQuery *pQuery = pRequest->pQuery;
pRequest->metric.ctgCostUs += taosGetTimestampUs() - pRequest->metric.ctgStart;
qDebug("0x%" PRIx64 " start to continue parse,QID:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId,
qDebug("req:0x%" PRIx64 ", start to continue parse, QID:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId,
tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {
@@ -1347,7 +1348,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
}
if (TSDB_CODE_SUCCESS != code) {
tscError("0x%" PRIx64 " error happens, code:%d - %s,QID:0x%" PRIx64, pWrapper->pRequest->self, code,
tscError("req:0x%" PRIx64 ", error happens, code:%d - %s, QID:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
@@ -1364,7 +1365,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
}
if (TSDB_CODE_SUCCESS != code) {
tscError("0x%" PRIx64 " error happens, code:%d - %s,QID:0x%" PRIx64, pWrapper->pRequest->self, code,
tscError("req:0x%" PRIx64 ", error happens, code:%d - %s, QID:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
@@ -1470,7 +1471,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
code = pRequest->prevCode;
terrno = code;
pRequest->code = code;
tscDebug("call sync query cb with code: %s", tstrerror(code));
tscDebug("req:0x%" PRIx64 ", call sync query cb with code:%s", pRequest->self, tstrerror(code));
doRequestCallback(pRequest, code);
return;
}
@@ -1485,7 +1486,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
if (TSDB_CODE_SUCCESS != code) {
tscError("0x%" PRIx64 " error happens, code:%d - %s,QID:0x%" PRIx64, pRequest->self, code, tstrerror(code),
tscError("req:0x%" PRIx64 ", error happens, code:%d - %s, QID:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
@@ -1493,11 +1494,11 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d,QID:0x%" PRIx64,
tscDebug("req:0x%" PRIx64 ", client retry to handle the error, code:%d - %s, tryCount:%d, QID:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
code = refreshMeta(pRequest->pTscObj, pRequest);
if (code != 0) {
tscWarn("0x%" PRIx64 " refresh meta failed, code:%d - %s,QID:0x%" PRIx64, pRequest->self, code, tstrerror(code),
tscWarn("req:0x%" PRIx64 ", refresh meta failed, code:%d - %s, QID:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
}
pRequest->prevCode = code;
@@ -1512,7 +1513,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
tscInfo("restart request: %s p: %p", pRequest->sqlstr, pRequest);
tscInfo("restart request:%s p:%p", pRequest->sqlstr, pRequest);
SRequestObj *pUserReq = pRequest;
(void)acquireRequest(pRequest->self);
while (pUserReq) {
@@ -2183,7 +2184,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col
SSHashObj *hashTbnames = tSimpleHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR));
if (NULL == hashTbnames) {
tscError("stmt2 bind failed: %s", tstrerror(terrno));
tscError("stmt2 bind failed, %s", tstrerror(terrno));
return terrno;
}
@@ -2193,7 +2194,7 @@ int taos_stmt2_bind_param(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t col
if (pStmt->sql.stbInterlaceMode) {
if (tSimpleHashGet(hashTbnames, bindv->tbnames[i], strlen(bindv->tbnames[i])) != NULL) {
code = terrno = TSDB_CODE_PAR_TBNAME_DUPLICATED;
tscError("stmt2 bind failed: %s %s", tstrerror(terrno), bindv->tbnames[i]);
tscError("stmt2 bind failed, %s %s", tstrerror(terrno), bindv->tbnames[i]);
goto out;
}
@@ -2259,10 +2260,6 @@ int taos_stmt2_bind_param_a(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t c
}
STscStmt2 *pStmt = (STscStmt2 *)stmt;
if (atomic_load_8((int8_t *)&pStmt->asyncBindParam.asyncBindNum) > 0) {
tscError("async bind param is still working, please try again later");
return TSDB_CODE_TSC_STMT_API_ERROR;
}
ThreadArgs *args = (ThreadArgs *)taosMemoryMalloc(sizeof(ThreadArgs));
args->stmt = stmt;
@@ -2270,14 +2267,23 @@ int taos_stmt2_bind_param_a(TAOS_STMT2 *stmt, TAOS_STMT2_BINDV *bindv, int32_t c
args->col_idx = col_idx;
args->fp = fp;
args->param = param;
(void)taosThreadMutexLock(&(pStmt->asyncBindParam.mutex));
if (atomic_load_8((int8_t *)&pStmt->asyncBindParam.asyncBindNum) > 0) {
(void)taosThreadMutexUnlock(&(pStmt->asyncBindParam.mutex));
tscError("async bind param is still working, please try again later");
return TSDB_CODE_TSC_STMT_API_ERROR;
}
(void)atomic_add_fetch_8(&pStmt->asyncBindParam.asyncBindNum, 1);
(void)taosThreadMutexUnlock(&(pStmt->asyncBindParam.mutex));
int code_s = taosStmt2AsyncBind(stmtAsyncBindThreadFunc, (void *)args);
if (code_s != TSDB_CODE_SUCCESS) {
(void)taosThreadMutexLock(&(pStmt->asyncBindParam.mutex));
(void)taosThreadCondSignal(&(pStmt->asyncBindParam.waitCond));
(void)atomic_sub_fetch_8(&pStmt->asyncBindParam.asyncBindNum, 1);
(void)taosThreadMutexUnlock(&(pStmt->asyncBindParam.mutex));
// terrno = TAOS_SYSTEM_ERROR(errno);
tscError("async bind failed, code:%d , %s", code_s, tstrerror(code_s));
}
return code_s;
@@ -97,7 +97,7 @@ static void monitorFreeSlowLogDataEx(void* paras) {
static SAppInstInfo* getAppInstByClusterId(int64_t clusterId) {
void* p = taosHashGet(appInfo.pInstMapByClusterId, &clusterId, LONG_BYTES);
if (p == NULL) {
tscError("failed to get app inst, clusterId:%" PRIx64, clusterId);
tscError("failed to get app inst, clusterId:0x%" PRIx64, clusterId);
return NULL;
}
return *(SAppInstInfo**)p;
@@ -114,7 +114,7 @@ static int32_t monitorReportAsyncCB(void* param, SDataBuf* pMsg, int32_t code) {
if (param != NULL) {
MonitorSlowLogData* p = (MonitorSlowLogData*)param;
if (code != 0) {
tscError("failed to send slow log:%s, clusterId:%" PRIx64, p->data, p->clusterId);
tscError("failed to send slow log:%s, clusterId:0x%" PRIx64, p->data, p->clusterId);
}
MonitorSlowLogData tmp = {.clusterId = p->clusterId,
.type = p->type,
@@ -241,7 +241,7 @@ void monitorCreateClient(int64_t clusterId) {
MonitorClient* pMonitor = NULL;
taosWLockLatch(&monitorLock);
if (taosHashGet(monitorCounterHash, &clusterId, LONG_BYTES) == NULL) {
tscInfo("[monitor] monitorCreateClient for %" PRIx64, clusterId);
tscInfo("clusterId:0x%" PRIx64 ", create monitor", clusterId);
pMonitor = taosMemoryCalloc(1, sizeof(MonitorClient));
if (pMonitor == NULL) {
tscError("failed to create monitor client");
@@ -293,7 +293,7 @@ void monitorCreateClient(int64_t clusterId) {
tscError("failed to start timer");
goto fail;
}
tscInfo("[monitor] monitorCreateClient for %" PRIx64 "finished %p.", clusterId, pMonitor);
tscInfo("clusterId:0x%" PRIx64 ", create monitor finished, monitor:%p", clusterId, pMonitor);
}
taosWUnLockLatch(&monitorLock);
@@ -319,7 +319,7 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, const char*
tscError("failed to add metric to collector");
int r = taos_counter_destroy(newCounter);
if (r) {
tscError("failed to destroy counter, code: %d", r);
tscError("failed to destroy counter, code:%d", r);
}
goto end;
}
@@ -327,11 +327,11 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, const char*
tscError("failed to put counter to monitor");
int r = taos_counter_destroy(newCounter);
if (r) {
tscError("failed to destroy counter, code: %d", r);
tscError("failed to destroy counter, code:%d", r);
}
goto end;
}
tscInfo("[monitor] monitorCreateClientCounter %" PRIx64 "(%p):%s : %p.", pMonitor->clusterId, pMonitor, name,
tscInfo("clusterId:0x%" PRIx64 ", monitor:%p, create counter:%s %p", pMonitor->clusterId, pMonitor, name,
newCounter);
end:
@@ -347,21 +347,21 @@ void monitorCounterInc(int64_t clusterId, const char* counterName, const char**
MonitorClient** ppMonitor = (MonitorClient**)taosHashGet(monitorCounterHash, &clusterId, LONG_BYTES);
if (ppMonitor == NULL || *ppMonitor == NULL) {
tscError("monitorCounterInc not found pMonitor %" PRId64, clusterId);
tscError("clusterId:0x%" PRIx64 ", monitor not found", clusterId);
goto end;
}
MonitorClient* pMonitor = *ppMonitor;
taos_counter_t** ppCounter = (taos_counter_t**)taosHashGet(pMonitor->counters, counterName, strlen(counterName));
if (ppCounter == NULL || *ppCounter == NULL) {
tscError("monitorCounterInc not found pCounter %" PRIx64 ":%s.", clusterId, counterName);
tscError("clusterId:0x%" PRIx64 ", monitor:%p counter:%s not found", clusterId, pMonitor, counterName);
goto end;
}
if (taos_counter_inc(*ppCounter, label_values) != 0) {
tscError("monitorCounterInc failed to inc %" PRIx64 ":%s.", clusterId, counterName);
tscError("clusterId:0x%" PRIx64 ", monitor:%p counter:%s inc failed", clusterId, pMonitor, counterName);
goto end;
}
tscDebug("[monitor] monitorCounterInc %" PRIx64 "(%p):%s", pMonitor->clusterId, pMonitor, counterName);
tscDebug("clusterId:0x%" PRIx64 ", monitor:%p, counter:%s inc", pMonitor->clusterId, pMonitor, counterName);
end:
taosWUnLockLatch(&monitorLock);
@@ -379,11 +379,11 @@ static void monitorWriteSlowLog2File(MonitorSlowLogData* slowLogData, char* tmpP
char path[PATH_MAX] = {0};
char clusterId[32] = {0};
if (snprintf(clusterId, sizeof(clusterId), "%" PRIx64, slowLogData->clusterId) < 0) {
tscError("failed to generate clusterId:%" PRIx64, slowLogData->clusterId);
tscError("failed to generate clusterId:0x%" PRIx64, slowLogData->clusterId);
return;
}
taosGetTmpfilePath(tmpPath, clusterId, path);
tscInfo("[monitor] create slow log file:%s", path);
tscInfo("monitor create slow log file:%s", path);
pFile = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND | TD_FILE_READ | TD_FILE_TRUNC);
if (pFile == NULL) {
tscError("failed to open file:%s since %d", path, terrno);
@@ -404,7 +404,7 @@ static void monitorWriteSlowLog2File(MonitorSlowLogData* slowLogData, char* tmpP
pClient->offset = 0;
pClient->pFile = pFile;
if (taosHashPut(monitorSlowLogHash, &slowLogData->clusterId, LONG_BYTES, &pClient, POINTER_BYTES) != 0) {
tscError("failed to put clusterId:%" PRId64 " to hash table", slowLogData->clusterId);
tscError("failed to put clusterId:0x%" PRIx64 " to hash table", slowLogData->clusterId);
int32_t ret = taosCloseFile(&pFile);
if (ret != 0) {
tscError("failed to close file:%p ret:%d", pFile, ret);
@@ -422,19 +422,19 @@ static void monitorWriteSlowLog2File(MonitorSlowLogData* slowLogData, char* tmpP
}
if (taosLSeekFile(pFile, 0, SEEK_END) < 0) {
tscError("failed to seek file:%p code: %d", pFile, terrno);
tscError("failed to seek file:%p code:%d", pFile, terrno);
return;
}
if (taosWriteFile(pFile, slowLogData->data, strlen(slowLogData->data) + 1) < 0) {
tscError("failed to write len to file:%p since %s", pFile, terrstr());
}
tscDebug("[monitor] write slow log to file:%p, clusterId:%" PRIx64, pFile, slowLogData->clusterId);
tscDebug("monitor write slow log to file:%p, clusterId:0x%" PRIx64, pFile, slowLogData->clusterId);
}
static char* readFile(TdFilePtr pFile, int64_t* offset, int64_t size) {
tscDebug("[monitor] readFile slow begin pFile:%p, offset:%" PRId64 ", size:%" PRId64, pFile, *offset, size);
tscDebug("monitor readFile slow begin pFile:%p, offset:%" PRId64 ", size:%" PRId64, pFile, *offset, size);
if (taosLSeekFile(pFile, *offset, SEEK_SET) < 0) {
tscError("failed to seek file:%p code: %d", pFile, terrno);
tscError("failed to seek file:%p code:%d", pFile, terrno);
return NULL;
}
@@ -481,7 +481,7 @@ static char* readFile(TdFilePtr pFile, int64_t* offset, int64_t size) {
*offset += (len + 1);
}
tscDebug("[monitor] readFile slow log end, data:%s, offset:%" PRId64, pCont, *offset);
tscDebug("monitor readFile slow log end, data:%s, offset:%" PRId64, pCont, *offset);
return pCont;
}
@@ -525,7 +525,7 @@ static int32_t monitorReadSend(int64_t clusterId, TdFilePtr pFile, int64_t* offs
SLOW_LOG_QUEUE_TYPE type, char* fileName) {
SAppInstInfo* pInst = getAppInstByClusterId(clusterId);
if (pInst == NULL) {
tscError("failed to get app instance by clusterId:%" PRId64, clusterId);
tscError("failed to get app instance by clusterId:0x%" PRIx64, clusterId);
if (taosCloseFile(&pFile) != 0) {
tscError("failed to close file:%p", pFile);
}
@@ -546,13 +546,13 @@ static void monitorSendSlowLogAtBeginning(int64_t clusterId, char** fileName, Td
int64_t size = getFileSize(*fileName);
if (size <= offset) {
processFileInTheEnd(pFile, *fileName);
tscDebug("[monitor] monitorSendSlowLogAtBeginning delete file:%s", *fileName);
tscDebug("monitor delete file:%s", *fileName);
} else {
int32_t code = monitorReadSend(clusterId, pFile, &offset, size, SLOW_LOG_READ_BEGINNIG, *fileName);
if (code == 0) {
tscDebug("[monitor] monitorSendSlowLogAtBeginning send slow log succ, clusterId:%" PRId64, clusterId);
tscDebug("monitor send slow log succ, clusterId:0x%" PRIx64, clusterId);
} else {
tscError("[monitor] monitorSendSlowLogAtBeginning send slow log failed, clusterId:%" PRId64 ",ret:%d", clusterId,
tscError("monitor send slow log failed, clusterId:0x%" PRIx64 ", ret:%d", clusterId,
code);
}
*fileName = NULL;
@@ -562,24 +562,24 @@ static void monitorSendSlowLogAtBeginning(int64_t clusterId, char** fileName, Td
static void monitorSendSlowLogAtRunning(int64_t clusterId) {
void* tmp = taosHashGet(monitorSlowLogHash, &clusterId, LONG_BYTES);
if (tmp == NULL) {
tscError("failed to get slow log client by clusterId:%" PRId64, clusterId);
tscError("failed to get slow log client by clusterId:0x%" PRIx64, clusterId);
return;
}
SlowLogClient* pClient = (*(SlowLogClient**)tmp);
if (pClient == NULL) {
tscError("failed to get slow log client by clusterId:%" PRId64, clusterId);
tscError("failed to get slow log client by clusterId:0x%" PRIx64, clusterId);
return;
}
int64_t size = getFileSize(pClient->path);
if (size <= pClient->offset) {
if (taosFtruncateFile(pClient->pFile, 0) < 0) {
tscError("failed to truncate file:%p code: %d", pClient->pFile, terrno);
tscError("failed to truncate file:%p code:%d", pClient->pFile, terrno);
}
tscDebug("[monitor] monitorSendSlowLogAtRunning truncate file to 0 file:%p", pClient->pFile);
tscDebug("monitor truncate file to 0 file:%p", pClient->pFile);
pClient->offset = 0;
} else {
int32_t code = monitorReadSend(clusterId, pClient->pFile, &pClient->offset, size, SLOW_LOG_READ_RUNNING, NULL);
tscDebug("[monitor] monitorSendSlowLogAtRunning send slow log clusterId:%" PRId64 ",ret:%d", clusterId, code);
tscDebug("monitor send slow log clusterId:0x%" PRIx64 ", ret:%d", clusterId, code);
}
}
@@ -596,13 +596,13 @@ static bool monitorSendSlowLogAtQuit(int64_t clusterId) {
if (size <= pClient->offset) {
processFileInTheEnd(pClient->pFile, pClient->path);
pClient->pFile = NULL;
tscInfo("[monitor] monitorSendSlowLogAtQuit remove file:%s", pClient->path);
tscInfo("monitor remove file:%s", pClient->path);
if ((--quitCnt) == 0) {
return true;
}
} else {
int32_t code = monitorReadSend(clusterId, pClient->pFile, &pClient->offset, size, SLOW_LOG_READ_QUIT, NULL);
tscDebug("[monitor] monitorSendSlowLogAtQuit send slow log clusterId:%" PRId64 ",ret:%d", clusterId, code);
tscDebug("monitor send slow log clusterId:0x%" PRIx64 ", ret:%d", clusterId, code);
}
return false;
}
@@ -620,7 +620,7 @@ static void monitorSendAllSlowLogAtQuit() {
} else if (pClient->offset == 0) {
int64_t* clusterId = (int64_t*)taosHashGetKey(pIter, NULL);
int32_t code = monitorReadSend(*clusterId, pClient->pFile, &pClient->offset, size, SLOW_LOG_READ_QUIT, NULL);
tscDebug("[monitor] monitorSendAllSlowLogAtQuit send slow log clusterId:%" PRId64 ",ret:%d", *clusterId, code);
tscDebug("monitor send slow log clusterId:0x%" PRIx64 ", ret:%d", *clusterId, code);
if (code == 0) {
quitCnt++;
}
@@ -669,7 +669,7 @@ static void monitorSendAllSlowLog() {
int64_t size = getFileSize(pClient->path);
if (size <= 0) {
if (size < 0) {
tscError("[monitor] monitorSendAllSlowLog failed to get file size:%s, err:%d", pClient->path, errno);
tscError("monitor failed to get file size:%s, err:%d", pClient->path, errno);
if (errno == ENOENT) {
processFileRemoved(pClient);
}
@@ -677,7 +677,7 @@ static void monitorSendAllSlowLog() {
continue;
}
int32_t code = monitorReadSend(*clusterId, pClient->pFile, &pClient->offset, size, SLOW_LOG_READ_RUNNING, NULL);
tscDebug("[monitor] monitorSendAllSlowLog send slow log clusterId:%" PRId64 ",ret:%d", *clusterId, code);
tscDebug("monitor send slow log clusterId:0x%" PRIx64 ", ret:%d", *clusterId, code);
}
}
}
@@ -686,7 +686,7 @@ static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) {
SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId);
if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) {
tscInfo("[monitor] monitor is disabled, skip send slow log");
tscInfo("monitor is disabled, skip send slow log");
return;
}
char namePrefix[PATH_MAX] = {0};
@@ -755,7 +755,7 @@ static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) {
static void* monitorThreadFunc(void* param) {
setThreadName("client-monitor-slowlog");
tscDebug("monitorThreadFunc start");
tscInfo("monitor update thread started");
int64_t quitTime = 0;
while (1) {
if (atomic_load_32(&monitorFlag) == 1) {
@@ -837,7 +837,7 @@ static void tscMonitorStop() {
int32_t monitorInit() {
int32_t code = 0;
tscInfo("[monitor] tscMonitor init");
tscInfo("monitor init");
monitorCounterHash =
(SHashObj*)taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
if (monitorCounterHash == NULL) {
@@ -887,7 +887,7 @@ int32_t monitorInit() {
}
void monitorClose() {
tscInfo("[monitor] tscMonitor close");
tscInfo("monitor close");
taosWLockLatch(&monitorLock);
atomic_store_32(&monitorFlag, 1);
tscMonitorStop();
@@ -907,17 +907,17 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) {
MonitorSlowLogData* slowLogData = NULL;
if (atomic_load_32(&monitorFlag) == 1) {
tscError("[monitor] slow log thread is exiting");
tscError("monitor slow log thread is exiting");
return -1;
}
code = taosAllocateQitem(sizeof(MonitorSlowLogData), DEF_QITEM, 0, (void**)&slowLogData);
if (code) {
tscError("[monitor] failed to allocate slow log data");
tscError("monitor failed to allocate slow log data");
return code;
}
*slowLogData = data;
tscDebug("[monitor] write slow log to queue, clusterId:%" PRIx64 " type:%s, data:%s", slowLogData->clusterId,
tscDebug("monitor write slow log to queue, clusterId:0x%" PRIx64 " type:%s, data:%s", slowLogData->clusterId,
queueTypeStr[slowLogData->type], slowLogData->data);
if (taosWriteQitem(monitorQueue, slowLogData) == 0) {
if (tsem2_post(&monitorSem) != 0) {
@@ -943,7 +943,7 @@ int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) {
int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) {
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
tscError("[del report]failed to allocate memory for sendInfo");
tscError("[del report] failed to allocate memory for sendInfo");
return terrno;
}
@@ -959,7 +959,7 @@ int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t request
int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
if (code != 0) {
tscError("[del report]failed to send msg to server, code:%d", code);
tscError("[del report] failed to send msg to server, code:%d", code);
taosMemoryFree(sendInfo);
return code;
}
@@ -971,22 +971,22 @@ static void reportDeleteSql(SRequestObj* pRequest) {
STscObj* pTscObj = pRequest->pTscObj;
if (pTscObj == NULL || pTscObj->pAppInfo == NULL) {
tscError("[del report]invalid tsc obj");
tscError("[del report] invalid tsc obj");
return;
}
if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) {
tscDebug("[del report]audit delete is disabled");
tscDebug("[del report] audit delete is disabled");
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
tscDebug("[del report]delete request result code:%d", pRequest->code);
tscDebug("[del report] delete request result code:%d", pRequest->code);
return;
}
if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) {
tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable));
tscError("[del report] invalid from table node type:%d", nodeType(pStmt->pFromTable));
return;
}
@@ -1000,28 +1000,28 @@ static void reportDeleteSql(SRequestObj* pRequest) {
int32_t tlen = tSerializeSAuditReq(NULL, 0, &req);
void* pReq = taosMemoryCalloc(1, tlen);
if (pReq == NULL) {
tscError("[del report]failed to allocate memory for req");
tscError("[del report] failed to allocate memory for req");
return;
}
if (tSerializeSAuditReq(pReq, tlen, &req) < 0) {
tscError("[del report]failed to serialize req");
tscError("[del report] failed to serialize req");
taosMemoryFree(pReq);
return;
}
int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId);
if (code != 0) {
tscError("[del report]failed to send audit info, code:%d", code);
tscError("[del report] failed to send audit info, code:%d", code);
taosMemoryFree(pReq);
return;
}
tscDebug("[del report]delete data, sql:%s", req.pSql);
tscDebug("[del report] delete data, sql:%s", req.pSql);
}
void clientOperateReport(SRequestObj* pRequest) {
if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) {
tscError("[del report]invalid request");
tscError("[del report] invalid request");
return;
}
@@ -81,7 +81,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", td_version, connectRsp.sVer);
tscError("version not compatible. client version:%s, server version:%s", td_version, connectRsp.sVer);
goto End;
}
@@ -123,7 +123,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
for (int32_t i = 0; i < connectRsp.epSet.numOfEps; ++i) {
tscDebug("0x%" PRIx64 " epSet.fqdn[%d]:%s port:%d, connObj:0x%" PRIx64, pRequest->requestId, i,
tscDebug("QID:0x%" PRIx64 ", epSet.fqdn[%d]:%s port:%d, connObj:0x%" PRIx64, pRequest->requestId, i,
connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id);
}
@@ -137,7 +137,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas;
pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete;
tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d",
tscDebug("monitor paras from connect rsp, clusterId:0x%" PRIx64 ", threshold:%d scope:%d",
connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope);
lastClusterId = connectRsp.clusterId;
@@ -164,7 +164,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
SAppHbMgr* pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
if (pAppHbMgr) {
if (hbRegisterConn(pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType) != 0) {
tscError("0x%" PRIx64 " failed to register conn to hbMgr", pRequest->requestId);
tscError("QID:0x%" PRIx64 ", failed to register conn to hbMgr", pRequest->requestId);
}
} else {
(void)taosThreadMutexUnlock(&clientHbMgr.lock);
@@ -173,7 +173,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
(void)taosThreadMutexUnlock(&clientHbMgr.lock);
tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
tscDebug("QID:0x%" PRIx64 ", clusterId:0x%" PRIx64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
pTscObj->pAppInfo->numOfConns);
End:
@@ -229,11 +229,11 @@ int32_t processCreateDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
char dbFName[TSDB_DB_FNAME_LEN];
(void)snprintf(dbFName, sizeof(dbFName) - 1, "%d.%s", pTscObj->acctId, TSDB_INFORMATION_SCHEMA_DB);
if (catalogRefreshDBVgInfo(pCatalog, &conn, dbFName) != 0) {
tscError("0x%" PRIx64 " failed to refresh db vg info", pRequest->requestId);
tscError("QID:0x%" PRIx64 ", failed to refresh db vg info", pRequest->requestId);
}
(void)snprintf(dbFName, sizeof(dbFName) - 1, "%d.%s", pTscObj->acctId, TSDB_PERFORMANCE_SCHEMA_DB);
if (catalogRefreshDBVgInfo(pCatalog, &conn, dbFName) != 0) {
tscError("0x%" PRIx64 " failed to refresh db vg info", pRequest->requestId);
tscError("QID:0x%" PRIx64 ", failed to refresh db vg info", pRequest->requestId);
}
}
}
@@ -254,7 +254,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
TSDB_CODE_MND_DB_IN_DROPPING == code) {
SUseDbRsp usedbRsp = {0};
if (tDeserializeSUseDbRsp(pMsg->pData, pMsg->len, &usedbRsp) != 0) {
tscError("0x%" PRIx64 " deserialize SUseDbRsp failed", pRequest->requestId);
tscError("QID:0x%" PRIx64 ", deserialize SUseDbRsp failed", pRequest->requestId);
}
struct SCatalog* pCatalog = NULL;
@@ -262,11 +262,11 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
int64_t clusterId = pRequest->pTscObj->pAppInfo->clusterId;
int32_t code1 = catalogGetHandle(clusterId, &pCatalog);
if (code1 != TSDB_CODE_SUCCESS) {
tscWarn("0x%" PRIx64 "catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->requestId, clusterId,
tscWarn("QID:0x%" PRIx64 ", catalogGetHandle failed, clusterId:0x%" PRIx64 ", error:%s", pRequest->requestId, clusterId,
tstrerror(code1));
} else {
if (catalogRemoveDB(pCatalog, usedbRsp.db, usedbRsp.uid) != 0) {
tscError("0x%" PRIx64 "catalogRemoveDB failed, db:%s, uid:%" PRId64, pRequest->requestId, usedbRsp.db,
tscError("QID:0x%" PRIx64 ", catalogRemoveDB failed, db:%s, uid:%" PRId64, pRequest->requestId, usedbRsp.db,
usedbRsp.uid);
}
}
@@ -293,7 +293,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
SUseDbRsp usedbRsp = {0};
if (tDeserializeSUseDbRsp(pMsg->pData, pMsg->len, &usedbRsp) != 0) {
tscError("0x%" PRIx64 " deserialize SUseDbRsp failed", pRequest->requestId);
tscError("QID:0x%" PRIx64 ", deserialize SUseDbRsp failed", pRequest->requestId);
}
if (strlen(usedbRsp.db) == 0) {
@@ -321,7 +321,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
SName name = {0};
if (tNameFromString(&name, usedbRsp.db, T_NAME_ACCT | T_NAME_DB) != TSDB_CODE_SUCCESS) {
tscError("0x%" PRIx64 " failed to parse db name:%s", pRequest->requestId, usedbRsp.db);
tscError("QID:0x%" PRIx64 ", failed to parse db name:%s", pRequest->requestId, usedbRsp.db);
}
SUseDbOutput output = {0};
@@ -330,17 +330,17 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
terrno = code;
if (output.dbVgroup) taosHashCleanup(output.dbVgroup->vgHash);
tscError("0x%" PRIx64 " failed to build use db output since %s", pRequest->requestId, terrstr());
tscError("QID:0x%" PRIx64 ", failed to build use db output since %s", pRequest->requestId, terrstr());
} else if (output.dbVgroup && output.dbVgroup->vgHash) {
struct SCatalog* pCatalog = NULL;
int32_t code1 = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
if (code1 != TSDB_CODE_SUCCESS) {
tscWarn("catalogGetHandle failed, clusterId:%" PRIx64 ", error:%s", pRequest->pTscObj->pAppInfo->clusterId,
tscWarn("catalogGetHandle failed, clusterId:0x%" PRIx64 ", error:%s", pRequest->pTscObj->pAppInfo->clusterId,
tstrerror(code1));
} else {
if (catalogUpdateDBVgInfo(pCatalog, output.db, output.dbId, output.dbVgroup) != 0) {
tscError("0x%" PRIx64 " failed to update db vg info, db:%s, dbId:%" PRId64, pRequest->requestId, output.db,
tscError("QID:0x%" PRIx64 ", failed to update db vg info, db:%s, dbId:%" PRId64, pRequest->requestId, output.db,
output.dbId);
}
output.dbVgroup = NULL;
@@ -352,7 +352,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
char db[TSDB_DB_NAME_LEN] = {0};
if (tNameGetDbName(&name, db) != TSDB_CODE_SUCCESS) {
tscError("0x%" PRIx64 " failed to get db name since %s", pRequest->requestId, tstrerror(code));
tscError("QID:0x%" PRIx64 ", failed to get db name since %s", pRequest->requestId, tstrerror(code));
}
setConnectionDB(pRequest->pTscObj, db);
@@ -436,13 +436,13 @@ int32_t processDropDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
} else {
SDropDbRsp dropdbRsp = {0};
if (tDeserializeSDropDbRsp(pMsg->pData, pMsg->len, &dropdbRsp) != 0) {
tscError("0x%" PRIx64 " deserialize SDropDbRsp failed", pRequest->requestId);
tscError("QID:0x%" PRIx64 ", deserialize SDropDbRsp failed", pRequest->requestId);
}
struct SCatalog* pCatalog = NULL;
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
if (TSDB_CODE_SUCCESS == code) {
if (catalogRemoveDB(pCatalog, dropdbRsp.db, dropdbRsp.uid) != 0) {
tscError("0x%" PRIx64 " failed to remove db:%s", pRequest->requestId, dropdbRsp.db);
tscError("QID:0x%" PRIx64 ", failed to remove db:%s", pRequest->requestId, dropdbRsp.db);
}
STscObj* pTscObj = pRequest->pTscObj;
@@ -453,11 +453,11 @@ int32_t processDropDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
(void)snprintf(dbFName, sizeof(dbFName) - 1, "%d.%s", pTscObj->acctId, TSDB_INFORMATION_SCHEMA_DB);
if (catalogRefreshDBVgInfo(pCatalog, &conn, dbFName) != TSDB_CODE_SUCCESS) {
tscError("0x%" PRIx64 " failed to refresh db vg info, db:%s", pRequest->requestId, dbFName);
tscError("QID:0x%" PRIx64 ", failed to refresh db vg info, db:%s", pRequest->requestId, dbFName);
}
(void)snprintf(dbFName, sizeof(dbFName) - 1, "%d.%s", pTscObj->acctId, TSDB_PERFORMANCE_SCHEMA_DB);
if (catalogRefreshDBVgInfo(pCatalog, &conn, dbFName) != 0) {
tscError("0x%" PRIx64 " failed to refresh db vg info, db:%s", pRequest->requestId, dbFName);
tscError("QID:0x%" PRIx64 ", failed to refresh db vg info, db:%s", pRequest->requestId, dbFName);
}
}
}
@@ -47,7 +47,7 @@
} \
} while (0)
#define LOG_ID_TAG "connId:0x%" PRIx64 ",QID:0x%" PRIx64
#define LOG_ID_TAG "connId:0x%" PRIx64 ", QID:0x%" PRIx64
#define LOG_ID_VALUE *(int64_t*)taos, pRequest->requestId
#define TMQ_META_VERSION "1.0"
@@ -410,7 +410,7 @@ END:
int32_t smlParseEndTelnetJsonFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv) {
int32_t code = 0;
int32_t lino = 0;
uDebug("SML:0x%" PRIx64 " %s format true, ts:%" PRId64, info->id, __FUNCTION__ , kvTs->i);
uDebug("SML:0x%" PRIx64 ", %s format true, ts:%" PRId64, info->id, __FUNCTION__ , kvTs->i);
SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0, info->taos->optionInfo.charsetCxt));
SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, 1, info->taos->optionInfo.charsetCxt));
SML_CHECK_CODE(smlBuildRow(info->currTableDataCtx));
@@ -423,7 +423,7 @@ END:
int32_t smlParseEndTelnetJsonUnFormat(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv) {
int32_t code = 0;
int32_t lino = 0;
uDebug("SML:0x%" PRIx64 " %s format false, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
uDebug("SML:0x%" PRIx64 ", %s format false, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
if (elements->colArray == NULL) {
elements->colArray = taosArrayInit(16, sizeof(SSmlKv));
SML_CHECK_NULL(elements->colArray);
@@ -437,7 +437,7 @@ END:
int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs) {
if (info->dataFormat) {
uDebug("SML:0x%" PRIx64 " %s format true, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
uDebug("SML:0x%" PRIx64 ", %s format true, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0, info->taos->optionInfo.charsetCxt);
if (ret == TSDB_CODE_SUCCESS) {
ret = smlBuildRow(info->currTableDataCtx);
@@ -446,11 +446,11 @@ int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs)
clearColValArraySml(info->currTableDataCtx->pValues);
taosArrayClearP(info->escapedStringList, NULL);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uError("SML:0x%" PRIx64 " %s smlBuildCol error:%d", info->id, __FUNCTION__, ret);
uError("SML:0x%" PRIx64 ", %s smlBuildCol error:%d", info->id, __FUNCTION__, ret);
return ret;
}
} else {
uDebug("SML:0x%" PRIx64 " %s format false, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
uDebug("SML:0x%" PRIx64 ", %s format false, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
taosArraySet(elements->colArray, 0, kvTs);
}
info->preLine = *elements;
@@ -697,7 +697,7 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL;
if (index) {
if (colField[*index].type != kv->type) {
snprintf(info->msgBuf.buf, info->msgBuf.len, "SML:0x%" PRIx64 " %s point type and db type mismatch. db type: %s, point type: %s, key: %s",
snprintf(info->msgBuf.buf, info->msgBuf.len, "SML:0x%" PRIx64 ", %s point type and db type mismatch, db type:%s, point type:%s, key:%s",
info->id, __FUNCTION__, tDataTypes[colField[*index].type].name, tDataTypes[kv->type].name, kv->key);
uError("%s", info->msgBuf.buf);
return TSDB_CODE_SML_INVALID_DATA;
@@ -954,7 +954,7 @@ static int32_t smlCreateTable(SSmlHandle *info, SRequestConnInfo *conn, SSmlSTab
SArray *pColumns = NULL;
SArray *pTags = NULL;
SML_CHECK_CODE(smlCheckAuth(info, conn, NULL, AUTH_TYPE_WRITE));
uDebug("SML:0x%" PRIx64 " %s create table:%s", info->id, __FUNCTION__, pName->tname);
uDebug("SML:0x%" PRIx64 ", %s create table:%s", info->id, __FUNCTION__, pName->tname);
pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
SML_CHECK_NULL(pColumns);
pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
@@ -1005,7 +1005,7 @@ static int32_t smlModifyTag(SSmlHandle *info, SHashObj* hashTmpCheck, SHashObj*
if (action != SCHEMA_ACTION_NULL) {
SML_CHECK_CODE(smlCheckAuth(info, conn, pName->tname, AUTH_TYPE_WRITE));
uDebug("SML:0x%" PRIx64 " %s change table tag, table:%s, action:%d", info->id, __FUNCTION__, pName->tname,
uDebug("SML:0x%" PRIx64 ", %s change table tag, table:%s, action:%d", info->id, __FUNCTION__, pName->tname,
action);
SML_CHECK_CODE(smlBuildFields(&pColumns, &pTags, *pTableMeta, sTableData));
SML_CHECK_CODE(smlBuildFieldsList(info, (*pTableMeta)->schema, hashTmp, sTableData->tags, pTags,
@@ -1036,7 +1036,7 @@ static int32_t smlModifyCols(SSmlHandle *info, SHashObj* hashTmpCheck, SHashObj*
if (action != SCHEMA_ACTION_NULL) {
SML_CHECK_CODE(smlCheckAuth(info, conn, pName->tname, AUTH_TYPE_WRITE));
uDebug("SML:0x%" PRIx64 " %s change table col, table:%s, action:%d", info->id, __FUNCTION__, pName->tname,
uDebug("SML:0x%" PRIx64 ", %s change table col, table:%s, action:%d", info->id, __FUNCTION__, pName->tname,
action);
SML_CHECK_CODE(smlBuildFields(&pColumns, &pTags, *pTableMeta, sTableData));
SML_CHECK_CODE(smlBuildFieldsList(info, (*pTableMeta)->schema, hashTmp, sTableData->cols, pColumns,
@@ -1068,7 +1068,7 @@ END:
}
static int32_t smlModifyDBSchemas(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " %s start, format:%d, needModifySchema:%d", info->id, __FUNCTION__, info->dataFormat,
uDebug("SML:0x%" PRIx64 ", %s start, format:%d, needModifySchema:%d", info->id, __FUNCTION__, info->dataFormat,
info->needModifySchema);
if (info->dataFormat && !info->needModifySchema) {
return TSDB_CODE_SUCCESS;
@@ -1132,7 +1132,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
colHashTmp = NULL;
tagHashTmp = NULL;
} else {
uError("SML:0x%" PRIx64 " %s load table meta error: %s", info->id, __FUNCTION__, tstrerror(code));
uError("SML:0x%" PRIx64 ", %s load table meta error:%s", info->id, __FUNCTION__, tstrerror(code));
goto END;
}
@@ -1143,11 +1143,11 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
taosMemoryFreeClear(sTableData->tableMeta);
sTableData->tableMeta = pTableMeta;
uDebug("SML:0x%" PRIx64 " %s modify schema uid:%" PRIu64 ", sversion:%d, tversion:%d", info->id, __FUNCTION__, pTableMeta->uid,
uDebug("SML:0x%" PRIx64 ", %s modify schema uid:%" PRIu64 ", sversion:%d, tversion:%d", info->id, __FUNCTION__, pTableMeta->uid,
pTableMeta->sversion, pTableMeta->tversion);
tmp = (SSmlSTableMeta **)taosHashIterate(info->superTables, tmp);
}
uDebug("SML:0x%" PRIx64 " %s end success, format:%d, needModifySchema:%d", info->id, __FUNCTION__, info->dataFormat,
uDebug("SML:0x%" PRIx64 ", %s end success, format:%d, needModifySchema:%d", info->id, __FUNCTION__, info->dataFormat,
info->needModifySchema);
return TSDB_CODE_SUCCESS;
@@ -1158,7 +1158,7 @@ END:
taosHashCleanup(tagHashTmp);
taosMemoryFreeClear(pTableMeta);
(void)catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); // ignore refresh meta code if there is an error
uError("SML:0x%" PRIx64 " %s end failed:%d:%s, format:%d, needModifySchema:%d", info->id, __FUNCTION__, code,
uError("SML:0x%" PRIx64 ", %s end failed:%d:%s, format:%d, needModifySchema:%d", info->id, __FUNCTION__, code,
tstrerror(code), info->dataFormat, info->needModifySchema);
return code;
@@ -1356,7 +1356,7 @@ END:
}
static int32_t smlParseEnd(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " %s start, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat,
uDebug("SML:0x%" PRIx64 ", %s start, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat,
info->lineNum);
int32_t code = 0;
int32_t lino = 0;
@@ -1376,7 +1376,7 @@ static int32_t smlParseEnd(SSmlHandle *info) {
}
if (tinfo == NULL) {
uError("SML:0x%" PRIx64 "get oneTable failed, line num:%d", info->id, i);
uError("SML:0x%" PRIx64 ", get oneTable failed, line num:%d", info->id, i);
smlBuildInvalidDataMsg(&info->msgBuf, "get oneTable failed", elements->measure);
return TSDB_CODE_SML_INVALID_DATA;
}
@@ -1396,14 +1396,14 @@ static int32_t smlParseEnd(SSmlHandle *info) {
SSmlSTableMeta **tableMeta =
(SSmlSTableMeta **)taosHashGet(info->superTables, elements->measure, elements->measureLen);
if (tableMeta) { // update meta
uDebug("SML:0x%" PRIx64 " %s update meta, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat,
uDebug("SML:0x%" PRIx64 ", %s update meta, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat,
info->lineNum);
SML_CHECK_CODE(smlUpdateMeta((*tableMeta)->colHash, (*tableMeta)->cols, elements->colArray, false, &info->msgBuf,
(*tableMeta)->tagHash));
SML_CHECK_CODE(smlUpdateMeta((*tableMeta)->tagHash, (*tableMeta)->tags, tinfo->tags, true, &info->msgBuf,
(*tableMeta)->colHash));
} else {
uDebug("SML:0x%" PRIx64 " %s add meta, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat,
uDebug("SML:0x%" PRIx64 ", %s add meta, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat,
info->lineNum);
SSmlSTableMeta *meta = NULL;
SML_CHECK_CODE(smlBuildSTableMeta(info->dataFormat, &meta));
@@ -1416,7 +1416,7 @@ static int32_t smlParseEnd(SSmlHandle *info) {
SML_CHECK_CODE(smlInsertMeta(meta->colHash, meta->cols, elements->colArray, meta->tagHash));
}
}
uDebug("SML:0x%" PRIx64 " %s end, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat, info->lineNum);
uDebug("SML:0x%" PRIx64 ", %s end, format:%d, linenum:%d", info->id, __FUNCTION__, info->dataFormat, info->lineNum);
END:
RETURN
@@ -1427,7 +1427,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
int32_t lino = 0;
char *measure = NULL;
SSmlTableInfo **oneTable = NULL;
uDebug("SML:0x%" PRIx64 " %s start, format:%d", info->id, __FUNCTION__, info->dataFormat);
uDebug("SML:0x%" PRIx64 ", %s start, format:%d", info->id, __FUNCTION__, info->dataFormat);
if (info->pRequest->dbList == NULL) {
info->pRequest->dbList = taosArrayInit(1, TSDB_DB_FNAME_LEN);
@@ -1476,14 +1476,14 @@ static int32_t smlInsertData(SSmlHandle *info) {
SSmlSTableMeta **pMeta =
(SSmlSTableMeta **)taosHashGet(info->superTables, tableData->sTableName, tableData->sTableNameLen);
if (unlikely(NULL == pMeta || NULL == *pMeta || NULL == (*pMeta)->tableMeta)) {
uError("SML:0x%" PRIx64 " %s NULL == pMeta. table name: %s", info->id, __FUNCTION__, tableData->childTableName);
uError("SML:0x%" PRIx64 ", %s NULL == pMeta. table name:%s", info->id, __FUNCTION__, tableData->childTableName);
SML_CHECK_CODE(TSDB_CODE_SML_INTERNAL_ERROR);
}
// use tablemeta of stable to save vgid and uid of child table
(*pMeta)->tableMeta->vgId = vg.vgId;
(*pMeta)->tableMeta->uid = tableData->uid; // one table merge data block together according uid
uDebug("SML:0x%" PRIx64 " %s table:%s, uid:%" PRIu64 ", format:%d", info->id, __FUNCTION__, pName.tname,
uDebug("SML:0x%" PRIx64 ", %s table:%s, uid:%" PRIu64 ", format:%d", info->id, __FUNCTION__, pName.tname,
tableData->uid, info->dataFormat);
SML_CHECK_CODE(smlBindData(info->pQuery, info->dataFormat, tableData->tags, (*pMeta)->cols, tableData->cols,
@ -1501,7 +1501,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
|
||||
launchQueryImpl(info->pRequest, info->pQuery, true, NULL); // no need to check return code
|
||||
|
||||
uDebug("SML:0x%" PRIx64 " %s end, format:%d, code:%d,%s", info->id, __FUNCTION__, info->dataFormat, info->pRequest->code,
|
||||
uDebug("SML:0x%" PRIx64 ", %s end, format:%d, code:%d,%s", info->id, __FUNCTION__, info->dataFormat, info->pRequest->code,
|
||||
tstrerror(info->pRequest->code));
|
||||
|
||||
return info->pRequest->code;
|
||||
|
@ -1558,15 +1558,15 @@ END:
|
|||
static void printRaw(int64_t id, int lineNum, int numLines, ELogLevel level, char* data, int32_t len){
|
||||
char *print = taosMemoryMalloc(len + 1);
|
||||
if (print == NULL) {
|
||||
uError("SML:0x%" PRIx64 " smlParseLine failed. code : %d", id, terrno);
|
||||
uError("SML:0x%" PRIx64 ", smlParseLine failed. code :%d", id, terrno);
|
||||
return;
|
||||
}
|
||||
(void)memcpy(print, data, len);
|
||||
print[len] = '\0';
|
||||
if (level == DEBUG_DEBUG){
|
||||
uDebug("SML:0x%" PRIx64 " smlParseLine is raw, line %d/%d : %s", id, lineNum, numLines, print);
|
||||
uDebug("SML:0x%" PRIx64 ", smlParseLine is raw, line %d/%d :%s", id, lineNum, numLines, print);
|
||||
}else if (level == DEBUG_ERROR){
|
||||
uError("SML:0x%" PRIx64 " smlParseLine failed. line %d/%d : %s", id, lineNum, numLines, print);
|
||||
uError("SML:0x%" PRIx64 ", smlParseLine failed. line %d/%d :%s", id, lineNum, numLines, print);
|
||||
}
|
||||
taosMemoryFree(print);
|
||||
}
|
||||
|
@ -1592,7 +1592,7 @@ static bool getLine(SSmlHandle *info, char *lines[], char **rawLine, char *rawLi
|
|||
if (*rawLine != NULL && (uDebugFlag & DEBUG_DEBUG)) {
|
||||
printRaw(info->id, i, numLines, DEBUG_DEBUG, *tmp, *len);
|
||||
} else {
|
||||
uDebug("SML:0x%" PRIx64 " smlParseLine is not raw, line %d/%d : %s", info->id, i, numLines, *tmp);
|
||||
uDebug("SML:0x%" PRIx64 ", smlParseLine is not raw, line %d/%d :%s", info->id, i, numLines, *tmp);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -1612,7 +1612,7 @@ static int32_t smlParseJson(SSmlHandle *info, char *lines[], char *rawLine) {
|
|||
}
|
||||
|
||||
static int32_t smlParseStart(SSmlHandle *info, char *lines[], char *rawLine, char *rawLineEnd, int numLines) {
|
||||
uDebug("SML:0x%" PRIx64 " %s start", info->id, __FUNCTION__);
|
||||
uDebug("SML:0x%" PRIx64 ", %s start", info->id, __FUNCTION__);
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (info->protocol == TSDB_SML_JSON_PROTOCOL) {
|
||||
return smlParseJson(info, lines, rawLine);
|
||||
|
@ -1646,12 +1646,12 @@ static int32_t smlParseStart(SSmlHandle *info, char *lines[], char *rawLine, cha
|
|||
if (rawLine != NULL) {
|
||||
printRaw(info->id, i, numLines, DEBUG_ERROR, tmp, len);
|
||||
} else {
|
||||
uError("SML:0x%" PRIx64 " %s failed. line %d : %s", info->id, __FUNCTION__, i, tmp);
|
||||
uError("SML:0x%" PRIx64 ", %s failed. line %d :%s", info->id, __FUNCTION__, i, tmp);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
if (info->reRun) {
|
||||
uDebug("SML:0x%" PRIx64 " %s re run", info->id, __FUNCTION__);
|
||||
uDebug("SML:0x%" PRIx64 ", %s re run", info->id, __FUNCTION__);
|
||||
i = 0;
|
||||
rawLine = oldRaw;
|
||||
code = smlClearForRerun(info);
|
||||
|
@ -1662,7 +1662,7 @@ static int32_t smlParseStart(SSmlHandle *info, char *lines[], char *rawLine, cha
|
|||
}
|
||||
i++;
|
||||
}
|
||||
uDebug("SML:0x%" PRIx64 " %s end", info->id, __FUNCTION__);
|
||||
uDebug("SML:0x%" PRIx64 ", %s end", info->id, __FUNCTION__);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -1689,7 +1689,7 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
break;
}
taosMsleep(100);
uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum);
uInfo("SML:0x%" PRIx64 ", smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum);
} while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES);

SML_CHECK_CODE(code);
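The schemaless (SML) hunks above only adjust log formatting around the schema-modification retry loop; the code path they sit behind is the schemaless insert API. As a hedged illustration of a caller of that path (the `taos_schemaless_insert` signature and the `TSDB_SML_LINE_PROTOCOL` / `TSDB_SML_TIMESTAMP_MILLI_SECONDS` constants are assumed from the public `taos.h`, not taken from this diff, and the sample data is made up):

```c
#include <stdio.h>
#include "taos.h"  // TDengine C client header (assumed available)

// Minimal sketch: push two line-protocol records through the schemaless
// path implemented by smlProcess()/smlModifyDBSchemas() above.
int insert_lines_example(TAOS *taos) {
  char *lines[] = {
      "meters,location=beijing,groupid=2 current=11.8,voltage=221 1648432611249",
      "meters,location=beijing,groupid=2 current=13.4,voltage=223 1648432611250",
  };

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 2, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  int code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  return code;
}
```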
@ -103,7 +103,7 @@ int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
|
|||
}
|
||||
|
||||
if (pStmt->errCode && newStatus != STMT_PREPARE) {
|
||||
STMT_DLOG("stmt already failed with err: %s", tstrerror(pStmt->errCode));
|
||||
STMT_DLOG("stmt already failed with err:%s", tstrerror(pStmt->errCode));
|
||||
return pStmt->errCode;
|
||||
}
|
||||
|
||||
|
@ -574,7 +574,7 @@ int32_t stmtRebuildDataBlock(STscStmt* pStmt, STableDataCxt* pDataBlock, STableD
|
|||
STMT_ERR_RET(stmtTryAddTableVgroupInfo(pStmt, &vgId));
|
||||
STMT_ERR_RET(qRebuildStmtDataBlock(newBlock, pDataBlock, uid, suid, vgId, pStmt->sql.autoCreateTbl));
|
||||
|
||||
STMT_DLOG("tableDataCxt rebuilt, uid:%" PRId64 ", vgId:%d", uid, vgId);
|
||||
STMT_DLOG("uid:%" PRId64 ", rebuild table data context, vgId:%d", uid, vgId);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -984,7 +984,7 @@ int32_t stmtInitStbInterlaceTableInfo(STscStmt* pStmt) {
|
|||
int stmtSetDbName(TAOS_STMT* stmt, const char* dbName) {
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
|
||||
STMT_DLOG("start to set dbName: %s", dbName);
|
||||
STMT_DLOG("start to set dbName:%s", dbName);
|
||||
|
||||
STMT_ERR_RET(stmtCreateRequest(pStmt));
|
||||
|
||||
|
@ -1002,7 +1002,7 @@ int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
|
|||
|
||||
int64_t startUs = taosGetTimestampUs();
|
||||
|
||||
STMT_DLOG("start to set tbName: %s", tbName);
|
||||
STMT_DLOG("start to set tbName:%s", tbName);
|
||||
|
||||
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
|
||||
return pStmt->errCode;
|
||||
|
@ -1223,7 +1223,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
|
|||
|
||||
int64_t startUs = taosGetTimestampUs();
|
||||
|
||||
STMT_DLOG("start to bind stmt data, colIdx: %d", colIdx);
|
||||
STMT_DLOG("start to bind stmt data, colIdx:%d", colIdx);
|
||||
|
||||
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
|
||||
return pStmt->errCode;
|
||||
|
@ -1418,7 +1418,7 @@ int stmtAddBatch(TAOS_STMT* stmt) {
|
|||
}
|
||||
/*
|
||||
int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
|
||||
tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks);
|
||||
tscDebug("stmt start to update tbUid, blockNum:%d", pRsp->nBlocks);
|
||||
|
||||
int32_t code = 0;
|
||||
int32_t finalCode = 0;
|
||||
|
@ -1645,7 +1645,7 @@ int stmtClose(TAOS_STMT* stmt) {
|
|||
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
|
||||
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
|
||||
|
||||
STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
|
||||
STMT_DLOG("stmt %p closed, stbInterlaceMode:%d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
|
||||
", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64
|
||||
", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u"
|
||||
", setTbNameUs:%" PRId64 ", bindDataUs:%" PRId64 ",%" PRId64 ",%" PRId64 ",%" PRId64 " addBatchUs:%" PRId64
|
||||
|
|
|
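The stmt hunks above (stmtSetDbName, stmtSetTbName, stmtBindBatch, stmtClose) only change log punctuation, but the messages trace the usual prepared-statement call sequence; the stmt2 hunks that follow mirror the same flow for the newer interface. A minimal caller-side sketch of that sequence, assuming the public `taos_stmt_*` API and `TAOS_MULTI_BIND` layout from `taos.h` (table name and values are hypothetical):

```c
#include <stdio.h>
#include <stdint.h>
#include "taos.h"

// Hedged sketch of the flow whose log lines appear above:
// init -> prepare -> set tbname -> bind -> add batch -> execute -> close.
int stmt_insert_example(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  if (stmt == NULL) return -1;

  int code = taos_stmt_prepare(stmt, "INSERT INTO ? VALUES(?, ?)", 0);
  if (code == 0) code = taos_stmt_set_tbname(stmt, "d1001");   // "start to set tbName:d1001"

  int64_t ts = 1700000000000LL;
  float   current = 10.5f;
  TAOS_MULTI_BIND params[2] = {0};
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(ts);
  params[0].num           = 1;
  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = &current;
  params[1].buffer_length = sizeof(current);
  params[1].num           = 1;

  if (code == 0) code = taos_stmt_bind_param(stmt, params);    // "start to bind stmt data, ..."
  if (code == 0) code = taos_stmt_add_batch(stmt);
  if (code == 0) code = taos_stmt_execute(stmt);
  if (code != 0) fprintf(stderr, "stmt failed: %s\n", taos_stmt_errstr(stmt));
  (void)taos_stmt_close(stmt);                                 // "stmt %p closed, ..."
  return code;
}
```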
@ -107,7 +107,7 @@ static int32_t stmtSwitchStatus(STscStmt2* pStmt, STMT_STATUS newStatus) {
|
|||
}
|
||||
|
||||
if (pStmt->errCode && newStatus != STMT_PREPARE) {
|
||||
STMT_DLOG("stmt already failed with err: %s", tstrerror(pStmt->errCode));
|
||||
STMT_DLOG("stmt already failed with err:%s", tstrerror(pStmt->errCode));
|
||||
return pStmt->errCode;
|
||||
}
|
||||
|
||||
|
@ -504,7 +504,7 @@ static int32_t stmtRebuildDataBlock(STscStmt2* pStmt, STableDataCxt* pDataBlock,
|
|||
STMT_ERR_RET(stmtTryAddTableVgroupInfo(pStmt, &vgId));
|
||||
STMT_ERR_RET(qRebuildStmtDataBlock(newBlock, pDataBlock, uid, suid, vgId, pStmt->sql.autoCreateTbl));
|
||||
|
||||
STMT_DLOG("tableDataCxt rebuilt, uid:%" PRId64 ", vgId:%d", uid, vgId);
|
||||
STMT_DLOG("uid:%" PRId64 ", rebuild table data context, vgId:%d", uid, vgId);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -876,7 +876,7 @@ TAOS_STMT2* stmtInit2(STscObj* taos, TAOS_STMT2_OPTION* pOptions) {
|
|||
static int stmtSetDbName2(TAOS_STMT2* stmt, const char* dbName) {
|
||||
STscStmt2* pStmt = (STscStmt2*)stmt;
|
||||
|
||||
STMT_DLOG("start to set dbName: %s", dbName);
|
||||
STMT_DLOG("start to set dbName:%s", dbName);
|
||||
|
||||
pStmt->db = taosStrdup(dbName);
|
||||
(void)strdequote(pStmt->db);
|
||||
|
@ -972,7 +972,7 @@ int stmtSetTbName2(TAOS_STMT2* stmt, const char* tbName) {
|
|||
|
||||
int64_t startUs = taosGetTimestampUs();
|
||||
|
||||
STMT_DLOG("start to set tbName: %s", tbName);
|
||||
STMT_DLOG("start to set tbName:%s", tbName);
|
||||
|
||||
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
|
||||
return pStmt->errCode;
|
||||
|
@ -1340,7 +1340,7 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
|
|||
|
||||
int64_t startUs = taosGetTimestampUs();
|
||||
|
||||
STMT_DLOG("start to bind stmt data, colIdx: %d", colIdx);
|
||||
STMT_DLOG("start to bind stmt data, colIdx:%d", colIdx);
|
||||
|
||||
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
|
||||
return pStmt->errCode;
|
||||
|
@ -1520,7 +1520,7 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
|
|||
}
|
||||
/*
|
||||
int stmtUpdateTableUid(STscStmt2* pStmt, SSubmitRsp* pRsp) {
|
||||
tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks);
|
||||
tscDebug("stmt start to update tbUid, blockNum:%d", pRsp->nBlocks);
|
||||
|
||||
int32_t code = 0;
|
||||
int32_t finalCode = 0;
|
||||
|
@ -1841,7 +1841,7 @@ int stmtClose2(TAOS_STMT2* stmt) {
|
|||
}
|
||||
}
|
||||
|
||||
STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
|
||||
STMT_DLOG("stmt %p closed, stbInterlaceMode:%d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
|
||||
", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64
|
||||
", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u"
|
||||
", setTbNameUs:%" PRId64 ", bindDataUs:%" PRId64 ",%" PRId64 ",%" PRId64 ",%" PRId64 " addBatchUs:%" PRId64
|
||||
|
|
|
@ -25,10 +25,9 @@
|
|||
#include "ttimer.h"
|
||||
|
||||
#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ INFO ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ DEBUG ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
|
||||
|
||||
#define EMPTY_BLOCK_POLL_IDLE_DURATION 10
|
||||
#define DEFAULT_AUTO_COMMIT_INTERVAL 5000
|
||||
#define DEFAULT_HEARTBEAT_INTERVAL 3000
|
||||
#define DEFAULT_ASKEP_INTERVAL 1000
|
||||
|
@ -149,7 +148,6 @@ struct tmq_t {
|
|||
STscObj* pTscObj; // connection
|
||||
SArray* clientTopics; // SArray<SMqClientTopic>
|
||||
STaosQueue* mqueue; // queue of rsp
|
||||
STaosQall* qall;
|
||||
STaosQueue* delayedTask; // delayed task queue for heartbeat and auto commit
|
||||
tsem2_t rspSem;
|
||||
};
|
||||
|
@ -174,7 +172,6 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int32_t vgStatus;
|
||||
int32_t vgSkipCnt; // here used to mark the slow vgroups
|
||||
int64_t emptyBlockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data
|
||||
int64_t blockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data
|
||||
int64_t blockSleepForReplay; // once empty block is received, idle for ignoreCnt then start to poll data
|
||||
bool seekUpdated; // offset is updated by seek operator, therefore, not update by vnode rsp.
|
||||
|
@ -356,7 +353,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
conf->autoCommit = false;
|
||||
return TMQ_CONF_OK;
|
||||
} else {
|
||||
tqErrorC("invalid value for enable.auto.commit: %s", value);
|
||||
tqErrorC("invalid value for enable.auto.commit:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
}
|
||||
|
@ -365,7 +362,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int64_t tmp;
|
||||
code = taosStr2int64(value, &tmp);
|
||||
if (tmp < 0 || code != 0) {
|
||||
tqErrorC("invalid value for auto.commit.interval.ms: %s", value);
|
||||
tqErrorC("invalid value for auto.commit.interval.ms:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
conf->autoCommitInterval = (tmp > INT32_MAX ? INT32_MAX : tmp);
|
||||
|
@ -376,7 +373,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int64_t tmp;
|
||||
code = taosStr2int64(value, &tmp);
|
||||
if (tmp < 6000 || tmp > 1800000 || code != 0) {
|
||||
tqErrorC("invalid value for session.timeout.ms: %s", value);
|
||||
tqErrorC("invalid value for session.timeout.ms:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
conf->sessionTimeoutMs = tmp;
|
||||
|
@ -387,7 +384,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int64_t tmp;
|
||||
code = taosStr2int64(value, &tmp);
|
||||
if (tmp < 1000 || tmp >= conf->sessionTimeoutMs || code != 0) {
|
||||
tqErrorC("invalid value for heartbeat.interval.ms: %s", value);
|
||||
tqErrorC("invalid value for heartbeat.interval.ms:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
conf->heartBeatIntervalMs = tmp;
|
||||
|
@ -398,7 +395,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int32_t tmp;
|
||||
code = taosStr2int32(value, &tmp);
|
||||
if (tmp < 1000 || code != 0) {
|
||||
tqErrorC("invalid value for max.poll.interval.ms: %s", value);
|
||||
tqErrorC("invalid value for max.poll.interval.ms:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
conf->maxPollIntervalMs = tmp;
|
||||
|
@ -416,7 +413,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
conf->resetOffset = TMQ_OFFSET__RESET_LATEST;
|
||||
return TMQ_CONF_OK;
|
||||
} else {
|
||||
tqErrorC("invalid value for auto.offset.reset: %s", value);
|
||||
tqErrorC("invalid value for auto.offset.reset:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
}
|
||||
|
@ -429,7 +426,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
conf->withTbName = false;
|
||||
return TMQ_CONF_OK;
|
||||
} else {
|
||||
tqErrorC("invalid value for msg.with.table.name: %s", value);
|
||||
tqErrorC("invalid value for msg.with.table.name:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
}
|
||||
|
@ -442,7 +439,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
conf->snapEnable = false;
|
||||
return TMQ_CONF_OK;
|
||||
} else {
|
||||
tqErrorC("invalid value for experimental.snapshot.enable: %s", value);
|
||||
tqErrorC("invalid value for experimental.snapshot.enable:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
}
|
||||
|
@ -481,7 +478,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int64_t tmp;
|
||||
code = taosStr2int64(value, &tmp);
|
||||
if (tmp <= 0 || tmp > 65535 || code != 0) {
|
||||
tqErrorC("invalid value for td.connect.port: %s", value);
|
||||
tqErrorC("invalid value for td.connect.port:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
|
||||
|
@ -497,7 +494,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
conf->replayEnable = false;
|
||||
return TMQ_CONF_OK;
|
||||
} else {
|
||||
tqErrorC("invalid value for enable.replay: %s", value);
|
||||
tqErrorC("invalid value for enable.replay:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
}
|
||||
|
@ -518,7 +515,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int64_t tmp = 0;
|
||||
code = taosStr2int64(value, &tmp);
|
||||
if (tmp <= 0 || tmp > INT32_MAX || code != 0) {
|
||||
tqErrorC("invalid value for fetch.max.wait.ms: %s", value);
|
||||
tqErrorC("invalid value for fetch.max.wait.ms:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
conf->maxPollWaitTime = tmp;
|
||||
|
@ -529,7 +526,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
|
|||
int64_t tmp = 0;
|
||||
code = taosStr2int64(value, &tmp);
|
||||
if (tmp <= 0 || tmp > INT32_MAX || code != 0) {
|
||||
tqErrorC("invalid value for min.poll.rows: %s", value);
|
||||
tqErrorC("invalid value for min.poll.rows:%s", value);
|
||||
return TMQ_CONF_INVALID;
|
||||
}
|
||||
conf->minPollRows = tmp;
|
||||
|
@ -547,7 +544,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}

tqErrorC("unknown key: %s", key);
tqErrorC("unknown key:%s", key);
return TMQ_CONF_UNKNOWN;
}
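The `tmq_conf_set` hunks above tighten the error messages for the individual consumer config keys and show the accepted ranges (for example `session.timeout.ms` in [6000, 1800000], `heartbeat.interval.ms` at least 1000 and below the session timeout, `max.poll.interval.ms` at least 1000). As a hedged sketch of how those keys are supplied from the client side, using `tmq_conf_set` and `tmq_consumer_new` as they appear elsewhere in this diff (`tmq_conf_new`/`tmq_conf_destroy` and the sample values are assumptions):

```c
#include <stdio.h>
#include "taos.h"

// Sketch: build a consumer configuration with the keys validated above.
// Every value below is inside the range the validation code accepts.
tmq_t *new_consumer_example(void) {
  tmq_conf_t *conf = tmq_conf_new();
  if (conf == NULL) return NULL;

  tmq_conf_set(conf, "group.id", "grp1");                  // mandatory, checked in tmq_consumer_new
  tmq_conf_set(conf, "enable.auto.commit", "true");
  tmq_conf_set(conf, "auto.commit.interval.ms", "1000");   // must be >= 0
  tmq_conf_set(conf, "session.timeout.ms", "12000");       // 6000..1800000
  tmq_conf_set(conf, "heartbeat.interval.ms", "3000");     // >= 1000 and < session timeout
  tmq_conf_set(conf, "max.poll.interval.ms", "300000");    // >= 1000
  tmq_conf_set(conf, "auto.offset.reset", "latest");
  tmq_conf_set(conf, "msg.with.table.name", "true");

  char errstr[256] = {0};
  tmq_t *tmq = tmq_consumer_new(conf, errstr, sizeof(errstr));
  if (tmq == NULL) fprintf(stderr, "tmq_consumer_new failed: %s\n", errstr);
  tmq_conf_destroy(conf);
  return tmq;
}
```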
@ -1134,7 +1131,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
|
|||
tDestroySMqHbReq(&req);
|
||||
if (tmrId != NULL) {
|
||||
bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer);
|
||||
tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag);
|
||||
tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat ret:%d, interval:%d, pollFlag:%d", tmq->consumerId, ret, tmq->heartBeatIntervalMs, tmq->pollFlag);
|
||||
}
|
||||
int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId);
|
||||
if (ret != 0){
|
||||
|
@ -1226,7 +1223,6 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
|
|||
.epSet = pVgEp->epSet,
|
||||
.vgStatus = pInfo ? pInfo->vgStatus : TMQ_VG_STATUS__IDLE,
|
||||
.vgSkipCnt = 0,
|
||||
.emptyBlockReceiveTs = 0,
|
||||
.blockReceiveTs = 0,
|
||||
.blockSleepForReplay = 0,
|
||||
.numOfRows = pInfo ? pInfo->numOfRows : 0,
|
||||
|
@ -1268,7 +1264,7 @@ static void buildNewTopicList(tmq_t* tmq, SArray* newTopics, const SMqAskEpRsp*
|
|||
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
|
||||
if (pTopicCur && pTopicCur->vgs) {
|
||||
int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
|
||||
tqInfoC("consumer:0x%" PRIx64 ", current vg num: %d", tmq->consumerId, vgNumCur);
|
||||
tqInfoC("consumer:0x%" PRIx64 ", current vg num:%d", tmq->consumerId, vgNumCur);
|
||||
for (int32_t j = 0; j < vgNumCur; j++) {
|
||||
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
|
||||
if (pVgCur == NULL) {
|
||||
|
@ -1474,7 +1470,7 @@ static int32_t askEp(tmq_t* pTmq, void* param, bool sync, bool updateEpSet) {
|
|||
pParam = NULL;
|
||||
|
||||
SEpSet epSet = getEpSet_s(&pTmq->pTscObj->pAppInfo->mgmtEp);
|
||||
tqDebugC("consumer:0x%" PRIx64 " ask ep from mnode,QID:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
|
||||
tqDebugC("consumer:0x%" PRIx64 " ask ep from mnode, QID:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
|
||||
code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo);
|
||||
|
||||
END:
|
||||
|
@ -1487,27 +1483,14 @@ END:
|
|||
}
|
||||
|
||||
static int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) {
|
||||
STaosQall* qall = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
code = taosAllocateQall(&qall);
|
||||
if (code) {
|
||||
tqErrorC("consumer:0x%" PRIx64 ", failed to allocate qall, code:%s", pTmq->consumerId, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t numOfItems = taosReadAllQitems(pTmq->delayedTask, qall);
|
||||
if (numOfItems == 0) {
|
||||
taosFreeQall(qall);
|
||||
return 0;
|
||||
}
|
||||
|
||||
tqDebugC("consumer:0x%" PRIx64 " handle delayed %d tasks before poll data", pTmq->consumerId, numOfItems);
|
||||
tqDebugC("consumer:0x%" PRIx64 " handle delayed %d tasks before poll data", pTmq->consumerId, taosQueueItemSize(pTmq->delayedTask));
|
||||
while (1) {
|
||||
int8_t* pTaskType = NULL;
|
||||
while (taosGetQitem(qall, (void**)&pTaskType) != 0) {
|
||||
taosReadQitem(pTmq->delayedTask, (void**)&pTaskType);
|
||||
if (pTaskType == NULL) {break;}
|
||||
if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) {
|
||||
tqDebugC("consumer:0x%" PRIx64 " retrieve ask ep timer", pTmq->consumerId);
|
||||
code = askEp(pTmq, NULL, false, false);
|
||||
int32_t code = askEp(pTmq, NULL, false, false);
|
||||
if (code != 0) {
|
||||
tqErrorC("consumer:0x%" PRIx64 " failed to ask ep, code:%s", pTmq->consumerId, tstrerror(code));
|
||||
}
|
||||
|
@ -1530,23 +1513,15 @@ static int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) {
|
|||
taosFreeQitem(pTaskType);
|
||||
}
|
||||
|
||||
taosFreeQall(qall);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tmqClearUnhandleMsg(tmq_t* tmq) {
|
||||
if (tmq == NULL) return;
|
||||
while (1) {
|
||||
SMqRspWrapper* rspWrapper = NULL;
|
||||
while (taosGetQitem(tmq->qall, (void**)&rspWrapper) != 0) {
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(rspWrapper);
|
||||
}
|
||||
|
||||
rspWrapper = NULL;
|
||||
if (taosReadAllQitems(tmq->mqueue, tmq->qall) == 0){
|
||||
return;
|
||||
}
|
||||
while (taosGetQitem(tmq->qall, (void**)&rspWrapper) != 0) {
|
||||
taosReadQitem(tmq->mqueue, (void**)&rspWrapper);
|
||||
if (rspWrapper == NULL) break;
|
||||
tmqFreeRspWrapper(rspWrapper);
|
||||
taosFreeQitem(rspWrapper);
|
||||
}
|
||||
|
@ -1613,7 +1588,6 @@ void tmqFreeImpl(void* handle) {
|
|||
taosCloseQueue(tmq->delayedTask);
|
||||
}
|
||||
|
||||
taosFreeQall(tmq->qall);
|
||||
if(tsem2_destroy(&tmq->rspSem) != 0) {
|
||||
tqErrorC("failed to destroy sem in free tmq");
|
||||
}
|
||||
|
@ -1739,14 +1713,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
|
|||
goto _failed;
|
||||
}
|
||||
|
||||
code = taosAllocateQall(&pTmq->qall);
|
||||
if (code) {
|
||||
tqErrorC("consumer:0x%" PRIx64 " setup failed since %s, groupId:%s", pTmq->consumerId, tstrerror(code),
|
||||
pTmq->groupId);
|
||||
SET_ERROR_MSG_TMQ("allocate qall failed")
|
||||
goto _failed;
|
||||
}
|
||||
|
||||
if (conf->groupId[0] == 0) {
|
||||
tqErrorC("consumer:0x%" PRIx64 " setup failed since %s, groupId:%s", pTmq->consumerId, tstrerror(code),
|
||||
pTmq->groupId);
|
||||
|
@ -2102,7 +2068,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
goto END;
|
||||
}
|
||||
rspType = ((SMqRspHead*)pMsg->pData)->mqMsgType;
|
||||
tqDebugC("consumer:0x%" PRIx64 " recv poll rsp, vgId:%d, type %d(%s),QID:0x%" PRIx64, tmq->consumerId, vgId, rspType, tmqMsgTypeStr[rspType], requestId);
|
||||
tqDebugC("consumer:0x%" PRIx64 " recv poll rsp, vgId:%d, type %d(%s), QID:0x%" PRIx64, tmq->consumerId, vgId, rspType, tmqMsgTypeStr[rspType], requestId);
|
||||
|
||||
pRspWrapper->tmqRspType = rspType;
|
||||
pRspWrapper->pollRsp.reqId = requestId;
|
||||
|
@ -2123,12 +2089,11 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
taosFreeQitem(pRspWrapper);
|
||||
tqErrorC("consumer:0x%" PRIx64 " put poll res into mqueue failed, code:%d", tmq->consumerId, code);
|
||||
} else {
|
||||
tqDebugC("consumer:0x%" PRIx64 " put poll res into mqueue, type:%d(%s), vgId:%d, total in queue:%d,QID:0x%" PRIx64,
|
||||
tqDebugC("consumer:0x%" PRIx64 " put poll res into mqueue, type:%d(%s), vgId:%d, total in queue:%d, QID:0x%" PRIx64,
|
||||
tmq ? tmq->consumerId : 0, rspType, tmqMsgTypeStr[rspType], vgId, taosQueueItemSize(tmq->mqueue), requestId);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (tsem2_post(&tmq->rspSem) != 0){
|
||||
tqErrorC("failed to post rsp sem, consumer:0x%" PRIx64, tmq->consumerId);
|
||||
}
|
||||
|
@ -2297,7 +2262,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
|
|||
char offsetFormatBuf[TSDB_OFFSET_LEN] = {0};
|
||||
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.endOffset);
|
||||
code = asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, NULL, sendInfo);
|
||||
tqDebugC("consumer:0x%" PRIx64 " send poll to %s vgId:%d, code:%d, epoch %d, req:%s,QID:0x%" PRIx64, pTmq->consumerId,
|
||||
tqDebugC("consumer:0x%" PRIx64 " send poll to %s vgId:%d, code:%d, epoch %d, req:%s, QID:0x%" PRIx64, pTmq->consumerId,
|
||||
pTopic->topicName, pVg->vgId, code, pTmq->epoch, offsetFormatBuf, req.reqId);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
|
@ -2322,7 +2287,7 @@ static int32_t tmqPollImpl(tmq_t* tmq) {
|
|||
taosWLockLatch(&tmq->lock);
|
||||
|
||||
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__LOST){
|
||||
code = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
|
||||
code = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -2344,14 +2309,7 @@ static int32_t tmqPollImpl(tmq_t* tmq) {
|
|||
if (pVg == NULL) {
|
||||
continue;
|
||||
}
|
||||
int64_t elapsed = taosGetTimestampMs() - pVg->emptyBlockReceiveTs;
|
||||
if (elapsed < EMPTY_BLOCK_POLL_IDLE_DURATION && elapsed >= 0) { // less than 10ms
|
||||
tqDebugC("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for 10ms before start next poll", tmq->consumerId,
|
||||
tmq->epoch, pVg->vgId);
|
||||
continue;
|
||||
}
|
||||
|
||||
elapsed = taosGetTimestampMs() - pVg->blockReceiveTs;
|
||||
int64_t elapsed = taosGetTimestampMs() - pVg->blockReceiveTs;
|
||||
if (tmq->replayEnable && elapsed < pVg->blockSleepForReplay && elapsed >= 0) {
|
||||
tqDebugC("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for %" PRId64 "ms before start next poll when replay",
|
||||
tmq->consumerId, tmq->epoch, pVg->vgId, pVg->blockSleepForReplay);
|
||||
|
@ -2430,12 +2388,12 @@ static int32_t processMqRspError(tmq_t* tmq, SMqRspWrapper* pRspWrapper){
if (pRspWrapper->code == TSDB_CODE_VND_INVALID_VGROUP_ID) { // for vnode transform
code = askEp(tmq, NULL, false, true);
if (code != 0) {
tqErrorC("consumer:0x%" PRIx64 " failed to ask ep, code:%s", tmq->consumerId, tstrerror(code));
tqErrorC("consumer:0x%" PRIx64 " failed to ask ep when vnode transform, code:%s", tmq->consumerId, tstrerror(code));
}
} else if (pRspWrapper->code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) {
code = askEp(tmq, NULL, false, false);
code = syncAskEp(tmq);
if (code != 0) {
tqErrorC("consumer:0x%" PRIx64 " failed to ask ep, code:%s", tmq->consumerId, tstrerror(code));
tqErrorC("consumer:0x%" PRIx64 " failed to ask ep when consumer mismatch, code:%s", tmq->consumerId, tstrerror(code));
}
} else if (pRspWrapper->code == TSDB_CODE_TMQ_NO_TABLE_QUALIFIED){
code = 0;
@ -2446,7 +2404,6 @@ static int32_t processMqRspError(tmq_t* tmq, SMqRspWrapper* pRspWrapper){
|
|||
SMqClientVg* pVg = NULL;
|
||||
getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId, &pVg);
|
||||
if (pVg) {
|
||||
pVg->emptyBlockReceiveTs = taosGetTimestampMs();
|
||||
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
|
||||
}
|
||||
taosWUnLockLatch(&tmq->lock);
|
||||
|
@ -2523,9 +2480,8 @@ static SMqRspObj* processMqRsp(tmq_t* tmq, SMqRspWrapper* pRspWrapper){
|
|||
tFormatOffset(buf, TSDB_OFFSET_LEN, &pollRspWrapper->rspOffset);
|
||||
if (pollRspWrapper->dataRsp.blockNum == 0) {
|
||||
tqDebugC("consumer:0x%" PRIx64 " empty block received, vgId:%d, offset:%s, vg total:%" PRId64
|
||||
", total:%" PRId64 ",QID:0x%" PRIx64,
|
||||
", total:%" PRId64 ", QID:0x%" PRIx64,
|
||||
tmq->consumerId, pVg->vgId, buf, pVg->numOfRows, tmq->totalRows, pollRspWrapper->reqId);
|
||||
pVg->emptyBlockReceiveTs = taosGetTimestampMs();
|
||||
} else {
|
||||
pRspObj = buildRsp(pollRspWrapper);
|
||||
if (pRspObj == NULL) {
|
||||
|
@ -2539,7 +2495,6 @@ static SMqRspObj* processMqRsp(tmq_t* tmq, SMqRspWrapper* pRspWrapper){
|
|||
tmqBuildRspFromWrapperInner(pollRspWrapper, pVg, &numOfRows, pRspObj);
|
||||
tmq->totalRows += numOfRows;
|
||||
}
|
||||
pVg->emptyBlockReceiveTs = 0;
|
||||
if (tmq->replayEnable && pRspWrapper->tmqRspType != TMQ_MSG_TYPE__POLL_RAW_DATA_RSP) {
|
||||
pVg->blockReceiveTs = taosGetTimestampMs();
|
||||
pVg->blockSleepForReplay = pRspObj->dataRsp.sleepTime;
|
||||
|
@ -2551,7 +2506,7 @@ static SMqRspObj* processMqRsp(tmq_t* tmq, SMqRspWrapper* pRspWrapper){
|
|||
}
|
||||
}
|
||||
tqDebugC("consumer:0x%" PRIx64 " process poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
|
||||
", vg total:%" PRId64 ", total:%" PRId64 ",QID:0x%" PRIx64,
|
||||
", vg total:%" PRId64 ", total:%" PRId64 ", QID:0x%" PRIx64,
|
||||
tmq->consumerId, pVg->vgId, buf, pRspObj->dataRsp.blockNum, numOfRows, pVg->numOfRows, tmq->totalRows,
|
||||
pollRspWrapper->reqId);
|
||||
}
|
||||
|
@ -2575,22 +2530,14 @@ END:
|
|||
}
|
||||
|
||||
static void* tmqHandleAllRsp(tmq_t* tmq) {
|
||||
tqDebugC("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, taosQallItemSize(tmq->qall));
|
||||
tqDebugC("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, taosQueueItemSize(tmq->mqueue));
|
||||
|
||||
int32_t code = 0;
|
||||
void* returnVal = NULL;
|
||||
while (1) {
|
||||
SMqRspWrapper* pRspWrapper = NULL;
|
||||
if (taosGetQitem(tmq->qall, (void**)&pRspWrapper) == 0) {
|
||||
code = taosReadAllQitems(tmq->mqueue, tmq->qall);
|
||||
if (code == 0){
|
||||
goto END;
|
||||
}
|
||||
code = taosGetQitem(tmq->qall, (void**)&pRspWrapper);
|
||||
if (code == 0) {
|
||||
goto END;
|
||||
}
|
||||
}
|
||||
taosReadQitem(tmq->mqueue, (void**)&pRspWrapper);
|
||||
if (pRspWrapper == NULL) {break;}
|
||||
|
||||
tqDebugC("consumer:0x%" PRIx64 " handle rsp, type:%s", tmq->consumerId, tmqMsgTypeStr[pRspWrapper->tmqRspType]);
|
||||
if (pRspWrapper->code != 0) {
|
||||
|
@ -2629,9 +2576,6 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
|||
code = tmqHandleAllDelayedTask(tmq);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
code = tmqPollImpl(tmq);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
rspObj = tmqHandleAllRsp(tmq);
|
||||
if (rspObj) {
|
||||
tqDebugC("consumer:0x%" PRIx64 " return rsp %p", tmq->consumerId, rspObj);
|
||||
|
@ -2640,11 +2584,14 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
|
|||
code = terrno;
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
code = tmqPollImpl(tmq);
|
||||
TSDB_CHECK_CODE(code, lino, END);
|
||||
|
||||
if (timeout >= 0) {
|
||||
int64_t currentTime = taosGetTimestampMs();
|
||||
int64_t elapsedTime = currentTime - startTime;
|
||||
TSDB_CHECK_CONDITION(elapsedTime <= timeout && elapsedTime >= 0, code, lino, END, 0);
|
||||
(void)tsem2_timewait(&tmq->rspSem, (timeout - elapsedTime));
|
||||
TSDB_CHECK_CONDITION(elapsedTime < timeout && elapsedTime >= 0, code, lino, END, 0);
|
||||
} else {
|
||||
(void)tsem2_timewait(&tmq->rspSem, 1000);
|
||||
}
|
||||
|
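The `tmq_consumer_poll` hunks above appear to move the `tmqPollImpl` call so that delayed tasks and already-queued responses are handled before the next poll request is issued, and the elapsed-time check is tightened to a strict `elapsedTime < timeout`. The application-facing contract is unchanged; a hedged sketch of a typical poll loop, assuming the public `tmq_list_*`, `tmq_subscribe`, `tmq_commit_sync`, and `tmq_unsubscribe` API (the topic name is hypothetical):

```c
#include <stdio.h>
#include "taos.h"

// Sketch: subscribe and poll until no data arrives within the timeout.
// `tmq` is a consumer created as in the configuration sketch earlier.
int poll_loop_example(tmq_t *tmq) {
  tmq_list_t *topics = tmq_list_new();
  int32_t code = tmq_list_append(topics, "topic_meters");  // hypothetical topic
  if (code == 0) code = tmq_subscribe(tmq, topics);
  tmq_list_destroy(topics);
  if (code != 0) return code;

  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);  // timeout in milliseconds
    if (msg == NULL) break;                        // nothing arrived within 1s
    // ... consume rows from msg with taos_fetch_row() here ...
    code = tmq_commit_sync(tmq, msg);              // commit the offset of this message
    taos_free_result(msg);
    if (code != 0) break;
  }
  return tmq_unsubscribe(tmq);
}
```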
@ -3551,7 +3498,7 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
|
|||
char offsetFormatBuf[TSDB_OFFSET_LEN] = {0};
|
||||
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pClientVg->offsetInfo.beginOffset);
|
||||
|
||||
tqInfoC("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s,QID:0x%" PRIx64, tmq->consumerId,
|
||||
tqInfoC("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, QID:0x%" PRIx64, tmq->consumerId,
|
||||
pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
|
||||
code = asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, NULL, sendInfo);
|
||||
if (code != 0) {
|
||||
|
|
|
@ -184,7 +184,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConsumerNewIm
|
|||
}
|
||||
tmq_t *tmq = tmq_consumer_new((tmq_conf_t *)conf, msg, len);
|
||||
if (strlen(msg) > 0) {
|
||||
jniError("jobj:%p, config:%p, tmq create consumer error: %s", jobj, conf, msg);
|
||||
jniError("jobj:%p, config:%p, tmq create consumer error:%s", jobj, conf, msg);
|
||||
(*env)->CallVoidMethod(env, jconsumer, g_createConsumerErrorCallback, (*env)->NewStringUTF(env, msg));
|
||||
taosMemoryFreeClear(msg);
|
||||
return TMQ_CONSUMER_CREATE_ERROR;
|
||||
|
@ -264,7 +264,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqSubscriptionIm
|
|||
int32_t res = tmq_subscription((tmq_t *)tmq, &topicList);
|
||||
if (res != JNI_SUCCESS) {
|
||||
tmq_list_destroy(topicList);
|
||||
jniError("jobj:%p, tmq:%p, tmq get subscription error: %s", jobj, tmq, tmq_err2str(res));
|
||||
jniError("jobj:%p, tmq:%p, tmq get subscription error:%s", jobj, tmq, tmq_err2str(res));
|
||||
return (jint)res;
|
||||
}
|
||||
|
||||
|
|
|
@ -1926,7 +1926,7 @@ TEST(stmt2Case, async_order) {
|
|||
auto start_time = std::chrono::steady_clock::now();
|
||||
while (!stop_task) {
|
||||
auto elapsed_time = std::chrono::steady_clock::now() - start_time;
|
||||
if (std::chrono::duration_cast<std::chrono::seconds>(elapsed_time).count() > 60) {
|
||||
if (std::chrono::duration_cast<std::chrono::seconds>(elapsed_time).count() > 100) {
|
||||
FAIL() << "Test[stmt2_async_test] timed out";
|
||||
t.detach();
|
||||
break;
|
||||
|
|
|
@ -1982,15 +1982,15 @@ _exit:

static int32_t taosCheckGlobalCfg() {
uint32_t ipv4 = 0;
uInfo("start to check global tsLocalFqdn:%s, tsServerPort:%u", tsLocalFqdn, tsServerPort);
uInfo("check global fqdn:%s and port:%u", tsLocalFqdn, tsServerPort);
int32_t code = taosGetIpv4FromFqdn(tsLocalFqdn, &ipv4);
if (code) {
uError("failed to get ip from fqdn:%s since %s, dnode can not be initialized", tsLocalFqdn, tstrerror(code));
uError("failed to get ip from fqdn:%s since %s, can not be initialized", tsLocalFqdn, tstrerror(code));
TAOS_RETURN(TSDB_CODE_RPC_FQDN_ERROR);
}

if (tsServerPort <= 0) {
uError("invalid server port:%u, dnode can not be initialized", tsServerPort);
uError("invalid server port:%u, can not be initialized", tsServerPort);
TAOS_RETURN(TSDB_CODE_RPC_FQDN_ERROR);
}

@ -2829,7 +2829,7 @@ static void taosCheckAndSetDebugFlag(int32_t *pFlagPtr, char *name, int32_t flag
|
|||
if (code != TSDB_CODE_CFG_NOT_FOUND) {
|
||||
uError("failed to set flag %s to %d, since:%s", name, flag, tstrerror(code));
|
||||
} else {
|
||||
uDebug("failed to set flag %s to %d, since:%s", name, flag, tstrerror(code));
|
||||
uTrace("failed to set flag %s to %d, since:%s", name, flag, tstrerror(code));
|
||||
}
|
||||
}
|
||||
return;
|
||||
|
|
|
@ -39,7 +39,7 @@ static void dmUpdateDnodeCfg(SDnodeMgmt *pMgmt, SDnodeCfg *pCfg) {
|
|||
auditSetDnodeId(pCfg->dnodeId);
|
||||
code = dmWriteEps(pMgmt->pData);
|
||||
if (code != 0) {
|
||||
dInfo("failed to set local info, dnodeId:%d clusterId:%" PRId64 " reason:%s", pCfg->dnodeId, pCfg->clusterId,
|
||||
dInfo("failed to set local info, dnodeId:%d clusterId:0x%" PRIx64 " reason:%s", pCfg->dnodeId, pCfg->clusterId,
|
||||
tstrerror(code));
|
||||
}
|
||||
(void)taosThreadRwlockUnlock(&pMgmt->pData->lock);
|
||||
|
|
|
@ -307,7 +307,7 @@ static void *dmCrashReportThreadFp(void *param) {
|
|||
truncateFile = true;
|
||||
}
|
||||
} else {
|
||||
dDebug("no crash info");
|
||||
dInfo("no crash info was found");
|
||||
}
|
||||
|
||||
taosMemoryFree(pMsg);
|
||||
|
|
|
@ -34,7 +34,7 @@ static int32_t mmRequire(const SMgmtInputOpt *pInput, bool *required) {
|
|||
if (!option.deploy) {
|
||||
*required = mmDeployRequired(pInput);
|
||||
if (*required) {
|
||||
dInfo("deploy mnode required. dnodeId:%d<=0, clusterId:%" PRId64 "<=0, localEp:%s==firstEp",
|
||||
dInfo("deploy mnode required. dnodeId:%d<=0, clusterId:0x%" PRIx64 "<=0, localEp:%s==firstEp",
|
||||
pInput->pData->dnodeId, pInput->pData->clusterId, tsLocalEp);
|
||||
}
|
||||
} else {
|
||||
|
|
|
@ -49,9 +49,9 @@ extern "C" {
|
|||
#define dFatal(...) { if (dDebugFlag & DEBUG_FATAL) { taosPrintLog("DND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
|
||||
#define dError(...) { if (dDebugFlag & DEBUG_ERROR) { taosPrintLog("DND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
|
||||
#define dWarn(...) { if (dDebugFlag & DEBUG_WARN) { taosPrintLog("DND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
|
||||
#define dInfo(...) { if (dDebugFlag & DEBUG_INFO) { taosPrintLog("DND ", DEBUG_INFO, 255, __VA_ARGS__); }}
|
||||
#define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", DEBUG_DEBUG, dDebugFlag, __VA_ARGS__); }}
|
||||
#define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND ", DEBUG_TRACE, dDebugFlag, __VA_ARGS__); }}
|
||||
#define dInfo(...) { if (dDebugFlag & DEBUG_INFO) { taosPrintLog("DND INFO ", DEBUG_INFO, 255, __VA_ARGS__); }}
|
||||
#define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND DEBUG ", DEBUG_DEBUG, dDebugFlag, __VA_ARGS__); }}
|
||||
#define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND TRACE ", DEBUG_TRACE, dDebugFlag, __VA_ARGS__); }}
|
||||
|
||||
#define encryptDebug(...) { \
|
||||
if (toLogFile) { \
|
||||
|
@ -83,12 +83,12 @@ extern "C" {
|
|||
}\
|
||||
}
|
||||
|
||||
#define dGFatal(param, ...) {if (dDebugFlag & DEBUG_FATAL) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dFatal(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGError(param, ...) {if (dDebugFlag & DEBUG_ERROR) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dError(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGWarn(param, ...) {if (dDebugFlag & DEBUG_WARN) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dWarn(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGInfo(param, ...) {if (dDebugFlag & DEBUG_INFO) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dInfo(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGDebug(param, ...) {if (dDebugFlag & DEBUG_DEBUG) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dDebug(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGTrace(param, ...) {if (dDebugFlag & DEBUG_TRACE) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dTrace(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGFatal(param, ...) {if (dDebugFlag & DEBUG_FATAL) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dFatal(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGError(param, ...) {if (dDebugFlag & DEBUG_ERROR) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dError(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGWarn(param, ...) {if (dDebugFlag & DEBUG_WARN) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dWarn(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGInfo(param, ...) {if (dDebugFlag & DEBUG_INFO) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dInfo(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGDebug(param, ...) {if (dDebugFlag & DEBUG_DEBUG) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dDebug(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define dGTrace(param, ...) {if (dDebugFlag & DEBUG_TRACE) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); dTrace(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
|
||||
// clang-format on
|
||||
|
||||
|
|
|
@ -38,16 +38,16 @@ extern "C" {
|
|||
#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
|
||||
#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
|
||||
#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
|
||||
#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }}
|
||||
#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
|
||||
#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
|
||||
#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND INFO ", DEBUG_INFO, 255, __VA_ARGS__); }}
|
||||
#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND DEBUG ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
|
||||
#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND TRACE ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
|
||||
|
||||
#define mGFatal(param, ...) { if (mDebugFlag & DEBUG_FATAL){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mFatal(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGError(param, ...) { if (mDebugFlag & DEBUG_ERROR){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mError(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGWarn(param, ...) { if (mDebugFlag & DEBUG_WARN){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mWarn (param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGInfo(param, ...) { if (mDebugFlag & DEBUG_INFO){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mInfo (param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGDebug(param, ...) { if (mDebugFlag & DEBUG_DEBUG){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mDebug(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGTrace(param, ...) { if (mDebugFlag & DEBUG_TRACE){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mTrace(param ",QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGFatal(param, ...) { if (mDebugFlag & DEBUG_FATAL){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mFatal(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGError(param, ...) { if (mDebugFlag & DEBUG_ERROR){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mError(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGWarn(param, ...) { if (mDebugFlag & DEBUG_WARN) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); mWarn (param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGInfo(param, ...) { if (mDebugFlag & DEBUG_INFO) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); mInfo (param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGDebug(param, ...) { if (mDebugFlag & DEBUG_DEBUG){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mDebug(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
#define mGTrace(param, ...) { if (mDebugFlag & DEBUG_TRACE){ char buf[40] = {0}; TRACE_TO_STR(trace, buf); mTrace(param ", QID:%s", __VA_ARGS__, buf);}}
|
||||
// clang-format on
|
||||
|
||||
#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
|
||||
|
|
|
@ -778,7 +778,7 @@ static int32_t mndProcessKillQueryReq(SRpcMsg *pReq) {
uint64_t queryId = 0;
char *p = strchr(killReq.queryStrId, ':');
if (NULL == p) {
mError("invalid query id %s", killReq.queryStrId);
mError("invalid QID:%s", killReq.queryStrId);
code = TSDB_CODE_MND_INVALID_QUERY_ID;
TAOS_RETURN(code);
}
@ -2887,7 +2887,7 @@ int32_t mndProcessConsensusInTmr(SRpcMsg *pMsg) {
|
|||
if (taosArrayGetSize(pInfo->pTaskList) == 0) {
|
||||
mndClearConsensusRspEntry(pInfo);
|
||||
if (streamId == -1) {
|
||||
mError("streamId is -1, streamId:%" PRIx64" in consensus-checkpointId hashMap, cont", pInfo->streamId);
|
||||
mError("streamId is -1, streamId:%" PRIx64 " in consensus-checkpointId hashMap, cont", pInfo->streamId);
|
||||
}
|
||||
|
||||
void *p = taosArrayPush(pStreamList, &streamId);
|
||||
|
|
|
@ -32,9 +32,9 @@ extern "C" {
|
|||
#define mFatal(...) { if (mDebugFlag & DEBUG_FATAL) { taosPrintLog("MND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
|
||||
#define mError(...) { if (mDebugFlag & DEBUG_ERROR) { taosPrintLog("MND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
|
||||
#define mWarn(...) { if (mDebugFlag & DEBUG_WARN) { taosPrintLog("MND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
|
||||
#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND ", DEBUG_INFO, 255, __VA_ARGS__); }}
|
||||
#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
|
||||
#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
|
||||
#define mInfo(...) { if (mDebugFlag & DEBUG_INFO) { taosPrintLog("MND INFO ", DEBUG_INFO, 255, __VA_ARGS__); }}
|
||||
#define mDebug(...) { if (mDebugFlag & DEBUG_DEBUG) { taosPrintLog("MND DEBUG ", DEBUG_DEBUG, mDebugFlag, __VA_ARGS__); }}
|
||||
#define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND TRACE ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }}
|
||||
// clang-format on
|
||||
|
||||
#define SDB_GET_VAL(pData, dataPos, val, pos, func, type) \
|
||||
|
|
|
@ -20,9 +20,9 @@
|
|||
#include "tuuid.h"
|
||||
|
||||
// clang-format off
|
||||
#define sndError(...) do { if (sndDebugFlag & DEBUG_ERROR) {taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__);}} while (0)
|
||||
#define sndError(...) do { if (sndDebugFlag & DEBUG_ERROR) { taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__);}} while (0)
|
||||
#define sndInfo(...) do { if (sndDebugFlag & DEBUG_INFO) { taosPrintLog("SND INFO ", DEBUG_INFO, sndDebugFlag, __VA_ARGS__);}} while (0)
|
||||
#define sndDebug(...) do { if (sndDebugFlag & DEBUG_DEBUG) { taosPrintLog("SND ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__);}} while (0)
|
||||
#define sndDebug(...) do { if (sndDebugFlag & DEBUG_DEBUG) { taosPrintLog("SND DEBUG ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__);}} while (0)
|
||||
// clang-format on
|
||||
|
||||
int32_t sndBuildStreamTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer) {
|
||||
|
|
|
@ -32,9 +32,9 @@ typedef struct SMetaCache SMetaCache;
|
|||
#define metaFatal(...) do { if (metaDebugFlag & DEBUG_FATAL) { taosPrintLog("MTA FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
#define metaError(...) do { if (metaDebugFlag & DEBUG_ERROR) { taosPrintLog("MTA ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
|
||||
#define metaWarn(...) do { if (metaDebugFlag & DEBUG_WARN) { taosPrintLog("MTA WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
|
||||
#define metaInfo(...) do { if (metaDebugFlag & DEBUG_INFO) { taosPrintLog("MTA ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define metaDebug(...) do { if (metaDebugFlag & DEBUG_DEBUG) { taosPrintLog("MTA ", DEBUG_DEBUG, metaDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define metaTrace(...) do { if (metaDebugFlag & DEBUG_TRACE) { taosPrintLog("MTA ", DEBUG_TRACE, metaDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define metaInfo(...) do { if (metaDebugFlag & DEBUG_INFO) { taosPrintLog("MTA INFO ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define metaDebug(...) do { if (metaDebugFlag & DEBUG_DEBUG) { taosPrintLog("MTA DEBUG ", DEBUG_DEBUG, metaDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define metaTrace(...) do { if (metaDebugFlag & DEBUG_TRACE) { taosPrintLog("MTA TRACE ", DEBUG_TRACE, metaDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
// metaOpen ==================
|
||||
|
|
|
@ -27,9 +27,9 @@ extern "C" {
|
|||
#define smaFatal(...) do { if (smaDebugFlag & DEBUG_FATAL) { taosPrintLog("SMA FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
#define smaError(...) do { if (smaDebugFlag & DEBUG_ERROR) { taosPrintLog("SMA ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
|
||||
#define smaWarn(...) do { if (smaDebugFlag & DEBUG_WARN) { taosPrintLog("SMA WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
|
||||
#define smaInfo(...) do { if (smaDebugFlag & DEBUG_INFO) { taosPrintLog("SMA ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define smaDebug(...) do { if (smaDebugFlag & DEBUG_DEBUG) { taosPrintLog("SMA ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define smaInfo(...) do { if (smaDebugFlag & DEBUG_INFO) { taosPrintLog("SMA INFO ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define smaDebug(...) do { if (smaDebugFlag & DEBUG_DEBUG) { taosPrintLog("SMA DEBUG ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA TRACE ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
#define RSMA_TASK_INFO_HASH_SLOT (8)
|
||||
|
|
|
@ -36,9 +36,9 @@ extern "C" {
|
|||
#define tqFatal(...) do { if (tqDebugFlag & DEBUG_FATAL) { taosPrintLog("TQ FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
#define tqError(...) do { if (tqDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
|
||||
#define tqWarn(...) do { if (tqDebugFlag & DEBUG_WARN) { taosPrintLog("TQ WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
|
||||
#define tqInfo(...) do { if (tqDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define tqDebug(...) do { if (tqDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqTrace(...) do { if (tqDebugFlag & DEBUG_TRACE) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqInfo(...) do { if (tqDebugFlag & DEBUG_INFO) { taosPrintLog("TQ INFO ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define tqDebug(...) do { if (tqDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ DEBUG ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tqTrace(...) do { if (tqDebugFlag & DEBUG_TRACE) { taosPrintLog("TQ TRACE ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
|
||||
|
|
|
@ -31,9 +31,9 @@ extern "C" {
|
|||
#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TSD FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TSD ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbWarn(...) do { if (tsdbDebugFlag & DEBUG_WARN) { taosPrintLog("TSD WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbInfo(...) do { if (tsdbDebugFlag & DEBUG_INFO) { taosPrintLog("TSD ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSD ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSD ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tsdbInfo(...) do { if (tsdbDebugFlag & DEBUG_INFO) { taosPrintLog("TSD INFO ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSD DEBUG ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSD TRACE ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
typedef struct TSDBROW TSDBROW;
|
||||
|
|
|
@ -28,16 +28,16 @@ extern "C" {
#define vFatal(...) do { if (vDebugFlag & DEBUG_FATAL) { taosPrintLog("VND FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
#define vError(...) do { if (vDebugFlag & DEBUG_ERROR) { taosPrintLog("VND ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
#define vWarn(...) do { if (vDebugFlag & DEBUG_WARN) { taosPrintLog("VND WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
#define vInfo(...) do { if (vDebugFlag & DEBUG_INFO) { taosPrintLog("VND ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
#define vDebug(...) do { if (vDebugFlag & DEBUG_DEBUG) { taosPrintLog("VND ", DEBUG_DEBUG, vDebugFlag, __VA_ARGS__); }} while(0)
#define vTrace(...) do { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND ", DEBUG_TRACE, vDebugFlag, __VA_ARGS__); }} while(0)
#define vInfo(...) do { if (vDebugFlag & DEBUG_INFO) { taosPrintLog("VND INFO ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
#define vDebug(...) do { if (vDebugFlag & DEBUG_DEBUG) { taosPrintLog("VND DEBUG ", DEBUG_DEBUG, vDebugFlag, __VA_ARGS__); }} while(0)
#define vTrace(...) do { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND TRACE ", DEBUG_TRACE, vDebugFlag, __VA_ARGS__); }} while(0)

#define vGTrace(param, ...) do { if (vDebugFlag & DEBUG_TRACE) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vTrace(param ",QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGFatal(param, ...) do { if (vDebugFlag & DEBUG_FATAL) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vFatal(param ",QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGError(param, ...) do { if (vDebugFlag & DEBUG_ERROR) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vError(param ",QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGWarn(param, ...) do { if (vDebugFlag & DEBUG_WARN) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vWarn(param ",QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGInfo(param, ...) do { if (vDebugFlag & DEBUG_INFO) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vInfo(param ",QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGDebug(param, ...) do { if (vDebugFlag & DEBUG_DEBUG) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vDebug(param ",QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGTrace(param, ...) do { if (vDebugFlag & DEBUG_TRACE) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vTrace(param ", QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGFatal(param, ...) do { if (vDebugFlag & DEBUG_FATAL) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vFatal(param ", QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGError(param, ...) do { if (vDebugFlag & DEBUG_ERROR) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vError(param ", QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGWarn(param, ...) do { if (vDebugFlag & DEBUG_WARN) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vWarn(param ", QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGInfo(param, ...) do { if (vDebugFlag & DEBUG_INFO) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vInfo(param ", QID:%s", __VA_ARGS__, buf);}} while(0)
#define vGDebug(param, ...) do { if (vDebugFlag & DEBUG_DEBUG) { char buf[40] = {0}; TRACE_TO_STR(trace, buf); vDebug(param ", QID:%s", __VA_ARGS__, buf);}} while(0)

// clang-format on
@ -1017,20 +1017,20 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, int64_t version, void *pReq, void *pMsg,
int32_t code = 0;
if ((code = atomic_load_32(&SMA_RSMA_STAT(pSma)->execStat))) {
smaError("vgId:%d, failed to process rsma submit since invalid exec code: %s", SMA_VID(pSma), tstrerror(code));
smaError("vgId:%d, failed to process rsma submit since invalid exec code:%s", SMA_VID(pSma), tstrerror(code));
goto _exit;
}

STbUidStore uidStore = {0};

if ((code = tdFetchSubmitReqSuids(pReq, &uidStore)) < 0) {
smaError("vgId:%d, failed to process rsma submit fetch suid since: %s", SMA_VID(pSma), tstrerror(code));
smaError("vgId:%d, failed to process rsma submit fetch suid since %s", SMA_VID(pSma), tstrerror(code));
goto _exit;
}

if (uidStore.suid != 0) {
if ((code = tdExecuteRSmaAsync(pSma, version, pMsg, len, STREAM_INPUT__DATA_SUBMIT, uidStore.suid)) < 0) {
smaError("vgId:%d, failed to process rsma submit exec 1 since: %s", SMA_VID(pSma), tstrerror(code));
smaError("vgId:%d, failed to process rsma submit exec 1 since %s", SMA_VID(pSma), tstrerror(code));
goto _exit;
}

@ -1054,13 +1054,13 @@ int32_t tdProcessRSmaDelete(SSma *pSma, int64_t version, void *pReq, void *pMsg,
int32_t code = 0;
if ((code = atomic_load_32(&SMA_RSMA_STAT(pSma)->execStat))) {
smaError("vgId:%d, failed to process rsma delete since invalid exec code: %s", SMA_VID(pSma), tstrerror(code));
smaError("vgId:%d, failed to process rsma delete since invalid exec code:%s", SMA_VID(pSma), tstrerror(code));
goto _exit;
}

SDeleteRes *pDelRes = pReq;
if ((code = tdExecuteRSmaAsync(pSma, version, pMsg, len, STREAM_INPUT__REF_DATA_BLOCK, pDelRes->suid)) < 0) {
smaError("vgId:%d, failed to process rsma submit exec 1 since: %s", SMA_VID(pSma), tstrerror(code));
smaError("vgId:%d, failed to process rsma submit exec 1 since %s", SMA_VID(pSma), tstrerror(code));
goto _exit;
}
_exit:

@ -1249,7 +1249,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
if (streamFlushed) {
pRSmaInfo->items[i].streamFlushed = 1;
if (++nStreamFlushed >= nTaskInfo) {
smaInfo("vgId:%d, rsma commit, checkpoint ready, %d us consumed, received/total: %d/%d", TD_VID(pVnode),
smaInfo("vgId:%d, rsma commit, checkpoint ready, %d us consumed, received/total:%d/%d", TD_VID(pVnode),
nSleep * 10, nStreamFlushed, nTaskInfo);
taosHashCancelIterate(pInfoHash, infoHash);
goto _checkpoint;

@ -1260,7 +1260,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
}
taosUsleep(10);
++nSleep;
smaDebug("vgId:%d, rsma commit, wait for checkpoint ready, %d us elapsed, received/total: %d/%d", TD_VID(pVnode),
smaDebug("vgId:%d, rsma commit, wait for checkpoint ready, %d us elapsed, received/total:%d/%d", TD_VID(pVnode),
nSleep * 10, nStreamFlushed, nTaskInfo);
}
} while (0);
@ -202,7 +202,7 @@ void tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) {
dataRsp.blockNum = 0;
char buf[TSDB_OFFSET_LEN] = {0};
(void)tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.reqOffset);
tqInfo("tqPushEmptyDataRsp to consumer:0x%" PRIx64 " vgId:%d, offset:%s,QID:0x%" PRIx64, req.consumerId, vgId, buf,
tqInfo("tqPushEmptyDataRsp to consumer:0x%" PRIx64 " vgId:%d, offset:%s, QID:0x%" PRIx64, req.consumerId, vgId, buf,
req.reqId);

code = tqSendDataRsp(pHandle, pHandle->msg, &req, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);

@ -225,7 +225,7 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq*
(void)tFormatOffset(buf1, TSDB_OFFSET_LEN, &(pRsp->reqOffset));
(void)tFormatOffset(buf2, TSDB_OFFSET_LEN, &(pRsp->rspOffset));

tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) start to send rsp, block num:%d, req:%s, rsp:%s,QID:0x%" PRIx64,
tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) start to send rsp, block num:%d, req:%s, rsp:%s, QID:0x%" PRIx64,
vgId, pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);

return tqDoSendDataRsp(&pMsg->info, pRsp, pReq->epoch, pReq->consumerId, type, sver, ever);

@ -477,7 +477,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
char buf[TSDB_OFFSET_LEN] = {0};
(void)tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s,QID:0x%" PRIx64,
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, QID:0x%" PRIx64,
consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId);

code = tqExtractDataForMq(pTq, pHandle, &req, pMsg);
@ -222,12 +222,12 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t
while (offset <= appliedVer) {
if (walFetchHead(pHandle->pWalReader, offset) < 0) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", (epoch %d) vgId:%d offset %" PRId64
", no more log to return,QID:0x%" PRIx64 " 0x%" PRIx64,
", no more log to return, QID:0x%" PRIx64 " 0x%" PRIx64,
pHandle->consumerId, pHandle->epoch, vgId, offset, reqId, id);
goto END;
}

tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s,QID:0x%" PRIx64 " 0x%" PRIx64,
tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type:%s, QID:0x%" PRIx64 " 0x%" PRIx64,
vgId, pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);

if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) {
@ -194,10 +194,10 @@ end:
char buf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.rspOffset);
if (code != 0){
tqError("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s,QID:0x%" PRIx64 " error msg:%s, line:%d",
tqError("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, QID:0x%" PRIx64 " error msg:%s, line:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, tstrerror(code), lino);
} else {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s,QID:0x%" PRIx64 " success",
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, QID:0x%" PRIx64 " success",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId);
}
@ -470,7 +470,7 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
}

// enqueue
tqDebug("s-task:%s (vgId:%d level:%d) recv retrieve req from task:0x%x(vgId:%d),QID:0x%" PRIx64, pTask->id.idStr,
tqDebug("s-task:%s (vgId:%d level:%d) recv retrieve req from task:0x%x(vgId:%d), QID:0x%" PRIx64, pTask->id.idStr,
pTask->pMeta->vgId, pTask->info.taskLevel, req.srcTaskId, req.srcNodeId, req.reqId);

// if task is in ck status, set current ck failed
@ -249,14 +249,14 @@ int32_t tsdbTFileObjRef(STFileObj *fobj) {
(void)taosThreadMutexLock(&fobj->mutex);

if (fobj->ref <= 0 || fobj->state != TSDB_FSTATE_LIVE) {
tsdbError("file %s, fobj:%p ref %d", fobj->fname, fobj, fobj->ref);
tsdbError("file %s, fobj:%p ref:%d", fobj->fname, fobj, fobj->ref);
(void)taosThreadMutexUnlock(&fobj->mutex);
return TSDB_CODE_FAILED;
}

nRef = ++fobj->ref;
(void)taosThreadMutexUnlock(&fobj->mutex);
tsdbTrace("ref file %s, fobj:%p ref %d", fobj->fname, fobj, nRef);
tsdbTrace("ref file %s, fobj:%p ref:%d", fobj->fname, fobj, nRef);
return 0;
}

@ -266,11 +266,11 @@ int32_t tsdbTFileObjUnref(STFileObj *fobj) {
(void)taosThreadMutexUnlock(&fobj->mutex);

if (nRef < 0) {
tsdbError("file %s, fobj:%p ref %d", fobj->fname, fobj, nRef);
tsdbError("file %s, fobj:%p ref:%d", fobj->fname, fobj, nRef);
return TSDB_CODE_FAILED;
}

tsdbTrace("unref file %s, fobj:%p ref %d", fobj->fname, fobj, nRef);
tsdbTrace("unref file %s, fobj:%p ref:%d", fobj->fname, fobj, nRef);
if (nRef == 0) {
if (fobj->state == TSDB_FSTATE_DEAD) {
tsdbRemoveFile(fobj->fname);

@ -335,14 +335,14 @@ static void tsdbTFileObjRemoveLC(STFileObj *fobj, bool remove_all) {
int32_t tsdbTFileObjRemove(STFileObj *fobj) {
(void)taosThreadMutexLock(&fobj->mutex);
if (fobj->state != TSDB_FSTATE_LIVE || fobj->ref <= 0) {
tsdbError("file %s, fobj:%p ref %d", fobj->fname, fobj, fobj->ref);
tsdbError("file %s, fobj:%p ref:%d", fobj->fname, fobj, fobj->ref);
(void)taosThreadMutexUnlock(&fobj->mutex);
return TSDB_CODE_FAILED;
}
fobj->state = TSDB_FSTATE_DEAD;
int32_t nRef = --fobj->ref;
(void)taosThreadMutexUnlock(&fobj->mutex);
tsdbTrace("remove unref file %s, fobj:%p ref %d", fobj->fname, fobj, nRef);
tsdbTrace("remove unref file %s, fobj:%p ref:%d", fobj->fname, fobj, nRef);
if (nRef == 0) {
tsdbTFileObjRemoveLC(fobj, true);
taosMemoryFree(fobj);

@ -355,14 +355,14 @@ int32_t tsdbTFileObjRemoveUpdateLC(STFileObj *fobj) {

if (fobj->state != TSDB_FSTATE_LIVE || fobj->ref <= 0) {
(void)taosThreadMutexUnlock(&fobj->mutex);
tsdbError("file %s, fobj:%p ref %d", fobj->fname, fobj, fobj->ref);
tsdbError("file %s, fobj:%p ref:%d", fobj->fname, fobj, fobj->ref);
return TSDB_CODE_FAILED;
}

fobj->state = TSDB_FSTATE_DEAD;
int32_t nRef = --fobj->ref;
(void)taosThreadMutexUnlock(&fobj->mutex);
tsdbTrace("remove unref file %s, fobj:%p ref %d", fobj->fname, fobj, nRef);
tsdbTrace("remove unref file %s, fobj:%p ref:%d", fobj->fname, fobj, nRef);
if (nRef == 0) {
tsdbTFileObjRemoveLC(fobj, false);
taosMemoryFree(fobj);
@ -230,7 +230,7 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC
if (IS_VAR_DATA_TYPE(pCols[i].type)) {
pSupInfo->buildBuf[i] = taosMemoryMalloc(pCols[i].bytes);
if (pSupInfo->buildBuf[i] == NULL) {
tsdbError("failed to prepare memory for set columnId slot list, size:%d, code: %s", pCols[i].bytes,
tsdbError("failed to prepare memory for set columnId slot list, size:%d, code:%s", pCols[i].bytes,
tstrerror(terrno));
}
TSDB_CHECK_NULL(pSupInfo->buildBuf[i], code, lino, _end, terrno);
@ -30,9 +30,9 @@ extern "C" {
#define azFatal(...) { if (azDebugFlag & DEBUG_FATAL) { taosPrintLog("AZR FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
#define azError(...) { if (azDebugFlag & DEBUG_ERROR) { taosPrintLog("AZR ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
#define azWarn(...) { if (azDebugFlag & DEBUG_WARN) { taosPrintLog("AZR WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
#define azInfo(...) { if (azDebugFlag & DEBUG_INFO) { taosPrintLog("AZR ", DEBUG_INFO, 255, __VA_ARGS__); }}
#define azDebug(...) { if (azDebugFlag & DEBUG_DEBUG) { taosPrintLog("AZR ", DEBUG_DEBUG, azDebugFlag, __VA_ARGS__); }}
#define azTrace(...) { if (azDebugFlag & DEBUG_TRACE) { taosPrintLog("AZR ", DEBUG_TRACE, azDebugFlag, __VA_ARGS__); }}
#define azInfo(...) { if (azDebugFlag & DEBUG_INFO) { taosPrintLog("AZR INFO ", DEBUG_INFO, 255, __VA_ARGS__); }}
#define azDebug(...) { if (azDebugFlag & DEBUG_DEBUG) { taosPrintLog("AZR DEBUG ", DEBUG_DEBUG, azDebugFlag, __VA_ARGS__); }}
#define azTrace(...) { if (azDebugFlag & DEBUG_TRACE) { taosPrintLog("AZR TRACE ", DEBUG_TRACE, azDebugFlag, __VA_ARGS__); }}
// clang-format on

#ifdef __cplusplus
@ -827,19 +827,19 @@ typedef struct SCtgCacheItemInfo {
#define CTG_CACHE_OVERFLOW(_csize, _maxsize) ((_maxsize >= 0) ? ((_csize) >= (_maxsize)*1048576L * 0.9) : false)
#define CTG_CACHE_LOW(_csize, _maxsize) ((_maxsize >= 0) ? ((_csize) <= (_maxsize)*1048576L * 0.75) : true)

#define ctgFatal(param, ...) qFatal("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgError(param, ...) qError("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgWarn(param, ...) qWarn("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgInfo(param, ...) qInfo("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgDebug(param, ...) qDebug("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgTrace(param, ...) qTrace("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgFatal(param, ...) qFatal("CTG:%p, " param, pCtg, __VA_ARGS__)
#define ctgError(param, ...) qError("CTG:%p, " param, pCtg, __VA_ARGS__)
#define ctgWarn(param, ...) qWarn ("CTG:%p, " param, pCtg, __VA_ARGS__)
#define ctgInfo(param, ...) qInfo ("CTG:%p, " param, pCtg, __VA_ARGS__)
#define ctgDebug(param, ...) qDebug("CTG:%p, " param, pCtg, __VA_ARGS__)
#define ctgTrace(param, ...) qTrace("CTG:%p, " param, pCtg, __VA_ARGS__)

#define ctgTaskFatal(param, ...) qFatal("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskError(param, ...) qError("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskWarn(param, ...) qWarn("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskInfo(param, ...) qInfo("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskDebug(param, ...) qDebug("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskTrace(param, ...) qTrace("QID:%" PRIx64 " CTG:%p " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskFatal(param, ...) qFatal("QID:%" PRIx64 ", CTG:%p, " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskError(param, ...) qError("QID:%" PRIx64 ", CTG:%p, " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskWarn(param, ...) qWarn ("QID:%" PRIx64 ", CTG:%p, " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskInfo(param, ...) qInfo ("QID:%" PRIx64 ", CTG:%p, " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskDebug(param, ...) qDebug("QID:%" PRIx64 ", CTG:%p, " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)
#define ctgTaskTrace(param, ...) qTrace("QID:%" PRIx64 ", CTG:%p, " param, pTask->pJob->queryId, pCtg, __VA_ARGS__)

#define CTG_LOCK_DEBUG(...) \
do { \
@ -85,7 +85,7 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
code = ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, &DbOut, NULL);
if (code) {
if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) {
ctgDebug("db no longer exist, dbFName:%s, dbId:0x%" PRIx64, input.db, input.dbId);
ctgDebug("db:%s, db no longer exist, dbId:0x%" PRIx64, input.db, input.dbId);
CTG_ERR_RET(ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId));
}

@ -114,11 +114,11 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx*
}

if (CTG_FLAG_IS_SYS_DB(ctx->flag)) {
ctgDebug("will refresh tbmeta, supposed in information_schema, tbName:%s", tNameGetTableName(ctx->pName));
ctgDebug("tb:%s, will refresh tbmeta, supposed in information_schema", tNameGetTableName(ctx->pName));

CTG_ERR_JRET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, (char*)ctx->pName->dbname, (char*)ctx->pName->tname, output, NULL));
} else if (CTG_FLAG_IS_STB(ctx->flag)) {
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s", tNameGetTableName(ctx->pName));
ctgDebug("tb:%s, will refresh tbmeta, supposed to be stb", tNameGetTableName(ctx->pName));

// if get from mnode failed, will not try vnode
CTG_ERR_JRET(ctgGetTbMetaFromMnode(pCtg, pConn, ctx->pName, output, NULL));

@ -127,13 +127,13 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx*
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, ctx->pName, &vgroupInfo, output, NULL));
}
} else {
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
ctgDebug("tb:%s, will refresh tbmeta, not supposed to be stb, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);

// if get from vnode failed or no table meta, will not try mnode
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, ctx->pName, &vgroupInfo, output, NULL));

if (CTG_IS_META_TABLE(output->metaType) && TSDB_SUPER_TABLE == output->tbMeta->tableType) {
ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(ctx->pName));
ctgDebug("tb:%s, will continue to refresh tbmeta since got stb", tNameGetTableName(ctx->pName));

taosMemoryFreeClear(output->tbMeta);

@ -163,16 +163,15 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx*
}

if (CTG_IS_META_NULL(output->metaType)) {
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
ctgError("tb:%s, no tbmeta got", tNameGetTableName(ctx->pName));
CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false));
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}

if (CTG_IS_META_TABLE(output->metaType)) {
ctgDebug("tbmeta got, dbFName:%s, tbName:%s, tbType:%d", output->dbFName, output->tbName,
output->tbMeta->tableType);
ctgDebug("tb:%s, tbmeta got, db:%s, tbType:%d", output->tbName, output->dbFName, output->tbMeta->tableType);
} else {
ctgDebug("tbmeta got, dbFName:%s, tbName:%s, tbType:%d, stbMetaGot:%d", output->dbFName, output->ctbName,
ctgDebug("tb:%s, tbmeta got, db:%s, tbType:%d, stbMetaGot:%d", output->ctbName, output->dbFName,
output->ctbMeta.tableType, CTG_IS_META_BOTH(output->metaType));
}

@ -238,7 +237,7 @@ int32_t ctgGetTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx

CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, pTableMeta));
if (NULL == *pTableMeta) {
ctgDebug("stb no longer exist, dbFName:%s, tbName:%s", output->dbFName, ctx->pName->tname);
ctgDebug("tb:%s, stb no longer exist, db:%s", ctx->pName->tname, output->dbFName);
continue;
}

@ -267,7 +266,7 @@ _return:
taosMemoryFreeClear(output);

if (*pTableMeta) {
ctgDebug("tbmeta returned, tbName:%s, tbType:%d", ctx->pName->tname, (*pTableMeta)->tableType);
ctgDebug("tb:%s, tbmeta returned, tbType:%d", ctx->pName->tname, (*pTableMeta)->tableType);
ctgdShowTableMeta(pCtg, ctx->pName->tname, *pTableMeta);
}
@ -508,7 +507,7 @@ int32_t ctgGetTbDistVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, SName* pTabl
CTG_ERR_JRET(ctgGenerateVgList(pCtg, vgHash, pVgList, db));
} else {
// USE HASH METHOD INSTEAD OF VGID IN TBMETA
ctgError("invalid method to get none stb vgInfo, tbType:%d", tbMeta->tableType);
ctgError("tb:%s, invalid method to get none stb vgInfo, tbType:%d", pTableName->tname, tbMeta->tableType);
CTG_ERR_JRET(TSDB_CODE_CTG_INVALID_INPUT);

#if 0

@ -557,7 +556,7 @@ _return:

int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists) {
if (IS_SYS_DBNAME(pTableName->dbname)) {
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
ctgError("db:%s, no valid vgInfo for db", pTableName->dbname);
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}

@ -570,7 +569,7 @@ int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName*
CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo, exists));

if (exists && false == *exists) {
ctgDebug("db %s vgInfo not in cache", pTableName->dbname);
ctgDebug("db:%s, vgInfo not in cache", pTableName->dbname);
return TSDB_CODE_SUCCESS;
}

@ -592,7 +591,7 @@ _return:

int32_t ctgGetTbsHashVgId(SCatalog* pCtg, SRequestConnInfo* pConn, int32_t acctId, const char* pDb, const char* pTbs[], int32_t tbNum, int32_t* vgId) {
if (IS_SYS_DBNAME(pDb)) {
ctgError("no valid vgInfo for db, dbname:%s", pDb);
ctgError("db:%s, no valid vgInfo for db", pDb);
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}

@ -798,7 +797,7 @@ _return:

int32_t catalogInit(SCatalogCfg* cfg) {
qDebug("catalogInit start");
qInfo("catalog init");
if (gCtgMgmt.pCluster) {
qError("catalog already initialized");
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);

@ -890,7 +889,7 @@ int32_t catalogInit(SCatalogCfg* cfg) {

CTG_ERR_RET(ctgStartUpdateThread());

qDebug("catalog initialized, maxDb:%u, maxTbl:%u, dbRentSec:%u, stbRentSec:%u", gCtgMgmt.cfg.maxDBCacheNum,
qInfo("catalog initialized, maxDb:%u, maxTbl:%u, dbRentSec:%u, stbRentSec:%u", gCtgMgmt.cfg.maxDBCacheNum,
gCtgMgmt.cfg.maxTblCacheNum, gCtgMgmt.cfg.dbRentSec, gCtgMgmt.cfg.stbRentSec);

return TSDB_CODE_SUCCESS;

@ -917,7 +916,7 @@ int32_t catalogGetHandle(int64_t clusterId, SCatalog** catalogHandle) {
if (ctg && (*ctg)) {
*catalogHandle = *ctg;
CTG_STAT_HIT_INC(CTG_CI_CLUSTER, 1);
qDebug("got catalog handle from cache, clusterId:0x%" PRIx64 ", CTG:%p", clusterId, *ctg);
qDebug("CTG:%p, get catalog handle from cache, clusterId:0x%" PRIx64, *ctg, clusterId);
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}

@ -961,7 +960,7 @@ int32_t catalogGetHandle(int64_t clusterId, SCatalog** catalogHandle) {
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

qDebug("add CTG to cache, clusterId:0x%" PRIx64 ", CTG:%p", clusterId, clusterCtg);
qDebug("CTG:%p, add CTG to cache, clusterId:0x%" PRIx64, clusterCtg, clusterId);

break;
}

@ -1001,7 +1000,7 @@ int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* vers

ctgReleaseVgInfoToCache(pCtg, dbCache);

ctgDebug("Got db vgVersion from cache, dbFName:%s, vgVersion:%d", dbFName, *version);
ctgDebug("db:%s, get db vgVersion from cache, vgVersion:%d", dbFName, *version);

CTG_API_LEAVE(TSDB_CODE_SUCCESS);
@ -1069,7 +1068,7 @@ int32_t catalogGetDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char*
pInfo->hashMethod = dbInfo->hashMethod;
pInfo->vgNum = taosHashGetSize(dbInfo->vgHash);
if (pInfo->vgNum <= 0) {
ctgError("invalid vgNum %d in db %s's vgHash", pInfo->vgNum, dbFName);
ctgError("db:%s, invalid vgNum %d in db vgHash", dbFName, pInfo->vgNum);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -1529,13 +1528,13 @@ _return:
if (pJob) {
int32_t code2 = taosReleaseRef(gCtgMgmt.jobPool, pJob->refId);
if (TSDB_CODE_SUCCESS) {
qError("release catalog job refId %" PRId64 "falied, error:%s", pJob->refId, tstrerror(code2));
qError("release catalog job refId:%" PRId64 " falied, error:%s", pJob->refId, tstrerror(code2));
}

if (code) {
code2 = taosRemoveRef(gCtgMgmt.jobPool, pJob->refId);
if (TSDB_CODE_SUCCESS) {
qError("remove catalog job refId %" PRId64 "falied, error:%s", pJob->refId, tstrerror(code2));
qError("remove catalog job refId:%" PRId64 " falied, error:%s", pJob->refId, tstrerror(code2));
}
}
}

@ -1827,8 +1826,8 @@ int32_t catalogUpdateDynViewVer(SCatalog* pCtg, SDynViewVersion* pVer) {
atomic_store_64(&pCtg->dynViewVer.svrBootTs, pVer->svrBootTs);
atomic_store_64(&pCtg->dynViewVer.dynViewVer, pVer->dynViewVer);

ctgDebug("cluster %" PRIx64 " svrBootTs updated to %" PRId64, pCtg->clusterId, pVer->svrBootTs);
ctgDebug("cluster %" PRIx64 " dynViewVer updated to %" PRId64, pCtg->clusterId, pVer->dynViewVer);
ctgDebug("clusterId:0x%" PRIx64 ", svrBootTs updated to %" PRId64, pCtg->clusterId, pVer->svrBootTs);
ctgDebug("clusterId:0x%" PRIx64 ", dynViewVer updated to %" PRId64, pCtg->clusterId, pVer->dynViewVer);

CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}

@ -1973,7 +1972,7 @@ int32_t catalogClearCache(void) {

int32_t code = ctgClearCacheEnqueue(NULL, false, false, false, true);

qInfo("clear catalog cache end, code: %s", tstrerror(code));
qInfo("clear catalog cache end, code:%s", tstrerror(code));

CTG_API_LEAVE_NOLOCK(code);
}
@ -77,7 +77,7 @@ int32_t ctgInitGetTbMetaTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name->tname);

return TSDB_CODE_SUCCESS;

@ -99,7 +99,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
ctx->pNames = param;
ctx->pResList = taosArrayInit(pJob->tbMetaNum, sizeof(SMetaRes));
if (NULL == ctx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbMetaNum,
qError("QID:0x%" PRIx64 ", taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbMetaNum,
(int32_t)sizeof(SMetaRes));
ctgFreeTask(&task, true);
CTG_ERR_RET(terrno);

@ -110,7 +110,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum);

return TSDB_CODE_SUCCESS;

@ -138,7 +138,7 @@ int32_t ctgInitGetDbVgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), dbFName);

return TSDB_CODE_SUCCESS;

@ -166,7 +166,7 @@ int32_t ctgInitGetDbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), dbFName);

return TSDB_CODE_SUCCESS;

@ -194,7 +194,7 @@ int32_t ctgInitGetDbInfoTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), dbFName);

return TSDB_CODE_SUCCESS;

@ -228,7 +228,7 @@ int32_t ctgInitGetTbHashTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tableName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, tableName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name->tname);

return TSDB_CODE_SUCCESS;
@ -250,7 +250,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
ctx->pNames = param;
ctx->pResList = taosArrayInit(pJob->tbHashNum, sizeof(SMetaRes));
if (NULL == ctx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbHashNum,
qError("QID:0x%" PRIx64 ", taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbHashNum,
(int32_t)sizeof(SMetaRes));
ctgFreeTask(&task, true);
CTG_ERR_RET(terrno);

@ -261,7 +261,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum);

return TSDB_CODE_SUCCESS;

@ -280,7 +280,7 @@ int32_t ctgInitGetQnodeTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));

return TSDB_CODE_SUCCESS;
}

@ -298,7 +298,7 @@ int32_t ctgInitGetDnodeTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));

return TSDB_CODE_SUCCESS;
}

@ -325,7 +325,7 @@ int32_t ctgInitGetIndexTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, indexFName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, indexFName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name);

return TSDB_CODE_SUCCESS;

@ -353,7 +353,7 @@ int32_t ctgInitGetUdfTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, udfName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, udfName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name);

return TSDB_CODE_SUCCESS;

@ -381,7 +381,7 @@ int32_t ctgInitGetUserTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, user:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, user:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), user->user);

return TSDB_CODE_SUCCESS;
@ -399,7 +399,7 @@ int32_t ctgInitGetSvrVerTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));

return TSDB_CODE_SUCCESS;
}

@ -431,7 +431,7 @@ int32_t ctgInitGetTbIndexTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name->tname);

return TSDB_CODE_SUCCESS;

@ -464,7 +464,7 @@ int32_t ctgInitGetTbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name->tname);

return TSDB_CODE_SUCCESS;

@ -497,7 +497,7 @@ int32_t ctgInitGetTbTagTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), name->tname);

return TSDB_CODE_SUCCESS;

@ -520,7 +520,7 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
ctx->forceFetch = p->forceFetch;
ctx->pResList = taosArrayInit(pJob->viewNum, sizeof(SMetaRes));
if (NULL == ctx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum,
qError("QID:0x%" PRIx64 ", taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum,
(int32_t)sizeof(SMetaRes));
ctgFreeTask(&task, true);
CTG_ERR_RET(terrno);

@ -531,7 +531,7 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
CTG_ERR_RET(terrno);
}

qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, viewNum:%d", pJob->queryId, taskIdx,
qDebug("QID:0x%" PRIx64 ", the %dth task type %s initialized, dbNum:%lu, viewNum:%d", pJob->queryId, taskIdx,
ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->viewNum);

return TSDB_CODE_SUCCESS;

@ -552,7 +552,7 @@ int32_t ctgInitGetTbTSMATask(SCtgJob* pJob, int32_t taskId, void* param) {
pTaskCtx->pNames = param;
pTaskCtx->pResList = taosArrayInit(pJob->tbTsmaNum, sizeof(SMetaRes));
if (NULL == pTaskCtx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbTsmaNum,
qError("QID:0x%" PRIx64 ", taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbTsmaNum,
(int32_t)sizeof(SMetaRes));
ctgFreeTask(&task, true);
CTG_ERR_RET(terrno);
@ -580,7 +580,7 @@ int32_t ctgInitGetTSMATask(SCtgJob* pJob, int32_t taskId, void* param) {
pTaskCtx->pNames = param;
pTaskCtx->pResList = taosArrayInit(pJob->tsmaNum, sizeof(SMetaRes));
if (NULL == pTaskCtx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tsmaNum,
qError("QID:0x%" PRIx64 ", taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tsmaNum,
(int32_t)sizeof(SMetaRes));
ctgFreeTask(&task, true);
CTG_ERR_RET(terrno);

@ -609,7 +609,7 @@ static int32_t ctgInitGetTbNamesTask(SCtgJob* pJob, int32_t taskId, void* param)
pTaskCtx->pNames = param;
pTaskCtx->pResList = taosArrayInit(pJob->tbNameNum, sizeof(SMetaRes));
if (NULL == pTaskCtx->pResList) {
qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbNameNum,
qError("QID:0x%" PRIx64 ", taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbNameNum,
(int32_t)sizeof(SMetaRes));
ctgFreeTask(&task, true);
CTG_ERR_RET(terrno);

@ -873,7 +873,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const

*job = taosMemoryCalloc(1, sizeof(SCtgJob));
if (NULL == *job) {
ctgError("failed to calloc, size:%d,QID:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId);
ctgError("failed to calloc, size:%d, QID:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId);
CTG_ERR_RET(terrno);
}

@ -1053,18 +1053,18 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const

pJob->refId = taosAddRef(gCtgMgmt.jobPool, pJob);
if (pJob->refId < 0) {
ctgError("add job to ref failed, error: %s", tstrerror(terrno));
ctgError("add job to ref failed, error:%s", tstrerror(terrno));
CTG_ERR_JRET(terrno);
}

void* p = taosAcquireRef(gCtgMgmt.jobPool, pJob->refId);
if (NULL == p) {
ctgError("acquire job from ref failed, refId:%" PRId64 ", error: %s", pJob->refId, tstrerror(terrno));
ctgError("acquire job from ref failed, refId:%" PRId64 ", error:%s", pJob->refId, tstrerror(terrno));
CTG_ERR_JRET(terrno);
}

double el = (taosGetTimestampUs() - st) / 1000.0;
qDebug("QID:0x%" PRIx64 ", jobId: 0x%" PRIx64 " initialized, task num %d, forceUpdate %d, elapsed time:%.2f ms",
qDebug("QID:0x%" PRIx64 ", jobId:0x%" PRIx64 " initialized, task num:%d, forceUpdate:%d, elapsed time:%.2f ms",
pJob->queryId, pJob->refId, taskNum, pReq->forceUpdate, el);
return TSDB_CODE_SUCCESS;

@ -1478,16 +1478,16 @@ _return:
int32_t ctgCallUserCb(void* param) {
SCtgJob* pJob = (SCtgJob*)param;

qDebug("QID:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
qDebug("QID:0x%" PRIx64 ", ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));

(*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->jobResCode);

qDebug("QID:0x%" PRIx64 " ctg end to call user cb", pJob->queryId);
qDebug("QID:0x%" PRIx64 ", ctg end to call user cb", pJob->queryId);

int64_t refId = pJob->refId;
int32_t code = taosRemoveRef(gCtgMgmt.jobPool, refId);
if (code) {
qError("QID:0x%" PRIx64 " remove ctg job %" PRId64 " from jobPool failed, error:%s", pJob->queryId, refId,
qError("QID:0x%" PRIx64 ", remove ctg job %" PRId64 " from jobPool failed, error:%s", pJob->queryId, refId,
tstrerror(code));
}
@ -1498,7 +1498,7 @@ void ctgUpdateJobErrCode(SCtgJob* pJob, int32_t errCode) {
if (!NEED_CLIENT_REFRESH_VG_ERROR(errCode) || errCode == TSDB_CODE_SUCCESS) return;

atomic_store_32(&pJob->jobResCode, errCode);
qDebug("QID:0x%" PRIx64 " ctg job errCode updated to %s", pJob->queryId, tstrerror(errCode));
qDebug("QID:0x%" PRIx64 ", ctg job errCode updated to %s", pJob->queryId, tstrerror(errCode));
return;
}

@ -1510,7 +1510,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {
return TSDB_CODE_SUCCESS;
}

qDebug("QID:0x%" PRIx64 " task %d end with res %s", pJob->queryId, pTask->taskId, tstrerror(rspCode));
qDebug("QID:0x%" PRIx64 ", task:%d end with result:%s", pJob->queryId, pTask->taskId, tstrerror(rspCode));

pTask->code = rspCode;
pTask->status = CTG_TASK_DONE;

@ -1519,7 +1519,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {

int32_t taskDone = atomic_add_fetch_32(&pJob->taskDone, 1);
if (taskDone < taosArrayGetSize(pJob->pTasks)) {
qDebug("QID:0x%" PRIx64 " task done: %d, total: %d", pJob->queryId, taskDone,
qDebug("QID:0x%" PRIx64 ", task done:%d, total:%d", pJob->queryId, taskDone,
(int32_t)taosArrayGetSize(pJob->pTasks));

ctgUpdateJobErrCode(pJob, rspCode);

@ -2769,7 +2769,7 @@ _return:
if (TSDB_CODE_MND_SMA_NOT_EXIST == code) {
code = TSDB_CODE_SUCCESS;
} else {
ctgTaskError("Get tsma for %d.%s.%s failed with err: %s", pName->acctId, pName->dbname, pName->tname,
ctgTaskError("get tsma for %d.%s.%s failed with err:%s", pName->acctId, pName->dbname, pName->tname,
tstrerror(code));
}
}

@ -2884,7 +2884,7 @@ int32_t ctgHandleGetTbTSMARsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
pTsmaInfo->delayDuration = TMAX(pRsp->progressDelay, pTsmaInfo->delayDuration);
pTsmaInfo->fillHistoryFinished = pTsmaInfo->fillHistoryFinished && pRsp->fillHisFinished;

qDebug("received stream progress for tsma %s rsp history: %d vnode: %d, delay: %" PRId64, pTsmaInfo->name,
qDebug("received stream progress for tsma %s rsp history:%d vnode:%d, delay:%" PRId64, pTsmaInfo->name,
pRsp->fillHisFinished, pRsp->subFetchIdx, pRsp->progressDelay);

if (atomic_add_fetch_32(&pFetch->finishedSubFetchNum, 1) == pFetch->subFetchNum) {

@ -2937,7 +2937,7 @@ int32_t ctgHandleGetTbTSMARsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
pFetch->tsmaSourceTbName = *pTbName;

if (CTG_IS_META_NULL(pOut->metaType)) {
ctgTaskError("no tbmeta found when fetching tsma source tb meta: %s.%s", pTbName->dbname, pTbName->tname);
ctgTaskError("no tbmeta found when fetching tsma source tb meta:%s.%s", pTbName->dbname, pTbName->tname);
(void)ctgRemoveTbMetaFromCache(pCtg, pTbName, false); // ignore cache error
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}

@ -2979,7 +2979,7 @@ _return:
if (TSDB_CODE_MND_SMA_NOT_EXIST == code) {
code = TSDB_CODE_SUCCESS;
} else {
ctgTaskError("Get tsma for %d.%s.%s faield with err: %s", pTbName->acctId, pTbName->dbname, pTbName->tname,
ctgTaskError("get tsma for %d.%s.%s faield with err:%s", pTbName->acctId, pTbName->dbname, pTbName->tname,
tstrerror(code));
}
}
@ -3104,7 +3104,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask* pTask) {
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}

ctgDebug("start to check tb metas in db %s, tbNum %ld", pReq->dbFName, taosArrayGetSize(pReq->pTables));
ctgDebug("start to check tb metas in db:%s, tbNum:%d", pReq->dbFName, (int32_t)taosArrayGetSize(pReq->pTables));
CTG_ERR_RET(ctgGetTbMetasFromCache(pCtg, pConn, pCtx, i, &fetchIdx, baseResIdx, pReq->pTables));
baseResIdx += taosArrayGetSize(pReq->pTables);
}

@ -3690,7 +3690,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask* pTask) {
if (inCache) {
pTask->res = rsp.pRawRes;

ctgTaskDebug("Final res got, pass:[%d,%d], pCond:[%p,%p]", rsp.pRawRes->pass[0], rsp.pRawRes->pass[1],
ctgTaskDebug("final res got, pass:[%d,%d], pCond:[%p,%p]", rsp.pRawRes->pass[0], rsp.pRawRes->pass[1],
rsp.pRawRes->pCond[0], rsp.pRawRes->pCond[1]);

CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));

@ -4015,7 +4015,7 @@ static int32_t ctgLaunchGetTbNamesTask(SCtgTask* pTask) {
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}

ctgDebug("start to check tbname metas in db %s, tbNum %ld", pReq->dbFName, taosArrayGetSize(pReq->pTables));
ctgDebug("start to check tbname metas in db:%s, tbNum:%d", pReq->dbFName, (int32_t)taosArrayGetSize(pReq->pTables));
CTG_ERR_RET(ctgGetTbNamesFromCache(pCtg, pConn, pCtx, i, &fetchIdx, baseResIdx, pReq->pTables));
baseResIdx += taosArrayGetSize(pReq->pTables);
}

@ -4374,7 +4374,7 @@ int32_t ctgLaunchJob(SCtgJob* pJob) {
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
qDebug("QID:0x%" PRIx64 ", ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));

pTask = taosArrayGet(pJob->pTasks, i);

@ -4387,7 +4387,7 @@ int32_t ctgLaunchJob(SCtgJob* pJob) {
}

if (taskNum <= 0) {
qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
qDebug("QID:0x%" PRIx64 ", ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));

CTG_ERR_RET(taosAsyncExec(ctgCallUserCb, pJob, NULL));
#if CTG_BATCH_FETCH
@ -67,7 +67,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
if (dbCache->deleted) {
CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);

ctgDebug("db is dropping, dbId:0x%" PRIx64, dbCache->dbId);
ctgDebug("dbId:0x%" PRIx64 ", db is dropping", dbCache->dbId);

*inCache = false;
return TSDB_CODE_SUCCESS;

@ -77,7 +77,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);

*inCache = false;
ctgDebug("db vgInfo is empty, dbId:0x%" PRIx64, dbCache->dbId);
ctgDebug("dbId:0x%" PRIx64 ", db vgInfo is empty", dbCache->dbId);
return TSDB_CODE_SUCCESS;
}

@ -90,7 +90,7 @@ int32_t ctgWLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) {
CTG_LOCK(CTG_WRITE, &dbCache->vgCache.vgLock);

if (dbCache->deleted) {
ctgDebug("db is dropping, dbId:0x%" PRIx64, dbCache->dbId);
ctgDebug("dbId:0x%" PRIx64 ", db is dropping", dbCache->dbId);
CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}

@ -129,7 +129,7 @@ int32_t ctgAcquireDBCacheImpl(SCatalog *pCtg, const char *dbFName, SCtgDBCache *
if (NULL == dbCache) {
*pCache = NULL;
CTG_CACHE_NHIT_INC(CTG_CI_DB, 1);
ctgDebug("db not in cache, dbFName:%s", dbFName);
ctgTrace("db:%s, db not in cache", dbFName);
return TSDB_CODE_SUCCESS;
}

@ -144,7 +144,7 @@ int32_t ctgAcquireDBCacheImpl(SCatalog *pCtg, const char *dbFName, SCtgDBCache *

*pCache = NULL;
CTG_CACHE_NHIT_INC(CTG_CI_DB, 1);
ctgDebug("db is removing from cache, dbFName:%s", dbFName);
ctgDebug("db:%s, db is removing from cache", dbFName);
return TSDB_CODE_SUCCESS;
}

@ -229,14 +229,14 @@ int32_t ctgAcquireVgInfoFromCache(SCatalog *pCtg, const char *dbFName, SCtgDBCac
SCtgDBCache *dbCache = NULL;
CTG_ERR_JRET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
ctgTrace("db:%s, db not in cache", dbFName);
goto _return;
}

bool inCache = false;
CTG_ERR_JRET(ctgRLockVgInfo(pCtg, dbCache, &inCache));
if (!inCache) {
ctgDebug("vgInfo of db %s not in cache", dbFName);
ctgDebug("db:%s, vgInfo not in cache", dbFName);
goto _return;
}

@ -244,7 +244,7 @@ int32_t ctgAcquireVgInfoFromCache(SCatalog *pCtg, const char *dbFName, SCtgDBCac

CTG_CACHE_HIT_INC(CTG_CI_DB_VGROUP, 1);

ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
ctgDebug("db:%s, get db vgInfo from cache", dbFName);

return TSDB_CODE_SUCCESS;
@ -268,26 +268,26 @@ int32_t ctgAcquireTbMetaFromCache(SCatalog *pCtg, const char *dbFName, const cha

CTG_ERR_JRET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
ctgDebug("tb:%s, db not in cache, db:%s", tbName, dbFName);
goto _return;
}

pCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, tb not in cache, db:%s", tbName, dbFName);
goto _return;
}

CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("tb %s meta not in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, meta not in cache, db:%s", tbName, dbFName);
goto _return;
}

*pDb = dbCache;
*pTb = pCache;

ctgDebug("tb %s meta got in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, meta get from cache, db:%s", tbName, dbFName);

CTG_META_HIT_INC(pCache->pMeta->tableType);

@ -311,14 +311,14 @@ int32_t ctgAcquireVgMetaFromCache(SCatalog *pCtg, const char *dbFName, const cha

CTG_ERR_JRET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
ctgDebug("tb:%s, db not in cache, db:%s", tbName, dbFName);
CTG_CACHE_NHIT_INC(CTG_CI_DB_VGROUP, 1);
goto _return;
}

CTG_ERR_JRET(ctgRLockVgInfo(pCtg, dbCache, &vgInCache));
if (!vgInCache) {
ctgDebug("vgInfo of db %s not in cache", dbFName);
ctgDebug("tb:%s, vgInfo of db not in cache, db:%s", tbName, dbFName);
CTG_CACHE_NHIT_INC(CTG_CI_DB_VGROUP, 1);
goto _return;
}

@ -327,25 +327,25 @@ int32_t ctgAcquireVgMetaFromCache(SCatalog *pCtg, const char *dbFName, const cha

CTG_CACHE_HIT_INC(CTG_CI_DB_VGROUP, 1);

ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
ctgDebug("tb:%s, get db vgInfo from cache, db:%s", tbName, dbFName);

tbCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == tbCache) {
ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, not in cache, db:%s", tbName, dbFName);
CTG_META_NHIT_INC();
goto _return;
}

CTG_LOCK(CTG_READ, &tbCache->metaLock);
if (NULL == tbCache->pMeta) {
ctgDebug("tb %s meta not in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, meta not in cache, db:%s", tbName, dbFName);
CTG_META_NHIT_INC();
goto _return;
}

*pTb = tbCache;

ctgDebug("tb %s meta got in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, meta get from cache, db:%s", tbName, dbFName);

CTG_META_HIT_INC(tbCache->pMeta->tableType);
@ -378,19 +378,19 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog *pCtg, char *dbFName, uint64_t suid,
SCtgTbCache *pCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
ctgTrace("db:%s, db not in cache", dbFName);
goto _return;
}

char *stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb:0x%" PRIx64 ", not in cache, db:%s", suid, dbFName);
goto _return;
}

pCache = taosHashAcquire(dbCache->tbCache, stName, strlen(stName));
if (NULL == pCache) {
ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", suid, stName, dbFName);
ctgDebug("stb:0x%" PRIx64 ", name %s not in cache, db:%s", suid, stName, dbFName);
taosHashRelease(dbCache->stbCache, stName);
goto _return;
}

@ -399,14 +399,14 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog *pCtg, char *dbFName, uint64_t suid,

CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb:0x%" PRIx64 ", meta not in cache, db:%s", suid, dbFName);
goto _return;
}

*pDb = dbCache;
*pTb = pCache;

ctgDebug("stb 0x%" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb:0x%" PRIx64 ", meta got in cache, db:%s", suid, dbFName);

CTG_META_HIT_INC(pCache->pMeta->tableType, 1);

@ -430,13 +430,13 @@ int32_t ctgAcquireStbMetaFromCache(SCtgDBCache *dbCache, SCatalog *pCtg, char *d
SCtgTbCache *pCache = NULL;
char *stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb:0x%" PRIx64 ", not in cache, db:%s", suid, dbFName);
goto _return;
}

pCache = taosHashAcquire(dbCache->tbCache, stName, strlen(stName));
if (NULL == pCache) {
ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", suid, stName, dbFName);
ctgDebug("stb:0x%" PRIx64 ", name %s not in cache, db:%s", suid, stName, dbFName);
taosHashRelease(dbCache->stbCache, stName);
goto _return;
}

@ -445,13 +445,13 @@ int32_t ctgAcquireStbMetaFromCache(SCtgDBCache *dbCache, SCatalog *pCtg, char *d

CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb:0x%" PRIx64 ", meta not in cache, db:%s", suid, dbFName);
goto _return;
}

*pTb = pCache;

ctgDebug("stb 0x%" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb:0x%" PRIx64 ", meta get from cache, db:%s", suid, dbFName);

CTG_META_HIT_INC(pCache->pMeta->tableType);
@ -475,27 +475,27 @@ int32_t ctgAcquireTbIndexFromCache(SCatalog *pCtg, char *dbFName, char *tbName,

CTG_ERR_JRET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
ctgDebug("tb:%s, db not in cache, db:%s", tbName, dbFName);
goto _return;
}

int32_t sz = 0;
pCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, tb not in cache, db:%s", tbName, dbFName);
goto _return;
}

CTG_LOCK(CTG_READ, &pCache->indexLock);
if (NULL == pCache->pIndex) {
ctgDebug("tb %s index not in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, index not in cache, db:%s", tbName, dbFName);
goto _return;
}

*pDb = dbCache;
*pTb = pCache;

ctgDebug("tb %s index got in cache, dbFName:%s", tbName, dbFName);
ctgDebug("tb:%s, index get from cache, db:%s", tbName, dbFName);

CTG_CACHE_HIT_INC(CTG_CI_TBL_SMA, 1);

@ -557,7 +557,7 @@ int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCt
(*pTableMeta)->schemaExt = NULL;
}

ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", ctx->pName->tname, tbMeta->tableType, dbFName);
ctgDebug("tb:%s, get meta from cache, type:%d, db:%s", ctx->pName->tname, tbMeta->tableType, dbFName);
return TSDB_CODE_SUCCESS;
}

@ -577,14 +577,14 @@ int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCt
taosHashRelease(dbCache->tbCache, tbCache);
*pTb = NULL;

ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname,
ctgDebug("ctb:%s, get meta from cache, will continue to get its stb meta, tbType:%d, db:%s", ctx->pName->tname,
ctx->tbInfo.tbType, dbFName);

CTG_ERR_RET(ctgAcquireStbMetaFromCache(dbCache, pCtg, dbFName, ctx->tbInfo.suid, &tbCache));
if (NULL == tbCache) {
taosMemoryFreeClear(*pTableMeta);
*pDb = NULL;
ctgDebug("stb 0x%" PRIx64 " meta not in cache", ctx->tbInfo.suid);
ctgDebug("stb:0x%" PRIx64 ", meta not in cache", ctx->tbInfo.suid);
return TSDB_CODE_SUCCESS;
}
@ -592,7 +592,7 @@ int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCt
|
|||
|
||||
STableMeta *stbMeta = tbCache->pMeta;
|
||||
if (stbMeta->suid != ctx->tbInfo.suid) {
|
||||
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%" PRIx64, stbMeta->suid, ctx->tbInfo.suid);
|
||||
ctgError("stb:0x%" PRIx64 ", suid in stbCache mis-match, expected suid:0x%" PRIx64, stbMeta->suid, ctx->tbInfo.suid);
|
||||
taosMemoryFreeClear(*pTableMeta);
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
@ -632,7 +632,7 @@ int32_t ctgReadTbMetaFromCache(SCatalog *pCtg, SCtgTbMetaCtx *ctx, STableMeta **
|
|||
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
ctgDebug("Got tb %s meta from cache, dbFName:%s", ctx->pName->tname, dbFName);
|
||||
ctgDebug("tb:%s, get meta from cache, db:%s", ctx->pName->tname, dbFName);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
|
@ -669,7 +669,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
|
|||
*sver = tbMeta->sversion;
|
||||
*tver = tbMeta->tversion;
|
||||
|
||||
ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:0x%" PRIx64, pTableName->tname,
|
||||
ctgDebug("tb:%s, get ver from cache, db:%s, tbType:%d, sver:%d, tver:%d, suid:0x%" PRIx64, pTableName->tname,
|
||||
dbFName, *tbType, *sver, *tver, *suid);
|
||||
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
@ -684,19 +684,19 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
|
|||
taosHashRelease(dbCache->tbCache, tbCache);
|
||||
}
|
||||
|
||||
ctgDebug("Got ctb %s ver from cache, will continue to get its stb ver, dbFName:%s", pTableName->tname, dbFName);
|
||||
ctgDebug("ctb:%s, get ver from cache, will continue to get its stb ver, db:%s", pTableName->tname, dbFName);
|
||||
|
||||
CTG_ERR_RET(ctgAcquireStbMetaFromCache(dbCache, pCtg, dbFName, *suid, &tbCache));
|
||||
if (NULL == tbCache) {
|
||||
// ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
ctgDebug("stb 0x%" PRIx64 " meta not in cache", *suid);
|
||||
ctgDebug("stb:0x%" PRIx64 ", meta not in cache", *suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STableMeta *stbMeta = tbCache->pMeta;
|
||||
if (stbMeta->suid != *suid) {
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid:0x%" PRIx64, stbMeta->suid, *suid);
|
||||
ctgError("stb:0x%" PRIx64 ", suid in stbCache mis-match, expected suid:0x%" PRIx64, stbMeta->suid, *suid);
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -711,7 +711,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
|
|||
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
ctgDebug("Got tb %s sver %d tver %d from cache, type:%d, dbFName:%s", pTableName->tname, *sver, *tver, *tbType,
|
||||
ctgDebug("tb:%s, get sver %d tver %d from cache, type:%d, db:%s", pTableName->tname, *sver, *tver, *tbType,
|
||||
dbFName);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -729,7 +729,7 @@ int32_t ctgReadTbTypeFromCache(SCatalog *pCtg, char *dbFName, char *tbName, int3
|
|||
*tbType = tbCache->pMeta->tableType;
|
||||
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
|
||||
|
||||
ctgDebug("Got tb %s tbType %d from cache, dbFName:%s", tbName, *tbType, dbFName);
|
||||
ctgDebug("tb:%s, get tbType %d from cache, db:%s", tbName, *tbType, dbFName);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -764,7 +764,7 @@ int32_t ctgReadDBCfgFromCache(SCatalog *pCtg, const char* dbFName, SDbCfgInfo* p
|
|||
|
||||
CTG_ERR_RET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
|
||||
if (NULL == dbCache) {
|
||||
ctgDebug("db %s not in cache", dbFName);
|
||||
ctgTrace("db:%s, db not in cache", dbFName);
|
||||
pDbCfg->cfgVersion = -1;
|
||||
CTG_CACHE_NHIT_INC(CTG_CI_DB_CFG, 1);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -801,13 +801,13 @@ int32_t ctgGetCachedStbNameFromSuid(SCatalog* pCtg, char* dbFName, uint64_t suid
|
|||
SCtgDBCache *dbCache = NULL;
|
||||
CTG_ERR_RET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
|
||||
if (NULL == dbCache) {
|
||||
ctgDebug("db %s not in cache", dbFName);
|
||||
ctgDebug("stb:0x%" PRIx64 ", db not in cache, db:%s", suid, dbFName);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
char *stb = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
|
||||
if (NULL == stb) {
|
||||
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
|
||||
ctgDebug("stb:0x%" PRIx64 ", not in cache, db:%s", suid, dbFName);
|
||||
ctgReleaseDBCache(pCtg, dbCache);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -829,13 +829,13 @@ int32_t ctgChkAuthFromCache(SCatalog *pCtg, SUserAuthInfo *pReq, bool tbNotExist
|
|||
|
||||
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, pReq->user, strlen(pReq->user));
|
||||
if (NULL == pUser) {
|
||||
ctgDebug("user not in cache, user:%s", pReq->user);
|
||||
ctgDebug("user:%s, user not in cache", pReq->user);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
*inCache = true;
|
||||
|
||||
ctgDebug("Got user from cache, user:%s", pReq->user);
|
||||
ctgDebug("user:%s, get user from cache", pReq->user);
|
||||
CTG_CACHE_HIT_INC(CTG_CI_USER, 1);
|
||||
|
||||
SCtgAuthReq req = {0};
|
||||
|
@ -859,7 +859,7 @@ _return:
|
|||
|
||||
*inCache = false;
|
||||
CTG_CACHE_NHIT_INC(CTG_CI_USER, 1);
|
||||
ctgDebug("Get user from cache failed, user:%s, metaNotExists:%d, code:%d", pReq->user, pRes->metaNotExists, code);
|
||||
ctgDebug("user:%s, get user from cache failed, metaNotExists:%d, code:%d", pReq->user, pRes->metaNotExists, code);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -1714,11 +1714,11 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
|
|||
code = taosHashPut(pCtg->dbCache, dbFName, strlen(dbFName), &newDBCache, sizeof(SCtgDBCache));
|
||||
if (code) {
|
||||
if (HASH_NODE_EXIST(code)) {
|
||||
ctgDebug("db already in cache, dbFName:%s", dbFName);
|
||||
ctgDebug("db:%s, db already in cache", dbFName);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
ctgError("taosHashPut db to cache failed, dbFName:%s", dbFName);
|
||||
ctgError("db:%s, taosHashPut db to cache failed", dbFName);
|
||||
CTG_ERR_JRET(terrno);
|
||||
}
|
||||
|
||||
|
@ -1727,12 +1727,12 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
|
|||
SDbCacheInfo dbCacheInfo = {.dbId = newDBCache.dbId, .vgVersion = -1, .stateTs = 0, .cfgVersion = -1, .tsmaVersion = -1};
|
||||
tstrncpy(dbCacheInfo.dbFName, dbFName, sizeof(dbCacheInfo.dbFName));
|
||||
|
||||
ctgDebug("db added to cache, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbId);
|
||||
ctgDebug("db:%s, db added to cache, dbId:0x%" PRIx64, dbFName, dbId);
|
||||
|
||||
if (!IS_SYS_DBNAME(dbFName)) {
|
||||
CTG_ERR_RET(ctgMetaRentAdd(&pCtg->dbRent, &dbCacheInfo, dbId, sizeof(SDbCacheInfo)));
|
||||
|
||||
ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:0x%" PRIx64, dbFName, dbCacheInfo.vgVersion, dbId);
|
||||
ctgDebug("db:%s, db added to rent, vgVersion:%d, dbId:0x%" PRIx64, dbFName, dbCacheInfo.vgVersion, dbId);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -1747,7 +1747,7 @@ _return:
|
|||
int32_t ctgRemoveDBFromCache(SCatalog *pCtg, SCtgDBCache *dbCache, const char *dbFName) {
|
||||
uint64_t dbId = dbCache->dbId;
|
||||
|
||||
ctgInfo("start to remove db from cache, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbCache->dbId);
|
||||
ctgInfo("db:%s, start to remove db from cache, dbId:0x%" PRIx64, dbFName, dbCache->dbId);
|
||||
|
||||
CTG_LOCK(CTG_WRITE, &dbCache->dbLock);
|
||||
|
||||
|
@ -1760,15 +1760,15 @@ int32_t ctgRemoveDBFromCache(SCatalog *pCtg, SCtgDBCache *dbCache, const char *d
|
|||
CTG_UNLOCK(CTG_WRITE, &dbCache->dbLock);
|
||||
|
||||
CTG_ERR_RET(ctgMetaRentRemove(&pCtg->dbRent, dbId, ctgDbCacheInfoSortCompare, ctgDbCacheInfoSearchCompare));
|
||||
ctgDebug("db removed from rent, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbId);
|
||||
ctgDebug("db:%s, db removed from rent, dbId:0x%" PRIx64, dbFName, dbId);
|
||||
|
||||
if (taosHashRemove(pCtg->dbCache, dbFName, strlen(dbFName))) {
|
||||
ctgInfo("taosHashRemove from dbCache failed, may be removed, dbFName:%s", dbFName);
|
||||
ctgInfo("db:%s, taosHashRemove from dbCache failed, may be removed", dbFName);
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
|
||||
}
|
||||
|
||||
CTG_CACHE_NUM_DEC(CTG_CI_DB, 1);
|
||||
ctgInfo("db removed from cache, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbId);
|
||||
ctgInfo("db:%s, db removed from cache, dbId:0x%" PRIx64, dbFName, dbId);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1842,9 +1842,9 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
if (stbName) {
|
||||
uint64_t metaSize = strlen(stbName) + 1 + sizeof(orig->suid);
|
||||
if (taosHashRemove(dbCache->stbCache, &orig->suid, sizeof(orig->suid))) {
|
||||
ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:0x%" PRIx64, dbFName, tbName, orig->suid);
|
||||
ctgError("stb not exist in stbCache, db:%s, stb:%s, suid:0x%" PRIx64, dbFName, tbName, orig->suid);
|
||||
} else {
|
||||
ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:0x%" PRIx64, dbFName, tbName, orig->suid);
|
||||
ctgDebug("stb removed from stbCache, db:%s, stb:%s, suid:0x%" PRIx64, dbFName, tbName, orig->suid);
|
||||
(void)atomic_sub_fetch_64(&dbCache->dbCacheSize, metaSize);
|
||||
}
|
||||
}
|
||||
|
@ -1855,7 +1855,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
SCtgTbCache cache = {0};
|
||||
cache.pMeta = meta;
|
||||
if (taosHashPut(dbCache->tbCache, tbName, strlen(tbName), &cache, sizeof(SCtgTbCache)) != 0) {
|
||||
ctgError("taosHashPut new tbCache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
|
||||
ctgError("taosHashPut new tbCache failed, db:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
|
||||
taosMemoryFree(meta);
|
||||
CTG_ERR_RET(terrno);
|
||||
}
|
||||
|
@ -1882,7 +1882,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
|
||||
CTG_META_NUM_INC(pCache->pMeta->tableType);
|
||||
|
||||
ctgDebug("tbmeta updated to cache, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
|
||||
ctgDebug("tbmeta updated to cache, db:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
|
||||
ctgdShowTableMeta(pCtg, tbName, meta);
|
||||
|
||||
if (!isStb) {
|
||||
|
@ -1896,7 +1896,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
|
||||
(void)atomic_add_fetch_64(&dbCache->dbCacheSize, sizeof(meta->suid) + strlen(tbName) + 1);
|
||||
|
||||
ctgDebug("stb 0x%" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName,
|
||||
ctgDebug("stb:0x%" PRIx64 ", updated to cache, db:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName,
|
||||
meta->tableType);
|
||||
|
||||
CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbId, meta->suid, pCache));
|
||||
|
@ -2121,14 +2121,14 @@ int32_t ctgWriteTbTSMAToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
}
|
||||
|
||||
if (taosHashPut(dbCache->tsmaCache, tbName, strlen(tbName), &cache, sizeof(cache))) {
|
||||
ctgError("taosHashPut new tsmacache for tb: %s.%s failed", dbFName, tbName);
|
||||
ctgError("tb:%s.%s, taosHashPut new tsmacache for tb failed", dbFName, tbName);
|
||||
CTG_ERR_JRET(terrno);
|
||||
}
|
||||
|
||||
(void)atomic_add_fetch_64(&dbCache->dbCacheSize, strlen(tbName) + sizeof(STSMACache) + ctgGetTbTSMACacheSize(pTsmaCache));
|
||||
|
||||
CTG_DB_NUM_INC(CTG_CI_TBL_TSMA);
|
||||
ctgDebug("tb %s tsma updated to cache, name: %s", tbName, pTsmaCache->name);
|
||||
ctgDebug("tb:%s, tsma updated to cache, name:%s", tbName, pTsmaCache->name);
|
||||
|
||||
CTG_ERR_JRET(ctgUpdateRentTSMAVersion(pCtg, dbFName, pTsmaCache));
|
||||
*ppTsmaCache = NULL;
|
||||
|
@ -2148,7 +2148,7 @@ int32_t ctgWriteTbTSMAToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
}
|
||||
|
||||
if (pInfo->tsmaId == pTsmaCache->tsmaId) {
|
||||
ctgDebug("tsma: %s removed from cache, history from %d to %d, reqTs from %" PRId64 " to %" PRId64
|
||||
ctgDebug("tsma:%s, removed from cache, history from %d to %d, reqTs from %" PRId64 " to %" PRId64
|
||||
"rspTs from %" PRId64 " to %" PRId64 " delay from %" PRId64 " to %" PRId64,
|
||||
pInfo->name, pInfo->fillHistoryFinished, pTsmaCache->fillHistoryFinished, pInfo->reqTs,
|
||||
pTsmaCache->reqTs, pInfo->rspTs, pTsmaCache->rspTs, pInfo->delayDuration, pTsmaCache->delayDuration);
|
||||
|
@ -2181,7 +2181,7 @@ int32_t ctgWriteTbTSMAToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
|
|||
|
||||
CTG_ERR_RET(ctgUpdateRentTSMAVersion(pCtg, dbFName, pTsmaCache));
|
||||
|
||||
ctgDebug("table %s tsma updated to cache, tsma: %s", tbName, pTsmaCache->name);
|
||||
ctgDebug("tb:%s, tsma updated to cache, tsma:%s", tbName, pTsmaCache->name);
|
||||
}
|
||||
|
||||
CTG_UNLOCK(CTG_WRITE, &pCache->tsmaLock);
|
||||
|
@ -2204,7 +2204,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if (dbInfo->vgVersion < 0 || (taosHashGetSize(dbInfo->vgHash) <= 0 && !IS_SYS_DBNAME(dbFName))) {
|
||||
ctgDebug("invalid db vgInfo, dbFName:%s, vgHash:%p, vgVersion:%d, vgHashSize:%d", dbFName, dbInfo->vgHash,
|
||||
ctgDebug("invalid db vgInfo, db:%s, vgHash:%p, vgVersion:%d, vgHashSize:%d", dbFName, dbInfo->vgHash,
|
||||
dbInfo->vgVersion, taosHashGetSize(dbInfo->vgHash));
|
||||
CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
|
||||
}
|
||||
|
@ -2216,7 +2216,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
|
|||
SCtgDBCache *dbCache = NULL;
|
||||
CTG_ERR_JRET(ctgGetAddDBCache(msg->pCtg, dbFName, msg->dbId, &dbCache));
|
||||
if (NULL == dbCache) {
|
||||
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, dbFName, msg->dbId);
|
||||
ctgInfo("conflict db update, ignore this update, db:%s, dbId:0x%" PRIx64, dbFName, msg->dbId);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -2228,7 +2228,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
|
|||
SDBVgInfo *vgInfo = vgCache->vgInfo;
|
||||
|
||||
if (dbInfo->vgVersion < vgInfo->vgVersion) {
|
||||
ctgDebug("db updateVgroup is ignored, dbFName:%s, vgVer:%d, curVer:%d", dbFName, dbInfo->vgVersion,
|
||||
ctgDebug("db updateVgroup is ignored, db:%s, vgVer:%d, curVer:%d", dbFName, dbInfo->vgVersion,
|
||||
vgInfo->vgVersion);
|
||||
ctgWUnlockVgInfo(dbCache);
|
||||
|
||||
|
@ -2237,7 +2237,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
|
|||
|
||||
if (dbInfo->vgVersion == vgInfo->vgVersion && dbInfo->numOfTable == vgInfo->numOfTable &&
|
||||
dbInfo->stateTs == vgInfo->stateTs) {
|
||||
ctgDebug("no new db vgroup update info, dbFName:%s, vgVer:%d, numOfTable:%d, stateTs:%" PRId64, dbFName,
|
||||
ctgDebug("no new db vgroup update info, db:%s, vgVer:%d, numOfTable:%d, stateTs:%" PRId64, dbFName,
|
||||
dbInfo->vgVersion, dbInfo->numOfTable, dbInfo->stateTs);
|
||||
ctgWUnlockVgInfo(dbCache);
|
||||
|
||||
|
@ -2245,7 +2245,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
uint64_t groupCacheSize = ctgGetDbVgroupCacheSize(vgCache->vgInfo);
|
||||
ctgDebug("sub dbGroupCacheSize %" PRIu64 " from db, dbFName:%s", groupCacheSize, dbFName);
|
||||
ctgDebug("sub dbGroupCacheSize %" PRIu64 " from db, db:%s", groupCacheSize, dbFName);
|
||||
|
||||
(void)atomic_sub_fetch_64(&dbCache->dbCacheSize, groupCacheSize);
|
||||
|
||||
|
@ -2262,14 +2262,14 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
|
|||
msg->dbInfo = NULL;
|
||||
CTG_DB_NUM_SET(CTG_CI_DB_VGROUP);
|
||||
|
||||
ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, stateTs:%" PRId64 ", dbId:0x%" PRIx64, dbFName,
|
||||
ctgDebug("db:%s, db vgInfo updated, vgVer:%d, stateTs:%" PRId64 ", dbId:0x%" PRIx64, dbFName,
|
||||
dbCacheInfo.vgVersion, dbCacheInfo.stateTs, dbCacheInfo.dbId);
|
||||
|
||||
ctgWUnlockVgInfo(dbCache);
|
||||
|
||||
uint64_t groupCacheSize = ctgGetDbVgroupCacheSize(vgCache->vgInfo);
|
||||
(void)atomic_add_fetch_64(&dbCache->dbCacheSize, groupCacheSize);
|
||||
ctgDebug("add dbGroupCacheSize %" PRIu64 " from db, dbFName:%s", groupCacheSize, dbFName);
|
||||
ctgDebug("db:%s, add dbGroupCacheSize:%" PRIu64 " from db", dbFName, groupCacheSize);
|
||||
|
||||
dbCache = NULL;
|
||||
|
||||
|
@ -2299,14 +2299,14 @@ int32_t ctgOpUpdateDbCfg(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if (cfgInfo->cfgVersion < 0) {
|
||||
ctgDebug("invalid db cfgInfo, dbFName:%s, cfgVersion:%d", dbFName, cfgInfo->cfgVersion);
|
||||
ctgDebug("invalid db cfgInfo, db:%s, cfgVersion:%d", dbFName, cfgInfo->cfgVersion);
|
||||
CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
|
||||
}
|
||||
|
||||
SCtgDBCache *dbCache = NULL;
|
||||
CTG_ERR_JRET(ctgGetAddDBCache(msg->pCtg, dbFName, msg->dbId, &dbCache));
|
||||
if (NULL == dbCache) {
|
||||
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, dbFName, msg->dbId);
|
||||
ctgInfo("conflict db update, ignore this update, db:%s, dbId:0x%" PRIx64, dbFName, msg->dbId);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -2333,7 +2333,7 @@ int32_t ctgOpUpdateDbCfg(SCtgCacheOperation *operation) {
|
|||
|
||||
ctgWUnlockDbCfgInfo(dbCache);
|
||||
|
||||
ctgDebug("db cfgInfo updated, dbFName:%s, cfgVer:%d", dbFName, dbCache->cfgCache.cfgInfo->cfgVersion);
|
||||
ctgDebug("db:%s, db cfgInfo updated, cfgVer:%d", dbFName, dbCache->cfgCache.cfgInfo->cfgVersion);
|
||||
|
||||
// if (!IS_SYS_DBNAME(dbFName)) {
|
||||
CTG_ERR_JRET(ctgMetaRentUpdate(&msg->pCtg->dbRent, &cacheInfo, cacheInfo.dbId, sizeof(SDbCacheInfo),
|
||||
|
@ -2365,7 +2365,7 @@ int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if (msg->dbId && dbCache->dbId != msg->dbId) {
|
||||
ctgInfo("dbId already updated, dbFName:%s, dbId:0x%" PRIx64 ", targetId:0x%" PRIx64, msg->dbFName, dbCache->dbId,
|
||||
ctgInfo("db:%s, dbId already updated, dbId:0x%" PRIx64 ", targetId:0x%" PRIx64, msg->dbFName, dbCache->dbId,
|
||||
msg->dbId);
|
||||
goto _return;
|
||||
}
|
||||
|
@ -2402,7 +2402,7 @@ int32_t ctgOpDropDbVgroup(SCtgCacheOperation *operation) {
|
|||
dbCache->vgCache.vgInfo = NULL;
|
||||
|
||||
CTG_DB_NUM_RESET(CTG_CI_DB_VGROUP);
|
||||
ctgDebug("db vgInfo removed, dbFName:%s", msg->dbFName);
|
||||
ctgDebug("db:%s, db vgInfo removed", msg->dbFName);
|
||||
|
||||
ctgWUnlockVgInfo(dbCache);
|
||||
|
||||
|
@ -2425,7 +2425,7 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if ((!CTG_IS_META_CTABLE(pMeta->metaType)) && NULL == pMeta->tbMeta) {
|
||||
ctgError("no valid tbmeta got from meta rsp, dbFName:%s, tbName:%s", pMeta->dbFName, pMeta->tbName);
|
||||
ctgError("no valid tbmeta got from meta rsp, db:%s, tbName:%s", pMeta->dbFName, pMeta->tbName);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -2436,7 +2436,7 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
|
|||
|
||||
CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pMeta->dbFName, pMeta->dbId, &dbCache));
|
||||
if (NULL == dbCache) {
|
||||
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, pMeta->dbFName, pMeta->dbId);
|
||||
ctgInfo("db:%s, conflict db update, ignore this update, dbId:0x%" PRIx64, pMeta->dbFName, pMeta->dbId);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -2483,8 +2483,8 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if ((0 != msg->dbId) && (dbCache->dbId != msg->dbId)) {
|
||||
ctgDebug("dbId already modified, dbFName:%s, current:0x%" PRIx64 ", dbId:0x%" PRIx64 ", stb:%s, suid:0x%" PRIx64,
|
||||
msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid);
|
||||
ctgDebug("stb:%s, dbId already modified, current:0x%" PRIx64 ", dbId:0x%" PRIx64 ", db:%s, suid:0x%" PRIx64,
|
||||
msg->stbName, dbCache->dbId, msg->dbId, msg->dbFName, msg->suid);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
@ -2492,8 +2492,8 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
|
|||
if (stbName) {
|
||||
uint64_t metaSize = strlen(stbName) + 1 + sizeof(msg->suid);
|
||||
if (taosHashRemove(dbCache->stbCache, &msg->suid, sizeof(msg->suid))) {
|
||||
ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:0x%" PRIx64, msg->dbFName,
|
||||
msg->stbName, msg->suid);
|
||||
ctgDebug("stb:%s, stb not exist in stbCache, may be removed, db:%s, suid:0x%" PRIx64, msg->stbName, msg->dbFName,
|
||||
msg->suid);
|
||||
} else {
|
||||
(void)atomic_sub_fetch_64(&dbCache->dbCacheSize, metaSize);
|
||||
}
|
||||
|
@ -2501,7 +2501,7 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
|
|||
|
||||
SCtgTbCache *pTbCache = taosHashGet(dbCache->tbCache, msg->stbName, strlen(msg->stbName));
|
||||
if (NULL == pTbCache) {
|
||||
ctgDebug("stb %s already not in cache", msg->stbName);
|
||||
ctgDebug("stb:%s, already not in cache", msg->stbName);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
@ -2511,17 +2511,17 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
|
|||
ctgFreeTbCacheImpl(pTbCache, true);
|
||||
|
||||
if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) {
|
||||
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%" PRIx64, msg->dbFName, msg->stbName, msg->suid);
|
||||
ctgError("stb:%s, stb not exist in cache, db:%s, suid:0x%" PRIx64, msg->stbName, msg->dbFName, msg->suid);
|
||||
} else {
|
||||
CTG_META_NUM_DEC(tblType);
|
||||
(void)atomic_sub_fetch_64(&dbCache->dbCacheSize, sizeof(*pTbCache) + strlen(msg->stbName));
|
||||
}
|
||||
|
||||
ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:0x%" PRIx64, msg->dbFName, msg->stbName, msg->suid);
|
||||
ctgInfo("stb:%s, stb removed from cache, db:%s, suid:0x%" PRIx64, msg->stbName, msg->dbFName, msg->suid);
|
||||
|
||||
CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->stbRent, msg->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare));
|
||||
|
||||
ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:0x%" PRIx64, msg->dbFName, msg->stbName, msg->suid);
|
||||
ctgDebug("stb:%s, stb removed from rent, db:%s, suid:0x%" PRIx64, msg->stbName, msg->dbFName, msg->suid);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -2547,14 +2547,14 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if ((0 != msg->dbId) && (dbCache->dbId != msg->dbId)) {
|
||||
ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%" PRIx64 ", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId,
|
||||
msg->dbFName, msg->tbName);
|
||||
ctgDebug("tb:%s, dbId:0x%" PRIx64 " not match with curId:0x%" PRIx64 ", db:%s", msg->tbName, msg->dbId,
|
||||
dbCache->dbId, msg->dbFName);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
SCtgTbCache *pTbCache = taosHashGet(dbCache->tbCache, msg->tbName, strlen(msg->tbName));
|
||||
if (NULL == pTbCache) {
|
||||
ctgDebug("tb %s already not in cache", msg->tbName);
|
||||
ctgDebug("tb:%s, already not in cache", msg->tbName);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
@ -2564,14 +2564,14 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
|
|||
ctgFreeTbCacheImpl(pTbCache, true);
|
||||
|
||||
if (taosHashRemove(dbCache->tbCache, msg->tbName, strlen(msg->tbName))) {
|
||||
ctgError("tb %s not exist in cache, dbFName:%s", msg->tbName, msg->dbFName);
|
||||
ctgError("tb:%s, not exist in cache, db:%s", msg->tbName, msg->dbFName);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
} else {
|
||||
(void)atomic_sub_fetch_64(&dbCache->dbCacheSize, sizeof(*pTbCache) + strlen(msg->tbName));
|
||||
CTG_META_NUM_DEC(tblType);
|
||||
}
|
||||
|
||||
ctgDebug("table %s removed from cache, dbFName:%s", msg->tbName, msg->dbFName);
|
||||
ctgDebug("tb:%s, removed from cache, db:%s", msg->tbName, msg->dbFName);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -2703,7 +2703,7 @@ int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
|
|||
|
||||
SEp *pOrigEp = &pInfo->epSet.eps[pInfo->epSet.inUse];
|
||||
SEp *pNewEp = &msg->epSet.eps[msg->epSet.inUse];
|
||||
ctgDebug("vgroup %d epset updated from %d/%d=>%s:%d to %d/%d=>%s:%d, dbFName:%s in ctg", pInfo->vgId,
|
||||
ctgDebug("vgroup %d epset updated from %d/%d=>%s:%d to %d/%d=>%s:%d, db:%s in ctg", pInfo->vgId,
|
||||
pInfo->epSet.inUse, pInfo->epSet.numOfEps, pOrigEp->fqdn, pOrigEp->port, msg->epSet.inUse,
|
||||
msg->epSet.numOfEps, pNewEp->fqdn, pNewEp->port, msg->dbFName);
|
||||
|
||||
|
@ -2802,7 +2802,7 @@ int32_t ctgOpUpdateViewMeta(SCtgCacheOperation *operation) {
|
|||
|
||||
CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pRsp->dbFName, pRsp->dbId, &dbCache));
|
||||
if (NULL == dbCache) {
|
||||
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, pRsp->dbFName, pRsp->dbId);
|
||||
ctgInfo("db:%s, conflict db update, ignore this update, dbId:0x%" PRIx64, pRsp->dbFName, pRsp->dbId);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -2843,20 +2843,20 @@ int32_t ctgOpDropViewMeta(SCtgCacheOperation *operation) {
|
|||
}
|
||||
|
||||
if ((0 != msg->dbId) && (dbCache->dbId != msg->dbId)) {
|
||||
ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%" PRIx64 ", dbFName:%s, viewName:%s", msg->dbId, dbCache->dbId,
|
||||
msg->dbFName, msg->viewName);
|
||||
ctgDebug("view:%s, dbId:0x%" PRIx64 " not match with curId:0x%" PRIx64 ", db:%s", msg->viewName, msg->dbId,
|
||||
dbCache->dbId, msg->dbFName);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
SCtgViewCache *pViewCache = taosHashGet(dbCache->viewCache, msg->viewName, strlen(msg->viewName));
|
||||
if (NULL == pViewCache) {
|
||||
ctgDebug("view %s already not in cache", msg->viewName);
|
||||
ctgDebug("view:%s, already not in cache", msg->viewName);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
int64_t viewId = pViewCache->pMeta->viewId;
|
||||
if (0 != msg->viewId && viewId != msg->viewId) {
|
||||
ctgDebug("viewId 0x%" PRIx64 " not match with curId 0x%" PRIx64 ", viewName:%s", msg->viewId, viewId, msg->viewName);
|
||||
ctgDebug("view:%s, viewId:0x%" PRIx64 " not match with curId:0x%" PRIx64, msg->viewName, msg->viewId, viewId);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
@ -2864,18 +2864,18 @@ int32_t ctgOpDropViewMeta(SCtgCacheOperation *operation) {
|
|||
ctgFreeViewCacheImpl(pViewCache, true);
|
||||
|
||||
if (taosHashRemove(dbCache->viewCache, msg->viewName, strlen(msg->viewName))) {
|
||||
ctgError("view %s not exist in cache, dbFName:%s", msg->viewName, msg->dbFName);
|
||||
ctgError("view:%s, not exist in cache, db:%s", msg->viewName, msg->dbFName);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
} else {
|
||||
(void)atomic_sub_fetch_64(&dbCache->dbCacheSize, sizeof(SCtgViewCache) + strlen(msg->viewName));
|
||||
CTG_DB_NUM_DEC(CTG_CI_VIEW);
|
||||
}
|
||||
|
||||
ctgDebug("view %s removed from cache, dbFName:%s", msg->viewName, msg->dbFName);
|
||||
ctgDebug("view:%s, removed from cache, db:%s", msg->viewName, msg->dbFName);
|
||||
|
||||
CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->viewRent, viewId, ctgViewVersionSortCompare, ctgViewVersionSearchCompare));
|
||||
|
||||
ctgDebug("view %s removed from rent, dbFName:%s, viewId:0x%" PRIx64, msg->viewName, msg->dbFName, viewId);
|
||||
ctgDebug("view:%s, removed from rent, db:%s, viewId:0x%" PRIx64, msg->viewName, msg->dbFName, viewId);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -3011,10 +3011,10 @@ int32_t ctgOpDropTbTSMA(SCtgCacheOperation *operation) {
|
|||
pCtgCache->pTsmas = NULL;
|
||||
pCtgCache->retryFetch = true;
|
||||
|
||||
ctgDebug("all tsmas for table dropped: %s.%s", msg->dbFName, msg->tbName);
|
||||
ctgDebug("tb:%s.%s, all tsmas for table dropped", msg->dbFName, msg->tbName);
|
||||
code = taosHashRemove(dbCache->tsmaCache, msg->tbName, TSDB_TABLE_NAME_LEN);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
ctgError("remove table %s.%s from tsmaCache failed, error:%s", msg->dbFName, msg->tbName, tstrerror(code));
|
||||
ctgError("tb:%s.%s, remove from tsmaCache failed, error:%s", msg->dbFName, msg->tbName, tstrerror(code));
|
||||
}
|
||||
|
||||
CTG_UNLOCK(CTG_WRITE, &pCtgCache->tsmaLock);
|
||||
|
@ -3432,7 +3432,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_ERR_RET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
|
||||
|
||||
if (NULL == dbCache) {
|
||||
ctgDebug("db %s not in cache", dbFName);
|
||||
ctgTrace("db:%s, db not in cache", dbFName);
|
||||
for (int32_t i = 0; i < tbNum; ++i) {
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaData){0})) {
|
||||
|
@ -3452,7 +3452,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
|
||||
pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname));
|
||||
if (NULL == pCache) {
|
||||
ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName);
|
||||
ctgDebug("tb:%s, tb not in cache, db:%s", pName->tname, dbFName);
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaData){0})) {
|
||||
CTG_ERR_JRET(terrno);
|
||||
|
@ -3468,7 +3468,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName);
|
||||
ctgDebug("tb:%s, meta not in cache, db:%s", pName->tname, dbFName);
|
||||
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaData){0})) {
|
||||
|
@ -3517,7 +3517,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, pTableMeta->tableType, dbFName);
|
||||
ctgDebug("tb:%s, get meta from cache, type:%d, db:%s", pName->tname, pTableMeta->tableType, dbFName);
|
||||
|
||||
res.pRes = pTableMeta;
|
||||
if (NULL == taosArrayPush(ctx->pResList, &res)) {
|
||||
|
@ -3542,7 +3542,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, pTableMeta->tableType, dbFName);
|
||||
ctgDebug("tb:%s, get meta from cache, type:%d, db:%s", pName->tname, pTableMeta->tableType, dbFName);
|
||||
|
||||
res.pRes = pTableMeta;
|
||||
if (NULL == taosArrayPush(ctx->pResList, &res)) {
|
||||
|
@ -3564,12 +3564,12 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", pName->tname,
|
||||
ctgDebug("ctb:%s, get meta from cache, will continue to get its stb meta, type:%d, db:%s", pName->tname,
|
||||
nctx.tbInfo.tbType, dbFName);
|
||||
|
||||
char *stName = taosHashAcquire(dbCache->stbCache, &pTableMeta->suid, sizeof(pTableMeta->suid));
|
||||
if (NULL == stName) {
|
||||
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName);
|
||||
ctgDebug("stb:0x%" PRIx64 ", not in cache, db:%s", pTableMeta->suid, dbFName);
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaRes){0})) {
|
||||
CTG_ERR_JRET(terrno);
|
||||
|
@ -3582,7 +3582,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
|
||||
pCache = taosHashAcquire(dbCache->tbCache, stName, strlen(stName));
|
||||
if (NULL == pCache) {
|
||||
ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", pTableMeta->suid, stName, dbFName);
|
||||
ctgDebug("stb:0x%" PRIx64 ", name:%s not in cache, db:%s", pTableMeta->suid, stName, dbFName);
|
||||
taosHashRelease(dbCache->stbCache, stName);
|
||||
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
|
@ -3600,7 +3600,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
|
||||
CTG_LOCK(CTG_READ, &pCache->metaLock);
|
||||
if (NULL == pCache->pMeta) {
|
||||
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", pTableMeta->suid, dbFName);
|
||||
ctgDebug("stb:0x%" PRIx64 ", meta not in cache, db:%s", pTableMeta->suid, dbFName);
|
||||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
|
@ -3620,7 +3620,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%" PRIx64, stbMeta->suid,
|
||||
ctgError("stb:0x%" PRIx64 ", suid in stbCache mis-match, expected suid:0x%" PRIx64, stbMeta->suid,
|
||||
nctx.tbInfo.suid);
|
||||
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
|
@ -3725,7 +3725,7 @@ int32_t ctgGetTbNamesFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbNa
|
|||
(void)tNameGetFullDbName(pName, dbFName);
|
||||
}
|
||||
|
||||
ctgDebug("db %s not in cache", dbFName);
|
||||
ctgDebug("db:%s, not in cache", dbFName);
|
||||
for (int32_t i = 0; i < tbNum; ++i) {
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaData){0})) {
|
||||
|
@ -3801,7 +3801,7 @@ int32_t ctgGetViewsFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgViewsC
|
|||
CTG_ERR_RET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
|
||||
|
||||
if (NULL == dbCache) {
|
||||
ctgDebug("db %s not in cache", dbFName);
|
||||
ctgDebug("db:%s, not in cache", dbFName);
|
||||
for (int32_t i = 0; i < tbNum; ++i) {
|
||||
CTG_ERR_RET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaData){0})) {
|
||||
|
@ -3821,7 +3821,7 @@ int32_t ctgGetViewsFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgViewsC
|
|||
|
||||
pCache = taosHashAcquire(dbCache->viewCache, pName->tname, strlen(pName->tname));
|
||||
if (NULL == pCache) {
|
||||
ctgDebug("view %s not in cache, dbFName:%s", pName->tname, dbFName);
|
||||
ctgDebug("view:%s, view not in cache, db:%s", pName->tname, dbFName);
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaRes){0})) {
|
||||
CTG_ERR_JRET(terrno);
|
||||
|
@ -3835,7 +3835,7 @@ int32_t ctgGetViewsFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgViewsC
|
|||
if (NULL == pCache->pMeta) {
|
||||
CTG_UNLOCK(CTG_READ, &pCache->viewLock);
|
||||
taosHashRelease(dbCache->viewCache, pCache);
|
||||
ctgDebug("view %s meta not in cache, dbFName:%s", pName->tname, dbFName);
|
||||
ctgDebug("view:%s, meta not in cache, db:%s", pName->tname, dbFName);
|
||||
CTG_ERR_JRET(ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag));
|
||||
if (NULL == taosArrayPush(ctx->pResList, &(SMetaRes){0})) {
|
||||
CTG_ERR_JRET(terrno);
|
||||
|
@ -3877,7 +3877,7 @@ int32_t ctgGetViewsFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgViewsC
|
|||
CTG_UNLOCK(CTG_READ, &pCache->viewLock);
|
||||
taosHashRelease(dbCache->viewCache, pCache);
|
||||
|
||||
ctgDebug("Got view %s meta from cache, dbFName:%s", pName->tname, dbFName);
|
||||
ctgDebug("view:%s, get meta from cache, db:%s", pName->tname, dbFName);
|
||||
|
||||
res.pRes = pViewMeta;
|
||||
if (NULL == taosArrayPush(ctx->pResList, &res)) {
|
||||
|
@ -3917,7 +3917,7 @@ int32_t ctgGetTbTSMAFromCache(SCatalog* pCtg, SCtgTbTSMACtx* pCtx, int32_t dbIdx
|
|||
// get db cache
|
||||
CTG_ERR_RET(ctgAcquireDBCache(pCtg, dbFName, &dbCache));
|
||||
if (!dbCache) {
|
||||
ctgDebug("DB %s not in cache", dbFName);
|
||||
ctgTrace("db:%s, db not in cache", dbFName);
|
||||
for (int32_t i = 0; i < tbNum; ++i) {
|
||||
CTG_ERR_RET(ctgAddTSMAFetch(&pCtx->pFetches, dbIdx, i, fetchIdx, baseResIdx + i, flag, FETCH_TSMA_SOURCE_TB_META, NULL));
|
||||
if (NULL == taosArrayPush(pCtx->pResList, &(SMetaData){0})) {
|
||||
|
@ -3938,7 +3938,7 @@ int32_t ctgGetTbTSMAFromCache(SCatalog* pCtg, SCtgTbTSMACtx* pCtx, int32_t dbIdx
|
|||
|
||||
pTbCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname));
|
||||
if (!pTbCache) {
|
||||
ctgDebug("tb: %s.%s not in cache", dbFName, pName->tname);
|
||||
ctgDebug("tb:%s.%s not in cache", dbFName, pName->tname);
|
||||
CTG_ERR_JRET(ctgAddTSMAFetch(&pCtx->pFetches, dbIdx, i, fetchIdx, baseResIdx + i, flag, FETCH_TSMA_SOURCE_TB_META, NULL));
|
||||
if (NULL == taosArrayPush(pCtx->pResList, &(SMetaRes){0})) {
|
||||
CTG_ERR_JRET(terrno);
|
||||
|
@ -3950,7 +3950,7 @@ int32_t ctgGetTbTSMAFromCache(SCatalog* pCtg, SCtgTbTSMACtx* pCtx, int32_t dbIdx
|
|||
CTG_LOCK(CTG_READ, &pTbCache->metaLock);
|
||||
if (!pTbCache->pMeta) {
|
||||
CTG_UNLOCK(CTG_READ, &pTbCache->metaLock);
|
||||
ctgDebug("tb: %s.%s not in cache", dbFName, pName->tname);
|
||||
ctgDebug("tb:%s.%s not in cache", dbFName, pName->tname);
|
||||
|
||||
CTG_ERR_JRET(ctgAddTSMAFetch(&pCtx->pFetches, dbIdx, i, fetchIdx, baseResIdx + i, flag, FETCH_TSMA_SOURCE_TB_META, NULL));
|
||||
if (NULL == taosArrayPush(pCtx->pResList, &(SMetaRes){0})) {
|
||||
|
@ -3975,7 +3975,7 @@ int32_t ctgGetTbTSMAFromCache(SCatalog* pCtg, SCtgTbTSMACtx* pCtx, int32_t dbIdx
|
|||
(void)snprintf(tsmaSourceTbName.tname, TMIN(TSDB_TABLE_NAME_LEN, strlen(stbName) + 1), "%s", stbName);
|
||||
taosHashRelease(dbCache->stbCache, stbName);
|
||||
} else {
|
||||
ctgDebug("stb in db: %s, uid: %" PRId64 " not in cache", dbFName, suid);
|
||||
ctgDebug("suid:0x%" PRIx64 ", stb not in cache, db:%s", suid, dbFName);
|
||||
|
||||
CTG_ERR_JRET(ctgAddTSMAFetch(&pCtx->pFetches, dbIdx, i, fetchIdx, baseResIdx + i, flag, FETCH_TSMA_SOURCE_TB_META, NULL));
|
||||
if (NULL == taosArrayPush(pCtx->pResList, &(SMetaRes){0})) {
|
||||
|
@ -4009,7 +4009,7 @@ int32_t ctgGetTbTSMAFromCache(SCatalog* pCtg, SCtgTbTSMACtx* pCtx, int32_t dbIdx
|
|||
if (pCache->retryFetch || hasOutOfDateTSMACache(pCache->pTsmas)) {
|
||||
CTG_UNLOCK(CTG_READ, &pCache->tsmaLock);
|
||||
|
||||
ctgDebug("tsma for tb: %s.%s not in cache", tsmaSourceTbName.tname, dbFName);
|
||||
ctgDebug("tsma for tb:%s.%s not in cache", tsmaSourceTbName.tname, dbFName);
|
||||
|
||||
CTG_ERR_JRET(ctgAddTSMAFetch(&pCtx->pFetches, dbIdx, i, fetchIdx, baseResIdx + i, flag, FETCH_TB_TSMA, &tsmaSourceTbName));
|
||||
if (NULL == taosArrayPush(pCtx->pResList, &(SMetaRes){0})) {
|
||||
|
@ -4086,7 +4086,7 @@ int32_t ctgGetTSMAFromCache(SCatalog* pCtg, SCtgTbTSMACtx* pCtx, SName* pTsmaNam
|
|||
|
||||
CTG_ERR_RET(ctgAcquireDBCache(pCtg, dbFName, &pDbCache));
|
||||
if (!pDbCache) {
|
||||
ctgDebug("DB %s not in cache", dbFName);
|
||||
ctgTrace("db:%s, db not in cache", dbFName);
|
||||
CTG_RET(code);
|
||||
}
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
|
|||
msgNum = 0;
|
||||
}
|
||||
|
||||
ctgDebug("QID:0x%" PRIx64 " ctg got batch %d rsp %s", pJob->queryId, cbParam->batchId,
|
||||
ctgDebug("QID:0x%" PRIx64 ", ctg got batch:%d rsp:%s", pJob->queryId, cbParam->batchId,
|
||||
TMSG_INFO(cbParam->reqType + 1));
|
||||
|
||||
SHashObj* pBatchs = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
|
||||
|
@ -109,13 +109,13 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
|
|||
tReq.msgIdx = pRsp->msgIdx;
|
||||
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq.msgIdx);
|
||||
if (NULL == pMsgCtx) {
|
||||
ctgError("get task %d SCtgMsgCtx failed, taskType:%d", tReq.msgIdx, pTask->type);
|
||||
ctgError("task:%d, get SCtgMsgCtx failed, taskType:%d", tReq.msgIdx, pTask->type);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
pMsgCtx->pBatchs = pBatchs;
|
||||
|
||||
ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId,
|
||||
ctgDebug("QID:0x%" PRIx64 ", ctg task:%d idx:%d start to handle rsp:%s, pBatchs:%p", pJob->queryId, pTask->taskId,
|
||||
pRsp->msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs);
|
||||
|
||||
(void)(*gCtgAsyncFps[pTask->type].handleRspFp)(
|
||||
|
@ -144,11 +144,11 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
|
|||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process qnode list rsp failed, error:%s", tstrerror(rspCode));
|
||||
qError("process qnode list rsp failed, error:%s", tstrerror(rspCode));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got qnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(out));
|
||||
qDebug("get qnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(out));
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_DNODE_LIST: {
|
||||
|
@ -159,194 +159,194 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
|
|||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process dnode list rsp failed, error:%s", tstrerror(rspCode));
|
||||
qError("process dnode list rsp failed, error:%s", tstrerror(rspCode));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got dnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(*(SArray**)out));
|
||||
qDebug("get dnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(*(SArray**)out));
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_USE_DB: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for use db, error:%s, dbFName:%s", tstrerror(rspCode), target);
|
||||
qError("db:%s, error rsp for use db, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process use db rsp failed, error:%s, dbFName:%s", tstrerror(code), target);
|
||||
qError("db:%s, process use db rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got db vgInfo from mnode, dbFName:%s", target);
|
||||
qDebug("db:%s, get db vgInfo from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_GET_DB_CFG: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for get db cfg, error:%s, db:%s", tstrerror(rspCode), target);
|
||||
qError("db:%s, error rsp for get db cfg, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get db cfg rsp failed, error:%s, db:%s", tstrerror(code), target);
|
||||
qError("db:%s, process get db cfg rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got db cfg from mnode, dbFName:%s", target);
|
||||
qDebug("db:%s, get db cfg from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_GET_INDEX: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for get index, error:%s, indexName:%s", tstrerror(rspCode), target);
|
||||
qError("index:%s, error rsp for get index, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get index rsp failed, error:%s, indexName:%s", tstrerror(code), target);
|
||||
qError("index:%s, process get index rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got index from mnode, indexName:%s", target);
|
||||
qDebug("index:%s, get index from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_GET_TABLE_INDEX: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for get table index, error:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for get table index, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get table index rsp failed, error:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, process get table index rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got table index from mnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get table index from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_RETRIEVE_FUNC: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for get udf, error:%s, funcName:%s", tstrerror(rspCode), target);
|
||||
qError("func:%s, error rsp for get udf, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get udf rsp failed, error:%s, funcName:%s", tstrerror(code), target);
|
||||
qError("func:%s, Process get udf rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got udf from mnode, funcName:%s", target);
|
||||
qDebug("func:%s, get udf from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_GET_USER_AUTH: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for get user auth, error:%s, user:%s", tstrerror(rspCode), target);
|
||||
qError("user:%s, error rsp for get user auth, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get user auth rsp failed, error:%s, user:%s", tstrerror(code), target);
|
||||
qError("user:%s, process get user auth rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got user auth from mnode, user:%s", target);
|
||||
qDebug("user:%s, get user auth from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_TABLE_META: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
if (CTG_TABLE_NOT_EXIST(rspCode)) {
|
||||
SET_META_TYPE_NULL(((STableMetaOutput*)out)->metaType);
|
||||
qDebug("stablemeta not exist in mnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, stablemeta not exist in mnode", target);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
qError("error rsp for stablemeta from mnode, error:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for stablemeta from mnode, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process mnode stablemeta rsp failed, error:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, process mnode stablemeta rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got table meta from mnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get table meta from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_TABLE_META: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
if (CTG_TABLE_NOT_EXIST(rspCode)) {
|
||||
SET_META_TYPE_NULL(((STableMetaOutput*)out)->metaType);
|
||||
qDebug("tablemeta not exist in vnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, tablemeta not exist in vnode", target);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
qError("error rsp for table meta from vnode, code:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for table meta from vnode, code:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process vnode tablemeta rsp failed, code:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, process vnode tablemeta rsp failed, code:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got table meta from vnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get table meta from vnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_TABLE_NAME: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
if (CTG_TABLE_NOT_EXIST(rspCode)) {
|
||||
SET_META_TYPE_NULL(((STableMetaOutput*)out)->metaType);
|
||||
qDebug("tablemeta not exist in vnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, tablemeta not exist in vnode", target);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
qError("error rsp for table meta from vnode, code:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for table meta from vnode, code:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process vnode tablemeta rsp failed, code:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, process vnode tablemeta rsp failed, code:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got table meta from vnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get table meta from vnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_TABLE_CFG: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for table cfg from vnode, code:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for table cfg from vnode, code:%s,", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process vnode tb cfg rsp failed, code:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, process vnode tb cfg rsp failed, code:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got table cfg from vnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get table cfg from vnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_TABLE_CFG: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("error rsp for stb cfg from mnode, error:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for stb cfg from mnode, error:%s", target, tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process mnode stb cfg rsp failed, error:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, Process mnode stb cfg rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got stb cfg from mnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get stb cfg from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_SERVER_VERSION: {
|
||||
|
@ -357,11 +357,11 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
|
|||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process svr ver rsp failed, error:%s", tstrerror(code));
|
||||
qError("process svr ver rsp failed, error:%s", tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got svr ver from mnode");
|
||||
qDebug("get svr ver from mnode");
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_VIEW_META: {
|
||||
|
@ -376,29 +376,29 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
|
|||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get view-meta rsp failed, error:%s, viewFName:%s", tstrerror(code), target);
|
||||
qError("view:%s, process get view-meta rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got view-meta from mnode, viewFName:%s", target);
|
||||
qDebug("view:%s, get view-meta from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_MND_GET_TSMA:
|
||||
case TDMT_MND_GET_TABLE_TSMA: {
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
if (TSDB_CODE_MND_SMA_NOT_EXIST != rspCode) {
|
||||
qError("error rsp for get table tsma, error:%s, tbFName:%s", tstrerror(rspCode), target);
|
||||
qError("tb:%s, error rsp for get table tsma, error:%s", target, tstrerror(rspCode));
|
||||
}
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get table tsma rsp failed, error:%s, tbFName:%s", tstrerror(code), target);
|
||||
qError("tb:%s, Process get table tsma rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
|
||||
qDebug("Got table tsma from mnode, tbFName:%s", target);
|
||||
qDebug("tb:%s, get table tsma from mnode", target);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_GET_STREAM_PROGRESS: {
|
||||
|
@ -407,14 +407,14 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
|
|||
}
|
||||
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
|
||||
if (code) {
|
||||
qError("Process get stream progress rsp failed, err: %s, tbFName: %s", tstrerror(code), target);
|
||||
qError("tb:%s, process get stream progress rsp failed, error:%s", target, tstrerror(code));
|
||||
CTG_ERR_RET(code);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
if (TSDB_CODE_SUCCESS != rspCode) {
|
||||
qError("Got error rsp, error:%s", tstrerror(rspCode));
|
||||
qError("get error rsp, error:%s", tstrerror(rspCode));
|
||||
CTG_ERR_RET(rspCode);
|
||||
}
|
||||
|
||||
|
@ -455,7 +455,7 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) {
|
|||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId,
|
||||
qDebug("QID:0x%" PRIx64 ", ctg task:%d start to handle rsp:%s", pJob->queryId, pTask->taskId,
|
||||
TMSG_INFO(cbParam->reqType + 1));
|
||||
|
||||
#if CTG_BATCH_FETCH
|
||||
|
@ -468,7 +468,7 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) {
|
|||
|
||||
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
|
||||
if (NULL == pMsgCtx) {
|
||||
ctgError("get task %d SCtgMsgCtx failed, taskType:%d", -1, pTask->type);
|
||||
ctgError("task:%d, get SCtgMsgCtx failed, taskType:%d", -1, pTask->type);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -562,7 +562,7 @@ int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob* pJob,
|
|||
CTG_ERR_JRET(code);
|
||||
}
|
||||
|
||||
ctgDebug("ctg req msg sent,QID:0x%" PRIx64 ", msg type:%d, %s", pJob->queryId, msgType, TMSG_INFO(msgType));
|
||||
ctgDebug("ctg req msg sent, QID:0x%" PRIx64 ", msg type:%d, %s", pJob->queryId, msgType, TMSG_INFO(msgType));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_return:
|
||||
|
@ -583,7 +583,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
|
|||
SBatchMsg req = {0};
|
||||
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
|
||||
if (NULL == pMsgCtx) {
|
||||
ctgError("get task %d SCtgMsgCtx failed, taskType:%d", tReq->msgIdx, pTask->type);
|
||||
ctgError("task:%d, get SCtgMsgCtx failed, taskType:%d", tReq->msgIdx, pTask->type);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -677,7 +677,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
|
|||
CTG_ERR_JRET(terrno);
|
||||
}
|
||||
|
||||
ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), newBatch.batchId,
|
||||
ctgDebug("task:%d, %s req added to batch:%d, target vgId:%d", pTask->taskId, TMSG_INFO(msgType), newBatch.batchId,
|
||||
vgId);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -751,7 +751,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
|
|||
(void)tNameGetFullDbName(pName, pBatch->dbFName);
|
||||
}
|
||||
|
||||
ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId,
|
||||
ctgDebug("task:%d, %s req added to batch:%d, target vgId:%d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId,
|
||||
vgId);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -795,7 +795,7 @@ int32_t ctgBuildBatchReqMsg(SCtgBatch* pBatch, int32_t vgId, void** msg, int32_t
|
|||
|
||||
*pSize = msgSize;
|
||||
|
||||
qDebug("batch req %d to vg %d msg built with %d meta reqs", pBatch->batchId, vgId, num);
|
||||
qDebug("batch:%d, batch req to vgId:%d msg built with %d meta reqs", pBatch->batchId, vgId, num);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -810,7 +810,7 @@ int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob* pJob, SHashObj* pBatchs) {
|
|||
SCtgBatch* pBatch = (SCtgBatch*)p;
|
||||
int32_t msgSize = 0;
|
||||
|
||||
ctgDebug("QID:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId);
|
||||
ctgDebug("QID:0x%" PRIx64 ", ctg start to launch batch:%d", pJob->queryId, pBatch->batchId);
|
||||
|
||||
CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg, &msgSize));
|
||||
code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId, pBatch->pMsgIdxs,
|
||||
|
|
|
@@ -50,21 +50,21 @@ int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size)
  if (NULL == slot->meta) {
    slot->meta = taosArrayInit(CTG_DEFAULT_RENT_SLOT_SIZE, size);
    if (NULL == slot->meta) {
-     qError("taosArrayInit %d failed, id:0x%" PRIx64 ", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx,
+     qError("id:0x%" PRIx64 ", taosArrayInit %d failed, slot idx:%d, type:%d", id, CTG_DEFAULT_RENT_SLOT_SIZE, widx,
             mgmt->type);
      CTG_ERR_JRET(terrno);
    }
  }

  if (NULL == taosArrayPush(slot->meta, meta)) {
-   qError("taosArrayPush meta to rent failed, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+   qError("id:0x%" PRIx64 ", taosArrayPush meta to rent failed, slot idx:%d, type:%d", id, widx, mgmt->type);
    CTG_ERR_JRET(terrno);
  }

  mgmt->rentCacheSize += size;
  slot->needSort = true;

- qDebug("add meta to rent, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("id:0x%" PRIx64 ", add meta to rent, slot idx:%d, type:%d", id, widx, mgmt->type);

_return:
@@ -81,36 +81,37 @@ int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t si

  CTG_LOCK(CTG_WRITE, &slot->lock);
  if (NULL == slot->meta) {
-   qDebug("empty meta slot, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+   qDebug("id:0x%" PRIx64 ", empty meta slot, slot idx:%d, type:%d", id, widx, mgmt->type);
    CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
  }

  if (slot->needSort) {
-   qDebug("meta slot before sorte, slot idx:%d, type:%d, size:%d", widx, mgmt->type,
+   qDebug("id:0x%" PRIx64 ", meta slot before sort, slot idx:%d, type:%d, size:%d", id, widx, mgmt->type,
           (int32_t)taosArrayGetSize(slot->meta));
    taosArraySort(slot->meta, sortCompare);
    slot->needSort = false;
-   qDebug("meta slot sorted, slot idx:%d, type:%d, size:%d", widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta));
+   qDebug("id:0x%" PRIx64 ", meta slot sorted, slot idx:%d, type:%d, size:%d", id, widx, mgmt->type,
+          (int32_t)taosArrayGetSize(slot->meta));
  }

  void *orig = taosArraySearch(slot->meta, &id, searchCompare, TD_EQ);
  if (NULL == orig) {
-   qDebug("meta not found in slot, id:0x%" PRIx64 ", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type,
+   qDebug("id:0x%" PRIx64 ", meta not found in slot, slot idx:%d, type:%d, size:%d", id, widx, mgmt->type,
           (int32_t)taosArrayGetSize(slot->meta));
    CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
  }

  TAOS_MEMCPY(orig, meta, size);

- qDebug("meta in rent updated, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("id:0x%" PRIx64 ", meta in rent updated, slot idx:%d, type:%d", id, widx, mgmt->type);

_return:

  CTG_UNLOCK(CTG_WRITE, &slot->lock);

  if (code) {
-   qDebug("meta in rent update failed, will try to add it, code:%x, id:0x%" PRIx64 ", slot idx:%d, type:%d", code, id,
-          widx, mgmt->type);
+   qDebug("id:0x%" PRIx64 ", meta in rent update failed, will try to add it, code:0x%x, slot idx:%d, type:%d", id,
+          code, widx, mgmt->type);
    CTG_RET(ctgMetaRentAdd(mgmt, meta, id, size));
  }
@@ -125,26 +126,26 @@ int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortComp

  CTG_LOCK(CTG_WRITE, &slot->lock);
  if (NULL == slot->meta) {
-   qError("empty meta slot, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+   qError("id:0x%" PRIx64 ", empty meta slot, slot idx:%d, type:%d", id, widx, mgmt->type);
    CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
  }

  if (slot->needSort) {
    taosArraySort(slot->meta, sortCompare);
    slot->needSort = false;
-   qDebug("meta slot sorted, slot idx:%d, type:%d", widx, mgmt->type);
+   qDebug("id:0x%" PRIx64 ", meta slot sorted, slot idx:%d, type:%d", id, widx, mgmt->type);
  }

  int32_t idx = taosArraySearchIdx(slot->meta, &id, searchCompare, TD_EQ);
  if (idx < 0) {
-   qError("meta not found in slot, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+   qError("id:0x%" PRIx64 ", meta not found in slot, slot idx:%d, type:%d", id, widx, mgmt->type);
    CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
  }

  taosArrayRemove(slot->meta, idx);
  mgmt->rentCacheSize -= mgmt->metaSize;

- qDebug("meta in rent removed, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("id:0x%" PRIx64 ", meta in rent removed, slot idx:%d, type:%d", id, widx, mgmt->type);

_return:
@@ -194,7 +195,7 @@ int32_t ctgMetaRentGetImpl(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_

  *num = (uint32_t)metaNum;

- qDebug("Got %d meta from rent, type:%d", (int32_t)metaNum, mgmt->type);
+ qDebug("get %d meta from rent, type:%d", (int32_t)metaNum, mgmt->type);

_return:

@@ -239,7 +240,7 @@ void ctgRemoveStbRent(SCatalog *pCtg, SCtgDBCache *dbCache) {

    code = ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare);
    if (TSDB_CODE_SUCCESS == code) {
-     ctgDebug("stb removed from rent, suid:0x%" PRIx64, *suid);
+     ctgDebug("suid:0x%" PRIx64 ", stb removed from rent", *suid);
    }

    pIter = taosHashIterate(dbCache->stbCache, pIter);

@@ -257,7 +258,7 @@ void ctgRemoveViewRent(SCatalog *pCtg, SCtgDBCache *dbCache) {

    if (TSDB_CODE_SUCCESS ==
        ctgMetaRentRemove(&pCtg->viewRent, viewId, ctgViewVersionSortCompare, ctgViewVersionSearchCompare)) {
-     ctgDebug("view removed from rent, viewId:0x%" PRIx64, viewId);
+     ctgDebug("viewId:0x%" PRIx64 ", view removed from rent", viewId);
    }

    pIter = taosHashIterate(dbCache->viewCache, pIter);

@@ -276,7 +277,7 @@ void ctgRemoveTSMARent(SCatalog *pCtg, SCtgDBCache *dbCache) {
    for (int32_t i = 0; i < size; ++i) {
      STSMACache* pCache = taosArrayGetP(pCtgCache->pTsmas, i);
      if (TSDB_CODE_SUCCESS == ctgMetaRentRemove(&pCtg->tsmaRent, pCache->tsmaId, ctgTSMAVersionSortCompare, ctgTSMAVersionSearchCompare)) {
-       ctgDebug("tsma removed from rent, viewId: %" PRIx64 " name: %s.%s.%s", pCache->tsmaId, pCache->dbFName, pCache->tb, pCache->name);
+       ctgDebug("tsma:0x%" PRIx64 ", tsma removed from rent, name:%s.%s.%s", pCache->tsmaId, pCache->dbFName, pCache->tb, pCache->name);
      }
    }
    CTG_UNLOCK(CTG_READ, &pCtgCache->tsmaLock);

@@ -303,8 +304,8 @@ int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char *dbFName, char *tbName, uin
  CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableVersion),
                                ctgStbVersionSortCompare, ctgStbVersionSearchCompare));

- ctgDebug("db %s,0x%" PRIx64 " stb %s,0x%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent", dbFName, dbId,
-          tbName, suid, metaRent.sversion, metaRent.tversion, metaRent.smaVer);
+ ctgDebug("suid:0x%" PRIx64 ", db %s, dbId:0x%" PRIx64 ", stb:%s, sver:%d tver:%d smaVer:%d updated to stbRent", suid,
+          dbFName, dbId, tbName, metaRent.sversion, metaRent.tversion, metaRent.smaVer);

  return TSDB_CODE_SUCCESS;
}

@@ -321,7 +322,7 @@ int32_t ctgUpdateRentViewVersion(SCatalog *pCtg, char *dbFName, char *viewName,
  CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->viewRent, &metaRent, metaRent.viewId, sizeof(SViewVersion),
                                ctgViewVersionSortCompare, ctgViewVersionSearchCompare));

- ctgDebug("db %s,0x%" PRIx64 " view %s,0x%" PRIx64 " version %d updated to viewRent", dbFName, dbId, viewName, viewId, metaRent.version);
+ ctgDebug("viewId:0x%" PRIx64 ", db %s, dbId:0x%" PRIx64 ", view %s, version:%d updated to viewRent", viewId, dbFName, dbId, viewName, metaRent.version);

  return TSDB_CODE_SUCCESS;
}

@@ -336,8 +337,8 @@ int32_t ctgUpdateRentTSMAVersion(SCatalog *pCtg, char *dbFName, const STSMACache
  CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->tsmaRent, &tsmaRent, tsmaRent.tsmaId, sizeof(STSMAVersion),
                                ctgTSMAVersionSortCompare, ctgTSMAVersionSearchCompare));

- ctgDebug("db %s, 0x%" PRIx64 " tsma %s, 0x%" PRIx64 "version %d updated to tsmaRent", dbFName, tsmaRent.dbId,
-          pTsmaInfo->name, pTsmaInfo->tsmaId, pTsmaInfo->version);
+ ctgDebug("tsma:0x%" PRIx64 ", db:%s, dbId:0x%" PRIx64 ", view:%s, version:%d updated to tsmaRent", pTsmaInfo->tsmaId,
+          dbFName, tsmaRent.dbId, pTsmaInfo->name, pTsmaInfo->version);

  return TSDB_CODE_SUCCESS;
}
@@ -430,7 +430,7 @@ void ctgFreeHandle(SCatalog* pCtg) {

  taosMemoryFree(pCtg);

- ctgInfo("handle freed, clusterId:0x%" PRIx64, clusterId);
+ ctgInfo("clusterId:0x%" PRIx64 ", handle freed", clusterId);
}

void ctgClearHandleMeta(SCatalog* pCtg, int64_t* pClearedSize, int64_t* pCleardNum, bool* roundDone) {

@@ -541,7 +541,7 @@ void ctgClearHandle(SCatalog* pCtg) {

  CTG_STAT_RT_INC(numOfOpClearCache, 1);

- ctgInfo("handle cleared, clusterId:0x%" PRIx64, clusterId);
+ ctgInfo("clusterId:0x%" PRIx64 ", handle cleared", clusterId);
}

void ctgFreeSUseDbOutput(SUseDbOutput* pOutput) {
@@ -1097,7 +1097,7 @@ void ctgFreeJob(void* job) {

  taosMemoryFree(job);

- qDebug("QID:0x%" PRIx64 ", ctg job 0x%" PRIx64 " freed", qid, rid);
+ qDebug("QID:0x%" PRIx64 ", ctg jobId:0x%" PRIx64 " freed", qid, rid);
}

int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target) {

@@ -1196,7 +1196,7 @@ int32_t ctgGenerateVgList(SCatalog* pCtg, SHashObj* vgHash, SArray** pList, cons

  *pList = vgList;

- ctgDebug("Got vgList from cache, vgNum:%d", vgNum);
+ ctgDebug("get vgList from cache, vgNum:%d", vgNum);

  return TSDB_CODE_SUCCESS;
@ -1286,14 +1286,14 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* d
|
|||
*/
|
||||
|
||||
if (NULL == vgInfo) {
|
||||
ctgError("no hash range found for hash value [%u], db:%s, numOfVgId:%d", hashValue, db,
|
||||
ctgError("tb:%s, no hash range found for hash value [%u], db:%s, numOfVgId:%d", tbFullName, hashValue, db,
|
||||
(int32_t)taosArrayGetSize(dbInfo->vgArray));
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
*pVgroup = *vgInfo;
|
||||
|
||||
ctgDebug("Got tb %s hash vgroup, vgId:%d, epNum %d, current %s port %d", tbFullName, vgInfo->vgId,
|
||||
ctgDebug("tb:%s, get hash vgroup, vgId:%d, epNum %d, current:%s port:%d", tbFullName, vgInfo->vgId,
|
||||
vgInfo->epSet.numOfEps, vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn,
|
||||
vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
|
||||
|
||||
|
@ -1327,7 +1327,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskR
|
|||
|
||||
TAOS_MEMCPY(vgInfo, &mgmtInfo, sizeof(mgmtInfo));
|
||||
|
||||
ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
|
||||
ctgDebug("get tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
|
||||
vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
|
||||
|
||||
if (update) {
|
||||
|
@ -1376,7 +1376,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskR
|
|||
|
||||
TAOS_MEMCPY(vgInfo, pSrcVg, sizeof(*pSrcVg));
|
||||
|
||||
ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
|
||||
ctgDebug("get tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
|
||||
vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
|
||||
|
||||
if (update) {
|
||||
|
@ -1437,7 +1437,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskR
|
|||
|
||||
*pNewVg = *vgInfo;
|
||||
|
||||
ctgDebug("Got tb %s hash vgroup, vgId:%d, epNum %d, current %s port %d", tbFullName, vgInfo->vgId,
|
||||
ctgDebug("tb:%s, get hash vgroup, vgId:%d, epNum %d, current %s port %d", tbFullName, vgInfo->vgId,
|
||||
vgInfo->epSet.numOfEps, vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn,
|
||||
vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
|
||||
|
||||
|
@ -1496,7 +1496,7 @@ int32_t ctgGetVgIdsFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, char* dbFNam
|
|||
|
||||
vgId[i] = vgInfo->vgId;
|
||||
|
||||
ctgDebug("Got tb %s vgId:%d", tbFullName, vgInfo->vgId);
|
||||
ctgDebug("tb:%s, get vgId:%d", tbFullName, vgInfo->vgId);
|
||||
}
|
||||
|
||||
CTG_RET(code);
|
||||
|
@ -1689,7 +1689,7 @@ int32_t ctgCloneMetaOutput(STableMetaOutput* output, STableMetaOutput** pOutput)
|
|||
}
|
||||
|
||||
(*pOutput)->tbMeta = taosMemoryMalloc(metaSize + schemaExtSize);
|
||||
qDebug("tbMeta cloned, size:%d, p:%p", metaSize, (*pOutput)->tbMeta);
|
||||
qTrace("tbmeta cloned, size:%d, p:%p", metaSize, (*pOutput)->tbMeta);
|
||||
if (NULL == (*pOutput)->tbMeta) {
|
||||
qError("malloc %d failed", (int32_t)sizeof(STableMetaOutput));
|
||||
taosMemoryFreeClear(*pOutput);
|
||||
|
@ -2076,7 +2076,7 @@ int32_t ctgChkSetTbAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
|
|||
if (NULL == pMeta) {
|
||||
if (req->onlyCache) {
|
||||
res->metaNotExists = true;
|
||||
ctgDebug("db %s tb %s meta not in cache for auth", req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname);
|
||||
ctgDebug("db:%s, tb:%s meta not in cache for auth", req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
@ -2097,7 +2097,7 @@ int32_t ctgChkSetTbAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
|
|||
if (NULL == stbName) {
|
||||
if (req->onlyCache) {
|
||||
res->metaNotExists = true;
|
||||
ctgDebug("suid %" PRIu64 " name not in cache for auth", pMeta->suid);
|
||||
ctgDebug("suid:%" PRIu64 ", name not in cache for auth", pMeta->suid);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
@ -2108,7 +2108,7 @@ int32_t ctgChkSetTbAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
|
|||
continue;
|
||||
}
|
||||
|
||||
ctgError("Invalid table type %d for %s", pMeta->tableType, tbFName);
|
||||
ctgError("invalid table type %d for %s", pMeta->tableType, tbFName);
|
||||
CTG_ERR_JRET(TSDB_CODE_INVALID_PARA);
|
||||
}
|
||||
|
||||
|
@ -2740,8 +2740,8 @@ bool isCtgTSMACacheOutOfDate(STSMACache* pTsmaCache) {
|
|||
bool ret = !pTsmaCache->fillHistoryFinished ||
|
||||
(tsMaxTsmaCalcDelay * 1000 - pTsmaCache->delayDuration) < (now - pTsmaCache->reqTs);
|
||||
if (ret) {
|
||||
qDebug("tsma %s.%s in cache has been out of date, history finished: %d, remain valid after: %" PRId64
|
||||
" passed: %" PRId64,
|
||||
qDebug("tsma:%s.%s in cache has been out of date, history finished:%d, remain valid after:%" PRId64
|
||||
" passed:%" PRId64,
|
||||
pTsmaCache->dbFName, pTsmaCache->name, pTsmaCache->fillHistoryFinished,
|
||||
tsMaxTsmaCalcDelay * 1000 - pTsmaCache->delayDuration, now - pTsmaCache->reqTs);
|
||||
}
|
||||
|
|
|
@@ -360,7 +360,7 @@ int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) {
      if (pCtx[k].fpSet.cleanup != NULL) {
        pCtx[k].fpSet.cleanup(&pCtx[k]);
      }
-     qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
+     qError("%s aggregate function error happens, code:%s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
      return code;
    }
  }

@@ -803,7 +803,7 @@ int32_t applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx*
      if (pCtx[k].fpSet.cleanup != NULL) {
        pCtx[k].fpSet.cleanup(&pCtx[k]);
      }
-     qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
+     qError("%s apply functions error, code:%s", GET_TASKID(taskInfo), tstrerror(code));
      taskInfo->code = code;
      return code;
    }
@ -2911,7 +2911,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
|
|||
int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr,
|
||||
&pTaskInfo->storageAPI);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("failed to getTableList, code: %s", tstrerror(code));
|
||||
qError("failed to getTableList, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -645,7 +645,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
|
||||
int32_t code = createExecTaskInfo(pSubplan, pTask, readHandle, taskId, vgId, sql, model);
|
||||
if (code != TSDB_CODE_SUCCESS || NULL == *pTask) {
|
||||
qError("failed to createExecTaskInfo, code: %s", tstrerror(code));
|
||||
qError("failed to createExecTaskInfo, code:%s", tstrerror(code));
|
||||
goto _error;
|
||||
}
|
||||
|
||||
|
|
|
@ -292,7 +292,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
|||
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
|
||||
int32_t numOfGroupCols = taosArrayGetSize(pInfo->pGroupCols);
|
||||
// if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) {
|
||||
// qError("QInfo:0x%"PRIx64" group by not supported on double/float columns, abort", GET_TASKID(pRuntimeEnv));
|
||||
// qError("QInfo:0x%" PRIx64 ", group by not supported on double/float columns, abort", GET_TASKID(pRuntimeEnv));
|
||||
// return;
|
||||
// }
|
||||
|
||||
|
|
|
@ -349,7 +349,7 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
|
|||
if (code) {
|
||||
pTaskInfo->code = code;
|
||||
tableListDestroy(pTableListInfo);
|
||||
qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
|
||||
qError("failed to createScanTableListInfo, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -387,7 +387,7 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
|
|||
if (code) {
|
||||
pTaskInfo->code = code;
|
||||
tableListDestroy(pTableListInfo);
|
||||
qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
|
||||
qError("failed to createScanTableListInfo, code:%s", tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
@ -417,7 +417,7 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
|
|||
pTagIndexCond, pTaskInfo);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pTaskInfo->code = code;
|
||||
qError("failed to getTableList, code: %s", tstrerror(code));
|
||||
qError("failed to getTableList, code:%s", tstrerror(code));
|
||||
tableListDestroy(pTableListInfo);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -5193,7 +5193,7 @@ static int32_t doTagScanFromMetaEntryNext(SOperatorInfo* pOperator, SSDataBlock*
|
|||
setOperatorCompleted(pOperator);
|
||||
}
|
||||
|
||||
// qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
|
||||
// qDebug("QInfo:0x%" PRIx64 ", create tag values results completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
setTaskStatus(pTaskInfo, TASK_COMPLETED);
|
||||
}
|
||||
|
|
|
@@ -25,9 +25,9 @@ extern "C" {
  #define fnFatal(...) { if (udfDebugFlag & DEBUG_FATAL) { taosPrintLog("UDF FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }}
  #define fnError(...) { if (udfDebugFlag & DEBUG_ERROR) { taosPrintLog("UDF ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }}
  #define fnWarn(...) { if (udfDebugFlag & DEBUG_WARN) { taosPrintLog("UDF WARN ", DEBUG_WARN, 255, __VA_ARGS__); }}
- #define fnInfo(...) { if (udfDebugFlag & DEBUG_INFO) { taosPrintLog("UDF ", DEBUG_INFO, 255, __VA_ARGS__); }}
- #define fnDebug(...) { if (udfDebugFlag & DEBUG_DEBUG) { taosPrintLog("UDF ", DEBUG_DEBUG, udfDebugFlag, __VA_ARGS__); }}
- #define fnTrace(...) { if (udfDebugFlag & DEBUG_TRACE) { taosPrintLog("UDF ", DEBUG_TRACE, udfDebugFlag, __VA_ARGS__); }}
+ #define fnInfo(...) { if (udfDebugFlag & DEBUG_INFO) { taosPrintLog("UDF INFO ", DEBUG_INFO, 255, __VA_ARGS__); }}
+ #define fnDebug(...) { if (udfDebugFlag & DEBUG_DEBUG) { taosPrintLog("UDF DEBUG ", DEBUG_DEBUG, udfDebugFlag, __VA_ARGS__); }}
+ #define fnTrace(...) { if (udfDebugFlag & DEBUG_TRACE) { taosPrintLog("UDF TRACE ", DEBUG_TRACE, udfDebugFlag, __VA_ARGS__); }}
  // clang-format on

  #ifdef __cplusplus
@ -1733,7 +1733,7 @@ void onUdfcPipeWrite(uv_write_t *write, int32_t status) {
|
|||
void onUdfcPipeConnect(uv_connect_t *connect, int32_t status) {
|
||||
SClientUvTaskNode *uvTask = connect->data;
|
||||
if (status != 0) {
|
||||
fnError("client connect error, task seq: %" PRId64 ", code: %s", uvTask->seqNum, uv_strerror(status));
|
||||
fnError("client connect error, task seq: %" PRId64 ", code:%s", uvTask->seqNum, uv_strerror(status));
|
||||
}
|
||||
uvTask->errCode = status;
|
||||
|
||||
|
@ -1812,7 +1812,7 @@ int32_t udfcQueueUvTask(SClientUvTaskNode *uvTask) {
|
|||
uv_mutex_unlock(&udfc->taskQueueMutex);
|
||||
int32_t code = uv_async_send(&udfc->loopTaskAync);
|
||||
if (code != 0) {
|
||||
fnError("udfc queue uv task to event loop failed. code: %s", uv_strerror(code));
|
||||
fnError("udfc queue uv task to event loop failed. code:%s", uv_strerror(code));
|
||||
return TSDB_CODE_UDF_UV_EXEC_FAILURE;
|
||||
}
|
||||
|
||||
|
@ -1884,7 +1884,7 @@ int32_t udfcStartUvTask(SClientUvTaskNode *uvTask) {
|
|||
int32_t err = uv_write(write, (uv_stream_t *)pipe, &uvTask->reqBuf, 1, onUdfcPipeWrite);
|
||||
if (err != 0) {
|
||||
taosMemoryFree(write);
|
||||
fnError("udfc event loop start req_rsp task uv_write failed. uvtask: %p, code: %s", uvTask, uv_strerror(err));
|
||||
fnError("udfc event loop start req_rsp task uv_write failed. uvtask: %p, code:%s", uvTask, uv_strerror(err));
|
||||
}
|
||||
code = err;
|
||||
}
|
||||
|
|
|
@ -1106,7 +1106,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
|||
}
|
||||
|
||||
if (pMsg->code != TSDB_CODE_SUCCESS) {
|
||||
fnError("udfd rpc error. code: %s", tstrerror(pMsg->code));
|
||||
fnError("udfd rpc error, code:%s", tstrerror(pMsg->code));
|
||||
msgInfo->code = pMsg->code;
|
||||
goto _return;
|
||||
}
|
||||
|
@ -1312,7 +1312,7 @@ void udfdOnWrite(uv_write_t *req, int status) {
|
|||
TAOS_UDF_CHECK_PTR_RVOID(req);
|
||||
SUvUdfWork *work = (SUvUdfWork *)req->data;
|
||||
if (status < 0) {
|
||||
fnError("udfd send response error, length: %zu code: %s", work->output.len, uv_err_name(status));
|
||||
fnError("udfd send response error, length:%zu code:%s", work->output.len, uv_err_name(status));
|
||||
}
|
||||
// remove work from the connection work list
|
||||
if (work->conn != NULL) {
|
||||
|
@ -1477,7 +1477,7 @@ void udfdPipeRead(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
|
|||
void udfdOnNewConnection(uv_stream_t *server, int status) {
|
||||
TAOS_UDF_CHECK_PTR_RVOID(server);
|
||||
if (status < 0) {
|
||||
fnError("udfd new connection error. code: %s", uv_strerror(status));
|
||||
fnError("udfd new connection error, code:%s", uv_strerror(status));
|
||||
return;
|
||||
}
|
||||
int32_t code = 0;
|
||||
|
|
|
@@ -32,12 +32,12 @@ extern "C" {
  #endif

  // clang-format off
- #define indexFatal(...) do { if (idxDebugFlag & DEBUG_FATAL) { taosPrintLog("IDX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while (0)
- #define indexError(...) do { if (idxDebugFlag & DEBUG_ERROR) { taosPrintLog("IDX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while (0)
- #define indexWarn(...) do { if (idxDebugFlag & DEBUG_WARN) { taosPrintLog("IDX WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while (0)
- #define indexInfo(...) do { if (idxDebugFlag & DEBUG_INFO) { taosPrintLog("IDX ", DEBUG_INFO, 255, __VA_ARGS__); } } while (0)
- #define indexDebug(...) do { if (idxDebugFlag & DEBUG_DEBUG) { taosPrintLog("IDX ", DEBUG_DEBUG, idxDebugFlag, __VA_ARGS__);} } while (0)
- #define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("IDX", DEBUG_TRACE, idxDebugFlag, __VA_ARGS__);} } while (0)
+ #define indexFatal(...) do { if (idxDebugFlag & DEBUG_FATAL) { taosPrintLog("IDX FATAL ", DEBUG_FATAL, 255, __VA_ARGS__);}} while (0)
+ #define indexError(...) do { if (idxDebugFlag & DEBUG_ERROR) { taosPrintLog("IDX ERROR ", DEBUG_ERROR, 255, __VA_ARGS__);}} while (0)
+ #define indexWarn(...) do { if (idxDebugFlag & DEBUG_WARN) { taosPrintLog("IDX WARN ", DEBUG_WARN, 255, __VA_ARGS__);}} while (0)
+ #define indexInfo(...) do { if (idxDebugFlag & DEBUG_INFO) { taosPrintLog("IDX INFO ", DEBUG_INFO, 255, __VA_ARGS__);}} while (0)
+ #define indexDebug(...) do { if (idxDebugFlag & DEBUG_DEBUG) { taosPrintLog("IDX DEBUG ", DEBUG_DEBUG, idxDebugFlag, __VA_ARGS__);}} while (0)
+ #define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("IDX TRACE ", DEBUG_TRACE, idxDebugFlag, __VA_ARGS__);}} while (0)
  // clang-format on

  extern void* indexQhandle;
@@ -20,12 +20,12 @@
  extern "C" {
  #endif

- #define nodesFatal(...) qFatal("NODES: " __VA_ARGS__)
- #define nodesError(...) qError("NODES: " __VA_ARGS__)
- #define nodesWarn(...) qWarn("NODES: " __VA_ARGS__)
- #define nodesInfo(...) qInfo("NODES: " __VA_ARGS__)
- #define nodesDebug(...) qDebug("NODES: " __VA_ARGS__)
- #define nodesTrace(...) qTrace("NODES: " __VA_ARGS__)
+ #define nodesFatal(...) qFatal("NODES " __VA_ARGS__)
+ #define nodesError(...) qError("NODES " __VA_ARGS__)
+ #define nodesWarn(...) qWarn ("NODES " __VA_ARGS__)
+ #define nodesInfo(...) qInfo ("NODES " __VA_ARGS__)
+ #define nodesDebug(...) qDebug("NODES " __VA_ARGS__)
+ #define nodesTrace(...) qTrace("NODES " __VA_ARGS__)

  #define NODES_ERR_RET(c) \
    do { \
@ -202,7 +202,7 @@ static void destroyNodeAllocator(void* p) {
|
|||
|
||||
SNodeAllocator* pAllocator = p;
|
||||
|
||||
nodesDebug("query id %" PRIx64 " allocator id %" PRIx64 " alloc chunkNum: %d, chunkTotakSize: %d",
|
||||
nodesDebug("QID:0x%" PRIx64 ", destroy allocatorId:0x%" PRIx64 ", chunkNum:%d, chunkTotakSize:%d",
|
||||
pAllocator->queryId, pAllocator->self, pAllocator->chunkNum, pAllocator->chunkNum * pAllocator->chunkSize);
|
||||
|
||||
SNodeMemChunk* pChunk = pAllocator->pChunks;
|
||||
|
@ -238,7 +238,7 @@ void nodesDestroyAllocatorSet() {
|
|||
refId = pAllocator->self;
|
||||
int32_t code = taosRemoveRef(g_allocatorReqRefPool, refId);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__,
|
||||
nodesError("failed to remove ref at %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__,
|
||||
g_allocatorReqRefPool, refId);
|
||||
}
|
||||
pAllocator = taosIterateRef(g_allocatorReqRefPool, refId);
|
||||
|
@ -301,9 +301,9 @@ int32_t nodesReleaseAllocator(int64_t allocatorId) {
|
|||
}
|
||||
|
||||
if (NULL == g_pNodeAllocator) {
|
||||
nodesError("allocator id %" PRIx64
|
||||
" release failed: The nodesReleaseAllocator function needs to be called after the nodesAcquireAllocator "
|
||||
"function is called!",
|
||||
nodesError("allocatorId:0x%" PRIx64
|
||||
", release failed, The nodesReleaseAllocator function needs to be called after the "
|
||||
"nodesAcquireAllocator function is called!",
|
||||
allocatorId);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
@ -320,7 +320,7 @@ int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId) {
|
|||
|
||||
SNodeAllocator* pAllocator = taosAcquireRef(g_allocatorReqRefPool, allocatorId);
|
||||
if (NULL == pAllocator) {
|
||||
nodesError("allocator id %" PRIx64 " weak reference failed", allocatorId);
|
||||
nodesError("allocatorId:0x%" PRIx64 ", weak reference failed", allocatorId);
|
||||
return -1;
|
||||
}
|
||||
return pAllocator->self;
|
||||
|
@ -335,7 +335,7 @@ void nodesDestroyAllocator(int64_t allocatorId) {
|
|||
|
||||
int32_t code = taosRemoveRef(g_allocatorReqRefPool, allocatorId);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__, g_allocatorReqRefPool,
|
||||
nodesError("failed to remove ref at %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__, g_allocatorReqRefPool,
|
||||
allocatorId);
|
||||
}
|
||||
}
|
||||
|
@@ -974,8 +974,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) {
    default:
      break;
  }
- if (TSDB_CODE_SUCCESS != code)
+ if (TSDB_CODE_SUCCESS != code) {
    nodesError("nodesMakeNode unknown node = %s", nodesNodeName(type));
+ }
  else
    *ppNodeOut = pNode;
  return code;

@@ -1349,7 +1350,7 @@ void nodesDestroyNode(SNode* pNode) {

      int32_t code = taosCloseFile(&pStmt->fp);
      if (TSDB_CODE_SUCCESS != code) {
-       nodesError("failed to close file: %s:%d", __func__, __LINE__);
+       nodesError("failed to close file %s:%d", __func__, __LINE__);
      }
      break;
    }
@@ -26,12 +26,12 @@ extern "C" {
  #include "parToken.h"
  #include "query.h"

- #define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
- #define parserError(param, ...) qError("PARSER: " param, ##__VA_ARGS__)
- #define parserWarn(param, ...) qWarn("PARSER: " param, ##__VA_ARGS__)
- #define parserInfo(param, ...) qInfo("PARSER: " param, ##__VA_ARGS__)
- #define parserDebug(param, ...) qDebug("PARSER: " param, ##__VA_ARGS__)
- #define parserTrace(param, ...) qTrace("PARSER: " param, ##__VA_ARGS__)
+ #define parserFatal(param, ...) qFatal("PARSER " param, ##__VA_ARGS__)
+ #define parserError(param, ...) qError("PARSER " param, ##__VA_ARGS__)
+ #define parserWarn(param, ...) qWarn ("PARSER " param, ##__VA_ARGS__)
+ #define parserInfo(param, ...) qInfo ("PARSER " param, ##__VA_ARGS__)
+ #define parserDebug(param, ...) qDebug("PARSER " param, ##__VA_ARGS__)
+ #define parserTrace(param, ...) qTrace("PARSER " param, ##__VA_ARGS__)

  #define ROWTS_PSEUDO_COLUMN_NAME "_rowts"
  #define C0_PSEUDO_COLUMN_NAME "_c0"
@ -2351,7 +2351,7 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
|
|||
}
|
||||
taosMemoryFree(pLine);
|
||||
|
||||
parserDebug("0x%" PRIx64 " %d rows have been parsed", pCxt->pComCxt->requestId, *pNumOfRows);
|
||||
parserDebug("QID:0x%" PRIx64 ", %d rows have been parsed", pCxt->pComCxt->requestId, *pNumOfRows);
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && 0 == (*pNumOfRows) && 0 == pStmt->totalRowsNum &&
|
||||
(!TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) && !pStmt->fileProcessing) {
|
||||
|
@ -2381,10 +2381,10 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt
|
|||
if (!pStmt->fileProcessing) {
|
||||
code = taosCloseFile(&pStmt->fp);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserWarn("0x%" PRIx64 " failed to close file.", pCxt->pComCxt->requestId);
|
||||
parserWarn("QID:0x%" PRIx64 ", failed to close file.", pCxt->pComCxt->requestId);
|
||||
}
|
||||
} else {
|
||||
parserDebug("0x%" PRIx64 " insert from csv. File is too large, do it in batches.", pCxt->pComCxt->requestId);
|
||||
parserDebug("QID:0x%" PRIx64 ", insert from csv. File is too large, do it in batches.", pCxt->pComCxt->requestId);
|
||||
}
|
||||
if (pStmt->insertType != TSDB_QUERY_TYPE_FILE_INSERT) {
|
||||
return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is exclusive", NULL);
|
||||
|
@ -2676,7 +2676,7 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
|
|||
if (pCxt->isStmtBind) {
|
||||
if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) {
|
||||
// In SQL statements, the table name has already been specified.
|
||||
parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param",
|
||||
parserWarn("QID:0x%" PRIx64 ", table name is specified in sql, ignore the table name in bind param",
|
||||
pCxt->pComCxt->requestId);
|
||||
}
|
||||
}
|
||||
|
@ -3194,14 +3194,14 @@ static int32_t buildInsertCatalogReq(SInsertParseContext* pCxt, SVnodeModifyOpSt
|
|||
static int32_t setNextStageInfo(SInsertParseContext* pCxt, SQuery* pQuery, SCatalogReq* pCatalogReq) {
|
||||
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot;
|
||||
if (pCxt->missCache) {
|
||||
parserDebug("0x%" PRIx64 " %d rows of %d tables have been inserted before cache miss", pCxt->pComCxt->requestId,
|
||||
parserDebug("QID:0x%" PRIx64 ", %d rows of %d tables will be inserted before cache miss", pCxt->pComCxt->requestId,
|
||||
pStmt->totalRowsNum, pStmt->totalTbNum);
|
||||
|
||||
pQuery->execStage = QUERY_EXEC_STAGE_PARSE;
|
||||
return buildInsertCatalogReq(pCxt, pStmt, pCatalogReq);
|
||||
}
|
||||
|
||||
parserDebug("0x%" PRIx64 " %d rows of %d tables have been inserted", pCxt->pComCxt->requestId, pStmt->totalRowsNum,
|
||||
parserDebug("QID:0x%" PRIx64 ", %d rows of %d tables will be inserted", pCxt->pComCxt->requestId, pStmt->totalRowsNum,
|
||||
pStmt->totalTbNum);
|
||||
|
||||
pQuery->execStage = QUERY_EXEC_STAGE_SCHEDULE;
|
||||
|
|
|
@ -374,7 +374,7 @@ int32_t qBindStmtStbColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind
|
|||
|
||||
code = tRowBuildFromBind(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols, &pDataBlock->ordered, &pDataBlock->duplicateTs);
|
||||
|
||||
qDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
|
||||
parserDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -427,7 +427,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
|
|||
}
|
||||
}
|
||||
|
||||
qDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
|
||||
parserDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -476,7 +476,7 @@ int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bi
|
|||
IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1,
|
||||
initCtxAsText, checkWKB);
|
||||
|
||||
qDebug("stmt col %d bind %d rows data", colIdx, rowNum);
|
||||
parserDebug("stmt col %d bind %d rows data", colIdx, rowNum);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -770,7 +770,7 @@ int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bin
|
|||
|
||||
code = tRowBuildFromBind2(pBindInfos, boundInfo->numOfBound, colInOrder, *pTSchema, pCols, &pDataBlock->ordered, &pDataBlock->duplicateTs);
|
||||
|
||||
qDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
|
||||
parserDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
|
||||
|
||||
_return:
|
||||
if (ncharBinds) {
|
||||
|
@ -888,7 +888,7 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind,
|
|||
}
|
||||
}
|
||||
|
||||
qDebug("stmt2 all %d columns bind %d rows data as col format", boundInfo->numOfBound, rowNum);
|
||||
parserDebug("stmt2 all %d columns bind %d rows data as col format", boundInfo->numOfBound, rowNum);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -937,7 +937,7 @@ int32_t qBindStmtSingleColValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* b
|
|||
IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE : -1,
|
||||
initCtxAsText, checkWKB);
|
||||
|
||||
qDebug("stmt col %d bind %d rows data", colIdx, rowNum);
|
||||
parserDebug("stmt col %d bind %d rows data", colIdx, rowNum);
|
||||
|
||||
_return:
|
||||
|
||||
|
@ -1205,7 +1205,7 @@ int32_t qResetStmtColumns(SArray* pCols, bool deepClear) {
|
|||
for (int32_t i = 0; i < colNum; ++i) {
|
||||
SColData* pCol = (SColData*)taosArrayGet(pCols, i);
|
||||
if (pCol == NULL) {
|
||||
qError("qResetStmtColumns column is NULL");
|
||||
parserError("qResetStmtColumns column:%d is NULL", i);
|
||||
return terrno;
|
||||
}
|
||||
if (deepClear) {
|
||||
|
@ -1226,7 +1226,7 @@ int32_t qResetStmtDataBlock(STableDataCxt* block, bool deepClear) {
|
|||
if (pBlock->pData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
|
||||
SColData* pCol = (SColData*)taosArrayGet(pBlock->pData->aCol, i);
|
||||
if (pCol == NULL) {
|
||||
qError("qResetStmtDataBlock column is NULL");
|
||||
parserError("qResetStmtDataBlock column:%d is NULL", i);
|
||||
return terrno;
|
||||
}
|
||||
if (deepClear) {
|
||||
|
|
|
@ -298,7 +298,7 @@ static int32_t createTableDataCxt(STableMeta* pTableMeta, SVCreateTbReq** pCreat
|
|||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
*pOutput = pTableCxt;
|
||||
qDebug("tableDataCxt created, code:%d, uid:%" PRId64 ", vgId:%d", code, pTableMeta->uid, pTableMeta->vgId);
|
||||
parserDebug("uid:%" PRId64 ", create table data context, code:%d, vgId:%d", pTableMeta->uid, code, pTableMeta->vgId);
|
||||
} else {
|
||||
insDestroyTableDataCxt(pTableCxt);
|
||||
}
|
||||
|
@ -478,7 +478,7 @@ static int32_t fillVgroupDataCxt(STableDataCxt* pTableCxt, SVgroupDataCxt* pVgCx
|
|||
taosMemoryFreeClear(pTableCxt->pData);
|
||||
}
|
||||
|
||||
qDebug("add tableDataCxt uid:%" PRId64 " to vgId:%d", pTableCxt->pMeta->uid, pVgCxt->vgId);
|
||||
parserDebug("uid:%" PRId64 ", add table data context to vgId:%d", pTableCxt->pMeta->uid, pVgCxt->vgId);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -572,7 +572,7 @@ int32_t insGetStmtTableVgUid(SHashObj* pAllVgHash, SStbInterlaceInfo* pBuildInfo
|
|||
code = catalogGetTableMeta((SCatalog*)pBuildInfo->pCatalog, &conn, &sname, &pTableMeta);
|
||||
|
||||
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code) {
|
||||
parserDebug("tb %s.%s not exist", sname.dbname, sname.tname);
|
||||
parserDebug("tb:%s.%s not exist", sname.dbname, sname.tname);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -760,7 +760,7 @@ int32_t insMergeTableDataCxt(SHashObj* pTableHash, SArray** pVgDataBlocks, bool
|
|||
// skip the table has no data to insert
|
||||
// eg: import a csv without valid data
|
||||
// if (0 == taosArrayGetSize(pTableCxt->pData->aRowP)) {
|
||||
// qWarn("no row in tableDataCxt uid:%" PRId64 " ", pTableCxt->pMeta->uid);
|
||||
// parserWarn("no row in tableDataCxt uid:%" PRId64 " ", pTableCxt->pMeta->uid);
|
||||
// p = taosHashIterate(pTableHash, p);
|
||||
// continue;
|
||||
// }
|
||||
|
|
|
@ -533,7 +533,7 @@ static int32_t getViewMetaImpl(SParseContext* pParCxt, SParseMetaCache* pMetaCac
|
|||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS != code && TSDB_CODE_PAR_TABLE_NOT_EXIST != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetViewMeta error, code:%s, dbName:%s, viewName:%s", pParCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetViewMeta error, code:%s, dbName:%s, viewName:%s", pParCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -549,7 +549,7 @@ static int32_t getTargetNameImpl(SParseContext* pParCxt, SParseMetaCache* pMetaC
|
|||
code = TSDB_CODE_PAR_INTERNAL_ERROR;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code && TSDB_CODE_PAR_TABLE_NOT_EXIST != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pParCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pParCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -565,7 +565,7 @@ static int32_t getTargetName(STranslateContext* pCxt, const SName* pName, char*
|
|||
code = getTargetNameImpl(pParCxt, pCxt->pMetaCache, pName, pTbName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code && TSDB_CODE_PAR_TABLE_NOT_EXIST != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -657,7 +657,7 @@ int32_t getTargetMetaImpl(SParseContext* pParCxt, SParseMetaCache* pMetaCache, c
|
|||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS != code && TSDB_CODE_PAR_TABLE_NOT_EXIST != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pParCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pParCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -673,7 +673,7 @@ static int32_t getTargetMeta(STranslateContext* pCxt, const SName* pName, STable
|
|||
code = getTargetMetaImpl(pParCxt, pCxt->pMetaCache, pName, pMeta, couldBeView);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code && TSDB_CODE_PAR_TABLE_NOT_EXIST != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -703,7 +703,7 @@ static int32_t getTableCfg(STranslateContext* pCxt, const SName* pName, STableCf
|
|||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogRefreshGetTableCfg error, code:%s, dbName:%s, tbName:%s",
|
||||
parserError("QID:0x%" PRIx64 ", catalogRefreshGetTableCfg error, code:%s, dbName:%s, tbName:%s",
|
||||
pCxt->pParseCxt->requestId, tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -726,7 +726,7 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName,
|
|||
code = catalogRefreshGetTableMeta(pParCxt->pCatalog, &conn, &name, pMeta, false);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s",
|
||||
parserError("QID:0x%" PRIx64 ", catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s",
|
||||
pCxt->pParseCxt->requestId, tstrerror(code), pDbName, pTableName);
|
||||
}
|
||||
return code;
|
||||
|
@ -749,7 +749,7 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr
|
|||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetDBVgList error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetDBVgList error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId,
|
||||
tstrerror(code), fullDbName);
|
||||
}
|
||||
return code;
|
||||
|
@ -786,7 +786,7 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam
|
|||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s",
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s",
|
||||
pCxt->pParseCxt->requestId, tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -811,7 +811,7 @@ static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int
|
|||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetDBVgVersion error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetDBVgVersion error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId,
|
||||
tstrerror(code), pDbFName);
|
||||
}
|
||||
return code;
|
||||
|
@ -842,7 +842,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
|
|||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetDBCfg error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId, tstrerror(code),
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetDBCfg error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId, tstrerror(code),
|
||||
dbFname);
|
||||
}
|
||||
return code;
|
||||
|
@ -871,7 +871,7 @@ static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
|
|||
tFreeSFuncInfo(&funcInfo);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " catalogGetUdfInfo error, code:%s, funcName:%s", pCxt->pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetUdfInfo error, code:%s, funcName:%s", pCxt->pParseCxt->requestId,
|
||||
tstrerror(code), pFunc->functionName);
|
||||
}
|
||||
return code;
|
||||
|
@ -896,7 +896,7 @@ static int32_t getTableIndex(STranslateContext* pCxt, const SName* pName, SArray
|
|||
}
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " getTableIndex error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", getTableIndex error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
return code;
|
||||
|
@ -915,7 +915,7 @@ static int32_t getDnodeList(STranslateContext* pCxt, SArray** pDnodes) {
|
|||
code = catalogGetDnodeList(pParCxt->pCatalog, &conn, pDnodes);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
parserError("0x%" PRIx64 " getDnodeList error, code:%s", pCxt->pParseCxt->requestId, tstrerror(code));
|
||||
parserError("QID:0x%" PRIx64 ", getDnodeList error, code:%s", pCxt->pParseCxt->requestId, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
@ -933,7 +933,7 @@ static int32_t getTableTsmas(STranslateContext* pCxt, const SName* pName, SArray
|
|||
code = catalogGetTableTsmas(pParCxt->pCatalog, &conn, pName, ppTsmas);
|
||||
}
|
||||
if (code)
|
||||
parserError("0x%" PRIx64 " get table tsma for : %s.%s error, code:%s", pCxt->pParseCxt->requestId, pName->dbname,
|
||||
parserError("QID:0x%" PRIx64 ", get table tsma for : %s.%s error, code:%s", pCxt->pParseCxt->requestId, pName->dbname,
|
||||
pName->tname, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
@ -951,7 +951,7 @@ static int32_t getTsma(STranslateContext* pCxt, const SName* pName, STableTSMAIn
|
|||
code = catalogGetTsma(pParCxt->pCatalog, &conn, pName, pTsma);
|
||||
}
|
||||
if (code)
|
||||
parserError("0x%" PRIx64 " get tsma for: %s.%s error, code:%s", pCxt->pParseCxt->requestId, pName->dbname,
|
||||
parserError("QID:0x%" PRIx64 ", get tsma for: %s.%s error, code:%s", pCxt->pParseCxt->requestId, pName->dbname,
|
||||
pName->tname, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
@ -7015,7 +7015,7 @@ static int32_t setEqualTbnameTableVgroups(STranslateContext* pCxt, SSelectStmt*
|
|||
bool stableQuery = false;
|
||||
SEqCondTbNameTableInfo* pInfo = NULL;
|
||||
|
||||
qDebug("start to update stable vg for tbname optimize, aTableNum:%d", aTableNum);
|
||||
parserDebug("start to update stable vg for tbname optimize, aTableNum:%d", aTableNum);
|
||||
for (int i = 0; i < aTableNum; ++i) {
|
||||
pInfo = taosArrayGet(aTables, i);
|
||||
int32_t numOfVgs = pInfo->pRealTable->pVgroupList->numOfVgroups;
|
||||
|
@ -7084,7 +7084,7 @@ static int32_t setEqualTbnameTableVgroups(STranslateContext* pCxt, SSelectStmt*
|
|||
}
|
||||
}
|
||||
|
||||
qDebug("before ctbname optimize, code:%d, aTableNum:%d, nTbls:%d, stableQuery:%d", code, aTableNum, nTbls,
|
||||
parserDebug("before ctbname optimize, code:%d, aTableNum:%d, nTbls:%d, stableQuery:%d", code, aTableNum, nTbls,
|
||||
stableQuery);
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && 1 == aTableNum && 1 == nTbls && stableQuery && NULL == pInfo->pRealTable->pTsmas) {
|
||||
|
@ -13061,7 +13061,7 @@ static int32_t readFromFile(char* pName, int32_t* len, char** buf) {
|
|||
int64_t s = taosReadFile(tfile, *buf, *len);
|
||||
if (s != *len) {
|
||||
int32_t code = taosCloseFile(&tfile);
|
||||
qError("failed to close file: %s in %s:%d, err: %s", pName, __func__, __LINE__, tstrerror(code));
|
||||
parserError("failed to close file: %s in %s:%d, err: %s", pName, __func__, __LINE__, tstrerror(code));
|
||||
taosMemoryFreeClear(*buf);
|
||||
return TSDB_CODE_APP_ERROR;
|
||||
}
|
||||
|
@ -15574,7 +15574,7 @@ static int32_t fillVgroupInfo(SParseContext* pParseCxt, const SName* pName, SVgr
|
|||
if (code == TSDB_CODE_SUCCESS) {
|
||||
*pVgInfo = vg;
|
||||
} else {
|
||||
parserError("0x%" PRIx64 " catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", pParseCxt->requestId,
|
||||
parserError("QID:0x%" PRIx64 ", catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", pParseCxt->requestId,
|
||||
tstrerror(code), pName->dbname, pName->tname);
|
||||
}
|
||||
|
||||
|
@ -15905,7 +15905,7 @@ int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut
|
|||
|
||||
code = serializeVgroupCreateTableBatch(pTbBatch, pBufArray);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
qError("failed to serialize create table batch msg, since:%s", tstrerror(code));
|
||||
parserError("failed to serialize create table batch msg, since:%s", tstrerror(code));
|
||||
taosHashCancelIterate(pVgroupHashmap, pTbBatch);
|
||||
break;
|
||||
}
|
||||
|
@ -15984,7 +15984,7 @@ static int32_t rewriteCreateTableFromFile(STranslateContext* pCxt, SQuery* pQuer
|
|||
taosHashClear(pModifyStmt->pVgroupsHashObj);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
if (TSDB_CODE_INVALID_MSG_LEN == code) {
|
||||
qError("maxInsertBatchRows may need to be reduced, current:%d", tsMaxInsertBatchRows);
|
||||
parserError("maxInsertBatchRows may need to be reduced, current:%d", tsMaxInsertBatchRows);
|
||||
}
|
||||
taosHashCleanup(pModifyStmt->pVgroupsHashObj);
|
||||
return code;
|
||||
|
|
|
@ -37,7 +37,7 @@ static int32_t dumpQueryPlan(SQueryPlan* pPlan) {
|
|||
char* pStr = NULL;
|
||||
code = nodesNodeToString((SNode*)pPlan, false, &pStr, NULL);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
planDebugL("QID:0x%" PRIx64 " Query Plan, JsonPlan: %s", pPlan->queryId, pStr);
|
||||
planDebugL("QID:0x%" PRIx64 ", Query Plan, JsonPlan: %s", pPlan->queryId, pStr);
|
||||
taosMemoryFree(pStr);
|
||||
}
|
||||
return code;
|
||||
|
@ -123,7 +123,7 @@ int32_t qContinuePlanPostQuery(void* pPostPlan) {
|
|||
}
|
||||
|
||||
int32_t qSetSubplanExecutionNode(SSubplan* subplan, int32_t groupId, SDownstreamSourceNode* pSource) {
|
||||
planDebug("QID:0x%" PRIx64 " set subplan execution node, groupId:%d", subplan->id.queryId, groupId);
|
||||
planDebug("QID:0x%" PRIx64 ", set subplan execution node, groupId:%d", subplan->id.queryId, groupId);
|
||||
return setSubplanExecutionNode(subplan->pNode, groupId, pSource);
|
||||
}
|
||||
|
||||
|
@ -143,7 +143,7 @@ static void clearSubplanExecutionNode(SPhysiNode* pNode) {
|
|||
}
|
||||
|
||||
void qClearSubplanExecutionNode(SSubplan* pSubplan) {
|
||||
planDebug("QID:0x%" PRIx64 " clear subplan execution node, groupId:%d", pSubplan->id.queryId, pSubplan->id.groupId);
|
||||
planDebug("QID:0x%" PRIx64 ", clear subplan execution node, groupId:%d", pSubplan->id.queryId, pSubplan->id.groupId);
|
||||
clearSubplanExecutionNode(pSubplan->pNode);
|
||||
}
|
||||
|
||||
|
|
|
@ -165,7 +165,7 @@ int32_t initTaskQueue() {
|
|||
return -1;
|
||||
}
|
||||
|
||||
qDebug("task queue is initialized, numOfThreads: %d", tsNumOfTaskQueueThreads);
|
||||
qInfo("task queue is initialized, numOfThreads: %d", tsNumOfTaskQueueThreads);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -343,7 +343,7 @@ void destroyQueryExecRes(SExecResult* pRes) {
|
|||
break;
|
||||
}
|
||||
default:
|
||||
qError("invalid exec result for request type %d", pRes->msgType);
|
||||
qError("invalid exec result for request type:%d", pRes->msgType);
|
||||
}
|
||||
}
|
||||
// clang-format on
|
||||
|
|
|
@@ -45,7 +45,7 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
 pOut->dbVgroup->hashSuffix = usedbRsp->hashSuffix;
 pOut->dbVgroup->stateTs = usedbRsp->stateTs;

-qDebug("Got %d vgroup for db %s, vgVersion:%d, stateTs:%" PRId64, usedbRsp->vgNum, usedbRsp->db, usedbRsp->vgVersion,
+qDebug("db:%s, get %d vgroup, vgVersion:%d, stateTs:%" PRId64, usedbRsp->db, usedbRsp->vgNum, usedbRsp->vgVersion,
 usedbRsp->stateTs);

 if (usedbRsp->vgNum <= 0) {
@@ -61,7 +61,7 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
 for (int32_t i = 0; i < usedbRsp->vgNum; ++i) {
 SVgroupInfo *pVgInfo = taosArrayGet(usedbRsp->pVgroupInfos, i);
 pOut->dbVgroup->numOfTable += pVgInfo->numOfTable;
-qDebug("the %dth vgroup, id %d, epNum %d, current %s port %d", i, pVgInfo->vgId, pVgInfo->epSet.numOfEps,
+qDebug("the %dth vgroup, id:%d, epNum:%d, current:%s port:%u", i, pVgInfo->vgId, pVgInfo->epSet.numOfEps,
 pVgInfo->epSet.eps[pVgInfo->epSet.inUse].fqdn, pVgInfo->epSet.eps[pVgInfo->epSet.inUse].port);
 if (0 != taosHashPut(pOut->dbVgroup->vgHash, &pVgInfo->vgId, sizeof(int32_t), pVgInfo, sizeof(SVgroupInfo))) {
 return terrno;
@@ -521,7 +521,7 @@ int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
 pMeta->uid = msg->tuid;
 pMeta->suid = msg->suid;

-qDebug("ctable %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s suid %" PRIx64, msg->tbName, pMeta->uid,
+qDebug("ctb:%s, uid:0x%" PRIx64 " meta returned, type:%d vgId:%d db:%s suid:%" PRIx64, msg->tbName, pMeta->uid,
 pMeta->tableType, pMeta->vgId, msg->dbFName, pMeta->suid);

 return TSDB_CODE_SUCCESS;
@@ -572,9 +572,9 @@ int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta *
 }
 }

-qDebug("table %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s stb %s suid %" PRIx64
-" sver %d tver %d"
-" tagNum %d colNum %d precision %d rowSize %d",
+qDebug("tb:%s, uid:%" PRIx64 " meta returned, type:%d vgId:%d db:%s stb:%s suid:%" PRIx64
+" sver:%d tver:%d"
+" tagNum:%d colNum:%d precision:%d rowSize:%d",
 msg->tbName, pTableMeta->uid, pTableMeta->tableType, pTableMeta->vgId, msg->dbFName, msg->stbName,
 pTableMeta->suid, pTableMeta->sversion, pTableMeta->tversion, pTableMeta->tableInfo.numOfTags,
 pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.precision, pTableMeta->tableInfo.rowSize);
@@ -632,9 +632,9 @@ int32_t queryCreateTableMetaExFromMsg(STableMetaRsp *msg, bool isStb, STableMeta
 char *pTbName = (char *)pTableMeta + metaSize + schemaExtSize;
 tstrncpy(pTbName, msg->tbName, tbNameSize);

-qDebug("table %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s stb %s suid %" PRIx64
-" sver %d tver %d"
-" tagNum %d colNum %d precision %d rowSize %d",
+qDebug("tb:%s, uid:%" PRIx64 " meta returned, type:%d vgId:%d db:%s stb:%s suid:%" PRIx64
+" sver:%d tver:%d"
+" tagNum:%d colNum:%d precision:%d rowSize:%d",
 msg->tbName, pTableMeta->uid, pTableMeta->tableType, pTableMeta->vgId, msg->dbFName, msg->stbName,
 pTableMeta->suid, pTableMeta->sversion, pTableMeta->tversion, pTableMeta->tableInfo.numOfTags,
 pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.precision, pTableMeta->tableInfo.rowSize);
@@ -831,7 +831,7 @@ int32_t queryProcessGetDbCfgRsp(void *output, char *msg, int32_t msgSize) {
 }

 if (tDeserializeSDbCfgRsp(msg, msgSize, &out) != 0) {
-qError("tDeserializeSDbCfgRsp failed, msgSize:%d,dbCfgRsp:%lu", msgSize, sizeof(out));
+qError("tDeserializeSDbCfgRsp failed, msgSize:%d, dbCfgRsp:%lu", msgSize, sizeof(out));
 return TSDB_CODE_INVALID_MSG;
 }

@@ -981,7 +981,7 @@ int32_t queryProcessStreamProgressRsp(void* output, char* msg, int32_t msgSize)
 }

 if (tDeserializeSStreamProgressRsp(msg, msgSize, output) != 0) {
-qError("tDeserializeStreamProgressRsp failed, msgSize: %d", msgSize);
+qError("tDeserializeStreamProgressRsp failed, msgSize:%d", msgSize);
 return TSDB_CODE_INVALID_MSG;
 }
 return TSDB_CODE_SUCCESS;

@@ -414,29 +414,29 @@ extern SQueryMgmt gQueryMgmt;
 #define QW_SCH_DLOG(param, ...) qDebug("QW:%p clientId:%" PRIx64 " " param, mgmt, clientId, __VA_ARGS__)

 #define QW_TASK_ELOG(param, ...) \
-qError("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
+qError("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
 #define QW_TASK_WLOG(param, ...) \
-qWarn("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
+qWarn("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
 #define QW_TASK_DLOG(param, ...) \
-qDebug("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
+qDebug("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
 #define QW_TASK_DLOGL(param, ...) \
-qDebugL("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)
+qDebugL("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId, __VA_ARGS__)

 #define QW_TASK_ELOG_E(param) \
-qError("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId)
+qError("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId)
 #define QW_TASK_WLOG_E(param) \
-qWarn("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId)
+qWarn("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId)
 #define QW_TASK_DLOG_E(param) \
-qDebug("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, qId, sId, cId, tId, eId)
+qDebug("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, qId, sId, cId, tId, eId)

 #define QW_SCH_TASK_ELOG(param, ...) \
-qError("QW:%p SID:%" PRId64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
+qError("QW:%p, SID:%" PRId64 ", QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, mgmt, sId, \
 qId, cId, tId, eId, __VA_ARGS__)
 #define QW_SCH_TASK_WLOG(param, ...) \
-qWarn("QW:%p SID:%" PRId64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, qId, \
+qWarn("QW:%p, SID:%" PRId64 ", QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, mgmt, sId, qId, \
 cId, tId, eId, __VA_ARGS__)
 #define QW_SCH_TASK_DLOG(param, ...) \
-qDebug("QW:%p SID:%" PRId64 ",QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, mgmt, sId, \
+qDebug("QW:%p, SID:%" PRId64 ", QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d " param, mgmt, sId, \
 qId, cId, tId, eId, __VA_ARGS__)

 #define QW_LOCK_DEBUG(...) \

@@ -187,12 +187,12 @@ void qwDbgDumpJobsInfo(void) {
 int32_t jobIdx = 0;
 SQWJobInfo* pJob = (SQWJobInfo*)taosHashIterate(gQueryMgmt.pJobInfo, NULL);
 while (NULL != pJob) {
-qDebug("QID:0x%" PRIx64 " CID:0x%" PRIx64 " the %dth remain job", pJob->memInfo->jobId, pJob->memInfo->clientId, jobIdx++);
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 " the %dth remain job", pJob->memInfo->jobId, pJob->memInfo->clientId, jobIdx++);

 int32_t sessionIdx = 0;
 SQWSessionInfo* pSession = (SQWSessionInfo*)taosHashIterate(pJob->pSessions, NULL);
 while (NULL != pSession) {
-qDebug("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d the %dth remain session",
+qDebug("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d the %dth remain session",
 pSession->qId, pSession->sId, pSession->cId, pSession->tId, pSession->eId, sessionIdx++);

 pSession = (SQWSessionInfo*)taosHashIterate(pJob->pSessions, pSession);
@@ -237,7 +237,7 @@ int32_t qwDbgBuildAndSendRedirectRsp(int32_t rspType, SRpcHandleInfo *pConn, int

 tmsgSendRsp(&rpcRsp);

-qDebug("response %s msg, code: %s", TMSG_INFO(rspType), tstrerror(code));
+qDebug("response %s msg, code:%s", TMSG_INFO(rspType), tstrerror(code));

 return TSDB_CODE_SUCCESS;
 }

@@ -339,8 +339,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
 break;
 }

-// Got data from sink
-QW_TASK_DLOG("there are data in sink, dataLength:%" PRId64 "", len);
+// get data from sink
+QW_TASK_DLOG("there are data in sink, dataLength:%" PRId64, len);

 *dataLen += len + PAYLOAD_PREFIX_LEN;
 *pRawDataLen += rawLen + PAYLOAD_PREFIX_LEN;
@@ -1699,18 +1699,18 @@ void qWorkerRetireJob(uint64_t jobId, uint64_t clientId, int32_t errCode) {

 SQWJobInfo *pJob = (SQWJobInfo *)taosHashGet(gQueryMgmt.pJobInfo, id, sizeof(id));
 if (NULL == pJob) {
-qError("QID:0x%" PRIx64 " CID:0x%" PRIx64 " fail to get job from job hash", jobId, clientId);
+qError("QID:0x%" PRIx64 ", CID:0x%" PRIx64 " fail to get job from job hash", jobId, clientId);
 return;
 }

 if (0 == atomic_val_compare_exchange_32(&pJob->errCode, 0, errCode) &&
 0 == atomic_val_compare_exchange_8(&pJob->retired, 0, 1)) {
-qDebug("QID:0x%" PRIx64 " CID:0x%" PRIx64 " mark retired, errCode: 0x%x, allocSize:%" PRId64, jobId, clientId,
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 " mark retired, errCode: 0x%x, allocSize:%" PRId64, jobId, clientId,
 errCode, atomic_load_64(&pJob->memInfo->allocMemSize));

 (void)qwRetireJob(pJob);
 } else {
-qDebug("QID:0x%" PRIx64 " already retired, retired: %d, errCode: 0x%x, allocSize:%" PRId64, jobId,
+qDebug("QID:0x%" PRIx64 ", already retired, retired: %d, errCode: 0x%x, allocSize:%" PRId64, jobId,
 atomic_load_8(&pJob->retired), atomic_load_32(&pJob->errCode), atomic_load_64(&pJob->memInfo->allocMemSize));
 }
 }
@@ -1741,10 +1741,10 @@ void qWorkerRetireJobs(int64_t retireSize, int32_t errCode) {

 jobNum++;

-qDebug("QID:0x%" PRIx64 " CID:0x%" PRIx64 " job mark retired in batch, retired:%d, usedSize:%" PRId64 ", retireSize:%" PRId64,
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 " job mark retired in batch, retired:%d, usedSize:%" PRId64 ", retireSize:%" PRId64,
 pJob->memInfo->jobId, pJob->memInfo->clientId, retired, aSize, retireSize);
 } else {
-qDebug("QID:0x%" PRIx64 " CID:0x%" PRIx64 " job may already failed, errCode:%s", pJob->memInfo->jobId, pJob->memInfo->clientId, tstrerror(pJob->errCode));
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 " job may already failed, errCode:%s", pJob->memInfo->jobId, pJob->memInfo->clientId, tstrerror(pJob->errCode));
 }

 pJob = (SQWJobInfo *)taosHashIterate(gQueryMgmt.pJobInfo, pJob);

@@ -462,23 +462,23 @@ extern SSchedulerMgmt schMgmt;
 (_task)->profile.endTs = us; \
 } while (0)

-#define SCH_JOB_ELOG(param, ...) qError("QID:0x%" PRIx64 ",SID:%" PRId64 " " param, pJob->queryId, pJob->seriousId, __VA_ARGS__)
-#define SCH_JOB_DLOG(param, ...) qDebug("QID:0x%" PRIx64 ",SID:%" PRId64 " " param, pJob->queryId, pJob->seriousId, __VA_ARGS__)
+#define SCH_JOB_ELOG(param, ...) qError("QID:0x%" PRIx64 ", SID:%" PRId64 ", " param, pJob->queryId, pJob->seriousId, __VA_ARGS__)
+#define SCH_JOB_DLOG(param, ...) qDebug("QID:0x%" PRIx64 ", SID:%" PRId64 ", " param, pJob->queryId, pJob->seriousId, __VA_ARGS__)

 #define SCH_TASK_ELOG(param, ...) \
-qError("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
+qError("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d, " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
 SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_DLOG(param, ...) \
-qDebug("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
+qDebug("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d, " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
 SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_TLOG(param, ...) \
-qTrace("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
+qTrace("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d, " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
 SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_DLOGL(param, ...) \
-qDebugL("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
+qDebugL("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d, " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
 SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)
 #define SCH_TASK_WLOG(param, ...) \
-qWarn("QID:0x%" PRIx64 ",SID:%" PRId64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
+qWarn("QID:0x%" PRIx64 ", SID:%" PRId64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d, " param, pJob->queryId, pJob->seriousId, SCH_CLIENT_ID(pTask), \
 SCH_TASK_ID(pTask), SCH_TASK_EID(pTask), __VA_ARGS__)

 #define SCH_SET_ERRNO(_err) \

@@ -346,7 +346,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
 pJob->levelNum = levelNum;
 SCH_RESET_JOB_LEVEL_IDX(pJob);

-atomic_add_fetch_64(&pJob->seriousId, 1);
+(void)atomic_add_fetch_64(&pJob->seriousId, 1);
 SCH_JOB_DLOG("job seriousId set to 0x%" PRIx64, pJob->seriousId);

 SSchLevel level = {0};
@@ -465,7 +465,7 @@ void schDumpJobExecRes(SSchJob *pJob, SExecResult *pRes) {
 pJob->execRes.res = NULL;
 SCH_UNLOCK(SCH_WRITE, &pJob->resLock);

-SCH_JOB_DLOG("execRes dumped, code: %s", tstrerror(pRes->code));
+SCH_JOB_DLOG("execRes dumped, code:%s", tstrerror(pRes->code));
 }

 int32_t schDumpJobFetchRes(SSchJob *pJob, void **pData) {
@@ -519,9 +519,9 @@ int32_t schNotifyUserExecRes(SSchJob *pJob) {

 schDumpJobExecRes(pJob, pRes);

-SCH_JOB_DLOG("sch start to invoke exec cb, code: %s", tstrerror(pJob->errCode));
+SCH_JOB_DLOG("sch start to invoke exec cb, code:%s", tstrerror(pJob->errCode));
 (*pJob->userRes.execFp)(pRes, pJob->userRes.cbParam, atomic_load_32(&pJob->errCode));
-SCH_JOB_DLOG("sch end from exec cb, code: %s", tstrerror(pJob->errCode));
+SCH_JOB_DLOG("sch end from exec cb, code:%s", tstrerror(pJob->errCode));

 return TSDB_CODE_SUCCESS;
 }
@@ -534,9 +534,9 @@ int32_t schNotifyUserFetchRes(SSchJob *pJob) {
 atomic_store_32(&pJob->errCode, code);
 }

-SCH_JOB_DLOG("sch start to invoke fetch cb, code: %s", tstrerror(pJob->errCode));
+SCH_JOB_DLOG("sch start to invoke fetch cb, code:%s", tstrerror(pJob->errCode));
 (*pJob->userRes.fetchFp)(pRes, pJob->userRes.cbParam, atomic_load_32(&pJob->errCode));
-SCH_JOB_DLOG("sch end from fetch cb, code: %s", tstrerror(pJob->errCode));
+SCH_JOB_DLOG("sch end from fetch cb, code:%s", tstrerror(pJob->errCode));

 return TSDB_CODE_SUCCESS;
 }
@@ -758,7 +758,7 @@ void schFreeJobImpl(void *job) {
 uint64_t queryId = pJob->queryId;
 int64_t refId = pJob->refId;

-qDebug("QID:0x%" PRIx64 " begin to free sch job, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob);
+qDebug("QID:0x%" PRIx64 ", begin to free sch job, jobId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob);

 schDropJobAllTasks(pJob);

@@ -818,7 +818,7 @@ void schFreeJobImpl(void *job) {
 }
 }

-qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob);
+qDebug("QID:0x%" PRIx64 ", sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob);
 }

 int32_t schJobFetchRows(SSchJob *pJob) {
@@ -853,7 +853,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
 int64_t refId = -1;
 SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
 if (NULL == pJob) {
-qError("QID:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob));
+qError("QID:0x%" PRIx64 ", calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob));
 SCH_ERR_JRET(terrno);
 }

@@ -863,7 +863,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
 if (pReq->sql) {
 pJob->sql = taosStrdup(pReq->sql);
 if (NULL == pJob->sql) {
-qError("QID:0x%" PRIx64 " strdup sql %s failed", pReq->pDag->queryId, pReq->sql);
+qError("QID:0x%" PRIx64 ", strdup sql %s failed", pReq->pDag->queryId, pReq->sql);
 SCH_ERR_JRET(terrno);
 }
 }
@@ -871,7 +871,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
 if (pReq->allocatorRefId > 0) {
 pJob->allocatorRefId = nodesMakeAllocatorWeakRef(pReq->allocatorRefId);
 if (pJob->allocatorRefId <= 0) {
-qError("QID:0x%" PRIx64 " nodesMakeAllocatorWeakRef failed", pReq->pDag->queryId);
+qError("QID:0x%" PRIx64 ", nodesMakeAllocatorWeakRef failed", pReq->pDag->queryId);
 SCH_ERR_JRET(terrno);
 }
 }
@@ -883,11 +883,11 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
 pJob->pWorkerCb = pReq->pWorkerCb;

 if (pReq->pNodeList == NULL || taosArrayGetSize(pReq->pNodeList) <= 0) {
-qDebug("QID:0x%" PRIx64 " input exec nodeList is empty", pReq->pDag->queryId);
+qDebug("QID:0x%" PRIx64 ", input exec nodeList is empty", pReq->pDag->queryId);
 } else {
 pJob->nodeList = taosArrayDup(pReq->pNodeList, NULL);
 if (NULL == pJob->nodeList) {
-qError("QID:0x%" PRIx64 " taosArrayDup failed, origNum:%d", pReq->pDag->queryId,
+qError("QID:0x%" PRIx64 ", taosArrayDup failed, origNum:%d", pReq->pDag->queryId,
 (int32_t)taosArrayGetSize(pReq->pNodeList));
 SCH_ERR_JRET(terrno);
 }
@@ -928,7 +928,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {

 *pJobId = pJob->refId;

-SCH_JOB_DLOG("job refId:0x%" PRIx64 " created", pJob->refId);
+SCH_JOB_DLOG("jobId:0x%" PRIx64 ", job created", pJob->refId);

 return TSDB_CODE_SUCCESS;

@@ -941,7 +941,7 @@ _return:
 } else {
 code = taosRemoveRef(schMgmt.jobRef, pJob->refId);
 if (code) {
-SCH_JOB_DLOG("taosRemoveRef job refId:0x%" PRIx64 " from jobRef, error:%s", pJob->refId, tstrerror(code));
+SCH_JOB_DLOG("jobId:0x%" PRIx64 ", taosRemoveRef job from jobRef, error:%s", pJob->refId, tstrerror(code));
 }
 }

@@ -950,7 +950,7 @@ _return:

 int32_t schExecJob(SSchJob *pJob, SSchedulerReq *pReq) {
 int32_t code = 0;
-qDebug("QID:0x%" PRIx64 " sch job refId 0x%" PRIx64 " started", pReq->pDag->queryId, pJob->refId);
+qDebug("QID:0x%" PRIx64 ", jobId:0x%" PRIx64 ", sch job started", pReq->pDag->queryId, pJob->refId);

 SCH_ERR_RET(schLaunchJob(pJob));

@@ -958,7 +958,7 @@ int32_t schExecJob(SSchJob *pJob, SSchedulerReq *pReq) {
 SCH_JOB_DLOG("sync wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
 code = tsem_wait(&pJob->rspSem);
 if (code) {
-qError("QID:0x%" PRIx64 " tsem_wait sync rspSem failed, error:%s", pReq->pDag->queryId, tstrerror(code));
+qError("QID:0x%" PRIx64 ", tsem_wait sync rspSem failed, error:%s", pReq->pDag->queryId, tstrerror(code));
 SCH_ERR_RET(code);
 }
 }
@@ -1023,7 +1023,7 @@ int32_t schResetJobForRetry(SSchJob *pJob, SSchTask *pTask, int32_t rspCode, boo

 SCH_ERR_RET(schChkResetJobRetry(pJob, rspCode));

-atomic_add_fetch_64(&pJob->seriousId, 1);
+(void)atomic_add_fetch_64(&pJob->seriousId, 1);

 int32_t code = 0;
 int32_t numOfLevels = taosArrayGetSize(pJob->levels);
@@ -1206,7 +1206,7 @@ int32_t schProcessOnOpBegin(SSchJob *pJob, SCH_OP_TYPE type, SSchedulerReq *pReq
 break;
 case SCH_OP_GET_STATUS:
 if (pJob->status < JOB_TASK_STATUS_INIT || pJob->levelNum <= 0 || NULL == pJob->levels) {
-qDebug("job not initialized or not executable job, refId:0x%" PRIx64, pJob->refId);
+qDebug("jobId:0x%" PRIx64 ", job not initialized or not executable job", pJob->refId);
 SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
 }
 return TSDB_CODE_SUCCESS;
@@ -1246,7 +1246,7 @@ int32_t schProcessOnCbBegin(SSchJob **job, SSchTask **task, uint64_t qId, int64_

 (void)schAcquireJob(rId, &pJob);
 if (NULL == pJob) {
-qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, qId, tId, rId);
+qWarn("QID:0x%" PRIx64 ", TID:0x%" PRIx64 "job no exist, may be dropped, jobId:0x%" PRIx64, qId, tId, rId);
 SCH_ERR_RET(TSDB_CODE_QRY_JOB_NOT_EXIST);
 }

@@ -489,7 +489,8 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
 SSchTask *pTask = NULL;
 SSchJob *pJob = NULL;

-qDebug("begin to handle rsp msg, type:%s, handle:%p, code:%s", TMSG_INFO(pMsg->msgType), pMsg->handle,
+int64_t qid = pParam->queryId;
+qDebug("QID:0x%" PRIx64 ", begin to handle rsp msg, type:%s, handle:%p, code:%s", qid,TMSG_INFO(pMsg->msgType), pMsg->handle,
 tstrerror(rspCode));

 SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId));
@@ -503,7 +504,7 @@ _return:
 taosMemoryFreeClear(pMsg->pData);
 taosMemoryFreeClear(pMsg->pEpSet);

-qDebug("end to handle rsp msg, type:%s, handle:%p, code:%s", TMSG_INFO(pMsg->msgType), pMsg->handle,
+qDebug("QID:0x%" PRIx64 ", end to handle rsp msg, type:%s, handle:%p, code:%s", qid, TMSG_INFO(pMsg->msgType), pMsg->handle,
 tstrerror(rspCode));

 SCH_RET(code);
@@ -511,7 +512,7 @@ _return:

 int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) {
 SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
-qDebug("QID:0x%" PRIx64 ",SID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x",
+qDebug("QID:0x%" PRIx64 ", SID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 " drop task rsp received, code:0x%x",
 pParam->queryId, pParam->seriousId, pParam->clientId, pParam->taskId, code);
 // called if drop task rsp received code
 (void)rpcReleaseHandle(pMsg->handle, TAOS_CONN_CLIENT, 0); // ignore error
@@ -528,7 +529,7 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) {

 int32_t schHandleNotifyCallback(void *param, SDataBuf *pMsg, int32_t code) {
 SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
-qDebug("QID:0x%" PRIx64 ",SID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 " task notify rsp received, code:0x%x",
+qDebug("QID:0x%" PRIx64 ", SID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 " task notify rsp received, code:0x%x",
 pParam->queryId, pParam->seriousId, pParam->clientId, pParam->taskId, code);
 if (pMsg) {
 taosMemoryFreeClear(pMsg->pData);

@@ -70,7 +70,7 @@ int32_t schHandleOpBeginEvent(int64_t jobId, SSchJob** job, SCH_OP_TYPE type, SS
 SSchJob* pJob = NULL;
 (void)schAcquireJob(jobId, &pJob);
 if (NULL == pJob) {
-qDebug("Acquire sch job failed, may be dropped, jobId:0x%" PRIx64, jobId);
+qDebug("jobId:0x%" PRIx64 ", acquire sch job failed, may be dropped", jobId);
 SCH_ERR_RET(TSDB_CODE_SCH_JOB_NOT_EXISTS);
 }

@@ -1014,7 +1014,7 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) {

 int32_t code = 0;

-qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", pStatus->queryId,
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", EID:%d task status in server: %s", pStatus->queryId,
 pStatus->clientId, pStatus->taskId, pStatus->execId, jobTaskStatusStr(pStatus->status));

 if (schProcessOnCbBegin(&pJob, &pTask, pStatus->queryId, pStatus->refId, pStatus->taskId)) {
@@ -1061,13 +1061,13 @@ int32_t schHandleExplainRes(SArray *pExplainRes) {
 continue;
 }

-qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg",
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg",
 localRsp->qId, localRsp->cId, localRsp->tId);

 pJob = NULL;
 (void)schAcquireJob(localRsp->rId, &pJob);
 if (NULL == pJob) {
-qWarn("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64,
+qWarn("QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 " job no exist, may be dropped, refId:0x%" PRIx64,
 localRsp->qId, localRsp->cId, localRsp->tId, localRsp->rId);
 SCH_ERR_JRET(TSDB_CODE_QRY_JOB_NOT_EXIST);
 }
@@ -1087,7 +1087,7 @@ int32_t schHandleExplainRes(SArray *pExplainRes) {

 (void)schReleaseJob(pJob->refId);

-qDebug("QID:0x%" PRIx64 ",CID:0x%" PRIx64 ",TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x",
+qDebug("QID:0x%" PRIx64 ", CID:0x%" PRIx64 ", TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x",
 localRsp->qId, localRsp->cId, localRsp->tId, code);

 SCH_ERR_JRET(code);

@@ -23,7 +23,7 @@
 #include "trpc.h"

 FORCE_INLINE int32_t schAcquireJob(int64_t refId, SSchJob **ppJob) {
-qDebug("sch acquire jobId:0x%" PRIx64, refId);
+qTrace("jobId:0x%" PRIx64 ", sch acquire", refId);
 *ppJob = (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId);
 if (NULL == *ppJob) {
 return terrno;
@@ -37,7 +37,7 @@ FORCE_INLINE int32_t schReleaseJob(int64_t refId) {
 return TSDB_CODE_SUCCESS;
 }

-qDebug("sch release jobId:0x%" PRIx64, refId);
+qTrace("jobId:0x%" PRIx64 ", sch release", refId);
 return taosReleaseRef(schMgmt.jobRef, refId);
 }

@@ -46,7 +46,7 @@ FORCE_INLINE int32_t schReleaseJobEx(int64_t refId, int32_t *released) {
 return TSDB_CODE_SUCCESS;
 }

-qDebug("sch release ex jobId:0x%" PRIx64, refId);
+qTrace("jobId:0x%" PRIx64 ", sch release ex", refId);
 return taosReleaseRefEx(schMgmt.jobRef, refId, released);
 }

@@ -122,7 +122,7 @@ int32_t schRemoveHbConnection(SSchJob *pJob, SSchTask *pTask, SQueryNodeEpId *ep
 SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId));
 if (NULL == hb) {
 SCH_UNLOCK(SCH_WRITE, &schMgmt.hbLock);
-SCH_TASK_ELOG("nodeId %d fqdn %s port %d not in hb connections", epId->nodeId, epId->ep.fqdn, epId->ep.port);
+SCH_TASK_ELOG("nodeId:%d fqdn:%s port:%d not in hb connections", epId->nodeId, epId->ep.fqdn, epId->ep.port);
 return TSDB_CODE_SUCCESS;
 }

@@ -301,7 +301,7 @@ int32_t initClientId(void) {
 qError("failed to generate clientId since %s", tstrerror(code));
 SCH_ERR_RET(code);
 }
-qInfo("initialize");
+qInfo("generate clientId:%" PRIu64, schMgmt.clientId);
 return TSDB_CODE_SUCCESS;
 }

Some files were not shown because too many files have changed in this diff.