Merge branch '3.0' into enh/TD-23421-M

This commit is contained in: commit 9e71b6d071
@@ -459,12 +459,17 @@ TO_JSON(str_literal)

#### TO_UNIXTIMESTAMP

```sql
-TO_UNIXTIMESTAMP(expr)
+TO_UNIXTIMESTAMP(expr [, return_timestamp])
+
+return_timestamp: {
+    0
+  | 1
+}
```

**Description**: Converts a string in date/time format to a UNIX timestamp.

-**Return value type**: BIGINT
+**Return value type**: BIGINT, TIMESTAMP

**Applicable column types**: VARCHAR and NCHAR

@@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)

- The input string must be compatible with the ISO 8601/RFC 3339 standard; NULL is returned if the string cannot be converted
- The precision of the returned timestamp is the same as the precision set for the current database in use
- return_timestamp indicates whether the return value type is TIMESTAMP. If this parameter is set to 1, the function returns TIMESTAMP; otherwise it returns BIGINT. If the parameter is omitted, BIGINT is returned by default.
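A minimal usage sketch of the two return modes described above (the literal timestamp string is an arbitrary ISO 8601 example, not from the source):

```sql
-- Default: returns a BIGINT UNIX timestamp
SELECT TO_UNIXTIMESTAMP('2023-04-10T10:00:00+08:00');

-- With return_timestamp set to 1: returns a TIMESTAMP value
SELECT TO_UNIXTIMESTAMP('2023-04-10T10:00:00+08:00', 1);
```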

### Time and Date Functions

@@ -69,7 +69,7 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIES

| 1 | consumer_id | BIGINT | Consumer ID |
| 2 | consumer_group | BINARY(192) | Consumer group |
| 3 | client_id | BINARY(192) | Client ID (user-defined) |
-| 4 | status | BINARY(20) | Consumer status |
+| 4 | status | BINARY(20) | Consumer status. Possible statuses: ready (consumer is in a normal state), lost (the connection between the consumer and the mnode is broken), rebalance (redistribution of the vgroups belonging to the current consumer is in progress), unknown (consumer is in an invalid state) |
| 5 | topics | BINARY(204) | Subscribed topic. Returns one row for each topic. |
| 6 | up_time | TIMESTAMP | Time of first connection to TDengine Server |
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
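As a hedged illustration of how this schema can be inspected, assuming the columns above are exposed through a `performance_schema.perf_consumers` table (the table name is an assumption, not stated in this diff):

```sql
-- List consumers that are not in the normal state
SELECT consumer_id, consumer_group, status
FROM performance_schema.perf_consumers
WHERE status <> 'ready';
```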
@@ -42,3 +42,304 @@ An existing Grafana Notification Channel can be specified with parameter `-E`, t

Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.

For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).

## log database

The data of the TDinsight dashboard is stored in the `log` database by default. You can change this in taoskeeper's configuration file; for more information, please refer to the [taoskeeper document](/reference/taosKeeper). taoskeeper creates the `log` database on startup.

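As a sketch of how the monitoring tables documented below can be read once taoskeeper is running (column names follow the `cluster_info` schema in this section):

```sql
-- Latest cluster snapshot from the default log database
SELECT ts, version, dnodes_alive, dnodes_total, tbs_total
FROM log.cluster_info
ORDER BY ts DESC
LIMIT 1;
```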
### cluster\_info table

`cluster_info` table contains cluster information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|first\_ep|VARCHAR||first ep of cluster|
|first\_ep\_dnode\_id|INT||dnode id of first\_ep|
|version|VARCHAR||TDengine version, e.g. 3.0.4.0|
|master\_uptime|FLOAT||days of master's uptime|
|monitor\_interval|INT||monitor interval in seconds|
|dbs\_total|INT||total number of databases in cluster|
|tbs\_total|BIGINT||total number of tables in cluster|
|stbs\_total|INT||total number of stables in cluster|
|dnodes\_total|INT||total number of dnodes in cluster|
|dnodes\_alive|INT||total number of dnodes in ready state|
|mnodes\_total|INT||total number of mnodes in cluster|
|mnodes\_alive|INT||total number of mnodes in ready state|
|vgroups\_total|INT||total number of vgroups in cluster|
|vgroups\_alive|INT||total number of vgroups in ready state|
|vnodes\_total|INT||total number of vnodes in cluster|
|vnodes\_alive|INT||total number of vnodes in ready state|
|connections\_total|INT||total number of connections to cluster|
|topics\_total|INT||total number of topics in cluster|
|streams\_total|INT||total number of streams in cluster|
|protocol|INT||protocol version|
|cluster\_id|NCHAR|TAG|cluster id|

### d\_info table

`d_info` table contains dnode status records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|status|VARCHAR||dnode status|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### m\_info table

`m_info` table contains mnode information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|role|VARCHAR||the role of mnode: leader or follower|
|mnode\_id|INT|TAG|master node id|
|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### dnodes\_info table

`dnodes_info` table contains dnode information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|uptime|FLOAT||dnode uptime|
|cpu\_engine|FLOAT||cpu usage of tdengine, read from `/proc/<taosd_pid>/stat`|
|cpu\_system|FLOAT||cpu usage of server, read from `/proc/stat`|
|cpu\_cores|FLOAT||cpu cores of server|
|mem\_engine|INT||memory usage of tdengine, read from `/proc/<taosd_pid>/status`|
|mem\_system|INT||available memory on the server|
|mem\_total|INT||total memory of server in `KB`|
|disk\_engine|INT|||
|disk\_used|BIGINT||usage of data dir in `bytes`|
|disk\_total|BIGINT||the capacity of data dir in `bytes`|
|net\_in|FLOAT||network throughput rate in kb/s, read from `/proc/net/dev`|
|net\_out|FLOAT||network throughput rate in kb/s, read from `/proc/net/dev`|
|io\_read|FLOAT||io throughput rate in kb/s, read from `/proc/<taosd_pid>/io`|
|io\_write|FLOAT||io throughput rate in kb/s, read from `/proc/<taosd_pid>/io`|
|io\_read\_disk|FLOAT||io throughput rate of disk in kb/s, read from `/proc/<taosd_pid>/io`|
|io\_write\_disk|FLOAT||io throughput rate of disk in kb/s, read from `/proc/<taosd_pid>/io`|
|req\_select|INT||number of select queries received per dnode|
|req\_select\_rate|FLOAT||number of select queries received per dnode divided by monitor interval|
|req\_insert|INT||number of insert queries received per dnode|
|req\_insert\_success|INT||number of successful insert queries received per dnode|
|req\_insert\_rate|FLOAT||number of insert queries received per dnode divided by monitor interval|
|req\_insert\_batch|INT||number of batch insertions|
|req\_insert\_batch\_success|INT||number of successful batch insertions|
|req\_insert\_batch\_rate|FLOAT||number of batch insertions divided by monitor interval|
|errors|INT||dnode errors|
|vnodes\_num|INT||number of vnodes per dnode|
|masters|INT||number of master vnodes|
|has\_mnode|INT||if the dnode has mnode|
|has\_qnode|INT||if the dnode has qnode|
|has\_snode|INT||if the dnode has snode|
|has\_bnode|INT||if the dnode has bnode|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

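A hedged example of using this table for per-dnode resource trends (the one-hour window and the averaging are illustrative choices, not from the source):

```sql
-- Average engine CPU and memory over the last hour, per dnode
SELECT dnode_ep, AVG(cpu_engine), AVG(mem_engine)
FROM log.dnodes_info
WHERE ts > NOW - 1h
GROUP BY dnode_ep;
```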
### data\_dir table

`data_dir` table contains data directory information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||data directory, default is `/var/lib/taos`|
|level|INT||level for multi-level storage|
|avail|BIGINT||available space for data directory|
|used|BIGINT||used space for data directory|
|total|BIGINT||total space for data directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### log\_dir table

`log_dir` table contains log directory information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||log directory, default is `/var/log/taos/`|
|avail|BIGINT||available space for log directory|
|used|BIGINT||used space for log directory|
|total|BIGINT||total space for log directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### temp\_dir table

`temp_dir` table contains temp directory information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|name|NCHAR||temp directory, default is `/tmp/`|
|avail|BIGINT||available space for temp directory|
|used|BIGINT||used space for temp directory|
|total|BIGINT||total space for temp directory|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### vgroups\_info table

`vgroups_info` table contains vgroup information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vgroup\_id|INT||vgroup id|
|database\_name|VARCHAR||database for the vgroup|
|tables\_num|BIGINT||number of tables per vgroup|
|status|VARCHAR||status|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### vnodes\_role table

`vnodes_role` table contains vnode role information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|vnode\_role|VARCHAR||role: leader or follower|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### logs table

`logs` table contains log information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|level|VARCHAR||log level|
|content|NCHAR||log content|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### log\_summary table

`log_summary` table contains log summary information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|error|INT||error count|
|info|INT||info count|
|debug|INT||debug count|
|trace|INT||trace count|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### grants\_info table

`grants_info` table contains grants information records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|expire\_time|BIGINT||time until grants expire, in seconds|
|timeseries\_used|BIGINT||timeseries used|
|timeseries\_total|BIGINT||total timeseries|
|dnode\_id|INT|TAG|dnode id|
|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

### keeper\_monitor table

`keeper_monitor` table contains taoskeeper monitoring records.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|ts|TIMESTAMP||timestamp|
|cpu|FLOAT||cpu usage|
|mem|FLOAT||memory usage|
|identify|NCHAR|TAG||

### taosadapter\_restful\_http\_request\_total table

`taosadapter_restful_http_request_total` table contains taosadapter REST request information records. The timestamp column of this table is `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|

### taosadapter\_restful\_http\_request\_fail table

`taosadapter_restful_http_request_fail` table contains information records for failed taosadapter REST requests. The timestamp column of this table is `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|client\_ip|NCHAR|TAG|client ip|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|

### taosadapter\_restful\_http\_request\_in\_flight table

`taosadapter_restful_http_request_in_flight` table contains real-time taosadapter REST request information records. The timestamp column of this table is `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

### taosadapter\_restful\_http\_request\_summary\_milliseconds table

`taosadapter_restful_http_request_summary_milliseconds` table contains summary records of REST request latency. The timestamp column of this table is `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|count|DOUBLE|||
|sum|DOUBLE|||
|0.5|DOUBLE|||
|0.9|DOUBLE|||
|0.99|DOUBLE|||
|0.1|DOUBLE|||
|0.2|DOUBLE|||
|endpoint|NCHAR|TAG|taosadapter endpoint|
|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|

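A hedged sketch of reading this latency summary (treating the numerically named columns as quantiles is an assumption based on the summary naming; numeric column names need backquote quoting):

```sql
-- Recent 0.99-quantile request latency in milliseconds
SELECT `_ts`, `0.99`, request_uri
FROM log.taosadapter_restful_http_request_summary_milliseconds
ORDER BY `_ts` DESC
LIMIT 10;
```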
### taosadapter\_system\_mem\_percent table

`taosadapter_system_mem_percent` table contains taosadapter memory usage information. The timestamp column of this table is `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

### taosadapter\_system\_cpu\_percent table

`taosadapter_system_cpu_percent` table contains taosadapter cpu usage information. The timestamp column of this table is `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

@@ -108,7 +108,7 @@ The following `launchctl` commands can help you manage taoskeeper service:

#### Launch With Configuration File

-You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
+You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/taoskeeper.toml` is used by default. If this file does not specify configurations, the default values are used.

```shell
$ taoskeeper -c <keeper config file>
@@ -153,6 +153,10 @@ database = "log"

# standard tables to monitor
tables = ["normal_table"]

# database options for db storing metrics data
[metrics.databaseoptions]
cachemodel = "none"
```

### Obtain Monitoring Metrics

@@ -203,7 +207,7 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1

taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
```

-### check_health
+### check\_health

```
$ curl -i http://127.0.0.1:6043/check_health
@@ -36,7 +36,6 @@ database_option: {

| TSDB_PAGESIZE value
| WAL_RETENTION_PERIOD value
| WAL_RETENTION_SIZE value
| WAL_ROLL_PERIOD value
| WAL_SEGMENT_SIZE value
}
```

@@ -459,12 +459,17 @@ TO_JSON(str_literal)

#### TO_UNIXTIMESTAMP

```sql
-TO_UNIXTIMESTAMP(expr)
+TO_UNIXTIMESTAMP(expr [, return_timestamp])
+
+return_timestamp: {
+    0
+  | 1
+}
```

**Description**: Converts a string in date/time format to a UNIX timestamp.

-**Return value type**: BIGINT.
+**Return value type**: BIGINT, TIMESTAMP.

**Applicable column types**: VARCHAR, NCHAR.

@@ -476,6 +481,7 @@ TO_UNIXTIMESTAMP(expr)

- The input date/time string must comply with the ISO 8601/RFC 3339 standard; NULL is returned for string formats that cannot be converted.
- The precision of the returned timestamp matches the precision configured for the current DATABASE.
- return_timestamp specifies whether the function returns a timestamp type: when set to 1 it returns TIMESTAMP, when set to 0 it returns BIGINT. If omitted, BIGINT is returned by default.

### Time and Date Functions

@@ -69,7 +69,7 @@ Since TDengine 3.0, a built-in database `performance_schema` is provided, whose

| 1 | consumer_id | BIGINT | Unique consumer ID |
| 2 | consumer_group | BINARY(192) | Consumer group |
| 3 | client_id | BINARY(192) | User-defined string, shown via the client_id specified when the consumer is created |
-| 4 | status | BINARY(20) | Current consumer status |
+| 4 | status | BINARY(20) | Current consumer status. Possible statuses: ready (normal and available), lost (connection lost), rebalancing (vgroup assignment for the consumer is in progress), unknown (unknown state) |
| 5 | topics | BINARY(204) | Subscribed topics; shown as one row per topic when multiple topics are subscribed |
| 6 | up_time | TIMESTAMP | Time of first connection to taosd |
| 7 | subscribe_time | TIMESTAMP | Time of the most recent subscription |
@@ -111,7 +111,7 @@ Active: inactive (dead)

#### Launch With Configuration File

-Run the following command to quickly try out taosKeeper. When no taosKeeper configuration file is specified, `/etc/taos/keeper.toml` is used preferentially; otherwise the default configuration is used.
+Run the following command to quickly try out taosKeeper. When no taosKeeper configuration file is specified, `/etc/taos/taoskeeper.toml` is used preferentially; otherwise the default configuration is used.

```shell
$ taoskeeper -c <keeper config file>
@@ -156,6 +156,10 @@ database = "log"

# standard tables to monitor
tables = []

# database options for db storing metrics data
[metrics.databaseoptions]
cachemodel = "none"
```

### Obtain Monitoring Metrics

@@ -206,7 +210,7 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1

taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
```

-### check_health
+### check\_health

```
$ curl -i http://127.0.0.1:6043/check_health
@@ -41,9 +41,9 @@ chmod +x TDinsight.sh

## log database

-The TDinsight dashboard sources its data from the log database (the default database for storing monitoring data; this can be changed in the taoskeeper configuration file, see the [taoskeeper document](..../reference/taosKeeper)). After startup, taoskeeper automatically creates the log database and writes monitoring data into it.
+The TDinsight dashboard sources its data from the log database (the default database for storing monitoring data; this can be changed in the taoskeeper configuration file, see the [taoskeeper document](/reference/taosKeeper)). After startup, taoskeeper automatically creates the log database and writes monitoring data into it.

-### cluster info table
+### cluster\_info table

The `cluster_info` table records cluster information.

@@ -54,7 +54,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|first\_ep\_dnode\_id|INT||dnode id of the cluster's first ep|
|version|VARCHAR||TDengine version, e.g. 3.0.4.0|
|master\_uptime|FLOAT||uptime of the current master node, in days|
-|monitor_interval|INT||monitor interval, in seconds|
+|monitor\_interval|INT||monitor interval, in seconds|
|dbs\_total|INT||total number of databases|
|tbs\_total|BIGINT||total number of tables in the current cluster|
|stbs\_total|INT||total number of stables in the current cluster|
@@ -72,7 +72,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|protocol|INT||protocol version, currently 1|
|cluster\_id|NCHAR|TAG|cluster id|

-### d info table
+### d\_info table

The `d_info` table records dnode status information.

@@ -83,7 +83,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### m info table
+### m\_info table

The `m_info` table records mnode role information.

@@ -95,7 +95,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|mnode\_ep|NCHAR|TAG|master node endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### dnodes info table
+### dnodes\_info table

The `dnodes_info` table records dnode information.

@@ -107,17 +107,17 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|cpu\_system|FLOAT||server cpu usage, read from `/proc/stat`|
|cpu\_cores|FLOAT||number of server cpu cores|
|mem\_engine|INT||taosd memory usage, read from `/proc/<taosd_pid>/status`|
-|mem\_system|INT||server memory usage|
+|mem\_system|INT||available server memory|
|mem\_total|INT||total server memory, in KB|
|disk\_engine|INT|||
|disk\_used|BIGINT||disk usage of the data dir mount, in bytes|
|disk\_total|BIGINT||total disk capacity of the data dir mount, in bytes|
-|net\_in|FLOAT||network throughput: received bytes read from `/proc/net/dev`, in bytes per second|
-|net\_out|FLOAT||network throughput: transmit bytes read from `/proc/net/dev`, in bytes per second|
-|io\_read|FLOAT||io throughput: rate computed from the change in rchar read from `/proc/<taosd_pid>/io` since the last reading, in bytes per second|
-|io\_write|FLOAT||io throughput: rate computed from the change in wchar read from `/proc/<taosd_pid>/io` since the last reading, in bytes per second|
-|io\_read\_disk|FLOAT||disk io throughput: read_bytes read from `/proc/<taosd_pid>/io`, in bytes per second|
-|io\_write\_disk|FLOAT||disk io throughput: write_bytes read from `/proc/<taosd_pid>/io`, in bytes per second|
+|net\_in|FLOAT||network throughput: received bytes read from `/proc/net/dev`, in kb/s|
+|net\_out|FLOAT||network throughput: transmit bytes read from `/proc/net/dev`, in kb/s|
+|io\_read|FLOAT||io throughput: rate computed from the change in rchar read from `/proc/<taosd_pid>/io` since the last reading, in kb/s|
+|io\_write|FLOAT||io throughput: rate computed from the change in wchar read from `/proc/<taosd_pid>/io` since the last reading, in kb/s|
+|io\_read\_disk|FLOAT||disk io throughput: read_bytes read from `/proc/<taosd_pid>/io`, in kb/s|
+|io\_write\_disk|FLOAT||disk io throughput: write_bytes read from `/proc/<taosd_pid>/io`, in kb/s|
|req\_select|INT||number of select queries received within two intervals|
|req\_select\_rate|FLOAT||select query rate within two intervals = `req_select / monitorInterval`|
|req\_insert|INT||number of records contained in insert requests received within two intervals|
@@ -137,7 +137,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### data dir table
+### data\_dir table

The `data_dir` table records data directory information.

@@ -153,7 +153,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### log dir table
+### log\_dir table

The `log_dir` table records log directory information.

@@ -168,7 +168,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### tmp dir table
+### temp\_dir table

The `temp_dir` table records temp directory information.

@@ -183,7 +183,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### vgroups info table
+### vgroups\_info table

The `vgroups_info` table records virtual node group information.

@@ -198,7 +198,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### vnodes role table
+### vnodes\_role table

The `vnodes_role` table records virtual node role information.

@@ -223,7 +223,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### log summary table
+### log\_summary table

The `log_summary` table records log summary statistics.

@@ -238,7 +238,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### grants info table
+### grants\_info table

The `grants_info` table records license grant information.

@@ -252,7 +252,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|dnode\_ep|NCHAR|TAG|dnode endpoint|
|cluster\_id|NCHAR|TAG|cluster id|

-### keeper monitor table
+### keeper\_monitor table

The `keeper_monitor` table records taoskeeper monitoring data.

@@ -263,7 +263,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|mem|FLOAT||memory usage|
|identify|NCHAR|TAG||

-### taosadapter restful http request total table
+### taosadapter\_restful\_http\_request\_total table

The `taosadapter_restful_http_request_total` table records taosadapter REST request information. It is created via schemaless writes; its timestamp column is named `_ts`.

@@ -277,7 +277,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|

-### taosadapter restful http request fail table
+### taosadapter\_restful\_http\_request\_fail table

The `taosadapter_restful_http_request_fail` table records failed taosadapter REST requests. It is created via schemaless writes; its timestamp column is named `_ts`.

@@ -291,7 +291,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|request\_uri|NCHAR|TAG|request uri|
|status\_code|NCHAR|TAG|status code|

-### taosadapter restful http request in flight table
+### taosadapter\_restful\_http\_request\_in\_flight table

The `taosadapter_restful_http_request_in_flight` table records real-time taosadapter REST request information. It is created via schemaless writes; its timestamp column is named `_ts`.

@@ -301,7 +301,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

-### taosadapter restful http request summary milliseconds table
+### taosadapter\_restful\_http\_request\_summary\_milliseconds table

The `taosadapter_restful_http_request_summary_milliseconds` table records taosadapter REST request summary information. It is created via schemaless writes; its timestamp column is named `_ts`.

@@ -319,7 +319,7 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|request\_method|NCHAR|TAG|request method|
|request\_uri|NCHAR|TAG|request uri|

-### taosadapter system mem percent table
+### taosadapter\_system\_mem\_percent table

The `taosadapter_system_mem_percent` table records taosadapter memory usage. It is created via schemaless writes; its timestamp column is named `_ts`.

@@ -329,11 +329,12 @@ The TDinsight dashboard sources its data from the log database (the default db for storing monitoring data,

|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

-### taosadapter system cpu percent table
+### taosadapter\_system\_cpu\_percent table

The `taosadapter_system_cpu_percent` table records taosadapter cpu usage. It is created via schemaless writes; its timestamp column is named `_ts`.

|field|type|is\_tag|comment|
|:----|:---|:-----|:------|
|\_ts|TIMESTAMP||timestamp|
|guage|DOUBLE||metric value|
|endpoint|NCHAR|TAG|taosadapter endpoint|

@@ -112,6 +112,7 @@ extern int32_t tsQueryNodeChunkSize;

extern bool    tsQueryUseNodeAllocator;
extern bool    tsKeepColumnName;
extern bool    tsEnableQueryHb;
extern bool    tsEnableScience;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;

@@ -77,7 +77,7 @@ static inline bool tmsgIsValid(tmsg_t type) {
}

static inline bool vnodeIsMsgBlock(tmsg_t type) {
  return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
-        (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM);
+        (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT);
}

static inline bool syncUtilUserCommit(tmsg_t msgType) {

@@ -1304,6 +1304,9 @@ typedef struct {
  int16_t  hashSuffix;
  int32_t  tsdbPageSize;
  int64_t  reserved[8];
  int8_t   learnerReplica;
  int8_t   learnerSelfIndex;
  SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
} SCreateVnodeReq;

int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);

@@ -1375,7 +1378,10 @@ typedef struct {
  int8_t   replica;
  SReplica replicas[TSDB_MAX_REPLICA];
  int64_t  reserved[8];
-} SAlterVnodeReplicaReq;
+  int8_t   learnerSelfIndex;
+  int8_t   learnerReplica;
+  SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
+} SAlterVnodeReplicaReq, SAlterVnodeTypeReq;

int32_t tSerializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
int32_t tDeserializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);

@@ -1647,7 +1653,10 @@ int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQno

typedef struct {
  int8_t   replica;
  SReplica replicas[TSDB_MAX_REPLICA];
-} SDCreateMnodeReq, SDAlterMnodeReq;
+  int8_t   learnerReplica;
+  SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
+  int64_t  lastIndex;
+} SDCreateMnodeReq, SDAlterMnodeReq, SDAlterMnodeTypeReq;

int32_t tSerializeSDCreateMnodeReq(void* buf, int32_t bufLen, SDCreateMnodeReq* pReq);
int32_t tDeserializeSDCreateMnodeReq(void* buf, int32_t bufLen, SDCreateMnodeReq* pReq);

@@ -83,6 +83,8 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_MND_MSG)
  TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)

@@ -33,8 +33,11 @@ typedef struct {
  bool     deploy;
  int8_t   selfIndex;
  int8_t   numOfReplicas;
-  SReplica replicas[TSDB_MAX_REPLICA];
+  int8_t   numOfTotalReplicas;
+  SReplica replicas[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
+  int32_t  nodeRoles[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
  SMsgCb   msgCb;
+  int64_t  lastIndex;
} SMnodeOpt;

/* ------------------------ SMnode ------------------------ */

@@ -69,6 +72,8 @@ int32_t mndStart(SMnode *pMnode);
 */
void mndStop(SMnode *pMnode);

+int32_t mndIsCatchUp(SMnode *pMnode);
+
/**
 * @brief Get mnode monitor info.
 *

@@ -26,6 +26,7 @@ extern "C" {

typedef void* qTaskInfo_t;
typedef void* DataSinkHandle;

struct SRpcMsg;
struct SSubplan;

@@ -91,6 +92,9 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);

int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);

+// todo refactor
+void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId);
+
/**
 * Set multiple input data blocks for the stream scan.
 * @param tinfo

@@ -119,7 +123,7 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
 * @param isAdd
 * @return
 */
-int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
+int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);

/**
 * Create the exec task object according to task json

@@ -163,6 +167,7 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
 * @return
 */
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);

int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode);

bool qTaskIsExecuting(qTaskInfo_t qinfo);

@@ -182,21 +187,11 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);

STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);

-/**
- * return the scan info, in the form of tuple of two items, including table uid and current timestamp
- * @param tinfo
- * @param uid
- * @param ts
- * @return
- */
-int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
-
-int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
+SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);

int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);

// int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq, int64_t ver);
//
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);

void qStreamSetOpen(qTaskInfo_t tinfo);

@@ -189,7 +189,7 @@ bool fmIsScalarFunc(int32_t funcId);
bool fmIsVectorFunc(int32_t funcId);
bool fmIsIndefiniteRowsFunc(int32_t funcId);
bool fmIsStringFunc(int32_t funcId);
-bool fmIsDatetimeFunc(int32_t funcId);
+bool fmIsDateTimeFunc(int32_t funcId);
bool fmIsSelectFunc(int32_t funcId);
bool fmIsTimelineFunc(int32_t funcId);
bool fmIsTimeorderFunc(int32_t funcId);

@@ -165,6 +165,8 @@ static FORCE_INLINE int32_t udfColEnsureCapacity(SUdfColumn *pColumn, int32_t ne
  if (tmp == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
+  uint32_t extend = BitmapLen(allocCapacity) - BitmapLen(data->rowsAlloc);
+  memset(tmp + BitmapLen(data->rowsAlloc), 0, extend);
  data->fixLenCol.nullBitmap = tmp;
  data->fixLenCol.nullBitmapLen = BitmapLen(allocCapacity);
  int32_t oldLen = BitmapLen(existedRows);

@@ -194,6 +194,7 @@ typedef struct SRequestConnInfo {

typedef void (*__freeFunc)(void* param);

// todo add creator/destroyer function
typedef struct SMsgSendInfo {
  __async_send_cb_fn_t fp;      // async callback function
  STargetInfo          target;  // for update epset

@@ -42,6 +42,7 @@ typedef struct STdbState {
typedef struct {
  STdbState* pTdbState;
  int32_t    number;
  int64_t    checkPointId;
} SStreamState;

SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);

@@ -13,8 +13,8 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

-#include "executor.h"
#include "os.h"
+#include "executor.h"
#include "query.h"
#include "streamState.h"
#include "tdatablock.h"

@@ -50,6 +50,7 @@ enum {
  TASK_STATUS__RECOVER_PREPARE,
  TASK_STATUS__RECOVER1,
  TASK_STATUS__RECOVER2,
+  TASK_STATUS__RESTORE,  // only available for source task to replay WAL from the checkpoint
};

enum {

@@ -103,21 +104,8 @@ typedef struct {
  int8_t type;
} SStreamQueueItem;

-#if 0
-typedef struct {
-  int8_t      type;
-  int64_t     ver;
-  int32_t*    dataRef;
-  SSubmitReq* data;
-} SStreamDataSubmit;
-
-typedef struct {
-  int8_t  type;
-  int64_t ver;
-  SArray* dataRefs;  // SArray<int32_t*>
-  SArray* reqs;      // SArray<SSubmitReq*>
-} SStreamMergedSubmit;
-#endif
+typedef void    FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
+typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);

typedef struct {
  int8_t type;

@@ -219,21 +207,20 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
}

static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
-  //
  return queue->qItem;
}

void* streamQueueNextItem(SStreamQueue* queue);

-SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit);
+SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);

SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);

typedef struct {
-  char* qmsg;
-  // followings are not applicable to encoder and decoder
-  void* executor;
+  char*              qmsg;
+  void*              pExecutor;   // not applicable to encoder and decoder
+  struct SWalReader* pWalReader;  // not applicable to encoder and decoder
} STaskExec;

typedef struct {

@@ -248,16 +235,13 @@ typedef struct {
  SUseDbRsp dbInfo;
} STaskDispatcherShuffle;

-typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
-
typedef struct {
  int64_t         stbUid;
  char            stbFullName[TSDB_TABLE_FNAME_LEN];
  SSchemaWrapper* pSchemaWrapper;
-  // not applicable to encoder and decoder
-  void*     vnode;
-  FTbSink*  tbSinkFunc;
-  STSchema* pTSchema;
+  void*           vnode;  // not available to encoder and decoder
+  FTbSink*        tbSinkFunc;
+  STSchema*       pTSchema;
} STaskSinkTb;

typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);

@@ -280,24 +264,34 @@ typedef struct {
  SEpSet epSet;
} SStreamChildEpInfo;

-struct SStreamTask {
-  int64_t streamId;
-  int32_t taskId;
-  int32_t totalLevel;
-  int8_t  taskLevel;
-  int8_t  outputType;
-  int16_t dispatchMsgType;
+typedef struct SStreamId {
+  int64_t     streamId;
+  int32_t     taskId;
+  const char* idStr;
+} SStreamId;
+
+typedef struct SCheckpointInfo {
+  int64_t id;
+  int64_t version;  // offset in WAL
+} SCheckpointInfo;
+
+typedef struct SStreamStatus {
+  int8_t taskStatus;
+  int8_t schedStatus;
+} SStreamStatus;

-  // node info
-  int32_t selfChildId;
-  int32_t nodeId;
-  SEpSet  epSet;
-
-  int64_t recoverSnapVer;
-  int64_t startVer;
+struct SStreamTask {
+  SStreamId       id;
+  int32_t         totalLevel;
+  int8_t          taskLevel;
+  int8_t          outputType;
+  int16_t         dispatchMsgType;
+  SStreamStatus   status;
+  int32_t         selfChildId;
+  int32_t         nodeId;
+  SEpSet          epSet;
+  SCheckpointInfo chkInfo;
+  STaskExec       exec;

  // fill history
  int8_t fillHistory;

@@ -307,9 +301,6 @@ struct SStreamTask {
  int32_t nextCheckId;
  SArray* checkpointInfo;  // SArray<SStreamCheckpointInfo>

-  // exec
-  STaskExec exec;
-
  // output
  union {
    STaskDispatcherFixedEp fixedEpDispatcher;

@@ -319,44 +310,54 @@ struct SStreamTask {
    STaskSinkFetch fetchSink;
  };

-  int8_t inputStatus;
-  int8_t outputStatus;
-
-  // STaosQueue* inputQueue1;
-  // STaosQall* inputQall;
+  int8_t        inputStatus;
+  int8_t        outputStatus;
  SStreamQueue* inputQueue;
  SStreamQueue* outputQueue;

  // trigger
-  int8_t  triggerStatus;
-  int64_t triggerParam;
-  void*   timer;
+  int8_t        triggerStatus;
+  int64_t       triggerParam;
+  void*         timer;
+  SMsgCb*       pMsgCb;  // msg handle
+  SStreamState* pState;  // state backend

-  // msg handle
-  SMsgCb* pMsgCb;
-
-  // state backend
-  SStreamState* pState;
-
-  // do not serialize
-  int32_t recoverTryingDownstream;
-  int32_t recoverWaitingUpstream;
-  int64_t checkReqId;
-  SArray* checkReqIds;  // shuffle
-  int32_t refCnt;
-
-  int64_t checkpointingId;
-  int32_t checkpointAlignCnt;
+  // the following attributes are not serialized
+  int32_t             recoverTryingDownstream;
+  int32_t             recoverWaitingUpstream;
+  int64_t             checkReqId;
+  SArray*             checkReqIds;  // shuffle
+  int32_t             refCnt;
+  int64_t             checkpointingId;
+  int32_t             checkpointAlignCnt;
+  struct SStreamMeta* pMeta;
};

+// meta
+typedef struct SStreamMeta {
+  char*        path;
+  TDB*         db;
+  TTB*         pTaskDb;
+  TTB*         pCheckpointDb;
+  SHashObj*    pTasks;
+  void*        ahandle;
+  TXN*         txn;
+  FTaskExpand* expandFunc;
+  int32_t      vgId;
+  SRWLatch     lock;
+  int8_t       walScan;
+  bool         quit;
+} SStreamMeta;
+
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);

-SStreamTask* tNewSStreamTask(int64_t streamId);
-int32_t      tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
-int32_t      tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
-void         tFreeSStreamTask(SStreamTask* pTask);
-int32_t      tAppendDataForStream(SStreamTask* pTask, SStreamQueueItem* pItem);
+SStreamTask* tNewStreamTask(int64_t streamId);
+int32_t      tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
+int32_t      tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
+void         tFreeStreamTask(SStreamTask* pTask);
+int32_t      tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem);
bool tInputQueueIsFull(const SStreamTask* pTask);

static FORCE_INLINE void streamTaskInputFail(SStreamTask* pTask) {
  atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);

@@ -564,40 +565,22 @@ int32_t streamAggRecoverPrepare(SStreamTask* pTask);
// int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask);
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId);

-// expand and deploy
-typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
-
-// meta
-typedef struct SStreamMeta {
-  char*        path;
-  TDB*         db;
-  TTB*         pTaskDb;
-  TTB*         pCheckpointDb;
-  SHashObj*    pTasks;
-  SHashObj*    pRecoverStatus;
-  void*        ahandle;
-  TXN*         txn;
-  FTaskExpand* expandFunc;
-  int32_t      vgId;
-  SRWLatch     lock;
-} SStreamMeta;
-
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
void         streamMetaClose(SStreamMeta* streamMeta);

int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
-int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
-int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
-// SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
+int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
+int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t checkpointVer, char* msg, int32_t msgLen);
int32_t streamMetaGetNumOfTasks(const SStreamMeta* pMeta);

SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
void         streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void         streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);

int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);

// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);

@@ -55,6 +55,8 @@ extern "C" {
#define SYNC_INDEX_INVALID -1
#define SYNC_TERM_INVALID  -1

+#define SYNC_LEARNER_CATCHUP 10
+
typedef enum {
  SYNC_STRATEGY_NO_SNAPSHOT = 0,
  SYNC_STRATEGY_STANDARD_SNAPSHOT = 1,

@@ -76,19 +78,29 @@ typedef enum {
  TAOS_SYNC_STATE_CANDIDATE = 101,
  TAOS_SYNC_STATE_LEADER = 102,
  TAOS_SYNC_STATE_ERROR = 103,
+  TAOS_SYNC_STATE_LEARNER = 104,
} ESyncState;

+typedef enum {
+  TAOS_SYNC_ROLE_VOTER = 0,
+  TAOS_SYNC_ROLE_LEARNER = 1,
+  TAOS_SYNC_ROLE_ERROR = 2,
+} ESyncRole;
+
typedef struct SNodeInfo {
-  int64_t  clusterId;
-  int32_t  nodeId;
-  uint16_t nodePort;
-  char     nodeFqdn[TSDB_FQDN_LEN];
+  int64_t   clusterId;
+  int32_t   nodeId;
+  uint16_t  nodePort;
+  char      nodeFqdn[TSDB_FQDN_LEN];
+  ESyncRole nodeRole;
} SNodeInfo;

typedef struct SSyncCfg {
+  int32_t   totalReplicaNum;
  int32_t   replicaNum;
  int32_t   myIndex;
-  SNodeInfo nodeInfo[TSDB_MAX_REPLICA];
+  SNodeInfo nodeInfo[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
+  SyncIndex lastIndex;
} SSyncCfg;

typedef struct SFsmCbMeta {

@@ -155,6 +167,7 @@ typedef struct SSyncFSM {

  void (*FpBecomeLeaderCb)(const struct SSyncFSM* pFsm);
  void (*FpBecomeFollowerCb)(const struct SSyncFSM* pFsm);
+  void (*FpBecomeLearnerCb)(const struct SSyncFSM* pFsm);

  int32_t (*FpGetSnapshot)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot, void* pReaderParam, void** ppReader);
  void (*FpGetSnapshotInfo)(const struct SSyncFSM* pFsm, SSnapshot* pSnapshot);

@@ -236,6 +249,7 @@ void syncStop(int64_t rid);
void    syncPreStop(int64_t rid);
void    syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
+int32_t syncIsCatchUp(int64_t rid);
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex);

@@ -138,7 +138,8 @@ typedef struct {
  int8_t enableRef;
} SWalFilterCond;

-typedef struct {
+// todo hide this struct
+typedef struct SWalReader {
  SWal     *pWal;
  int64_t   readerId;
  TdFilePtr pLogFile;

@@ -196,6 +197,7 @@ void walReadReset(SWalReader *pReader);
int32_t walReadVer(SWalReader *pRead, int64_t ver);
int32_t walReadSeekVer(SWalReader *pRead, int64_t ver);
int32_t walNextValidMsg(SWalReader *pRead);
+int64_t walReaderGetCurrentVer(const SWalReader *pReader);

// only for tq usage
void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);

@@ -547,6 +547,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG TAOS_DEF_ERROR_CODE(0, 0x0915)  // internal
#define TSDB_CODE_SYN_BUFFER_FULL          TAOS_DEF_ERROR_CODE(0, 0x0916)
#define TSDB_CODE_SYN_WRITE_STALL          TAOS_DEF_ERROR_CODE(0, 0x0917)
+#define TSDB_CODE_SYN_NEGO_WIN_EXCEEDED    TAOS_DEF_ERROR_CODE(0, 0x0918)
#define TSDB_CODE_SYN_INTERNAL_ERROR       TAOS_DEF_ERROR_CODE(0, 0x09FF)

// tq

@@ -285,8 +285,11 @@ typedef enum ELogicConditionType {
#define TSDB_DNODE_ROLE_VNODE 2

#define TSDB_MAX_REPLICA 5
+#define TSDB_MAX_LEARNER_REPLICA 10
#define TSDB_SYNC_LOG_BUFFER_SIZE 4096
-#define TSDB_SYNC_LOG_BUFFER_RETENTION (TSDB_SYNC_LOG_BUFFER_SIZE >> 4)
+#define TSDB_SYNC_LOG_BUFFER_RETENTION 256
+#define TSDB_SYNC_APPLYQ_SIZE_LIMIT 512
+#define TSDB_SYNC_NEGOTIATION_WIN 512

#define TSDB_TBNAME_COLUMN_INDEX (-1)
#define TSDB_MULTI_TABLEMETA_MAX_NUM 100000  // maximum batch size allowed to load table meta

@@ -1,7 +1,7 @@
#!/usr/bin/expect
set packageName [lindex $argv 0]
set packageSuffix [lindex $argv 1]
-set timeout 3
+set timeout 30
if { ${packageSuffix} == "deb" } {
    spawn dpkg -i ${packageName}
} elseif { ${packageSuffix} == "rpm"} {

@@ -246,7 +246,7 @@ if [ ! -f debRpmAutoInstall.sh ];then
  echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
  echo 'set packageName [lindex $argv 0]' >> debRpmAutoInstall.sh
  echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
-  echo 'set timeout 3 ' >> debRpmAutoInstall.sh
+  echo 'set timeout 30 ' >> debRpmAutoInstall.sh
  echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
  echo '  spawn dpkg -i ${packageName} ' >> debRpmAutoInstall.sh
  echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh

@@ -197,7 +197,7 @@ if [[ $productName == "TDengine" ]]; then
  mkdir -p ${install_dir}/connector
  if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
    if [ "$osType" != "Darwin" ]; then
-      [ -f ${build_dir}/lib/*.jar ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
+      jars=$(ls ${build_dir}/lib/*.jar 2>/dev/null|wc -l)
+      [ "${jars}" != "0" ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
    fi
    git clone --depth 1 https://github.com/taosdata/driver-go ${install_dir}/connector/go
    rm -rf ${install_dir}/connector/go/.git ||:

@ -338,7 +338,20 @@ if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
|
|||
connector_dir="${code_dir}/connector"
|
||||
mkdir -p ${install_dir}/connector
|
||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||
[ -f ${build_dir}/lib/*.jar ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
tmp_pwd=`pwd`
|
||||
cd ${install_dir}/connector
|
||||
if [ ! -d taos-connector-jdbc ];then
|
||||
git clone -b main --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
|
||||
fi
|
||||
cd taos-connector-jdbc
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
echo ${build_dir}/lib/
|
||||
cp target/*.jar ${build_dir}/lib/
|
||||
cd ${install_dir}/connector
|
||||
rm -rf taos-connector-jdbc
|
||||
cd ${tmp_pwd}
|
||||
jars=$(ls ${build_dir}/lib/*.jar 2>/dev/null|wc -l)
|
||||
[ "${jars}" != "0" ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
|
||||
git clone --depth 1 https://github.com/taosdata/driver-go ${install_dir}/connector/go
|
||||
rm -rf ${install_dir}/connector/go/.git ||:
|
||||
|
||||
|
|
|
@@ -36,14 +36,6 @@ extern "C" {

#include "tconfig.h"

-#define CHECK_CODE_GOTO(expr, label)      \
-  do {                                    \
-    code = expr;                          \
-    if (TSDB_CODE_SUCCESS != code) {      \
-      goto label;                         \
-    }                                     \
-  } while (0)
-
#define ERROR_MSG_BUF_DEFAULT_SIZE 512
#define HEARTBEAT_INTERVAL         1500  // ms

@@ -293,28 +285,7 @@ static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
  return (SReqResultInfo*)&msg->resInfo;
}

-static FORCE_INLINE SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
-  SMqRspObj* pRspObj = (SMqRspObj*)res;
-  pRspObj->resIter++;
-
-  if (pRspObj->resIter < pRspObj->rsp.blockNum) {
-    SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
-    if (pRspObj->rsp.withSchema) {
-      SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
-      setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
-      taosMemoryFreeClear(pRspObj->resInfo.row);
-      taosMemoryFreeClear(pRspObj->resInfo.pCol);
-      taosMemoryFreeClear(pRspObj->resInfo.length);
-      taosMemoryFreeClear(pRspObj->resInfo.convertBuf);
-      taosMemoryFreeClear(pRspObj->resInfo.convertJson);
-    }
-
-    setQueryResultFromRsp(&pRspObj->resInfo, pRetrieve, convertUcs4, false);
-    return &pRspObj->resInfo;
-  }
-
-  return NULL;
-}
+SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4);

static FORCE_INLINE SReqResultInfo* tscGetCurResInfo(TAOS_RES* res) {
  if (TD_RES_QUERY(res)) return &(((SRequestObj*)res)->body.resInfo);

@@ -327,7 +298,6 @@ extern int32_t clientConnRefPool;
extern int32_t timestampDeltaLimit;
extern int64_t lastClusterId;

__async_send_cb_fn_t getMsgRspHandle(int32_t msgType);

SMsgSendInfo* buildMsgInfoImpl(SRequestObj* pReqObj);

@@ -380,7 +350,6 @@ void taos_close_internal(void* taos);
// global, called by mgmt
int  hbMgrInit();
void hbMgrCleanUp();
int  hbHandleRsp(SClientHbBatchRsp* hbRsp);

// cluster level
SAppHbMgr* appHbMgrInit(SAppInstInfo* pAppInstInfo, char* key);

@@ -393,9 +362,6 @@ void stopAllRequests(SHashObj* pRequests);
int  hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType);
void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey);

-// --- mq
-void hbMgrInitMqHbRspHandle();
-
typedef struct SSqlCallbackWrapper {
  SParseContext* pParseCtx;
  SCatalogReq*   pCatalogReq;

@@ -1039,8 +1039,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
      .sysInfo = pRequest->pTscObj->sysInfo,
      .allocatorId = pRequest->allocatorRefId};

  SAppInstInfo* pAppInfo = getAppInfo(pRequest);
  SQueryPlan*   pDag = NULL;

  int64_t st = taosGetTimestampUs();
  int32_t code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);

@@ -1052,7 +1051,6 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
  }

  pRequest->metric.execStart = taosGetTimestampUs();
  pRequest->metric.planCostUs = pRequest->metric.execStart - st;

  if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {

@@ -1259,8 +1259,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
  int code = taos_errno(tres);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
-    return createSchemalessResp(env, 0, code, taos_errstr(tres));
+    jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
+    taos_free_result(tres);
+    return jobject;
  }
  taos_free_result(tres);

@@ -1286,8 +1287,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
  int code = taos_errno(tres);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
-    return createSchemalessResp(env, 0, code, taos_errstr(tres));
+    jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
+    taos_free_result(tres);
+    return jobject;
  }
  taos_free_result(tres);

@@ -1315,8 +1317,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
  int code = taos_errno(tres);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
-    return createSchemalessResp(env, 0, code, taos_errstr(tres));
+    jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
+    taos_free_result(tres);
+    return jobject;
  }
  taos_free_result(tres);

@@ -1343,8 +1346,9 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_schemalessInse
  int code = taos_errno(tres);
  if (code != TSDB_CODE_SUCCESS) {
    jniError("jobj:%p, conn:%p, code: 0x%x, msg:%s", jobj, taos, code, taos_errstr(tres));
-    return createSchemalessResp(env, 0, code, taos_errstr(tres));
+    jobject jobject = createSchemalessResp(env, 0, code, taos_errstr(tres));
+    taos_free_result(tres);
+    return jobject;
  }
  taos_free_result(tres);

@@ -210,6 +210,11 @@ typedef struct {
  tmq_t* pTmq;
} SMqCommitCbParam;

+typedef struct SSyncCommitInfo {
+  tsem_t  sem;
+  int32_t code;
+} SSyncCommitInfo;
+
static int32_t doAskEp(tmq_t* tmq);
static int32_t makeTopicVgroupKey(char* dst, const char* topicName, int32_t vg);
static int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet);

@@ -521,11 +526,7 @@ static int32_t doSendCommitMsg(tmq_t* tmq, SMqClientVg* pVg, const char* pTopicN
    return TSDB_CODE_OUT_OF_MEMORY;
  }

-  pMsgSendInfo->msgInfo = (SDataBuf){
-      .pData = buf,
-      .len = sizeof(SMsgHead) + len,
-      .handle = NULL,
-  };
+  pMsgSendInfo->msgInfo = (SDataBuf) { .pData = buf, .len = sizeof(SMsgHead) + len, .handle = NULL };

  pMsgSendInfo->requestId = generateRequestId();
  pMsgSendInfo->requestObjRefId = 0;

@@ -786,11 +787,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
    goto OVER;
  }

-  sendInfo->msgInfo = (SDataBuf){
-      .pData = pReq,
-      .len = tlen,
-      .handle = NULL,
-  };
+  sendInfo->msgInfo = (SDataBuf){ .pData = pReq, .len = tlen, .handle = NULL };

  sendInfo->requestId = generateRequestId();
  sendInfo->requestObjRefId = 0;

@ -2126,13 +2123,8 @@ void tmq_commit_async(tmq_t* tmq, const TAOS_RES* pRes, tmq_commit_cb* cb, void*
|
|||
}
|
||||
}
|
||||
|
||||
typedef struct SSyncCommitInfo {
|
||||
tsem_t sem;
|
||||
int32_t code;
|
||||
} SSyncCommitInfo;
|
||||
|
||||
static void commitCallBackFn(tmq_t* pTmq, int32_t code, void* param) {
|
||||
SSyncCommitInfo* pInfo = (SSyncCommitInfo*)param;
|
||||
static void commitCallBackFn(tmq_t *pTmq, int32_t code, void* param) {
|
||||
SSyncCommitInfo* pInfo = (SSyncCommitInfo*) param;
|
||||
pInfo->code = code;
|
||||
tsem_post(&pInfo->sem);
|
||||
}
|
||||
|
@ -2309,3 +2301,26 @@ void commitRspCountDown(SMqCommitCbParamSet* pParamSet, int64_t consumerId, cons
|
|||
waitingRspNum);
|
||||
}
|
||||
}
|
||||
|
||||
SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4) {
|
||||
SMqRspObj* pRspObj = (SMqRspObj*)res;
|
||||
pRspObj->resIter++;
|
||||
|
||||
if (pRspObj->resIter < pRspObj->rsp.blockNum) {
|
||||
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, pRspObj->resIter);
|
||||
if (pRspObj->rsp.withSchema) {
|
||||
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRspObj->rsp.blockSchema, pRspObj->resIter);
|
||||
setResSchemaInfo(&pRspObj->resInfo, pSW->pSchema, pSW->nCols);
|
||||
taosMemoryFreeClear(pRspObj->resInfo.row);
|
||||
taosMemoryFreeClear(pRspObj->resInfo.pCol);
|
||||
taosMemoryFreeClear(pRspObj->resInfo.length);
|
||||
taosMemoryFreeClear(pRspObj->resInfo.convertBuf);
|
||||
taosMemoryFreeClear(pRspObj->resInfo.convertJson);
|
||||
}
|
||||
|
||||
setQueryResultFromRsp(&pRspObj->resInfo, pRetrieve, convertUcs4, false);
|
||||
return &pRspObj->resInfo;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
|
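// The SSyncCommitInfo/commitCallBackFn pair moved to the top of the file turns the async
// commit path into a building block for synchronous commits. A minimal sketch of how a
// caller would block on it (assuming the async entry point takes a tmq_commit_cb and a
// user pointer, as tmq_commit_async does):
//
//   SSyncCommitInfo info = {.code = 0};
//   tsem_init(&info.sem, 0, 0);
//   tmq_commit_async(tmq, pRes, commitCallBackFn, &info);  // callback runs tsem_post(&info.sem)
//   tsem_wait(&info.sem);                                  // block until the commit RPC completes
//   int32_t code = info.code;
//   tsem_destroy(&info.sem);
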
@@ -221,6 +221,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
  }

  uint8_t* p = (uint8_t*)pSource->nullbitmap;
  pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits);  // clear remind bits
  pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits);       // copy remind bits

  if (BitmapLen(numOfRow1) == BitmapLen(total)) {

@@ -232,6 +233,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c

  uint8_t* start = (uint8_t*)&pColumnInfoData->nullbitmap[BitmapLen(numOfRow1)];
  int32_t  overCount = BitmapLen(total) - BitmapLen(numOfRow1);
  memset(start, 0, overCount);
  while (i < len) {  // size limit of pSource->nullbitmap
    if (i >= 1) {
      start[i - 1] |= (p[i] >> remindBits);  // copy remind bits

@@ -309,9 +311,11 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
    pColumnInfoData->pData = tmp;
    if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) {
      char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows));
      if (btmp == NULL) {
        return TSDB_CODE_OUT_OF_MEMORY;
      }
      uint32_t extend = BitmapLen(finalNumOfRows) - BitmapLen(numOfRow1);
      memset(btmp + BitmapLen(numOfRow1), 0, extend);

      pColumnInfoData->nullbitmap = btmp;
    }

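// The added memset calls zero the bytes appended to the null bitmap before bits from
// pSource are OR-ed in, so stale heap content from realloc cannot surface as phantom
// NULL flags. This assumes the usual one-bit-per-row packing, i.e. roughly:
//
//   #define BitmapLen(n) (((n) + 7) >> 3)  // bytes needed for n rows, one bit per row
//
// so growing from numOfRow1 to finalNumOfRows rows appends
// BitmapLen(finalNumOfRows) - BitmapLen(numOfRow1) bytes that must start out cleared.
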
@@ -105,6 +105,7 @@ int32_t tsQueryPolicy = 1;
int32_t tsQueryRspPolicy = 0;
int64_t tsQueryMaxConcurrentTables = 200;  // unit is TSDB_TABLE_NUM_UNIT
bool    tsEnableQueryHb = false;
bool    tsEnableScience = false;  // if true, taos CLI shows float and double in scientific notation
int32_t tsQuerySmaOptimize = 0;
int32_t tsQueryRsmaTolerance = 1000;  // the tolerance time (ms) to judge from which level to query rsma data.
bool    tsQueryPlannerTrace = false;

@@ -117,6 +118,7 @@ int32_t tsRedirectMaxPeriod = 1000;
int32_t tsMaxRetryWaitTime = 10000;
bool    tsUseAdapter = false;

/*
 * denote if the server needs to compress response message at the application layer to client, including query rsp,
 * metricmeta rsp, and multi-meter query rsp message body. The client compress the submit message to server.

@@ -328,6 +330,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
  if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
  if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 4, 1) != 0) return -1;
  if (cfgAddBool(pCfg, "enableQueryHb", tsEnableQueryHb, false) != 0) return -1;
  if (cfgAddBool(pCfg, "enableScience", tsEnableScience, false) != 0) return -1;
  if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
  if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
  if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, true) != 0) return -1;

@@ -730,6 +733,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
  tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
  tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
  tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
  tsEnableScience = cfgGetItem(pCfg, "enableScience")->bval;
  tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
  tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
  tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;

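// Adding `enableScience` touches the three places a client-side option normally needs:
// a global holding the default (tsEnableScience), registration of the knob in
// taosAddClientCfg(), and the read-back in taosSetClientCfg(). A hypothetical new
// boolean option would follow the same shape (name `myNewSwitch` is illustrative only):
//
//   bool tsMyNewSwitch = false;                                                 // default
//   if (cfgAddBool(pCfg, "myNewSwitch", tsMyNewSwitch, false) != 0) return -1;  // register
//   tsMyNewSwitch = cfgGetItem(pCfg, "myNewSwitch")->bval;                      // apply
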
@@ -4190,6 +4190,12 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
  for (int32_t i = 0; i < 8; ++i) {
    if (tEncodeI64(&encoder, pReq->reserved[i]) < 0) return -1;
  }
  if (tEncodeI8(&encoder, pReq->learnerReplica) < 0) return -1;
  if (tEncodeI8(&encoder, pReq->learnerSelfIndex) < 0) return -1;
  for (int32_t i = 0; i < TSDB_MAX_LEARNER_REPLICA; ++i) {
    SReplica *pReplica = &pReq->learnerReplicas[i];
    if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
  }

  tEndEncode(&encoder);

@@ -4268,6 +4274,14 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
  for (int32_t i = 0; i < 8; ++i) {
    if (tDecodeI64(&decoder, &pReq->reserved[i]) < 0) return -1;
  }
  if (!tDecodeIsEnd(&decoder)) {
    if (tDecodeI8(&decoder, &pReq->learnerReplica) < 0) return -1;
    if (tDecodeI8(&decoder, &pReq->learnerSelfIndex) < 0) return -1;
    for (int32_t i = 0; i < TSDB_MAX_LEARNER_REPLICA; ++i) {
      SReplica *pReplica = &pReq->learnerReplicas[i];
      if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
    }
  }

  tEndDecode(&decoder);
  tDecoderClear(&decoder);

@@ -4494,6 +4508,12 @@ int32_t tSerializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnodeRe
  for (int32_t i = 0; i < 8; ++i) {
    if (tEncodeI64(&encoder, pReq->reserved[i]) < 0) return -1;
  }
  if (tEncodeI8(&encoder, pReq->learnerSelfIndex) < 0) return -1;
  if (tEncodeI8(&encoder, pReq->learnerReplica) < 0) return -1;
  for (int32_t i = 0; i < TSDB_MAX_LEARNER_REPLICA; ++i) {
    SReplica *pReplica = &pReq->learnerReplicas[i];
    if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
  }
  tEndEncode(&encoder);

  int32_t tlen = encoder.pos;

@@ -4517,7 +4537,15 @@ int32_t tDeserializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnode
  for (int32_t i = 0; i < 8; ++i) {
    if (tDecodeI64(&decoder, &pReq->reserved[i]) < 0) return -1;
  }

  if (!tDecodeIsEnd(&decoder)) {
    if (tDecodeI8(&decoder, &pReq->learnerSelfIndex) < 0) return -1;
    if (tDecodeI8(&decoder, &pReq->learnerReplica) < 0) return -1;
    for (int32_t i = 0; i < TSDB_MAX_LEARNER_REPLICA; ++i) {
      SReplica *pReplica = &pReq->learnerReplicas[i];
      if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
    }
  }

  tEndDecode(&decoder);
  tDecoderClear(&decoder);
  return 0;

@@ -4828,6 +4856,12 @@ int32_t tSerializeSDCreateMnodeReq(void *buf, int32_t bufLen, SDCreateMnodeReq *
    SReplica *pReplica = &pReq->replicas[i];
    if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
  }
  if (tEncodeI8(&encoder, pReq->learnerReplica) < 0) return -1;
  for (int32_t i = 0; i < TSDB_MAX_LEARNER_REPLICA; ++i) {
    SReplica *pReplica = &pReq->learnerReplicas[i];
    if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
  }
  if (tEncodeI64(&encoder, pReq->lastIndex) < 0) return -1;
  tEndEncode(&encoder);

  int32_t tlen = encoder.pos;

@@ -4845,6 +4879,14 @@ int32_t tDeserializeSDCreateMnodeReq(void *buf, int32_t bufLen, SDCreateMnodeReq
    SReplica *pReplica = &pReq->replicas[i];
    if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
  }
  if (!tDecodeIsEnd(&decoder)) {
    if (tDecodeI8(&decoder, &pReq->learnerReplica) < 0) return -1;
    for (int32_t i = 0; i < TSDB_MAX_LEARNER_REPLICA; ++i) {
      SReplica *pReplica = &pReq->learnerReplicas[i];
      if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
    }
    if (tDecodeI64(&decoder, &pReq->lastIndex) < 0) return -1;
  }
  tEndDecode(&decoder);

  tDecoderClear(&decoder);

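// All of the codecs above extend the wire format the same way: the learner fields are
// appended after every previously-encoded field, and the decoder only reads them when
// tDecodeIsEnd(&decoder) reports that unread bytes remain, so payloads from older senders
// still decode cleanly. A sketch of the pattern for a hypothetical `newField` appended to
// an existing message:
//
//   // encode side: always append the new field last
//   if (tEncodeI8(&encoder, pReq->newField) < 0) return -1;
//
//   // decode side: the field is absent in payloads from older senders, so guard the read
//   if (!tDecodeIsEnd(&decoder)) {
//     if (tDecodeI8(&decoder, &pReq->newField) < 0) return -1;
//   }
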
@@ -32,6 +32,7 @@ typedef struct SDnodeMgmt {
  TdThread               crashReportThread;
  SSingleWorker          mgmtWorker;
  ProcessCreateNodeFp    processCreateNodeFp;
  ProcessAlterNodeTypeFp processAlterNodeTypeFp;
  ProcessDropNodeFp      processDropNodeFp;
  SendMonitorReportFp    sendMonitorReportFp;
  GetVnodeLoadsFp        getVnodeLoadsFp;

@@ -352,6 +352,7 @@ SArray *dmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_SERVER_STATUS, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_SYSTABLE_RETRIEVE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;

  // Requests handled by MNODE
  if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;

@@ -48,6 +48,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
  pMgmt->path = pInput->path;
  pMgmt->name = pInput->name;
  pMgmt->processCreateNodeFp = pInput->processCreateNodeFp;
  pMgmt->processAlterNodeTypeFp = pInput->processAlterNodeTypeFp;
  pMgmt->processDropNodeFp = pInput->processDropNodeFp;
  pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp;
  pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp;

@@ -227,6 +227,9 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
    case TDMT_DND_DROP_SNODE:
      code = (*pMgmt->processDropNodeFp)(SNODE, pMsg);
      break;
    case TDMT_DND_ALTER_MNODE_TYPE:
      code = (*pMgmt->processAlterNodeTypeFp)(MNODE, pMsg);
      break;
    case TDMT_DND_SERVER_STATUS:
      code = dmProcessServerRunStatus(pMgmt, pMsg);
      break;

@@ -24,12 +24,16 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) {
  if (code < 0) return -1;
  tjsonGetInt32ValueFromDouble(pJson, "selfIndex", pOption->selfIndex, code);
  if (code < 0) return 0;
  tjsonGetInt32ValueFromDouble(pJson, "lastIndex", pOption->lastIndex, code);
  if (code < 0) return 0;

  SJson *replicas = tjsonGetObjectItem(pJson, "replicas");
  if (replicas == NULL) return 0;
  pOption->numOfReplicas = tjsonGetArraySize(replicas);
  pOption->numOfTotalReplicas = tjsonGetArraySize(replicas);

  for (int32_t i = 0; i < pOption->numOfReplicas; ++i) {
  pOption->numOfReplicas = 0;

  for (int32_t i = 0; i < pOption->numOfTotalReplicas; ++i) {
    SJson *replica = tjsonGetArrayItem(replicas, i);
    if (replica == NULL) return -1;

@@ -40,6 +44,14 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) {
    if (code < 0) return -1;
    tjsonGetUInt16ValueFromDouble(replica, "port", pReplica->port, code);
    if (code < 0) return -1;
    tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code);
    if (code < 0) return -1;
    if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) {
      pOption->numOfReplicas++;
    }
  }

  for (int32_t i = 0; i < pOption->numOfTotalReplicas; ++i) {
  }

  return 0;

@@ -112,14 +124,14 @@ _OVER:
}

static int32_t mmEncodeOption(SJson *pJson, const SMnodeOpt *pOption) {
  if (pOption->deploy && pOption->numOfReplicas > 0) {
  if (pOption->deploy && pOption->numOfTotalReplicas > 0) {
    if (tjsonAddDoubleToObject(pJson, "selfIndex", pOption->selfIndex) < 0) return -1;

    SJson *replicas = tjsonCreateArray();
    if (replicas == NULL) return -1;
    if (tjsonAddItemToObject(pJson, "replicas", replicas) < 0) return -1;

    for (int32_t i = 0; i < pOption->numOfReplicas; ++i) {
    for (int32_t i = 0; i < pOption->numOfTotalReplicas; ++i) {
      SJson *replica = tjsonCreateObject();
      if (replica == NULL) return -1;

@@ -127,10 +139,13 @@ static int32_t mmEncodeOption(SJson *pJson, const SMnodeOpt *pOption) {
      if (tjsonAddDoubleToObject(replica, "id", pReplica->id) < 0) return -1;
      if (tjsonAddStringToObject(replica, "fqdn", pReplica->fqdn) < 0) return -1;
      if (tjsonAddDoubleToObject(replica, "port", pReplica->port) < 0) return -1;
      if (tjsonAddDoubleToObject(replica, "role", pOption->nodeRoles[i]) < 0) return -1;
      if (tjsonAddItemToArray(replicas, replica) < 0) return -1;
    }
  }

  if (tjsonAddDoubleToObject(pJson, "lastIndex", pOption->lastIndex) < 0) return -1;

  if (tjsonAddDoubleToObject(pJson, "deployed", pOption->deploy) < 0) return -1;

  return 0;

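// With the role and lastIndex fields added, the mnode option file written by
// mmEncodeOption would look roughly like this (field names as in the code above,
// values purely illustrative):
//
//   {
//     "deployed":  1,
//     "selfIndex": 0,
//     "lastIndex": 123,
//     "replicas": [
//       {"id": 1, "fqdn": "node1", "port": 6030, "role": 0},
//       {"id": 2, "fqdn": "node2", "port": 6030, "role": 1}
//     ]
//   }
//
// On decode, numOfTotalReplicas is the array size, while numOfReplicas counts only the
// entries whose role is TAOS_SYNC_ROLE_VOTER.
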
@@ -33,12 +33,24 @@ int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
    return -1;
  }

  SMnodeOpt option = {.deploy = true, .numOfReplicas = createReq.replica, .selfIndex = -1};
  SMnodeOpt option = {.deploy = true, .numOfReplicas = createReq.replica,
                      .numOfTotalReplicas = createReq.replica + createReq.learnerReplica,
                      .selfIndex = -1, .lastIndex = createReq.lastIndex};

  memcpy(option.replicas, createReq.replicas, sizeof(createReq.replicas));
  for (int32_t i = 0; i < option.numOfReplicas; ++i) {
  for (int32_t i = 0; i < createReq.replica; ++i) {
    if (createReq.replicas[i].id == pInput->pData->dnodeId) {
      option.selfIndex = i;
    }
    option.nodeRoles[i] = TAOS_SYNC_ROLE_VOTER;
  }

  memcpy(&(option.replicas[createReq.replica]), createReq.learnerReplicas, sizeof(createReq.learnerReplicas));
  for (int32_t i = 0; i < createReq.learnerReplica; ++i) {
    if (createReq.learnerReplicas[i].id == pInput->pData->dnodeId) {
      option.selfIndex = createReq.replica + i;
    }
    option.nodeRoles[createReq.replica + i] = TAOS_SYNC_ROLE_LEARNER;
  }

  if (option.selfIndex == -1) {

@@ -92,6 +104,8 @@ SArray *mmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_MND_CONNECT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_ACCT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

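// The resulting replica array is laid out voters first, learners after, which is why the
// learner copy starts at option.replicas[createReq.replica] and a learner's selfIndex is
// offset by createReq.replica. For example (illustrative): with 2 voters and 1 learner
// hosted on dnode 3, option.replicas holds [voter0, voter1, learner0], nodeRoles is
// [VOTER, VOTER, LEARNER], and selfIndex on dnode 3 resolves to 2.
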
@@ -45,9 +45,11 @@ static void mmBuildOptionForDeploy(SMnodeMgmt *pMgmt, const SMgmtInputOpt *pInpu
  pOption->dnodeId = pMgmt->pData->dnodeId;
  pOption->selfIndex = 0;
  pOption->numOfReplicas = 1;
  pOption->numOfTotalReplicas = 1;
  pOption->replicas[0].id = 1;
  pOption->replicas[0].port = tsServerPort;
  tstrncpy(pOption->replicas[0].fqdn, tsLocalFqdn, TSDB_FQDN_LEN);
  pOption->lastIndex = SYNC_INDEX_INVALID;
}

static void mmBuildOptionForOpen(SMnodeMgmt *pMgmt, SMnodeOpt *pOption) {

@@ -123,9 +125,10 @@ static int32_t mmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
  }
  tmsgReportStartup("mnode-worker", "initialized");

  if (option.numOfReplicas > 0) {
  if (option.numOfTotalReplicas > 0) {
    option.deploy = true;
    option.numOfReplicas = 0;
    option.numOfTotalReplicas = 0;
    if (mmWriteFile(pMgmt->path, &option) != 0) {
      dError("failed to write mnode file since %s", terrstr());
      return -1;

@@ -152,6 +155,10 @@ static void mmStop(SMnodeMgmt *pMgmt) {
  mndStop(pMgmt->pMnode);
}

static int32_t mmSyncIsCatchUp(SMnodeMgmt *pMgmt) {
  return mndIsCatchUp(pMgmt->pMnode);
}

SMgmtFunc mmGetMgmtFunc() {
  SMgmtFunc mgmtFunc = {0};
  mgmtFunc.openFp = mmOpen;

@@ -162,6 +169,7 @@ SMgmtFunc mmGetMgmtFunc() {
  mgmtFunc.dropFp = (NodeDropFp)mmProcessDropReq;
  mgmtFunc.requiredFp = mmRequire;
  mgmtFunc.getHandlesFp = mmGetMsgHandles;
  mgmtFunc.isCatchUpFp = (NodeIsCatchUpFp)mmSyncIsCatchUp;

  return mgmtFunc;
}

@@ -55,6 +55,7 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
    smClose(pMgmt);
    return -1;
  }

  tmsgReportStartup("snode-impl", "initialized");

  if (smStartWorker(pMgmt) != 0) {

@@ -90,6 +90,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);

// vmFile.c
int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);

@@ -131,15 +131,34 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
  pCfg->tsdbPageSize = pCreate->tsdbPageSize * 1024;

  pCfg->standby = 0;
  pCfg->syncCfg.myIndex = pCreate->selfIndex;
  pCfg->syncCfg.replicaNum = pCreate->replica;
  pCfg->syncCfg.replicaNum = 0;
  pCfg->syncCfg.totalReplicaNum = 0;

  memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo));
  for (int32_t i = 0; i < pCreate->replica; ++i) {
    SNodeInfo *pNode = &pCfg->syncCfg.nodeInfo[i];
    pNode->nodeId = pCreate->replicas[i].id;
    pNode->nodePort = pCreate->replicas[i].port;
    tstrncpy(pNode->nodeFqdn, pCreate->replicas[i].fqdn, TSDB_FQDN_LEN);
    pNode->nodeId = pCreate->replicas[pCfg->syncCfg.replicaNum].id;
    pNode->nodePort = pCreate->replicas[pCfg->syncCfg.replicaNum].port;
    pNode->nodeRole = TAOS_SYNC_ROLE_VOTER;
    tstrncpy(pNode->nodeFqdn, pCreate->replicas[pCfg->syncCfg.replicaNum].fqdn, TSDB_FQDN_LEN);
    tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
    pCfg->syncCfg.replicaNum++;
  }
  if (pCreate->selfIndex != -1) {
    pCfg->syncCfg.myIndex = pCreate->selfIndex;
  }
  for (int32_t i = pCfg->syncCfg.replicaNum; i < pCreate->replica + pCreate->learnerReplica; ++i) {
    SNodeInfo *pNode = &pCfg->syncCfg.nodeInfo[i];
    pNode->nodeId = pCreate->learnerReplicas[pCfg->syncCfg.totalReplicaNum].id;
    pNode->nodePort = pCreate->learnerReplicas[pCfg->syncCfg.totalReplicaNum].port;
    pNode->nodeRole = TAOS_SYNC_ROLE_LEARNER;
    tstrncpy(pNode->nodeFqdn, pCreate->learnerReplicas[pCfg->syncCfg.totalReplicaNum].fqdn, TSDB_FQDN_LEN);
    tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
    pCfg->syncCfg.totalReplicaNum++;
  }
  pCfg->syncCfg.totalReplicaNum += pCfg->syncCfg.replicaNum;
  if (pCreate->learnerSelfIndex != -1) {
    pCfg->syncCfg.myIndex = pCreate->replica + pCreate->learnerSelfIndex;
  }
}

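// Net effect of the rewritten loops: syncCfg.nodeInfo is filled with voters first (counting
// replicaNum up) and then learners (counting totalReplicaNum up from zero before the final
// `totalReplicaNum += replicaNum`), and myIndex is taken from whichever of selfIndex /
// learnerSelfIndex is valid, with a learner's index offset by pCreate->replica.
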
@@ -182,23 +201,40 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    return -1;
  }

  dInfo("vgId:%d, start to create vnode, page:%d pageSize:%d buffer:%d szPage:%d szBuf:%" PRIu64
  if (req.learnerReplica == 0) {
    req.learnerSelfIndex = -1;
  }

  dInfo("vgId:%d, vnode management handle msgType:%s, start to create vnode, page:%d pageSize:%d buffer:%d szPage:%d szBuf:%" PRIu64
        ", cacheLast:%d cacheLastSize:%d sstTrigger:%d tsdbPageSize:%d %d dbname:%s dbId:%" PRId64
        ", days:%d keep0:%d keep1:%d keep2:%d tsma:%d precision:%d compression:%d minRows:%d maxRows:%d"
        ", wal fsync:%d level:%d retentionPeriod:%d retentionSize:%" PRId64 " rollPeriod:%d segSize:%" PRId64
        ", hash method:%d begin:%u end:%u prefix:%d suffix:%d replica:%d selfIndex:%d strict:%d",
        req.vgId, req.pages, req.pageSize, req.buffer, req.pageSize * 1024, (uint64_t)req.buffer * 1024 * 1024,
        ", hash method:%d begin:%u end:%u prefix:%d suffix:%d replica:%d selfIndex:%d "
        "learnerReplica:%d learnerSelfIndex:%d strict:%d",
        req.vgId, TMSG_INFO(pMsg->msgType), req.pages, req.pageSize, req.buffer, req.pageSize * 1024,
        (uint64_t)req.buffer * 1024 * 1024,
        req.cacheLast, req.cacheLastSize, req.sstTrigger, req.tsdbPageSize, req.tsdbPageSize * 1024, req.db, req.dbUid,
        req.daysPerFile, req.daysToKeep0, req.daysToKeep1, req.daysToKeep2, req.isTsma, req.precision, req.compression,
        req.minRows, req.maxRows, req.walFsyncPeriod, req.walLevel, req.walRetentionPeriod, req.walRetentionSize,
        req.walRollPeriod, req.walSegmentSize, req.hashMethod, req.hashBegin, req.hashEnd, req.hashPrefix,
        req.hashSuffix, req.replica, req.selfIndex, req.strict);
        req.hashSuffix, req.replica, req.selfIndex, req.learnerReplica, req.learnerSelfIndex, req.strict);
  for (int32_t i = 0; i < req.replica; ++i) {
    dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", req.vgId, i, req.replicas[i].fqdn, req.replicas[i].port,
          req.replicas[i].id);
  }
  for (int32_t i = 0; i < req.learnerReplica; ++i) {
    dInfo("vgId:%d, learnerReplica:%d ep:%s:%u dnode:%d", req.vgId, i, req.learnerReplicas[i].fqdn,
          req.learnerReplicas[i].port, req.learnerReplicas[i].id);
  }

  SReplica *pReplica = &req.replicas[req.selfIndex];
  SReplica *pReplica = NULL;
  if (req.selfIndex != -1) {
    pReplica = &req.replicas[req.selfIndex];
  } else {
    pReplica = &req.learnerReplicas[req.learnerSelfIndex];
  }
  if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
      strcmp(pReplica->fqdn, tsLocalFqdn) != 0) {
    terrno = TSDB_CODE_INVALID_MSG;

@@ -275,7 +311,8 @@ _OVER:
    vnodeClose(pImpl);
    vnodeDestroy(path, pMgmt->pTfs);
  } else {
    dInfo("vgId:%d, vnode is created", req.vgId);
    dInfo("vgId:%d, vnode management handle msgType:%s, end to create vnode, vnode is created",
          req.vgId, TMSG_INFO(pMsg->msgType));
  }

  tFreeSCreateVnodeReq(&req);

@@ -283,6 +320,110 @@ _OVER:
  return code;
}

int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  SAlterVnodeTypeReq req = {0};
  if (tDeserializeSAlterVnodeReplicaReq(pMsg->pCont, pMsg->contLen, &req) != 0) {
    terrno = TSDB_CODE_INVALID_MSG;
    return -1;
  }

  if (req.learnerReplica == 0) {
    req.learnerSelfIndex = -1;
  }

  dInfo("vgId:%d, vnode management handle msgType:%s, start to process alter-node-type-request",
        req.vgId, TMSG_INFO(pMsg->msgType));

  SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
  if (pVnode == NULL) {
    dError("vgId:%d, failed to alter vnode type since %s", req.vgId, terrstr());
    terrno = TSDB_CODE_VND_NOT_EXIST;
    return -1;
  }

  dInfo("vgId:%d, checking node catch up", req.vgId);
  if (vnodeIsCatchUp(pVnode->pImpl) != 0) {
    return -1;
  }

  dInfo("node:%s, caught up with leader, continue to process alter-node-type-request", pMgmt->name);

  int32_t vgId = req.vgId;
  dInfo("vgId:%d, start to alter vnode type replica:%d selfIndex:%d strict:%d", vgId, req.replica, req.selfIndex,
        req.strict);
  for (int32_t i = 0; i < req.replica; ++i) {
    SReplica *pReplica = &req.replicas[i];
    dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
  }
  for (int32_t i = 0; i < req.learnerReplica; ++i) {
    SReplica *pReplica = &req.learnerReplicas[i];
    dInfo("vgId:%d, learnerReplicas:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
  }

  if (req.replica <= 0 ||
      (req.selfIndex < 0 && req.learnerSelfIndex < 0) ||
      req.selfIndex >= req.replica || req.learnerSelfIndex >= req.learnerReplica) {
    terrno = TSDB_CODE_INVALID_MSG;
    dError("vgId:%d, failed to alter replica since invalid msg", vgId);
    return -1;
  }

  SReplica *pReplica = NULL;
  if (req.selfIndex != -1) {
    pReplica = &req.replicas[req.selfIndex];
  } else {
    pReplica = &req.learnerReplicas[req.learnerSelfIndex];
  }

  if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
      strcmp(pReplica->fqdn, tsLocalFqdn) != 0) {
    terrno = TSDB_CODE_INVALID_MSG;
    dError("vgId:%d, dnodeId:%d ep:%s:%u not matched with local dnode", vgId, pReplica->id, pReplica->fqdn,
           pReplica->port);
    return -1;
  }

  dInfo("vgId:%d, start to close vnode", vgId);
  SWrapperCfg wrapperCfg = {
      .dropped = pVnode->dropped,
      .vgId = pVnode->vgId,
      .vgVersion = pVnode->vgVersion,
  };
  tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
  vmCloseVnode(pMgmt, pVnode, false);

  char path[TSDB_FILENAME_LEN] = {0};
  snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vgId);

  dInfo("vgId:%d, start to alter vnode replica at %s", vgId, path);
  if (vnodeAlterReplica(path, &req, pMgmt->pTfs) < 0) {
    dError("vgId:%d, failed to alter vnode at %s since %s", vgId, path, terrstr());
    return -1;
  }

  dInfo("vgId:%d, begin to open vnode", vgId);
  SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb);
  if (pImpl == NULL) {
    dError("vgId:%d, failed to open vnode at %s since %s", vgId, path, terrstr());
    return -1;
  }

  if (vmOpenVnode(pMgmt, &wrapperCfg, pImpl) != 0) {
    dError("vgId:%d, failed to open vnode mgmt since %s", vgId, terrstr());
    return -1;
  }

  if (vnodeStart(pImpl) != 0) {
    dError("vgId:%d, failed to start sync since %s", vgId, terrstr());
    return -1;
  }

  dInfo("vgId:%d, vnode management handle msgType:%s, end to process alter-node-type-request, vnode config is altered",
        req.vgId, TMSG_INFO(pMsg->msgType));
  return 0;
}

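// The alter-vnode-type request is effectively a restart with a new sync config: confirm the
// node has caught up, close the running vnode (keeping its SWrapperCfg), rewrite the replica
// set on disk with vnodeAlterReplica(), then reopen it via vnodeOpen()/vmOpenVnode() and
// restart sync with vnodeStart().
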
int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  SDisableVnodeWriteReq req = {0};
  if (tDeserializeSDisableVnodeWriteReq(pMsg->pCont, pMsg->contLen, &req) != 0) {

@@ -377,21 +518,40 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    return -1;
  }

  if (alterReq.learnerReplica == 0) {
    alterReq.learnerSelfIndex = -1;
  }

  int32_t vgId = alterReq.vgId;
  dInfo("vgId:%d, start to alter vnode replica:%d selfIndex:%d strict:%d", vgId, alterReq.replica, alterReq.selfIndex,
        alterReq.strict);
  dInfo("vgId:%d, vnode management handle msgType:%s, start to alter vnode replica:%d selfIndex:%d learnerReplica:%d "
        "learnerSelfIndex:%d strict:%d",
        vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
        alterReq.learnerSelfIndex, alterReq.strict);
  for (int32_t i = 0; i < alterReq.replica; ++i) {
    SReplica *pReplica = &alterReq.replicas[i];
    dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
  }

  if (alterReq.replica <= 0 || alterReq.selfIndex < 0 || alterReq.selfIndex >= alterReq.replica) {
  for (int32_t i = 0; i < alterReq.learnerReplica; ++i) {
    SReplica *pReplica = &alterReq.learnerReplicas[i];
    dInfo("vgId:%d, learnerReplicas:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
  }

  if (alterReq.replica <= 0 ||
      (alterReq.selfIndex < 0 && alterReq.learnerSelfIndex < 0) ||
      alterReq.selfIndex >= alterReq.replica || alterReq.learnerSelfIndex >= alterReq.learnerReplica) {
    terrno = TSDB_CODE_INVALID_MSG;
    dError("vgId:%d, failed to alter replica since invalid msg", vgId);
    return -1;
  }

  SReplica *pReplica = &alterReq.replicas[alterReq.selfIndex];
  SReplica *pReplica = NULL;
  if (alterReq.selfIndex != -1) {
    pReplica = &alterReq.replicas[alterReq.selfIndex];
  } else {
    pReplica = &alterReq.learnerReplicas[alterReq.learnerSelfIndex];
  }

  if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
      strcmp(pReplica->fqdn, tsLocalFqdn) != 0) {
    terrno = TSDB_CODE_INVALID_MSG;

@@ -425,7 +585,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    return -1;
  }

  dInfo("vgId:%d, close vnode", vgId);
  dInfo("vgId:%d, begin to open vnode", vgId);
  SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, pMgmt->msgCb);
  if (pImpl == NULL) {
    dError("vgId:%d, failed to open vnode at %s since %s", vgId, path, terrstr());

@@ -442,7 +602,10 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    return -1;
  }

  dInfo("vgId:%d, vnode config is altered", vgId);
  dInfo("vgId:%d, vnode management handle msgType:%s, end to alter vnode replica:%d selfIndex:%d learnerReplica:%d "
        "learnerSelfIndex:%d strict:%d",
        vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
        alterReq.learnerSelfIndex, alterReq.strict);
  return 0;
}

@@ -548,6 +711,7 @@ SArray *vmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_VND_TRIM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_SYNC_TIMEOUT_ELECTION, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_SYNC_CLIENT_REQUEST, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;

@@ -15,6 +15,7 @@

#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "vnd.h"

SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
  SVnodeObj *pVnode = NULL;

@@ -78,6 +79,11 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {

void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) {
  char path[TSDB_FILENAME_LEN] = {0};
  bool atExit = true;

  if (vnodeIsLeader(pVnode->pImpl)) {
    vnodeProposeCommitOnNeed(pVnode->pImpl, atExit);
  }

  taosThreadRwlockWrlock(&pMgmt->lock);
  taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));

@@ -49,6 +49,9 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
    case TDMT_VND_ALTER_HASHRANGE:
      code = vmProcessAlterHashRangeReq(pMgmt, pMsg);
      break;
    case TDMT_DND_ALTER_VNODE_TYPE:
      code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
      break;
    default:
      terrno = TSDB_CODE_MSG_NOT_PROCESSED;
      dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);

@@ -169,6 +169,8 @@ static int32_t dmProcessCreateNodeReq(EDndNodeType ntype, SRpcMsg *pMsg) {
    return -1;
  }

  dInfo("start to process create-node-request");

  pWrapper = &pDnode->wrappers[ntype];
  if (taosMkDir(pWrapper->path) != 0) {
    dmReleaseWrapper(pWrapper);

@@ -198,6 +200,64 @@ static int32_t dmProcessCreateNodeReq(EDndNodeType ntype, SRpcMsg *pMsg) {
  return code;
}

static int32_t dmProcessAlterNodeTypeReq(EDndNodeType ntype, SRpcMsg *pMsg) {
  SDnode *pDnode = dmInstance();

  SMgmtWrapper *pWrapper = dmAcquireWrapper(pDnode, ntype);
  if (pWrapper == NULL) {
    dError("fail to process alter node type since node not exist");
    return -1;
  }
  dmReleaseWrapper(pWrapper);

  dInfo("node:%s, start to process alter-node-type-request", pWrapper->name);

  pWrapper = &pDnode->wrappers[ntype];

  if (pWrapper->func.isCatchUpFp != NULL) {
    dInfo("node:%s, checking node catch up", pWrapper->name);
    if ((*pWrapper->func.isCatchUpFp)(pWrapper->pMgmt) != 0) {
      return -1;
    }
  }

  dInfo("node:%s, caught up with leader, continue to process alter-node-type-request", pWrapper->name);

  taosThreadMutexLock(&pDnode->mutex);

  dInfo("node:%s, stopping node", pWrapper->name);
  dmStopNode(pWrapper);
  dInfo("node:%s, closing node", pWrapper->name);
  dmCloseNode(pWrapper);

  pWrapper = &pDnode->wrappers[ntype];
  if (taosMkDir(pWrapper->path) != 0) {
    dmReleaseWrapper(pWrapper);
    terrno = TAOS_SYSTEM_ERROR(errno);
    dError("failed to create dir:%s since %s", pWrapper->path, terrstr());
    return -1;
  }

  SMgmtInputOpt input = dmBuildMgmtInputOpt(pWrapper);

  dInfo("node:%s, start to create", pWrapper->name);
  int32_t code = (*pWrapper->func.createFp)(&input, pMsg);
  if (code != 0) {
    dError("node:%s, failed to create since %s", pWrapper->name, terrstr());
  } else {
    dInfo("node:%s, has been created", pWrapper->name);
    code = dmOpenNode(pWrapper);
    if (code == 0) {
      code = dmStartNode(pWrapper);
    }
    pWrapper->deployed = true;
    pWrapper->required = true;
  }

  taosThreadMutexUnlock(&pDnode->mutex);
  return code;
}

static int32_t dmProcessDropNodeReq(EDndNodeType ntype, SRpcMsg *pMsg) {
  SDnode *pDnode = dmInstance();

@@ -251,6 +311,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
      .name = pWrapper->name,
      .pData = &pWrapper->pDnode->data,
      .processCreateNodeFp = dmProcessCreateNodeReq,
      .processAlterNodeTypeFp = dmProcessAlterNodeTypeReq,
      .processDropNodeFp = dmProcessDropNodeReq,
      .sendMonitorReportFp = dmSendMonitorReport,
      .getVnodeLoadsFp = dmGetVnodeLoads,

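// dmProcessAlterNodeTypeReq mirrors dmProcessCreateNodeReq but targets an already-running
// node: it first consults the node's isCatchUpFp (wired to mndIsCatchUp for the mnode) so a
// learner is only promoted once its log has caught up, then stops and closes the node under
// the dnode mutex and re-runs the create/open/start sequence with the new role.
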
@@ -89,6 +89,7 @@ typedef void (*SendMonitorReportFp)();
typedef void (*GetVnodeLoadsFp)(SMonVloadInfo *pInfo);
typedef void (*GetMnodeLoadsFp)(SMonMloadInfo *pInfo);
typedef void (*GetQnodeLoadsFp)(SQnodeLoad *pInfo);
typedef int32_t (*ProcessAlterNodeTypeFp)(EDndNodeType ntype, SRpcMsg *pMsg);

typedef struct {
  int32_t dnodeId;

@@ -112,6 +113,7 @@ typedef struct {
  SDnodeData            *pData;
  SMsgCb                 msgCb;
  ProcessCreateNodeFp    processCreateNodeFp;
  ProcessAlterNodeTypeFp processAlterNodeTypeFp;
  ProcessDropNodeFp      processDropNodeFp;
  SendMonitorReportFp    sendMonitorReportFp;
  GetVnodeLoadsFp        getVnodeLoadsFp;

@@ -132,6 +134,7 @@ typedef int32_t (*NodeCreateFp)(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
typedef int32_t (*NodeDropFp)(const SMgmtInputOpt *pInput, SRpcMsg *pMsg);
typedef int32_t (*NodeRequireFp)(const SMgmtInputOpt *pInput, bool *required);
typedef SArray *(*NodeGetHandlesFp)();  // array of SMgmtHandle
typedef bool (*NodeIsCatchUpFp)(void *pMgmt);

typedef struct {
  NodeOpenFp openFp;

@@ -142,6 +145,7 @@ typedef struct {
  NodeDropFp       dropFp;
  NodeRequireFp    requiredFp;
  NodeGetHandlesFp getHandlesFp;
  NodeIsCatchUpFp  isCatchUpFp;
} SMgmtFunc;

typedef struct {

@@ -215,6 +215,8 @@ typedef struct {
  bool       syncRestore;
  int64_t    stateStartTime;
  SDnodeObj* pDnode;
  int32_t    role;
  SyncIndex  lastIndex;
} SMnodeObj;

typedef struct {

@@ -342,6 +344,7 @@ typedef struct {
  ESyncState syncState;
  bool       syncRestore;
  bool       syncCanRead;
  ESyncRole  nodeRole;
} SVnodeGid;

typedef struct {

@@ -362,7 +365,7 @@ typedef struct {
  int8_t    compact;
  int8_t    isTsma;
  int8_t    replica;
  SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
  SVnodeGid vnodeGid[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
  void*     pTsma;
  int32_t   numOfCachedTables;
} SVgObj;

@@ -92,8 +92,11 @@ typedef struct {
  int64_t       transSeq;
  TdThreadMutex lock;
  int8_t        selfIndex;
  int8_t        numOfTotalReplicas;
  int8_t        numOfReplicas;
  SReplica      replicas[TSDB_MAX_REPLICA];
  SReplica      replicas[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
  ESyncRole     nodeRoles[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
  SyncIndex     lastIndex;
} SSyncMgmt;

typedef struct {

@@ -70,7 +70,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
      if (tEncodeI32(pEncoder, innerSz) < 0) return -1;
      for (int32_t j = 0; j < innerSz; j++) {
        SStreamTask *pTask = taosArrayGetP(pArray, j);
        if (tEncodeSStreamTask(pEncoder, pTask) < 0) return -1;
        if (tEncodeStreamTask(pEncoder, pTask) < 0) return -1;
      }
    }

@@ -130,7 +130,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
        taosArrayDestroy(pArray);
        return -1;
      }
      if (tDecodeSStreamTask(pDecoder, pTask) < 0) {
      if (tDecodeStreamTask(pDecoder, pTask) < 0) {
        taosMemoryFree(pTask);
        taosArrayDestroy(pArray);
        return -1;

@@ -158,7 +158,10 @@ void tFreeStreamObj(SStreamObj *pStream) {
  taosMemoryFree(pStream->sql);
  taosMemoryFree(pStream->ast);
  taosMemoryFree(pStream->physicalPlan);
  if (pStream->outputSchema.nCols) taosMemoryFree(pStream->outputSchema.pSchema);

  if (pStream->outputSchema.nCols) {
    taosMemoryFree(pStream->outputSchema.pSchema);
  }

  int32_t sz = taosArrayGetSize(pStream->tasks);
  for (int32_t i = 0; i < sz; i++) {

@@ -166,11 +169,14 @@ void tFreeStreamObj(SStreamObj *pStream) {
    int32_t taskSz = taosArrayGetSize(pLevel);
    for (int32_t j = 0; j < taskSz; j++) {
      SStreamTask *pTask = taosArrayGetP(pLevel, j);
      tFreeSStreamTask(pTask);
      tFreeStreamTask(pTask);
    }

    taosArrayDestroy(pLevel);
  }

  taosArrayDestroy(pStream->tasks);

  // tagSchema.pSchema
  if (pStream->tagSchema.nCols > 0) {
    taosMemoryFree(pStream->tagSchema.pSchema);

@@ -491,7 +491,10 @@ static void mndSetOptions(SMnode *pMnode, const SMnodeOpt *pOption) {
  pMnode->selfDnodeId = pOption->dnodeId;
  pMnode->syncMgmt.selfIndex = pOption->selfIndex;
  pMnode->syncMgmt.numOfReplicas = pOption->numOfReplicas;
  pMnode->syncMgmt.numOfTotalReplicas = pOption->numOfTotalReplicas;
  pMnode->syncMgmt.lastIndex = pOption->lastIndex;
  memcpy(pMnode->syncMgmt.replicas, pOption->replicas, sizeof(pOption->replicas));
  memcpy(pMnode->syncMgmt.nodeRoles, pOption->nodeRoles, sizeof(pOption->nodeRoles));
}

SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {

@@ -582,6 +585,11 @@ int32_t mndStart(SMnode *pMnode) {
  return mndInitTimer(pMnode);
}

int32_t mndIsCatchUp(SMnode *pMnode) {
  int64_t rid = pMnode->syncMgmt.sync;
  return syncIsCatchUp(rid);
}

void mndStop(SMnode *pMnode) {
  mndSetStop(pMnode);
  mndSyncStop(pMnode);

@@ -23,7 +23,7 @@
#include "mndTrans.h"
#include "tmisce.h"

#define MNODE_VER_NUMBER   1
#define MNODE_VER_NUMBER   2
#define MNODE_RESERVE_SIZE 64

static int32_t mndCreateDefaultMnode(SMnode *pMnode);

@@ -53,6 +53,7 @@ int32_t mndInitMnode(SMnode *pMnode) {

  mndSetMsgHandle(pMnode, TDMT_MND_CREATE_MNODE, mndProcessCreateMnodeReq);
  mndSetMsgHandle(pMnode, TDMT_DND_CREATE_MNODE_RSP, mndTransProcessRsp);
  mndSetMsgHandle(pMnode, TDMT_DND_ALTER_MNODE_TYPE_RSP, mndTransProcessRsp);
  mndSetMsgHandle(pMnode, TDMT_MND_ALTER_MNODE, mndProcessAlterMnodeReq);
  mndSetMsgHandle(pMnode, TDMT_MND_ALTER_MNODE_RSP, mndTransProcessRsp);
  mndSetMsgHandle(pMnode, TDMT_MND_DROP_MNODE, mndProcessDropMnodeReq);

@@ -126,6 +127,8 @@ static SSdbRaw *mndMnodeActionEncode(SMnodeObj *pObj) {
  SDB_SET_INT32(pRaw, dataPos, pObj->id, _OVER)
  SDB_SET_INT64(pRaw, dataPos, pObj->createdTime, _OVER)
  SDB_SET_INT64(pRaw, dataPos, pObj->updateTime, _OVER)
  SDB_SET_INT32(pRaw, dataPos, pObj->role, _OVER)
  SDB_SET_INT64(pRaw, dataPos, pObj->lastIndex, _OVER)
  SDB_SET_RESERVE(pRaw, dataPos, MNODE_RESERVE_SIZE, _OVER)

  terrno = 0;

@@ -149,7 +152,7 @@ static SSdbRow *mndMnodeActionDecode(SSdbRaw *pRaw) {
  int8_t sver = 0;
  if (sdbGetRawSoftVer(pRaw, &sver) != 0) return NULL;

  if (sver != MNODE_VER_NUMBER) {
  if (sver != 1 && sver != 2) {
    terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
    goto _OVER;
  }

@@ -164,6 +167,10 @@ static SSdbRow *mndMnodeActionDecode(SSdbRaw *pRaw) {
  SDB_GET_INT32(pRaw, dataPos, &pObj->id, _OVER)
  SDB_GET_INT64(pRaw, dataPos, &pObj->createdTime, _OVER)
  SDB_GET_INT64(pRaw, dataPos, &pObj->updateTime, _OVER)
  if (sver >= 2) {
    SDB_GET_INT32(pRaw, dataPos, &pObj->role, _OVER)
    SDB_GET_INT64(pRaw, dataPos, &pObj->lastIndex, _OVER)
  }
  SDB_GET_RESERVE(pRaw, dataPos, MNODE_RESERVE_SIZE, _OVER)

  terrno = 0;

@@ -204,7 +211,9 @@ static int32_t mndMnodeActionDelete(SSdb *pSdb, SMnodeObj *pObj) {

static int32_t mndMnodeActionUpdate(SSdb *pSdb, SMnodeObj *pOld, SMnodeObj *pNew) {
  mTrace("mnode:%d, perform update action, old row:%p new row:%p", pOld->id, pOld, pNew);
  pOld->role = pNew->role;
  pOld->updateTime = pNew->updateTime;
  pOld->lastIndex = pNew->lastIndex;
  mndReloadSyncConfig(pSdb->pMnode);

  return 0;

@@ -302,6 +311,27 @@ static int32_t mndBuildCreateMnodeRedoAction(STrans *pTrans, SDCreateMnodeReq *p
  return 0;
}

static int32_t mndBuildAlterMnodeTypeRedoAction(STrans *pTrans,
                                                SDAlterMnodeTypeReq *pAlterMnodeTypeReq, SEpSet *pAlterMnodeTypeEpSet) {
  int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, pAlterMnodeTypeReq);
  void   *pReq = taosMemoryMalloc(contLen);
  tSerializeSDCreateMnodeReq(pReq, contLen, pAlterMnodeTypeReq);

  STransAction action = {
      .epSet = *pAlterMnodeTypeEpSet,
      .pCont = pReq,
      .contLen = contLen,
      .msgType = TDMT_DND_ALTER_MNODE_TYPE,
      .acceptableCode = TSDB_CODE_MNODE_ALREADY_DEPLOYED,
  };

  if (mndTransAppendRedoAction(pTrans, &action) != 0) {
    taosMemoryFree(pReq);
    return -1;
  }
  return 0;
}

static int32_t mndBuildAlterMnodeRedoAction(STrans *pTrans, SDCreateMnodeReq *pAlterReq, SEpSet *pAlterEpSet) {
  int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, pAlterReq);
  void   *pReq = taosMemoryMalloc(contLen);

@@ -347,6 +377,7 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
  SSdb            *pSdb = pMnode->pSdb;
  void            *pIter = NULL;
  int32_t          numOfReplicas = 0;
  int32_t          numOfLearnerReplicas = 0;
  SDCreateMnodeReq createReq = {0};
  SEpSet           createEpset = {0};

@@ -355,18 +386,29 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
    pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
    if (pIter == NULL) break;

    createReq.replicas[numOfReplicas].id = pMObj->id;
    createReq.replicas[numOfReplicas].port = pMObj->pDnode->port;
    memcpy(createReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
    if (pMObj->role == TAOS_SYNC_ROLE_VOTER) {
      createReq.replicas[numOfReplicas].id = pMObj->id;
      createReq.replicas[numOfReplicas].port = pMObj->pDnode->port;
      memcpy(createReq.replicas[numOfReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
      numOfReplicas++;
    } else {
      createReq.learnerReplicas[numOfLearnerReplicas].id = pMObj->id;
      createReq.learnerReplicas[numOfLearnerReplicas].port = pMObj->pDnode->port;
      memcpy(createReq.learnerReplicas[numOfLearnerReplicas].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
      numOfLearnerReplicas++;
    }

    numOfReplicas++;
    sdbRelease(pSdb, pMObj);
  }

  createReq.replica = numOfReplicas + 1;
  createReq.replicas[numOfReplicas].id = pDnode->id;
  createReq.replicas[numOfReplicas].port = pDnode->port;
  memcpy(createReq.replicas[numOfReplicas].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
  createReq.replica = numOfReplicas;
  createReq.learnerReplica = numOfLearnerReplicas + 1;
  createReq.learnerReplicas[numOfLearnerReplicas].id = pDnode->id;
  createReq.learnerReplicas[numOfLearnerReplicas].port = pDnode->port;
  memcpy(createReq.learnerReplicas[numOfLearnerReplicas].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);

  createReq.lastIndex = pObj->lastIndex;

  createEpset.inUse = 0;
  createEpset.numOfEps = 1;

@@ -378,23 +420,78 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
  return 0;
}

static int32_t mndSetAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) {
  SSdb               *pSdb = pMnode->pSdb;
  void               *pIter = NULL;
  SDAlterMnodeTypeReq alterReq = {0};
  SEpSet              createEpset = {0};

  while (1) {
    SMnodeObj *pMObj = NULL;
    pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
    if (pIter == NULL) break;

    if (pMObj->role == TAOS_SYNC_ROLE_VOTER) {
      alterReq.replicas[alterReq.replica].id = pMObj->id;
      alterReq.replicas[alterReq.replica].port = pMObj->pDnode->port;
      memcpy(alterReq.replicas[alterReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
      alterReq.replica++;
    } else {
      alterReq.learnerReplicas[alterReq.learnerReplica].id = pMObj->id;
      alterReq.learnerReplicas[alterReq.learnerReplica].port = pMObj->pDnode->port;
      memcpy(alterReq.learnerReplicas[alterReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
      alterReq.learnerReplica++;
    }

    sdbRelease(pSdb, pMObj);
  }

  alterReq.replicas[alterReq.replica].id = pDnode->id;
  alterReq.replicas[alterReq.replica].port = pDnode->port;
  memcpy(alterReq.replicas[alterReq.replica].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
  alterReq.replica++;

  alterReq.lastIndex = pObj->lastIndex;

  createEpset.inUse = 0;
  createEpset.numOfEps = 1;
  createEpset.eps[0].port = pDnode->port;
  memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);

  if (mndBuildAlterMnodeTypeRedoAction(pTrans, &alterReq, &createEpset) != 0) return -1;

  return 0;
}

static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMCreateMnodeReq *pCreate) {
  int32_t code = -1;

  SMnodeObj mnodeObj = {0};
  mnodeObj.id = pDnode->id;
  mnodeObj.createdTime = taosGetTimestampMs();
  mnodeObj.updateTime = mnodeObj.createdTime;

  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "create-mnode");
  if (pTrans == NULL) goto _OVER;
  mndTransSetSerial(pTrans);
  mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;

  SMnodeObj mnodeObj = {0};
  mnodeObj.id = pDnode->id;
  mnodeObj.createdTime = taosGetTimestampMs();
  mnodeObj.updateTime = mnodeObj.createdTime;
  mnodeObj.role = TAOS_SYNC_ROLE_LEARNER;
  mnodeObj.lastIndex = pMnode->applied;

  if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER;
  if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
  if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;

  SMnodeObj mnodeLeaderObj = {0};
  mnodeLeaderObj.id = pDnode->id;
  mnodeLeaderObj.createdTime = taosGetTimestampMs();
  mnodeLeaderObj.updateTime = mnodeLeaderObj.createdTime;
  mnodeLeaderObj.role = TAOS_SYNC_ROLE_VOTER;
  mnodeLeaderObj.lastIndex = pMnode->applied + 1;

  if (mndSetAlterMnodeTypeRedoActions(pMnode, pTrans, pDnode, &mnodeLeaderObj) != 0) goto _OVER;
  if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeLeaderObj) != 0) goto _OVER;
  if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;

  code = 0;

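// mndCreateMnode now stages two images of the same mnode inside one serial transaction:
// first a TAOS_SYNC_ROLE_LEARNER object whose redo action is the create-mnode message,
// then a TAOS_SYNC_ROLE_VOTER object whose redo action is the new TDMT_DND_ALTER_MNODE_TYPE
// message. The node therefore joins the raft group as a learner and is promoted to voter
// only after the alter-node-type step, which (per the dnode-side handler) waits for the
// learner to catch up.
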
@@ -514,6 +611,7 @@ static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnode

int32_t mndSetDropMnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj, bool force) {
  if (pObj == NULL) return 0;
  pObj->lastIndex = pMnode->applied;
  if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj, force) != 0) return -1;
  if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) return -1;
  return 0;

@@ -730,7 +828,8 @@ static void mndReloadSyncConfig(SMnode *pMnode) {
  void   *pIter = NULL;
  int32_t updatingMnodes = 0;
  int32_t readyMnodes = 0;
  SSyncCfg cfg = {.myIndex = -1};
  SSyncCfg cfg = {.myIndex = -1, .lastIndex = 0,};
  SyncIndex maxIndex = 0;

  while (1) {
    pIter = sdbFetchAll(pSdb, SDB_MNODE, pIter, (void **)&pObj, &objStatus, false);

@@ -745,26 +844,41 @@ static void mndReloadSyncConfig(SMnode *pMnode) {
    }

    if (objStatus == SDB_STATUS_READY || objStatus == SDB_STATUS_CREATING) {
      SNodeInfo *pNode = &cfg.nodeInfo[cfg.replicaNum];
      SNodeInfo *pNode = &cfg.nodeInfo[cfg.totalReplicaNum];
      pNode->nodeId = pObj->pDnode->id;
      pNode->clusterId = mndGetClusterId(pMnode);
      pNode->nodePort = pObj->pDnode->port;
      pNode->nodeRole = pObj->role;
      tstrncpy(pNode->nodeFqdn, pObj->pDnode->fqdn, TSDB_FQDN_LEN);
      (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
      mInfo("vgId:1, ep:%s:%u dnode:%d", pNode->nodeFqdn, pNode->nodePort, pNode->nodeId);
      if (pObj->pDnode->id == pMnode->selfDnodeId) {
        cfg.myIndex = cfg.replicaNum;
        cfg.myIndex = cfg.totalReplicaNum;
      }
      if (pNode->nodeRole == TAOS_SYNC_ROLE_VOTER) {
        cfg.replicaNum++;
      }
      cfg.totalReplicaNum++;
      if (pObj->lastIndex > cfg.lastIndex) {
        cfg.lastIndex = pObj->lastIndex;
      }
      cfg.replicaNum++;
    }

    if (objStatus == SDB_STATUS_DROPPING) {
      if (pObj->lastIndex > cfg.lastIndex) {
        cfg.lastIndex = pObj->lastIndex;
      }
    }

    mInfo("vgId:1, mnode:%d, role:%d, lastIndex:%" PRId64, pObj->id, pObj->role, pObj->lastIndex);

    sdbReleaseLock(pSdb, pObj, false);
  }

  if (readyMnodes <= 0 || updatingMnodes <= 0) {
    mInfo("vgId:1, mnode sync not reconfig since readyMnodes:%d updatingMnodes:%d", readyMnodes, updatingMnodes);
    return;
  }
  //if (readyMnodes <= 0 || updatingMnodes <= 0) {
  //  mInfo("vgId:1, mnode sync not reconfig since readyMnodes:%d updatingMnodes:%d", readyMnodes, updatingMnodes);
  //  return;
  //}

  if (cfg.myIndex == -1) {
#if 1

@@ -777,12 +891,14 @@ static void mndReloadSyncConfig(SMnode *pMnode) {
    return;
  }

  if (updatingMnodes > 0) {
    mInfo("vgId:1, mnode sync reconfig, replica:%d myIndex:%d", cfg.replicaNum, cfg.myIndex);
    for (int32_t i = 0; i < cfg.replicaNum; ++i) {
  if (pMnode->syncMgmt.sync > 0) {
    mInfo("vgId:1, mnode sync reconfig, totalReplica:%d replica:%d myIndex:%d",
          cfg.totalReplicaNum, cfg.replicaNum, cfg.myIndex);

    for (int32_t i = 0; i < cfg.totalReplicaNum; ++i) {
      SNodeInfo *pNode = &cfg.nodeInfo[i];
      mInfo("vgId:1, index:%d, ep:%s:%u dnode:%d cluster:%" PRId64, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId,
            pNode->clusterId);
      mInfo("vgId:1, index:%d, ep:%s:%u dnode:%d cluster:%" PRId64 " role:%d", i, pNode->nodeFqdn, pNode->nodePort,
            pNode->nodeId, pNode->clusterId, pNode->nodeRole);
    }

    int32_t code = syncReconfig(pMnode->syncMgmt.sync, &cfg);

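// In the reloaded SSyncCfg, totalReplicaNum now counts every READY/CREATING mnode while
// replicaNum counts voters only, and cfg.lastIndex is the maximum lastIndex across all
// rows (including DROPPING ones). myIndex is recorded against the position in the full
// node list rather than the voter list, which is why it is taken from cfg.totalReplicaNum.
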
@@ -138,7 +138,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
for (int32_t j = 0; j < sinkLvSize; j++) {
SStreamTask* pLastLevelTask = taosArrayGetP(sinkLv, j);
if (pLastLevelTask->nodeId == pVgInfo->vgId) {
pVgInfo->taskId = pLastLevelTask->taskId;
pVgInfo->taskId = pLastLevelTask->id.taskId;
break;
}
}

@@ -149,7 +149,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
SArray* pArray = taosArrayGetP(pStream->tasks, 0);
// one sink only
SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0);
pTask->fixedEpDispatcher.taskId = lastLevelTask->taskId;
pTask->fixedEpDispatcher.taskId = lastLevelTask->id.taskId;
pTask->fixedEpDispatcher.nodeId = lastLevelTask->nodeId;
pTask->fixedEpDispatcher.epSet = lastLevelTask->epSet;
}

@@ -224,7 +224,7 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
continue;
}

SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
sdbRelease(pSdb, pVgroup);
terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -260,7 +260,7 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {

int32_t mndAddFixedSinkTaskToStream(SMnode* pMnode, SStreamObj* pStream) {
SArray* tasks = taosArrayGetP(pStream->tasks, 0);
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;

@@ -350,12 +350,13 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
return -1;
}

pInnerTask = tNewSStreamTask(pStream->uid);
pInnerTask = tNewStreamTask(pStream->uid);
if (pInnerTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
qDestroyQueryPlan(pPlan);
return -1;
}

pInnerTask->fillHistory = pStream->fillHistory;
mndAddTaskToTaskSet(taskInnerLevel, pInnerTask);

@@ -421,7 +422,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
continue;
}

SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
sdbRelease(pSdb, pVgroup);

@@ -440,7 +441,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
pTask->outputType = TASK_OUTPUT__FIXED_DISPATCH;

pTask->fixedEpDispatcher.taskId = pInnerTask->taskId;
pTask->fixedEpDispatcher.taskId = pInnerTask->id.taskId;
pTask->fixedEpDispatcher.nodeId = pInnerTask->nodeId;
pTask->fixedEpDispatcher.epSet = pInnerTask->epSet;

@@ -460,7 +461,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
pEpInfo->childId = pTask->selfChildId;
pEpInfo->epSet = pTask->epSet;
pEpInfo->nodeId = pTask->nodeId;
pEpInfo->taskId = pTask->taskId;
pEpInfo->taskId = pTask->id.taskId;
taosArrayPush(pInnerTask->childEpInfo, &pEpInfo);
sdbRelease(pSdb, pVgroup);
}

@@ -491,7 +492,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
continue;
}

SStreamTask* pTask = tNewSStreamTask(pStream->uid);
SStreamTask* pTask = tNewStreamTask(pStream->uid);
if (pTask == NULL) {
sdbRelease(pSdb, pVgroup);
qDestroyQueryPlan(pPlan);
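A refactor that recurs throughout this commit replaces the flat `taskId` field with a nested `id` struct (`pTask->taskId` becomes `pTask->id.taskId`). A hedged sketch of what such a grouping looks like; the field set below is an assumption for illustration, not the real SStreamTask layout:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t     streamId;
  int32_t     taskId;
  const char *idStr; /* printable "0x<streamId>-<taskId>" for log lines */
} STaskIdSketch;

typedef struct {
  STaskIdSketch id;     /* was: int32_t taskId; */
  int32_t       nodeId;
} SStreamTaskSketch;

int main(void) {
  SStreamTaskSketch t = {.id = {.streamId = 1, .taskId = 42, .idStr = "0x1-42"}, .nodeId = 3};
  /* every call site changes from t.taskId to t.id.taskId */
  printf("task:%d (%s) on node:%d\n", (int)t.id.taskId, t.id.idStr, (int)t.nodeId);
  return 0;
}
```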
@@ -35,12 +35,12 @@

static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj *pNewStream);
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStreamObj *pNewStream);
static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq);
static int32_t mndProcessDropStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq);
// static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
/*static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);*/
static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq);
static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);
static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq);
static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta);
static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);

@@ -418,7 +418,7 @@ FAIL:
int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
SEncoder encoder;
tEncoderInit(&encoder, NULL, 0);
tEncodeSStreamTask(&encoder, pTask);
tEncodeStreamTask(&encoder, pTask);
int32_t size = encoder.pos;
int32_t tlen = sizeof(SMsgHead) + size;
tEncoderClear(&encoder);

@@ -430,7 +430,7 @@ int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
((SMsgHead *)buf)->vgId = htonl(pTask->nodeId);
void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, size);
tEncodeSStreamTask(&encoder, pTask);
tEncodeStreamTask(&encoder, pTask);
tEncoderClear(&encoder);

STransAction action = {0};

@@ -601,7 +601,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
return -1;
}
pReq->head.vgId = htonl(pTask->nodeId);
pReq->taskId = pTask->taskId;
pReq->taskId = pTask->id.taskId;
STransAction action = {0};
memcpy(&action.epSet, &pTask->epSet, sizeof(SEpSet));
action.pCont = pReq;

@@ -1209,7 +1209,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock

// task id
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pTask->taskId, false);
colDataSetVal(pColInfo, numOfRows, (const char *)&pTask->id.taskId, false);

// node type
char nodeType[20 + VARSTR_HEADER_SIZE] = {0};
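`mndPersistTaskDeployReq` above encodes the task twice: once with a NULL buffer purely to measure the payload, then again into an exactly-sized allocation. A self-contained sketch of that two-pass sizing idiom with a toy encoder (this is not the real tEncoder API, just the same shape):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint8_t *buf; /* NULL on the sizing pass */
  size_t   cap;
  size_t   pos;
} Encoder;

static void encodeI32(Encoder *e, int32_t v) {
  if (e->buf != NULL && e->pos + sizeof(v) <= e->cap) {
    memcpy(e->buf + e->pos, &v, sizeof(v));
  }
  e->pos += sizeof(v); /* pos advances even when only sizing */
}

int main(void) {
  Encoder sizer = {NULL, 0, 0}; /* pass 1: count bytes only */
  encodeI32(&sizer, 7);
  encodeI32(&sizer, 11);

  uint8_t *buf = malloc(sizer.pos); /* exact size, no guessing */
  Encoder real = {buf, sizer.pos, 0}; /* pass 2: write for real */
  encodeI32(&real, 7);
  encodeI32(&real, 11);

  printf("encoded %zu bytes\n", real.pos);
  free(buf);
  return 0;
}
```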
@@ -671,7 +671,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
// possibly no vg is changed
// when each topic is re-balanced, issue a trans to save the results in sdb.
if (mndPersistRebResult(pMnode, pMsg, &rebOutput) < 0) {
mError("mq re-balance persist output error, possibly vnode splitted or dropped");
mError("mq re-balance persist output error, possibly vnode splitted or dropped,msg:%s", terrstr());
}

taosArrayDestroy(rebOutput.newConsumers);
@@ -237,6 +237,23 @@ static void mndBecomeFollower(const SSyncFSM *pFsm) {
taosThreadMutexUnlock(&pMgmt->lock);
}

static void mndBecomeLearner(const SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
mInfo("vgId:1, become learner");

taosThreadMutexLock(&pMgmt->lock);
if (pMgmt->transId != 0) {
mInfo("vgId:1, become learner and post sem, trans:%d, failed to propose since not leader", pMgmt->transId);
pMgmt->transId = 0;
pMgmt->transSec = 0;
pMgmt->transSeq = 0;
pMgmt->errCode = TSDB_CODE_SYN_NOT_LEADER;
tsem_post(&pMgmt->syncSem);
}
taosThreadMutexUnlock(&pMgmt->lock);
}

static void mndBecomeLeader(const SSyncFSM *pFsm) {
mInfo("vgId:1, become leader");
SMnode *pMnode = pFsm->data;

@@ -278,6 +295,7 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
pFsm->FpReConfigCb = NULL;
pFsm->FpBecomeLeaderCb = mndBecomeLeader;
pFsm->FpBecomeFollowerCb = mndBecomeFollower;
pFsm->FpBecomeLearnerCb = mndBecomeLearner;
pFsm->FpGetSnapshot = mndSyncGetSnapshot;
pFsm->FpGetSnapshotInfo = mndSyncGetSnapshotInfo;
pFsm->FpSnapshotStartRead = mndSnapshotStartRead;

@@ -317,13 +335,16 @@ int32_t mndInitSync(SMnode *pMnode) {

mInfo("vgId:1, start to open sync, replica:%d selfIndex:%d", pMgmt->numOfReplicas, pMgmt->selfIndex);
SSyncCfg *pCfg = &syncInfo.syncCfg;
pCfg->totalReplicaNum = pMgmt->numOfTotalReplicas;
pCfg->replicaNum = pMgmt->numOfReplicas;
pCfg->myIndex = pMgmt->selfIndex;
for (int32_t i = 0; i < pMgmt->numOfReplicas; ++i) {
pCfg->lastIndex = pMgmt->lastIndex;
for (int32_t i = 0; i < pMgmt->numOfTotalReplicas; ++i) {
SNodeInfo *pNode = &pCfg->nodeInfo[i];
pNode->nodeId = pMgmt->replicas[i].id;
pNode->nodePort = pMgmt->replicas[i].port;
tstrncpy(pNode->nodeFqdn, pMgmt->replicas[i].fqdn, sizeof(pNode->nodeFqdn));
pNode->nodeRole = pMgmt->nodeRoles[i];
(void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
mInfo("vgId:1, index:%d ep:%s:%u dnode:%d cluster:%" PRId64, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId,
pNode->clusterId);
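The new `mndBecomeLearner` callback plugs into the sync FSM alongside the leader and follower callbacks, and, like the follower path, releases any transaction still waiting on a quorum. A sketch of that callback-table shape under simplified, assumed types (this is not the real SSyncFSM):

```c
#include <stdio.h>

typedef struct Fsm {
  void *data;
  void (*becomeLeader)(struct Fsm *);
  void (*becomeFollower)(struct Fsm *);
  void (*becomeLearner)(struct Fsm *); /* newly added role transition */
} Fsm;

static void onLeader(Fsm *f)   { (void)f; printf("become leader\n"); }
static void onFollower(Fsm *f) { (void)f; printf("become follower\n"); }
static void onLearner(Fsm *f) {
  (void)f;
  /* like the follower path: wake up any pending proposal, since a
   * learner can never commit a transaction it started as leader */
  printf("become learner\n");
}

int main(void) {
  Fsm fsm = {.data = NULL,
             .becomeLeader = onLeader,
             .becomeFollower = onFollower,
             .becomeLearner = onLearner};
  fsm.becomeLearner(&fsm); /* invoked by the sync layer on demotion */
  return 0;
}
```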
@@ -62,6 +62,7 @@ int32_t mndInitVgroup(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_VND_COMPACT_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_DISABLE_WRITE_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_SYNC_FORCE_FOLLOWER_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_DND_ALTER_VNODE_TYPE_RSP, mndTransProcessRsp);

mndSetMsgHandle(pMnode, TDMT_MND_REDISTRIBUTE_VGROUP, mndProcessRedistributeVgroupMsg);
mndSetMsgHandle(pMnode, TDMT_MND_SPLIT_VGROUP, mndProcessSplitVgroupMsg);

@@ -203,7 +204,7 @@ static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew) {
pNew->compStorage = pOld->compStorage;
pNew->pointsWritten = pOld->pointsWritten;
pNew->compact = pOld->compact;
memcpy(pOld->vnodeGid, pNew->vnodeGid, TSDB_MAX_REPLICA * sizeof(SVnodeGid));
memcpy(pOld->vnodeGid, pNew->vnodeGid, (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA) * sizeof(SVnodeGid));
return 0;
}

@@ -244,8 +245,10 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
createReq.compression = pDb->cfg.compression;
createReq.strict = pDb->cfg.strict;
createReq.cacheLast = pDb->cfg.cacheLast;
createReq.replica = pVgroup->replica;
createReq.replica = 0;
createReq.learnerReplica = 0;
createReq.selfIndex = -1;
createReq.learnerSelfIndex = -1;
createReq.hashBegin = pVgroup->hashBegin;
createReq.hashEnd = pVgroup->hashEnd;
createReq.hashMethod = pDb->cfg.hashMethod;

@@ -263,7 +266,15 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
createReq.tsdbPageSize = pDb->cfg.tsdbPageSize;

for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = &createReq.replicas[v];
SReplica *pReplica = NULL;

if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
pReplica = &createReq.replicas[createReq.replica];
}
else{
pReplica = &createReq.learnerReplicas[createReq.learnerReplica];
}

SVnodeGid *pVgid = &pVgroup->vnodeGid[v];
SDnodeObj *pVgidDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
if (pVgidDnode == NULL) {

@@ -275,21 +286,40 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
memcpy(pReplica->fqdn, pVgidDnode->fqdn, TSDB_FQDN_LEN);
mndReleaseDnode(pMnode, pVgidDnode);

if (pDnode->id == pVgid->dnodeId) {
createReq.selfIndex = v;
if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
if (pDnode->id == pVgid->dnodeId) {
createReq.selfIndex = createReq.replica;
}
}
else{
if (pDnode->id == pVgid->dnodeId) {
createReq.learnerSelfIndex = createReq.learnerReplica;
}
}

if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
createReq.replica++;
}
else{
createReq.learnerReplica++;
}
}

if (createReq.selfIndex == -1) {
if (createReq.selfIndex == -1 && createReq.learnerSelfIndex == -1) {
terrno = TSDB_CODE_APP_ERROR;
return NULL;
}

mInfo("vgId:%d, build create vnode req, replica:%d selfIndex:%d strict:%d", createReq.vgId, createReq.replica,
createReq.selfIndex, createReq.strict);
mInfo("vgId:%d, build create vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d",
createReq.vgId, createReq.replica, createReq.selfIndex, createReq.learnerReplica,
createReq.learnerSelfIndex, createReq.strict);
for (int32_t i = 0; i < createReq.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", createReq.vgId, i, createReq.replicas[i].fqdn, createReq.replicas[i].port);
}
for (int32_t i = 0; i < createReq.learnerReplica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", createReq.vgId, i, createReq.learnerReplicas[i].fqdn,
createReq.learnerReplicas[i].port);
}

int32_t contLen = tSerializeSCreateVnodeReq(NULL, 0, &createReq);
if (contLen < 0) {
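The rewritten loop above no longer indexes `replicas[]` by loop position; it routes each vnode into `replicas[]` or `learnerReplicas[]` by role, counting each side separately and recording the self index relative to its own array. A standalone sketch of that partitioning, with simplified stand-in types:

```c
#include <stdio.h>

#define MAX_REPLICA 5

typedef enum { ROLE_VOTER, ROLE_LEARNER } Role;
typedef struct { int dnodeId; Role role; } Gid;

typedef struct {
  int replica, learnerReplica;       /* counts per side            */
  int selfIndex, learnerSelfIndex;   /* -1 means "not in this set" */
  int replicas[MAX_REPLICA], learnerReplicas[MAX_REPLICA];
} Req;

static void build(const Gid *gids, int n, int selfDnode, Req *req) {
  req->replica = req->learnerReplica = 0;
  req->selfIndex = req->learnerSelfIndex = -1;
  for (int v = 0; v < n; ++v) {
    if (gids[v].role == ROLE_VOTER) {
      /* selfIndex is relative to the voter array, not the loop index */
      if (gids[v].dnodeId == selfDnode) req->selfIndex = req->replica;
      req->replicas[req->replica++] = gids[v].dnodeId;
    } else {
      if (gids[v].dnodeId == selfDnode) req->learnerSelfIndex = req->learnerReplica;
      req->learnerReplicas[req->learnerReplica++] = gids[v].dnodeId;
    }
  }
}

int main(void) {
  Gid gids[] = {{1, ROLE_VOTER}, {2, ROLE_LEARNER}, {3, ROLE_VOTER}};
  Req req;
  build(gids, 3, 2, &req);
  printf("voters:%d learners:%d learnerSelfIndex:%d\n",
         req.replica, req.learnerReplica, req.learnerSelfIndex); /* 2 1 0 */
  return 0;
}
```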
@@ -356,12 +386,24 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
SAlterVnodeReplicaReq alterReq = {
.vgId = pVgroup->vgId,
.strict = pDb->cfg.strict,
.replica = pVgroup->replica,
.replica = 0,
.learnerReplica = 0,
.selfIndex = -1,
.learnerSelfIndex = -1,
};

for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = &alterReq.replicas[v];
SReplica *pReplica = NULL;

if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
pReplica = &alterReq.replicas[alterReq.replica];
alterReq.replica++;
}
else{
pReplica = &alterReq.learnerReplicas[alterReq.learnerReplica];
alterReq.learnerReplica++;
}

SVnodeGid *pVgid = &pVgroup->vnodeGid[v];
SDnodeObj *pVgidDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
if (pVgidDnode == NULL) return NULL;

@@ -371,18 +413,30 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
memcpy(pReplica->fqdn, pVgidDnode->fqdn, TSDB_FQDN_LEN);
mndReleaseDnode(pMnode, pVgidDnode);

if (dnodeId == pVgid->dnodeId) {
alterReq.selfIndex = v;
if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
if (dnodeId == pVgid->dnodeId) {
alterReq.selfIndex = v;
}
}
else{
if (dnodeId == pVgid->dnodeId) {
alterReq.learnerSelfIndex = v;
}
}
}
alterReq.replica = pVgroup->replica;
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d strict:%d", alterReq.vgId, alterReq.replica,
alterReq.selfIndex, alterReq.strict);

mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d",
alterReq.vgId, alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict);
for (int32_t i = 0; i < alterReq.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", alterReq.vgId, i, alterReq.replicas[i].fqdn, alterReq.replicas[i].port);
}
for (int32_t i = 0; i < alterReq.learnerReplica; ++i) {
mInfo("vgId:%d, learnerReplica:%d ep:%s:%u", alterReq.vgId, i,
alterReq.learnerReplicas[i].fqdn, alterReq.learnerReplicas[i].port);
}

if (alterReq.selfIndex == -1) {
if (alterReq.selfIndex == -1 && alterReq.learnerSelfIndex == -1) {
terrno = TSDB_CODE_APP_ERROR;
return NULL;
}

@@ -1194,6 +1248,30 @@ int32_t mndAddAlterVnodeReplicaAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0;
}

int32_t mndAddAlterVnodeTypeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
if (pDnode == NULL) return -1;

STransAction action = {0};
action.epSet = mndGetDnodeEpset(pDnode);
mndReleaseDnode(pMnode, pDnode);

int32_t contLen = 0;
void *pReq = mndBuildAlterVnodeReplicaReq(pMnode, pDb, pVgroup, dnodeId, &contLen);
if (pReq == NULL) return -1;

action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_DND_ALTER_VNODE_TYPE;

if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
}

return 0;
}

static int32_t mndAddDisableVnodeWriteAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);

@@ -1953,18 +2031,33 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
pVgroup->vnodeGid[0].dnodeId);

//add first
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;

newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_LEARNER;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
return -1;

if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
return -1;

if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;

//add second
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_VOTER;

if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
return -1;
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[2]) != 0) return -1;

if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
} else if (newVgroup.replica == 3 && pNewDb->cfg.replications == 1) {
mInfo("db:%s, vgId:%d, will remove 2 vnodes, vn:0 dnode:%d vn:1 dnode:%d vn:2 dnode:%d", pVgroup->dbName,
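The expansion path above appears to grow a 1-replica vgroup to 3 in two rounds, creating each new vnode as a learner before promoting it to voter, so the voting quorum never references a vnode that does not exist yet. A sketch that simply enumerates the assumed step sequence (strings stand in for the real transaction actions):

```c
#include <stdio.h>

int main(void) {
  const char *steps[] = {
      "add vnode 1 as LEARNER, keep vnode 0 as VOTER",
      "create vnode 1 on its dnode",
      "promote vnode 1: alter vnode type LEARNER -> VOTER",
      "confirm the alter on the vgroup",
      "add vnode 2 with all three marked VOTER",
      "alter replica on vnodes 0 and 1, create vnode 2",
      "confirm the alter on the vgroup",
  };
  for (unsigned i = 0; i < sizeof(steps) / sizeof(steps[0]); ++i) {
    printf("step %u: %s\n", i + 1, steps[i]);
  }
  return 0;
}
```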
@@ -32,6 +32,7 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) {
tDecoderClear(&decoder);
goto FAIL;
}

tDecoderClear(&decoder);

int32_t taskId = req.taskId;

@@ -65,7 +66,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);

pTask->refCnt = 1;
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->inputQueue = streamQueueOpen();
pTask->outputQueue = streamQueueOpen();

@@ -76,21 +77,19 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
pTask->pMsgCb = &pSnode->msgCb;
pTask->startVer = ver;
pTask->chkInfo.version = ver;
pTask->pMeta = pSnode->pMeta;

pTask->pState = streamStateOpen(pSnode->path, pTask, false, -1, -1);
if (pTask->pState == NULL) {
return -1;
}

SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
.pStateBackend = pTask->pState,
};
int32_t numOfChildEp = taosArrayGetSize(pTask->childEpInfo);
SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState };

pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, 0);
ASSERT(pTask->exec.executor);
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, 0);
ASSERT(pTask->exec.pExecutor);

streamSetupTrigger(pTask);
return 0;

@@ -140,9 +139,10 @@ int32_t sndProcessTaskDeployReq(SSnode *pSnode, char *msg, int32_t msgLen) {
if (pTask == NULL) {
return -1;
}

SDecoder decoder;
tDecoderInit(&decoder, (uint8_t *)msg, msgLen);
code = tDecodeSStreamTask(&decoder, pTask);
code = tDecodeStreamTask(&decoder, pTask);
if (code < 0) {
tDecoderClear(&decoder);
taosMemoryFree(pTask);

@@ -153,7 +153,7 @@ int32_t sndProcessTaskDeployReq(SSnode *pSnode, char *msg, int32_t msgLen) {
ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);

// 2.save task
code = streamMetaAddTask(pSnode->pMeta, -1, pTask);
code = streamMetaAddDeployedTask(pSnode->pMeta, -1, pTask);
if (code < 0) {
return -1;
}
@@ -57,6 +57,7 @@ target_sources(

# tq
"src/tq/tq.c"
"src/tq/tqUtil.c"
"src/tq/tqScan.c"
"src/tq/tqMeta.c"
"src/tq/tqRead.c"

@@ -64,6 +65,7 @@ target_sources(
"src/tq/tqPush.c"
"src/tq/tqSink.c"
"src/tq/tqCommit.c"
"src/tq/tqRestore.c"
"src/tq/tqSnapshot.c"
"src/tq/tqOffsetSnapshot.c"
)
@@ -68,6 +68,7 @@ void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot);
void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list);
int32_t vnodeIsCatchUp(SVnode *pVnode);

int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);

@@ -92,7 +93,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
void vnodeProposeCommitOnNeed(SVnode *pVnode);
void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit);

// meta
typedef struct SMeta SMeta; // todo: remove

@@ -256,15 +257,16 @@ void tqCloseReader(STqReader *);

void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);

int32_t tqSeekVer(STqReader *pReader, int64_t ver, const char *id);
void tqNextBlock(STqReader *pReader, SFetchRet *ret);
int32_t extractSubmitMsgFromWal(SWalReader *pReader, SPackedData *pPackedData);

int32_t tqReaderSetSubmitReq2(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
// int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock2(STqReader *pReader);
bool tqNextDataBlock(STqReader *pReader);
bool tqNextDataBlockFilterOut2(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock2(SSDataBlock *pBlock, STqReader *pReader, SSubmitTbData **pSubmitTbDataRet);
int32_t tqRetrieveTaosxBlock2(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
@@ -80,7 +80,7 @@ typedef struct {

typedef struct {
int8_t subType;
STqReader* pExecReader;
STqReader* pTqReader;
qTaskInfo_t task;
union {
STqExecCol execCol;

@@ -128,6 +128,10 @@ typedef struct {
tmr_h timer;
} STqMgmt;

typedef struct {
int32_t size;
} STqOffsetHead;

static STqMgmt tqMgmt = {0};

int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);

@@ -154,10 +158,6 @@ int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);

typedef struct {
int32_t size;
} STqOffsetHead;

STqOffsetStore* tqOffsetOpen(STQ* pTq);
void tqOffsetClose(STqOffsetStore*);
STqOffset* tqOffsetRead(STqOffsetStore* pStore, const char* subscribeKey);

@@ -176,6 +176,18 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);

// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
int32_t tqStreamTasksScanWal(STQ* pTq);

// tq util
void createStreamTaskOffsetKey(char* dst, uint64_t streamId, uint32_t taskId);
int32_t tqAddInputBlockNLaunchTask(SStreamTask* pTask, SStreamQueueItem* pQueueItem, int64_t ver);
int32_t launchTaskForWalBlock(SStreamTask* pTask, SFetchRet* pRet, STqOffset* pOffset);
int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg);

void doSaveTaskOffset(STqOffsetStore* pOffsetStore, const char* pKey, int64_t ver);
void saveOffsetForAllTasks(STQ* pTq, int64_t ver);
void initOffsetForAllRestoreTasks(STQ* pTq);
int32_t transferToWalReadTask(SStreamMeta* pStreamMeta, SArray* pTaskList);

#ifdef __cplusplus
}
@@ -96,8 +96,7 @@ int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg);

// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
int32_t vnodeShouldCommit(SVnode* pVnode);
void vnodeUpdCommitSched(SVnode* pVnode);
int32_t vnodeShouldCommit(SVnode* pVnode, bool atExit);
void vnodeRollback(SVnode* pVnode);
int32_t vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
int32_t vnodeCommitInfo(const char* dir);

@@ -115,7 +114,6 @@ void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg, int32_t code);
bool vnodeIsLeader(SVnode* pVnode);
bool vnodeIsRoleLeader(SVnode* pVnode);
int vnodeShouldCommit(SVnode* pVnode);

#ifdef __cplusplus
}
@@ -192,9 +192,10 @@ void tqCleanUp();
STQ* tqOpen(const char* path, SVnode* pVnode);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t type);
int tqUnregisterPushEntry(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqStartStreamTasks(STQ* pTq);  // restore all stream tasks after vnode launching completed.

int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);

@@ -377,7 +378,6 @@ struct SVnode {
STQ* pTq;
SSink* pSink;
tsem_t canCommit;
SVCommitSched commitSched;
int64_t sync;
TdThreadMutex lock;
bool blocked;

@@ -386,9 +386,6 @@ struct SVnode {
int32_t blockSec;
int64_t blockSeq;
SQHandle* pQuery;
#if 0
SRpcHandleInfo blockInfo;
#endif
};

#define TD_VID(PVNODE) ((PVNODE)->config.vgId)
@@ -531,10 +531,11 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
return;
}

SHashObj* pHashObj = (SHashObj*)p[0];

STagFilterResEntry** pEntry = taosHashGet(pHashObj, &p[1], sizeof(uint64_t));

{
if (pEntry != NULL && (*pEntry) != NULL) {
int64_t st = taosGetTimestampUs();

SListIter iter = {0};

@@ -547,9 +548,9 @@ static void freePayload(const void* key, size_t keyLen, void* value) {
void* tmp = tdListPopNode(&((*pEntry)->list), pNode);
taosMemoryFree(tmp);

int64_t et = taosGetTimestampUs();
metaInfo("clear items in cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
(et - st) / 1000.0);
double el = (taosGetTimestampUs() - st) / 1000.0;
metaInfo("clear items in meta-cache, remain cached item:%d, elapsed time:%.2fms", listNEles(&((*pEntry)->list)),
el);
break;
}
}
@@ -168,7 +168,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids,

for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pRSmaInfo->taskInfo[i]) {
if ((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, isAdd)) < 0) {
if ((terrno = qUpdateTableListForStreamScanner(pRSmaInfo->taskInfo[i], tbUids, isAdd)) < 0) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
terrstr());
@@ -18,6 +18,7 @@
// 0: not init
// 1: already inited
// 2: wait to be inited or cleanup
#define WAL_READ_TASKS_ID (-1)

int32_t tqInit() {
int8_t old;

@@ -61,12 +62,12 @@ static void destroyTqHandle(void* data) {
if (pData->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
taosMemoryFreeClear(pData->execHandle.execCol.qmsg);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__DB) {
tqCloseReader(pData->execHandle.pExecReader);
tqCloseReader(pData->execHandle.pTqReader);
walCloseReader(pData->pWalReader);
taosHashCleanup(pData->execHandle.execDb.pFilterOutTbUid);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
walCloseReader(pData->pWalReader);
tqCloseReader(pData->execHandle.pExecReader);
tqCloseReader(pData->execHandle.pTqReader);
}
}

@@ -82,12 +83,18 @@ static void tqPushEntryFree(void* data) {
taosMemoryFree(p);
}

static bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
}

STQ* tqOpen(const char* path, SVnode* pVnode) {
STQ* pTq = taosMemoryCalloc(1, sizeof(STQ));
if (pTq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}

pTq->path = taosStrdup(path);
pTq->pVnode = pVnode;
pTq->walLogLastVer = pVnode->pWal->vers.lastVer;
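`tqOffsetLessOrEqual`, moved up in this hunk, orders two log offsets; presumably the commit path uses it to ignore a commit that does not advance the stored offset. A self-contained sketch of that guard with simplified types (the real STqOffset also carries snapshot-based offset kinds):

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum { OFFSET_LOG, OFFSET_SNAPSHOT } OffsetType;

typedef struct {
  OffsetType type;
  int64_t    version;
} Offset;

/* true when pNew does not advance past pOld; only log offsets are ordered */
static bool offsetLessOrEqual(const Offset *pNew, const Offset *pOld) {
  return pNew->type == OFFSET_LOG && pOld->type == OFFSET_LOG &&
         pNew->version <= pOld->version;
}

int main(void) {
  Offset stored = {OFFSET_LOG, 100};
  Offset incoming = {OFFSET_LOG, 90};
  if (offsetLessOrEqual(&incoming, &stored)) {
    printf("ignore stale commit at version %" PRId64 "\n", incoming.version);
  }
  return 0;
}
```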
@@ -138,44 +145,6 @@ void tqClose(STQ* pTq) {
taosMemoryFree(pTq);
}

int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
if (code < 0) {
return -1;
}
int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
}

((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_META_RSP;
((SMqRspHead*)buf)->epoch = pReq->epoch;
((SMqRspHead*)buf)->consumerId = pReq->consumerId;

void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));

SEncoder encoder = {0};
tEncoderInit(&encoder, abuf, len);
tEncodeSMqMetaRsp(&encoder, pRsp);
tEncoderClear(&encoder);

SRpcMsg resp = {
.info = pMsg->info,
.pCont = buf,
.contLen = tlen,
.code = 0,
};
tmsgSendRsp(&resp);

tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) send rsp, res msg type %d, offset type:%d",
TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);

return 0;
}

static int32_t doSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch,
int64_t consumerId, int32_t type) {
int32_t len = 0;

@@ -253,11 +222,6 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}

static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
}

int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
STqOffset offset = {0};
int32_t vgId = TD_VID(pTq->pVnode);

@@ -330,318 +294,6 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
return 0;
}

static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t subType) {
pRsp->reqOffset = pReq->reqOffset;

pRsp->blockData = taosArrayInit(0, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));

if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL) {
return -1;
}

pRsp->withTbName = 0;
pRsp->withSchema = false;
return 0;
}

static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
pRsp->reqOffset = pReq->reqOffset;

pRsp->withTbName = 1;
pRsp->withSchema = 1;
pRsp->blockData = taosArrayInit(0, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
pRsp->blockSchema = taosArrayInit(0, sizeof(void*));

if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
return -1;
}

return 0;
}

static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, bool* pBlockReturned) {
uint64_t consumerId = pRequest->consumerId;
STqOffsetVal reqOffset = pRequest->reqOffset;
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
int32_t vgId = TD_VID(pTq->pVnode);

*pBlockReturned = false;

// In this vnode, data has been polled by consumer for this topic, so let's continue from the last offset value.
if (pOffset != NULL) {
*pOffsetVal = pOffset->val;

char formatBuf[80];
tFormatOffset(formatBuf, 80, pOffsetVal);
tqDebug("tmq poll: consumer:0x%" PRIx64
", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%" PRIx64,
consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
return 0;
} else {
// no poll occurs in this vnode for this topic, let's seek to the right offset value.
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
if (pRequest->useSnapshot) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot",
consumerId, pHandle->subKey, vgId);

if (pHandle->fetchMeta) {
tqOffsetResetToMeta(pOffsetVal, 0);
} else {
tqOffsetResetToData(pOffsetVal, 0, 0);
}
} else {
pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
if (pHandle->pRef == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}

// offset set to previous version when init
tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer - 1);
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);

tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, (latest) offset reset to %" PRId64, consumerId,
pHandle->subKey, vgId, dataRsp.rspOffset.version);
int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
tDeleteSMqDataRsp(&dataRsp);

*pBlockReturned = true;
return code;
} else {
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
tqOffsetResetToLog(&taosxRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);

*pBlockReturned = true;
return code;
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, vgId, pRequest->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
return -1;
}
}

return 0;
}
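`extractResetOffsetVal` above resolves a consumer's starting position when no offset has been committed: earliest replays from the first retained entry, latest starts after the current WAL head, and none is an error. A compact sketch of the same three-way dispatch (the constants and signature here are assumptions, not the real API):

```c
#include <stdint.h>
#include <stdio.h>

enum { RESET_NONE = -3, RESET_EARLIEST = -2, RESET_LATEST = -1 };

/* returns the version to resume after, or -1 with *err set */
static int64_t resolveStart(int reset, int64_t firstVer, int64_t lastVer, int *err) {
  *err = 0;
  switch (reset) {
    case RESET_EARLIEST: return firstVer - 1; /* replay from the first entry  */
    case RESET_LATEST:   return lastVer;      /* only new data from now on    */
    default:             *err = 1; return -1; /* RESET_NONE: caller must fail */
  }
}

int main(void) {
  int err = 0;
  int64_t v = resolveStart(RESET_LATEST, 10, 250, &err);
  printf("resume after version %lld, err:%d\n", (long long)v, err);
  return 0;
}
```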
#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)

static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* pOffset) {
uint64_t consumerId = pRequest->consumerId;
int32_t vgId = TD_VID(pTq->pVnode);

SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);

// lock
taosWLockLatch(&pTq->lock);

qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
int code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
if (code != 0) {
goto end;
}

// till now, all data has been transferred to the consumer; new data needs to be pushed to the client once it arrives.
if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
dataRsp.reqOffset.version == dataRsp.rspOffset.version && pHandle->consumerId == pRequest->consumerId) {
code = tqRegisterPushEntry(pTq, pHandle, pRequest, pMsg, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
taosWUnLockLatch(&pTq->lock);
return code;
}

code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP);

// NOTE: this pHandle->consumerId may have been changed already.

end : {
char buf[80] = {0};
tFormatOffset(buf, 80, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64
" code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
taosWUnLockLatch(&pTq->lock);
tDeleteSMqDataRsp(&dataRsp);
}
return code;
}

static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* offset) {
int code = 0;
int32_t vgId = TD_VID(pTq->pVnode);
SWalCkHead* pCkHead = NULL;
SMqMetaRsp metaRsp = {0};
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);

if (offset->type != TMQ_OFFSET__LOG) {
if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) {
return -1;
}

if (metaRsp.metaRspLen > 0) {
code = tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp);
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64
",ts:%" PRId64,
pRequest->consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
metaRsp.rspOffset.ts);
taosMemoryFree(metaRsp.metaRsp);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}

tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
",ts:%" PRId64,
pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type,
taosxRsp.rspOffset.uid, taosxRsp.rspOffset.ts);
if (taosxRsp.blockNum > 0) {
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
return code;
} else {
*offset = taosxRsp.rspOffset;
}
}

if (offset->type == TMQ_OFFSET__LOG) {
int64_t fetchVer = offset->version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
tDeleteSTaosxRsp(&taosxRsp);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
int totalRows = 0;
while (1) {
int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
if (savedEpoch > pRequest->epoch) {
tqWarn("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey:%s vgId:%d offset %" PRId64
", found new consumer epoch %d, discard req epoch %d",
pRequest->consumerId, pRequest->epoch, pHandle->subKey, vgId, fetchVer, savedEpoch, pRequest->epoch);
break;
}

if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
}

SWalCont* pHead = &pCkHead->head;
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d",
pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType);

// process meta
if (pHead->msgType != TDMT_VND_SUBMIT) {
if (totalRows > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer - 1);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
}

tqDebug("fetch meta msg, ver:%" PRId64 ", type:%s", pHead->version, TMSG_INFO(pHead->msgType));
tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp) < 0) {
code = -1;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
code = 0;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}

// process data
SPackedData submit = {
.msgStr = POINTER_SHIFT(pHead->body, sizeof(SSubmitReq2Msg)),
.msgLen = pHead->bodyLen - sizeof(SSubmitReq2Msg),
.ver = pHead->version,
};

if (tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows) < 0) {
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
pRequest->subKey);
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
return -1;
}

if (totalRows >= 4096 || taosxRsp.createTableNum > 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return code;
} else {
fetchVer++;
}
}
}

tDeleteSTaosxRsp(&taosxRsp);
taosMemoryFreeClear(pCkHead);
return 0;
}

static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
int32_t code = -1;
STqOffsetVal offset = {0};
STqOffsetVal reqOffset = pRequest->reqOffset;

// 1. reset the offset if needed
if (IS_OFFSET_RESET_TYPE(reqOffset.type)) {
// handle the reset offset cases, according to the consumer's choice.
bool blockReturned = false;
code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned);
if (code != 0) {
return code;
}

// empty block returned, quit
if (blockReturned) {
return 0;
}
} else { // use the consumer specified offset
// the offset value may not be monotonically increasing??
offset = reqOffset;
}

// this is a normal subscription request
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}

// todo handle the case where re-balance occurs.
// for taosx
return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
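The WAL scan loop above accumulates submit blocks and only flushes a response once enough rows have piled up (4096 in this code) or a table-creating block shows up, which must reach the consumer before data that depends on it. A sketch of just that flush decision:

```c
#include <stdbool.h>
#include <stdio.h>

#define FLUSH_ROW_THRESHOLD 4096

static bool shouldFlush(int totalRows, int createTableNum) {
  /* schema-changing blocks are sent immediately so the consumer can apply
   * the new table before later data blocks reference it */
  return totalRows >= FLUSH_ROW_THRESHOLD || createTableNum > 0;
}

int main(void) {
  printf("%d\n", shouldFlush(100, 0));  /* 0: keep scanning  */
  printf("%d\n", shouldFlush(5000, 0)); /* 1: flush          */
  printf("%d\n", shouldFlush(10, 1));   /* 1: flush for meta */
  return 0;
}
```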
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq req = {0};
if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) {

@@ -689,7 +341,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64,
consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId);

return doPollDataForMq(pTq, pHandle, &req, pMsg);
return tqExtractDataForMq(pTq, pHandle, &req, pMsg);
}

int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {

@@ -815,10 +467,10 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
&pHandle->execHandle.numOfCols, req.newConsumerId);
void* scanner = NULL;
qExtractStreamScanner(pHandle->execHandle.task, &scanner);
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
pHandle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
pHandle->execHandle.pExecReader = tqOpenReader(pVnode);
pHandle->execHandle.pTqReader = tqOpenReader(pVnode);

pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);

@@ -837,8 +489,8 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
}
pHandle->execHandle.pExecReader = tqOpenReader(pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
pHandle->execHandle.pTqReader = tqOpenReader(pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList);
taosArrayDestroy(tbUidList);

buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,

@@ -874,7 +526,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
atomic_store_32(&pHandle->epoch, -1);

// remove if it has been registered in the push manager, and return one empty block to consumer
tqUnregisterPushEntry(pTq, req.subKey, (int32_t)strlen(req.subKey), pHandle->consumerId, true);
tqUnregisterPushHandle(pTq, req.subKey, (int32_t)strlen(req.subKey), pHandle->consumerId, true);

atomic_store_64(&pHandle->consumerId, req.newConsumerId);
atomic_add_fetch_32(&pHandle->epoch, 1);

@@ -896,16 +548,14 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
}

int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
#if 0
if (pTask->taskLevel == TASK_LEVEL__AGG) {
A(taosArrayGetSize(pTask->childEpInfo) != 0);
}
#endif
// todo extract method
char buf[128] = {0};
sprintf(buf, "0x%"PRIx64"-%d", pTask->id.streamId, pTask->id.taskId);

int32_t vgId = TD_VID(pTq->pVnode);
pTask->id.idStr = taosStrdup(buf);
pTask->refCnt = 1;
pTask->schedStatus = TASK_SCHED_STATUS__INACTIVE;

pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
pTask->inputQueue = streamQueueOpen();
pTask->outputQueue = streamQueueOpen();

@@ -916,11 +566,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
pTask->pMsgCb = &pTq->pVnode->msgCb;
pTask->startVer = ver;
pTask->pMeta = pTq->pStreamMeta;
pTask->chkInfo.version = ver;

// expand executor
if (pTask->fillHistory) {
pTask->taskStatus = TASK_STATUS__WAIT_DOWNSTREAM;
pTask->status.taskStatus = TASK_STATUS__WAIT_DOWNSTREAM;
} else {
pTask->status.taskStatus = TASK_STATUS__RESTORE;
}

if (pTask->taskLevel == TASK_LEVEL__SOURCE) {

@@ -930,14 +583,10 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
}

SReadHandle handle = {
.meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTqReader = 1,
.pStateBackend = pTask->pState,
};
.meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState};

pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
if (pTask->exec.executor == NULL) {
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
if (pTask->exec.pExecutor == NULL) {
return -1;
}

@@ -946,14 +595,12 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
if (pTask->pState == NULL) {
return -1;
}
SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
.pStateBackend = pTask->pState,
};

pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, vgId);
if (pTask->exec.executor == NULL) {
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo);
SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = numOfVgroups, .pStateBackend = pTask->pState};

pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, vgId);
if (pTask->exec.pExecutor == NULL) {
return -1;
}
}

@@ -974,16 +621,20 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
ver1 = info.skmVer;
}

pTask->tbSink.pTSchema =
tBuildTSchema(pTask->tbSink.pSchemaWrapper->pSchema, pTask->tbSink.pSchemaWrapper->nCols, ver1);
if (pTask->tbSink.pTSchema == NULL) {
SSchemaWrapper* pschemaWrapper = pTask->tbSink.pSchemaWrapper;
pTask->tbSink.pTSchema = tBuildTSchema(pschemaWrapper->pSchema, pschemaWrapper->nCols, ver1);
if(pTask->tbSink.pTSchema == NULL) {
return -1;
}
}

if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
}

streamSetupTrigger(pTask);
tqInfo("expand stream task on vg %d, task id %d, child id %d, level %d", vgId, pTask->taskId, pTask->selfChildId,
pTask->taskLevel);
tqInfo("vgId:%d expand stream task, s-task:%s, ver:%" PRId64 " child id:%d, level:%d", vgId, pTask->id.idStr,
pTask->chkInfo.version, pTask->selfChildId, pTask->taskLevel);
return 0;
}
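`tqProcessStreamTaskCheckReq`, in the hunk that follows, answers a downstream-readiness probe: status 1 only if the queried task both exists locally and is in NORMAL state. A standalone sketch of that handshake decision under simplified, assumed types:

```c
#include <stdio.h>

typedef enum { TASK_NORMAL, TASK_WAIT_DOWNSTREAM, TASK_DROPPING } TaskStatus;

typedef struct {
  int        taskId;
  TaskStatus status;
} Task;

/* pTask may be NULL when the task has not been deployed on this node yet */
static int checkReady(const Task *pTask) {
  return (pTask != NULL && pTask->status == TASK_NORMAL) ? 1 : 0;
}

int main(void) {
  Task t = {.taskId = 7, .status = TASK_WAIT_DOWNSTREAM};
  printf("ready:%d\n", checkReady(&t));   /* 0: deployed but not ready */
  printf("ready:%d\n", checkReady(NULL)); /* 0: not built yet          */
  t.status = TASK_NORMAL;
  printf("ready:%d\n", checkReady(&t));   /* 1 */
  return 0;
}
```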
@ -1006,18 +657,24 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
.upstreamNodeId = req.upstreamNodeId,
|
||||
.upstreamTaskId = req.upstreamTaskId,
|
||||
};
|
||||
|
||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
|
||||
if (pTask && atomic_load_8(&pTask->taskStatus) == TASK_STATUS__NORMAL) {
|
||||
rsp.status = 1;
|
||||
if (pTask) {
|
||||
rsp.status = (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__NORMAL) ? 1 : 0;
|
||||
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
||||
|
||||
tqDebug("tq recv task check req(reqId:0x%" PRIx64
|
||||
") %d at node %d task status:%d, check req from task %d at node %d, rsp status %d",
|
||||
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, pTask->status.taskStatus, rsp.upstreamTaskId,
|
||||
rsp.upstreamNodeId, rsp.status);
|
||||
} else {
|
||||
rsp.status = 0;
|
||||
tqDebug("tq recv task check(taskId:%d not built yet) req(reqId:0x%" PRIx64
|
||||
") %d at node %d, check req from task %d at node %d, rsp status %d",
|
||||
taskId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId,
|
||||
rsp.status);
|
||||
}
|
||||
|
||||
if (pTask) streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
||||
|
||||
tqDebug("tq recv task check req(reqId:0x%" PRIx64 ") %d at node %d check req from task %d at node %d, status %d",
|
||||
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||
|
||||
SEncoder encoder;
|
||||
int32_t code;
|
||||
int32_t len;
|
||||
|
@ -1035,13 +692,7 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
tEncodeSStreamTaskCheckRsp(&encoder, &rsp);
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
SRpcMsg rspMsg = {
|
||||
.code = 0,
|
||||
.pCont = buf,
|
||||
.contLen = sizeof(SMsgHead) + len,
|
||||
.info = pMsg->info,
|
||||
};
|
||||
|
||||
SRpcMsg rspMsg = { .code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = pMsg->info };
|
||||
tmsgSendRsp(&rspMsg);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1057,8 +708,8 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, char* msg, int32
|
|||
tDecoderClear(&decoder);
|
||||
return -1;
|
||||
}
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
tqDebug("tq recv task check rsp(reqId:0x%" PRIx64 ") %d at node %d check req from task %d at node %d, status %d",
|
||||
rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||
|
||||
|
@ -1090,17 +741,20 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
|
|||
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
|
||||
code = tDecodeSStreamTask(&decoder, pTask);
|
||||
code = tDecodeStreamTask(&decoder, pTask);
|
||||
if (code < 0) {
|
||||
tDecoderClear(&decoder);
|
||||
taosMemoryFree(pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
// 2.save task
|
||||
code = streamMetaAddTask(pTq->pStreamMeta, sversion, pTask);
|
||||
code = streamMetaAddDeployedTask(pTq->pStreamMeta, sversion, pTask);
|
||||
if (code < 0) {
|
||||
tqError("vgId:%d failed to add s-task:%s, total:%d", TD_VID(pTq->pVnode), pTask->id.idStr,
|
||||
streamMetaGetNumOfTasks(pTq->pStreamMeta));
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1109,6 +763,8 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
|
|||
streamTaskCheckDownstream(pTask, sversion);
|
||||
}
|
||||
|
||||
tqDebug("vgId:%d s-task:%s is deployed and add meta from mnd, status:%d, total:%d", TD_VID(pTq->pVnode),
|
||||
pTask->id.idStr, pTask->status.taskStatus, streamMetaGetNumOfTasks(pTq->pStreamMeta));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -1124,7 +780,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
   }

   // check param
-  int64_t fillVer1 = pTask->startVer;
+  int64_t fillVer1 = pTask->chkInfo.version;
   if (fillVer1 <= 0) {
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     return -1;

@@ -1133,7 +789,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {
   // do recovery step 1
   streamSourceRecoverScanStep1(pTask);

-  if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
+  if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     return 0;
   }

@@ -1148,7 +804,7 @@ int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg) {

   streamMetaReleaseTask(pTq->pStreamMeta, pTask);

-  if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
+  if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
     return 0;
   }

@@ -1190,7 +846,7 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t sversion, char* msg, int32_t
     return -1;
   }

-  if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
+  if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     return 0;
   }

@@ -1309,7 +965,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
     SStreamTask* pTask = *(SStreamTask**)pIter;
     if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;

-    qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);
+    qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->id.taskId, ver);

     if (!failed) {
       SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);

@@ -1318,8 +974,8 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
       pRefBlock->dataRef = pRef;
       atomic_add_fetch_32(pRefBlock->dataRef, 1);

-      if (tAppendDataForStream(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
-        qError("stream task input del failed, task id %d", pTask->taskId);
+      if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
+        qError("stream task input del failed, task id %d", pTask->id.taskId);

         atomic_sub_fetch_32(pRef, 1);
         taosFreeQitem(pRefBlock);

@@ -1327,7 +983,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
       }

       if (streamSchedExec(pTask) < 0) {
-        qError("stream task launch failed, task id %d", pTask->taskId);
+        qError("stream task launch failed, task id %d", pTask->id.taskId);
         continue;
       }

@@ -1353,13 +1009,13 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
     taosArrayPush(pStreamBlock->blocks, &block);

     if (!failed) {
-      if (tAppendDataForStream(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
-        qError("stream task input del failed, task id %d", pTask->taskId);
+      if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
+        qError("stream task input del failed, task id %d", pTask->id.taskId);
         continue;
       }

       if (streamSchedExec(pTask) < 0) {
-        qError("stream task launch failed, task id %d", pTask->taskId);
+        qError("stream task launch failed, task id %d", pTask->id.taskId);
         continue;
       }
     } else {
@@ -1372,17 +1028,32 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
   return 0;
 }

-int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
-  void* pIter = NULL;
-  bool  succ = true;
-
-  SStreamDataSubmit2* pSubmit = streamDataSubmitNew(submit);
+static int32_t addSubmitBlockNLaunchTask(STqOffsetStore* pOffsetStore, SStreamTask* pTask, SStreamDataSubmit2* pSubmit,
+                                         const char* key, int64_t ver) {
+  doSaveTaskOffset(pOffsetStore, key, ver);
+  int32_t code = tqAddInputBlockNLaunchTask(pTask, (SStreamQueueItem*)pSubmit, ver);
+
+  // remove the offset, if all functions are completed successfully.
+  if (code == TSDB_CODE_SUCCESS) {
+    tqOffsetDelete(pOffsetStore, key);
+  }
+
+  return code;
+}
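// note: in the new version below the entire body of tqProcessSubmitReq is compiled out
// with #if 0; submit blocks are no longer pushed here but pulled from the WAL by the
// scan path (tqStartStreamTasks -> tqStreamTasksScanWal).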
+int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
+#if 0
+  void* pIter = NULL;
+  SStreamDataSubmit2* pSubmit = streamDataSubmitNew(submit, STREAM_INPUT__DATA_SUBMIT);
   if (pSubmit == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     tqError("failed to create data submit for stream since out of memory");
-    succ = false;
+    saveOffsetForAllTasks(pTq, submit.ver);
+    return -1;
   }

+  SArray* pInputQueueFullTasks = taosArrayInit(4, POINTER_BYTES);
+
   while (1) {
     pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
     if (pIter == NULL) {

@@ -1394,47 +1065,75 @@ int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
       continue;
     }

-    if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
-      tqDebug("stream task:%d skip push data, not ready for processing, status %d", pTask->taskId, pTask->taskStatus);
+    if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
+      tqDebug("stream task:%d skip push data, not ready for processing, status %d", pTask->id.taskId,
+              pTask->status.taskStatus);
       continue;
     }

-    tqDebug("data submit enqueue stream task:%d, ver: %" PRId64, pTask->taskId, submit.ver);
-    if (succ) {
-      int32_t code = tAppendDataForStream(pTask, (SStreamQueueItem*)pSubmit);
-      if (code < 0) {
-        // let's handle the back pressure
+    // check if offset value exists
+    char key[128] = {0};
+    createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);

-        tqError("stream task:%d failed to put into queue for, too many", pTask->taskId);
-        continue;
+    if (tInputQueueIsFull(pTask)) {
+      STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
+
+      int64_t ver = submit.ver;
+      if (pOffset == NULL) {
+        doSaveTaskOffset(pTq->pOffsetStore, key, submit.ver);
+      } else {
+        ver = pOffset->val.version;
       }

-      if (streamSchedExec(pTask) < 0) {
-        tqError("stream task:%d launch failed, code:%s", pTask->taskId, tstrerror(terrno));
-        continue;
-      }
-    } else {
-      streamTaskInputFail(pTask);
+      tqDebug("s-task:%s input queue is full, discard submit block, ver:%" PRId64, pTask->id.idStr, ver);
+      taosArrayPush(pInputQueueFullTasks, &pTask);
+      continue;
     }

+    // check if offset value exists
+    STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
+    ASSERT(pOffset == NULL);
+
+    addSubmitBlockNLaunchTask(pTq->pOffsetStore, pTask, pSubmit, key, submit.ver);
   }

-  if (pSubmit != NULL) {
-    streamDataSubmitDestroy(pSubmit);
-    taosFreeQitem(pSubmit);
-  }
+  streamDataSubmitDestroy(pSubmit);
+  taosFreeQitem(pSubmit);
+#endif

-  return succ ? 0 : -1;
+  tqStartStreamTasks(pTq);
+  return 0;
 }
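// note: WAL_READ_TASKS_ID is used as a sentinel task id below; a run request carrying it
// triggers a WAL scan for all source tasks in the vnode instead of running a single task.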
 int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
   SStreamTaskRunReq* pReq = pMsg->pCont;
-  int32_t taskId = pReq->taskId;
-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
-  if (pTask) {
-    streamProcessRunReq(pTask);
+
+  int32_t taskId = pReq->taskId;
+  int32_t vgId = TD_VID(pTq->pVnode);
+
+  if (taskId == WAL_READ_TASKS_ID) {  // all tasks are extracted submit data from the wal
+    tqStreamTasksScanWal(pTq);
+    return 0;
+  }
+
+  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
+  if (pTask != NULL) {
+    if (pTask->status.taskStatus == TASK_STATUS__NORMAL) {
+      tqDebug("vgId:%d s-task:%s start to process run req", vgId, pTask->id.idStr);
+      streamProcessRunReq(pTask);
+    } else if (pTask->status.taskStatus == TASK_STATUS__RESTORE) {
+      tqDebug("vgId:%d s-task:%s start to process block from wal, last chk point:%" PRId64, vgId,
+              pTask->id.idStr, pTask->chkInfo.version);
+      streamProcessRunReq(pTask);
+    } else {
+      tqDebug("vgId:%d s-task:%s ignore run req since not in ready state", vgId, pTask->id.idStr);
+    }
+
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
+    tqStartStreamTasks(pTq);
     return 0;
   } else {
+    tqError("vgId:%d failed to found s-task, taskId:%d", vgId, taskId);
     return -1;
   }
 }
@@ -1447,14 +1146,10 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
   SDecoder decoder;
   tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
   tDecodeStreamDispatchReq(&decoder, &req);
-  int32_t taskId = req.taskId;

-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
+  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
   if (pTask) {
-    SRpcMsg rsp = {
-        .info = pMsg->info,
-        .code = 0,
-    };
+    SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
     streamProcessDispatchReq(pTask, &req, &rsp, exec);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     return 0;

@@ -1467,7 +1162,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
   SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t taskId = ntohl(pRsp->upstreamTaskId);
   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
-  tqDebug("recv dispatch rsp, code: %x", pMsg->code);
+  tqDebug("recv dispatch rsp, code:%x", pMsg->code);
   if (pTask) {
     streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);

@@ -1495,10 +1190,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
   int32_t taskId = req.dstTaskId;
   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
   if (pTask) {
-    SRpcMsg rsp = {
-        .info = pMsg->info,
-        .code = 0,
-    };
+    SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
     streamProcessRetrieveReq(pTask, &req, &rsp);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     tDeleteStreamRetrieveReq(&req);

@@ -1534,10 +1226,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {

   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
   if (pTask) {
-    SRpcMsg rsp = {
-        .info = pMsg->info,
-        .code = 0,
-    };
+    SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
     streamProcessDispatchReq(pTask, &req, &rsp, false);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     rpcFreeCont(pMsg->pCont);

@@ -1554,10 +1243,7 @@ FAIL:

   SMsgHead* pRspHead = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp));
   if (pRspHead == NULL) {
-    SRpcMsg rsp = {
-        .code = TSDB_CODE_OUT_OF_MEMORY,
-        .info = pMsg->info,
-    };
+    SRpcMsg rsp = { .code = TSDB_CODE_OUT_OF_MEMORY, .info = pMsg->info };
     tqDebug("send dispatch error rsp, code: %x", code);
     tmsgSendRsp(&rsp);
     rpcFreeCont(pMsg->pCont);

@@ -1575,11 +1261,7 @@ FAIL:
   pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;

-  SRpcMsg rsp = {
-      .code = code,
-      .info = pMsg->info,
-      .contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp),
-      .pCont = pRspHead,
-  };
+  SRpcMsg rsp = {
+      .code = code, .info = pMsg->info, .contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp), .pCont = pRspHead};
   tqDebug("send dispatch error rsp, code: %x", code);
   tmsgSendRsp(&rsp);
   rpcFreeCont(pMsg->pCont);

@@ -1588,3 +1270,40 @@ FAIL:
 }

+int32_t tqCheckLogInWal(STQ* pTq, int64_t sversion) { return sversion <= pTq->walLogLastVer; }
+
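// note: pMeta->walScan below acts as a pending-scan counter guarded by pMeta->lock, so
// only one WAL-read run request is queued at a time; extra requests just bump the counter.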
+int32_t tqStartStreamTasks(STQ* pTq) {
+  int32_t vgId = TD_VID(pTq->pVnode);
+
+  SStreamMeta* pMeta = pTq->pStreamMeta;
+  taosWLockLatch(&pMeta->lock);
+  pMeta->walScan += 1;
+
+  if (pMeta->walScan > 1) {
+    tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->walScan);
+    taosWUnLockLatch(&pTq->pStreamMeta->lock);
+    return 0;
+  }
+
+  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
+  if (pRunReq == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tqError("vgId:%d failed restore stream tasks, code:%s", vgId, terrstr(terrno));
+    taosWUnLockLatch(&pTq->pStreamMeta->lock);
+    return -1;
+  }
+
+  int32_t numOfTasks = taosHashGetSize(pTq->pStreamMeta->pTasks);
+
+  tqInfo("vgId:%d start wal scan stream tasks, tasks:%d", vgId, numOfTasks);
+  initOffsetForAllRestoreTasks(pTq);
+
+  pRunReq->head.vgId = vgId;
+  pRunReq->streamId = 0;
+  pRunReq->taskId = WAL_READ_TASKS_ID;
+
+  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
+  tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
+  taosWUnLockLatch(&pTq->pStreamMeta->lock);
+
+  return 0;
+}
@@ -16,10 +16,13 @@
 #include "tq.h"

 int tqCommit(STQ* pTq) {
+#if 0
   // stream meta commit does not be aligned to the vnode commit
   if (streamMetaCommit(pTq->pStreamMeta) < 0) {
     tqError("vgId:%d, failed to commit stream meta since %s", TD_VID(pTq->pVnode), terrstr());
     return -1;
   }
+#endif

   return tqOffsetCommitFile(pTq->pOffsetStore);
 }
@@ -320,15 +320,15 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
         code = -1;
         goto end;
       }
-      handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
-      if (handle.execHandle.pExecReader == NULL) {
+      handle.execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
+      if (handle.execHandle.pTqReader == NULL) {
         tqError("cannot extract exec reader for %s", handle.subKey);
         code = -1;
         goto end;
       }
     } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
       handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
-      handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+      handle.execHandle.pTqReader = tqOpenReader(pTq->pVnode);

       buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
                        (SSnapContext**)(&reader.sContext));

@@ -343,8 +343,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
         int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
         tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
       }
-      handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
-      tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList);
+      handle.execHandle.pTqReader = tqOpenReader(pTq->pVnode);
+      tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList);
       taosArrayDestroy(tbUidList);

       buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
@@ -128,31 +128,35 @@ int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey) {
 }

 int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
-  if (!pStore->needCommit) return 0;
+  if (!pStore->needCommit) {
+    return 0;
+  }
+
   // TODO file name should be with a newer version
   char*     fname = tqOffsetBuildFName(pStore->pTq->path, 0);
   TdFilePtr pFile = taosOpenFile(fname, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
   if (pFile == NULL) {
     terrno = TAOS_SYSTEM_ERROR(errno);

-    int32_t     err = terrno;
-    const char* errStr = tstrerror(err);
-    int32_t     sysErr = errno;
-    const char* sysErrStr = strerror(errno);
-    tqError("vgId:%d, cannot open file %s when commit offset since %s", pStore->pTq->pVnode->config.vgId, fname,
-            sysErrStr);
+    const char* err = strerror(errno);
+    tqError("vgId:%d, failed to open offset file %s, since %s", TD_VID(pStore->pTq->pVnode), fname, err);
     taosMemoryFree(fname);
     return -1;
   }

   taosMemoryFree(fname);
+
   void* pIter = NULL;
   while (1) {
     pIter = taosHashIterate(pStore->pHash, pIter);
-    if (pIter == NULL) break;
+    if (pIter == NULL) {
+      break;
+    }
+
     STqOffset* pOffset = (STqOffset*)pIter;
     int32_t    bodyLen;
     int32_t    code;
     tEncodeSize(tEncodeSTqOffset, pOffset, bodyLen, code);
+
     if (code < 0) {
       taosHashCancelIterate(pStore->pHash, pIter);
       return -1;

@@ -166,6 +170,7 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
     SEncoder encoder;
     tEncoderInit(&encoder, abuf, bodyLen);
     tEncodeSTqOffset(&encoder, pOffset);
+
     // write file
     int64_t writeLen;
     if ((writeLen = taosWriteFile(pFile, buf, totLen)) != totLen) {

@@ -174,8 +179,10 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
       taosMemoryFree(buf);
       return -1;
     }
+
     taosMemoryFree(buf);
   }
+
   // close and rename file
   taosCloseFile(&pFile);
   pStore->needCommit = 0;
@@ -323,15 +323,22 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
     taosWUnLockLatch(&pTq->lock);
   }

-  // push data for stream processing
-  if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode)) {
-    tqDebug("handle submit, restore:%d, size:%d", pTq->pVnode->restored, (int)taosHashGetSize(pTq->pStreamMeta->pTasks));
-
+  // push data for stream processing:
+  // 1. the vnode has already been restored.
+  // 2. the vnode should be the leader.
+  // 3. the stream is not suspended yet.
+  if (!tsDisableStream && vnodeIsRoleLeader(pTq->pVnode) && pTq->pVnode->restored) {
+    if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) {
+      return 0;
+    }
+
     if (msgType == TDMT_VND_SUBMIT) {
+#if 0
       void* data = taosMemoryMalloc(len);
       if (data == NULL) {
         // todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then retry
         terrno = TSDB_CODE_OUT_OF_MEMORY;
         tqError("vgId:%d, failed to copy submit data for stream processing, since out of memory", vgId);
         return -1;

@@ -340,7 +347,10 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
       memcpy(data, pReq, len);
       SPackedData submit = {.msgStr = data, .msgLen = len, .ver = ver};

-      tqDebug("tq copy write msg %p %d %" PRId64 " from %p", data, len, ver, pReq);
+      tqDebug("vgId:%d tq copy submit msg:%p len:%d ver:%" PRId64 " from %p for stream", vgId, data, len, ver, pReq);
       tqProcessSubmitReq(pTq, submit);
+#endif
+      SPackedData submit = {0};
+      tqProcessSubmitReq(pTq, submit);
     }

@@ -352,7 +362,7 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
   return 0;
 }

-int32_t tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
+int32_t tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
                             int32_t type) {
   uint64_t consumerId = pRequest->consumerId;
   int32_t  vgId = TD_VID(pTq->pVnode);

@@ -389,7 +399,7 @@ int32_t tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest,
   return 0;
 }

-int32_t tqUnregisterPushEntry(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer) {
+int32_t tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer) {
   int32_t        vgId = TD_VID(pTq->pVnode);
   STqPushEntry** pEntry = taosHashGet(pTq->pPushMgr, pKey, keyLen);
@@ -113,7 +113,7 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) {
   }

   SMetaReader mr = {0};
-  metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0);
+  metaReaderInit(&mr, pHandle->execHandle.pTqReader->pVnodeMeta, 0);

   if (metaGetTableEntryByName(&mr, req.tbName) < 0) {
     metaReaderClear(&mr);

@@ -262,8 +262,6 @@ STqReader* tqOpenReader(SVnode* pVnode) {
   }

   pReader->pVnodeMeta = pVnode->pMeta;
-  /*pReader->pMsg = NULL;*/
-  // pReader->ver = -1;
   pReader->pColIdList = NULL;
   pReader->cachedSchemaVer = 0;
   pReader->cachedSchemaSuid = 0;

@@ -298,7 +296,29 @@ int32_t tqSeekVer(STqReader* pReader, int64_t ver, const char* id) {
   if (walReadSeekVer(pReader->pWalReader, ver) < 0) {
     return -1;
   }
-  tqDebug("tmq poll: wal reader seek to ver success ver:%"PRId64" %s", ver, id);
+  tqDebug("wal reader seek to ver:%"PRId64" %s", ver, id);
   return 0;
 }
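// note: extractSubmitMsgFromWal below copies the submit body out of the WAL page into a
// freshly allocated buffer, so the returned SPackedData stays valid after the reader moves on.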
+int32_t extractSubmitMsgFromWal(SWalReader* pReader, SPackedData* pPackedData) {
+  if (walNextValidMsg(pReader) < 0) {
+    return -1;
+  }
+
+  void*   pBody = POINTER_SHIFT(pReader->pHead->head.body, sizeof(SSubmitReq2Msg));
+  int32_t len = pReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
+  int64_t ver = pReader->pHead->head.version;
+
+  void* data = taosMemoryMalloc(len);
+  if (data == NULL) {
+    // todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then retry
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tqError("vgId:%d, failed to copy submit data for stream processing, since out of memory", 0);
+    return -1;
+  }
+
+  memcpy(data, pBody, len);
+  *pPackedData = (SPackedData){.ver = ver, .msgLen = len, .msgStr = data};
+  return 0;
+}

@@ -309,26 +329,28 @@ void tqNextBlock(STqReader* pReader, SFetchRet* ret) {
       ret->fetchType = FETCH_TYPE__NONE;
       return;
     }
-    void* body = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
+
+    void*   pBody = POINTER_SHIFT(pReader->pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
     int32_t bodyLen = pReader->pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
     int64_t ver = pReader->pWalReader->pHead->head.version;
-    tqReaderSetSubmitReq2(pReader, body, bodyLen, ver);
+    tqReaderSetSubmitMsg(pReader, pBody, bodyLen, ver);
   }

-  while (tqNextDataBlock2(pReader)) {
+  while (tqNextDataBlock(pReader)) {
     memset(&ret->data, 0, sizeof(SSDataBlock));
     int32_t code = tqRetrieveDataBlock2(&ret->data, pReader, NULL);
     if (code != 0 || ret->data.info.rows == 0) {
       continue;
     }

     ret->fetchType = FETCH_TYPE__DATA;
     return;
   }
 }

-int32_t tqReaderSetSubmitReq2(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) {
+int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, int64_t ver) {
   pReader->msg2.msgStr = msgStr;
   pReader->msg2.msgLen = msgLen;
   pReader->msg2.ver = ver;

@@ -345,7 +367,7 @@ int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen,
   return 0;
 }

-bool tqNextDataBlock2(STqReader* pReader) {
+bool tqNextDataBlock(STqReader* pReader) {
   if (pReader->msg2.msgStr == NULL) {
     return false;
   }

@@ -354,13 +376,20 @@ bool tqNextDataBlock(STqReader* pReader) {
   while (pReader->nextBlk < blockSz) {
     tqDebug("tq reader next data block %p, %d %" PRId64 " %d", pReader->msg2.msgStr, pReader->msg2.msgLen,
             pReader->msg2.ver, pReader->nextBlk);
+
     SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
-    if (pReader->tbIdHash == NULL) return true;
+    if (pReader->tbIdHash == NULL) {
+      return true;
+    }

     void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
     if (ret != NULL) {
+      tqDebug("tq reader block found, ver:%"PRId64", uid:%"PRId64, pReader->msg2.ver, pSubmitTbData->uid);
       return true;
+    } else {
+      tqDebug("tq reader discard block, uid:%"PRId64", continue", pSubmitTbData->uid);
     }

     pReader->nextBlk++;
   }

@@ -427,7 +456,10 @@ int32_t tqRetrieveDataBlock2(SSDataBlock* pBlock, STqReader* pReader, SSubmitTbD
   SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
   pReader->nextBlk++;

-  if (pSubmitTbDataRet) *pSubmitTbDataRet = pSubmitTbData;
+  if (pSubmitTbDataRet) {
+    *pSubmitTbDataRet = pSubmitTbData;
+  }
+
   int32_t sversion = pSubmitTbData->sver;
   int64_t suid = pSubmitTbData->suid;
   int64_t uid = pSubmitTbData->uid;

@@ -900,7 +932,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
   return 0;
 }

-int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) {
+int tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) {
   if (pReader->tbIdHash == NULL) {
     pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
     if (pReader->tbIdHash == NULL) {

@@ -909,8 +941,9 @@ int tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) {
     }
   }

-  for (int i = 0; i < taosArrayGetSize(tbUidList); i++) {
-    int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i);
+  int32_t numOfTables = taosArrayGetSize(pTableUidList);
+  for (int i = 0; i < numOfTables; i++) {
+    int64_t* pKey = (int64_t*)taosArrayGet(pTableUidList, i);
     taosHashPut(pReader->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
   }

@@ -926,30 +959,34 @@ int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
   return 0;
 }

+// todo update the table list in wal reader
 int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
-  void* pIter = NULL;
+  void*   pIter = NULL;
+  int32_t vgId = TD_VID(pTq->pVnode);

+  // update the table list for each consumer handle
   while (1) {
     pIter = taosHashIterate(pTq->pHandle, pIter);
     if (pIter == NULL) {
       break;
     }

-    STqHandle* pExec = (STqHandle*)pIter;
-    if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
-      int32_t code = qUpdateQualifiedTableId(pExec->execHandle.task, tbUidList, isAdd);
+    STqHandle* pTqHandle = (STqHandle*)pIter;
+    if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+      int32_t code = qUpdateTableListForStreamScanner(pTqHandle->execHandle.task, tbUidList, isAdd);
       if (code != 0) {
-        tqError("update qualified table error for %s", pExec->subKey);
+        tqError("update qualified table error for %s", pTqHandle->subKey);
         continue;
       }
-    } else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+    } else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
       if (!isAdd) {
         int32_t sz = taosArrayGetSize(tbUidList);
         for (int32_t i = 0; i < sz; i++) {
           int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
-          taosHashPut(pExec->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
+          taosHashPut(pTqHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
         }
       }
-    } else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+    } else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
       if (isAdd) {
         SArray*     qa = taosArrayInit(4, sizeof(tb_uid_t));
         SMetaReader mr = {0};

@@ -964,35 +1001,43 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
         }

         tDecoderClear(&mr.coder);

-        if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pExec->execHandle.execTb.suid) {
+        if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pTqHandle->execHandle.execTb.suid) {
           tqDebug("table uid %" PRId64 " does not add to tq handle", *id);
           continue;
         }

         tqDebug("table uid %" PRId64 " add to tq handle", *id);
         taosArrayPush(qa, id);
       }

       metaReaderClear(&mr);
       if (taosArrayGetSize(qa) > 0) {
-        tqReaderAddTbUidList(pExec->execHandle.pExecReader, qa);
+        tqReaderAddTbUidList(pTqHandle->execHandle.pTqReader, qa);
       }

       taosArrayDestroy(qa);
     } else {
-        tqReaderRemoveTbUidList(pExec->execHandle.pExecReader, tbUidList);
+        tqReaderRemoveTbUidList(pTqHandle->execHandle.pTqReader, tbUidList);
       }
     }
   }

+  // update the table list handle for each stream scanner/wal reader
   while (1) {
     pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
-    if (pIter == NULL) break;
+    if (pIter == NULL) {
+      break;
+    }

     SStreamTask* pTask = *(SStreamTask**)pIter;
     if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
-      int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd);
+      int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd);
       if (code != 0) {
-        tqError("update qualified table error for stream task %d", pTask->taskId);
+        tqError("vgId:%d, s-task:%s update qualified table error for stream task", vgId, pTask->id.idStr);
         continue;
       }
     }
   }

   return 0;
 }
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tq.h"
+
+static int32_t streamTaskReplayWal(SStreamMeta* pStreamMeta, STqOffsetStore* pOffsetStore, bool* pScanIdle);
+static int32_t transferToNormalTask(SStreamMeta* pStreamMeta, SArray* pTaskList);
+
+// this function should be executed by stream threads.
+// there is a case that the WAL increases more fast than the restore procedure, and this restore procedure
+// will not stop eventually.
+int tqStreamTasksScanWal(STQ* pTq) {
+  int32_t      vgId = TD_VID(pTq->pVnode);
+  SStreamMeta* pMeta = pTq->pStreamMeta;
+  int64_t      st = taosGetTimestampMs();
+
+  while (1) {
+    tqInfo("vgId:%d continue check if data in wal are available", vgId);
+
+    // check all restore tasks
+    bool allFull = true;
+    streamTaskReplayWal(pTq->pStreamMeta, pTq->pOffsetStore, &allFull);
+
+    int32_t times = 0;
+
+    if (allFull) {
+      taosWLockLatch(&pMeta->lock);
+      pMeta->walScan -= 1;
+      times = pMeta->walScan;
+
+      if (pMeta->walScan <= 0) {
+        taosWUnLockLatch(&pMeta->lock);
+        break;
+      }
+
+      taosWUnLockLatch(&pMeta->lock);
+      tqInfo("vgId:%d scan wal for stream tasks for %d times", vgId, times);
+    }
+  }
+
+  double el = (taosGetTimestampMs() - st) / 1000.0;
+  tqInfo("vgId:%d scan wal for stream tasks completed, elapsed time:%.2f sec", vgId, el);
+
+  // restore wal scan flag
+  // atomic_store_8(&pTq->pStreamMeta->walScan, 0);
+  return 0;
+}
+
+//int32_t transferToNormalTask(SStreamMeta* pStreamMeta, SArray* pTaskList) {
+//  int32_t numOfTask = taosArrayGetSize(pTaskList);
+//  if (numOfTask <= 0) {
+//    return TSDB_CODE_SUCCESS;
+//  }
+//
+//  // todo: add lock
+//  for (int32_t i = 0; i < numOfTask; ++i) {
+//    SStreamTask* pTask = taosArrayGetP(pTaskList, i);
+//    tqDebug("vgId:%d transfer s-task:%s state restore -> ready, checkpoint:%" PRId64 " checkpoint id:%" PRId64,
+//            pStreamMeta->vgId, pTask->id.idStr, pTask->chkInfo.version, pTask->chkInfo.id);
+//    taosHashRemove(pStreamMeta->pWalReadTasks, &pTask->id.taskId, sizeof(pTask->id.taskId));
+//
+//    // NOTE: do not change the following order
+//    atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__NORMAL);
+//    taosHashPut(pStreamMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId), &pTask, POINTER_BYTES);
+//  }
+//
+//  return TSDB_CODE_SUCCESS;
+//}
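// note: one pass over all source tasks; *pScanIdle appears to end up true only when no
// task was able to consume new WAL data in this round, which lets the caller stop scanning.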
+int32_t streamTaskReplayWal(SStreamMeta* pStreamMeta, STqOffsetStore* pOffsetStore, bool* pScanIdle) {
+  void*   pIter = NULL;
+  int32_t vgId = pStreamMeta->vgId;
+
+  *pScanIdle = true;
+
+  bool allWalChecked = true;
+  tqDebug("vgId:%d start to check wal to extract new submit block", vgId);
+
+  while (1) {
+    pIter = taosHashIterate(pStreamMeta->pTasks, pIter);
+    if (pIter == NULL) {
+      break;
+    }
+
+    SStreamTask* pTask = *(SStreamTask**)pIter;
+    if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
+      continue;
+    }
+
+    if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE ||
+        pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
+      tqDebug("s-task:%s skip push data, not ready for processing, status %d", pTask->id.idStr,
+              pTask->status.taskStatus);
+      continue;
+    }
+
+    // check if offset value exists
+    char key[128] = {0};
+    createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
+
+    if (tInputQueueIsFull(pTask)) {
+      tqDebug("vgId:%d s-task:%s input queue is full, do nothing", vgId, pTask->id.idStr);
+      continue;
+    }
+
+    *pScanIdle = false;
+
+    // check if offset value exists
+    STqOffset* pOffset = tqOffsetRead(pOffsetStore, key);
+    ASSERT(pOffset != NULL);
+
+    // seek the stored version and extract data from WAL
+    int32_t code = walReadSeekVer(pTask->exec.pWalReader, pOffset->val.version);
+    if (code != TSDB_CODE_SUCCESS) {  // no data in wal, quit
+      continue;
+    }
+
+    // append the data for the stream
+    tqDebug("vgId:%d wal reader seek to ver:%" PRId64 " %s", vgId, pOffset->val.version, pTask->id.idStr);
+
+    SPackedData packData = {0};
+    code = extractSubmitMsgFromWal(pTask->exec.pWalReader, &packData);
+    if (code != TSDB_CODE_SUCCESS) {  // failed, continue
+      continue;
+    }
+
+    SStreamDataSubmit2* p = streamDataSubmitNew(packData, STREAM_INPUT__DATA_SUBMIT);
+    if (p == NULL) {
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      tqError("%s failed to create data submit for stream since out of memory", pTask->id.idStr);
+      continue;
+    }
+
+    allWalChecked = false;
+
+    tqDebug("s-task:%s submit data extracted from WAL", pTask->id.idStr);
+    code = tqAddInputBlockNLaunchTask(pTask, (SStreamQueueItem*)p, packData.ver);
+    if (code == TSDB_CODE_SUCCESS) {
+      pOffset->val.version = walReaderGetCurrentVer(pTask->exec.pWalReader);
+      tqDebug("s-task:%s set the ver:%" PRId64 " from WALReader after extract block from WAL", pTask->id.idStr,
+              pOffset->val.version);
+    } else {
+      // do nothing
+    }
+
+    streamDataSubmitDestroy(p);
+    taosFreeQitem(p);
+  }
+
+  if (allWalChecked) {
+    *pScanIdle = true;
+  }
+  return 0;
+}
@@ -38,7 +38,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t
 }

 static int32_t tqAddBlockSchemaToRsp(const STqExecHandle* pExec, STaosxRsp* pRsp) {
-  SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pExecReader->pSchemaWrapper);
+  SSchemaWrapper* pSW = tCloneSSchemaWrapper(pExec->pTqReader->pSchemaWrapper);
   if (pSW == NULL) {
     return -1;
   }

@@ -137,7 +137,7 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
     if (pDataBlock != NULL && pDataBlock->info.rows > 0) {
       if (pRsp->withTbName) {
         if (pOffset->type == TMQ_OFFSET__LOG) {
-          int64_t uid = pExec->pExecReader->lastBlkUid;
+          int64_t uid = pExec->pTqReader->lastBlkUid;
           if (tqAddTbNameToRsp(pTq, uid, pRsp, 1) < 0) {
             continue;
           }

@@ -203,9 +203,9 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
   SArray* pSchemas = taosArrayInit(0, sizeof(void*));

   if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
-    STqReader* pReader = pExec->pExecReader;
-    tqReaderSetSubmitReq2(pReader, submit.msgStr, submit.msgLen, submit.ver);
-    while (tqNextDataBlock2(pReader)) {
+    STqReader* pReader = pExec->pTqReader;
+    tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
+    while (tqNextDataBlock(pReader)) {
       taosArrayClear(pBlocks);
       taosArrayClear(pSchemas);
       SSubmitTbData* pSubmitTbDataRet = NULL;

@@ -213,7 +213,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
         if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
       }
       if (pRsp->withTbName) {
-        int64_t uid = pExec->pExecReader->lastBlkUid;
+        int64_t uid = pExec->pTqReader->lastBlkUid;
         if (tqAddTbNameToRsp(pTq, uid, pRsp, taosArrayGetSize(pBlocks)) < 0) {
           taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
           taosArrayDestroyP(pSchemas, (FDelete)tDeleteSSchemaWrapper);

@@ -262,8 +262,8 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
       }
     }
   } else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
-    STqReader* pReader = pExec->pExecReader;
-    tqReaderSetSubmitReq2(pReader, submit.msgStr, submit.msgLen, submit.ver);
+    STqReader* pReader = pExec->pTqReader;
+    tqReaderSetSubmitMsg(pReader, submit.msgStr, submit.msgLen, submit.ver);
     while (tqNextDataBlockFilterOut2(pReader, pExec->execDb.pFilterOutTbUid)) {
       taosArrayClear(pBlocks);
       taosArrayClear(pSchemas);

@@ -272,7 +272,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
         if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
       }
       if (pRsp->withTbName) {
-        int64_t uid = pExec->pExecReader->lastBlkUid;
+        int64_t uid = pExec->pTqReader->lastBlkUid;
         if (tqAddTbNameToRsp(pTq, uid, pRsp, taosArrayGetSize(pBlocks)) < 0) {
           taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
           taosArrayDestroyP(pSchemas, (FDelete)tDeleteSSchemaWrapper);

@@ -87,7 +87,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
     return;
   }

-  tqDebug("vgId:%d, task %d write into table, block num: %d", TD_VID(pVnode), pTask->taskId, blockSz);
+  tqDebug("vgId:%d, s-task:%s write into table, block num: %d", TD_VID(pVnode), pTask->id.idStr, blockSz);
   for (int32_t i = 0; i < blockSz; i++) {
     bool         createTb = true;
     SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);

@@ -382,7 +382,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*

   int32_t blockSz = taosArrayGetSize(pBlocks);

-  tqDebug("vgId:%d, task %d write into table, block num: %d", TD_VID(pVnode), pTask->taskId, blockSz);
+  tqDebug("vgId:%d, s-task:%s write results blocks:%d into table", TD_VID(pVnode), pTask->id.idStr, blockSz);

   void*   pBuf = NULL;
   SArray* tagArray = NULL;

@@ -475,11 +475,9 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
           }
           for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
             SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);
-            STagVal tagVal = {
-                .cid = pTSchema->numOfCols + step,
-                .type = pTagData->info.type,
-            };
-            void* pData = colDataGetData(pTagData, rowId);
+
+            STagVal tagVal = {.cid = pTSchema->numOfCols + step, .type = pTagData->info.type};
+            void*   pData = colDataGetData(pTagData, rowId);
             if (colDataIsNull_s(pTagData, rowId)) {
               continue;
             } else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tq.h"
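// note: judging from how the macro is used in tqExtractDataForMq, the offset reset types
// (TMQ_OFFSET__RESET_EARLIEAST/LATEST/NONE) appear to be encoded as negative values.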
+#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
+
+static int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp);
+
+// stream_task:stream_id:task_id
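// note: the key is "stream_task:" followed by the two ids encoded by tintToHex; assuming
// tintToHex produces bare hex digits, streamId 0xab / taskId 0x3 would presumably yield
// "stream_task:ab:3".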
+void createStreamTaskOffsetKey(char* dst, uint64_t streamId, uint32_t taskId) {
+  int32_t n = 12;
+  char*   p = dst;
+
+  memcpy(p, "stream_task:", n);
+  p += n;
+
+  int32_t inc = tintToHex(streamId, p);
+  p += inc;
+
+  *(p++) = ':';
+  tintToHex(taskId, p);
+}
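// note: a two-step helper, enqueue the block into the task's input queue, then schedule
// the task for execution; failure of either step is reported as -1.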
+int32_t tqAddInputBlockNLaunchTask(SStreamTask* pTask, SStreamQueueItem* pQueueItem, int64_t ver) {
+  int32_t code = tAppendDataToInputQueue(pTask, pQueueItem);
+  if (code < 0) {
+    tqError("s-task:%s failed to put into queue, too many, next start ver:%" PRId64, pTask->id.idStr, ver);
+    return -1;
+  }
+
+  if (streamSchedExec(pTask) < 0) {
+    tqError("stream task:%d failed to be launched, code:%s", pTask->id.taskId, tstrerror(terrno));
+    return -1;
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
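// note: seeds an offset entry for every ready source task that does not have one yet,
// starting from the task's checkpointed version, so the WAL replay knows where to begin.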
+void initOffsetForAllRestoreTasks(STQ* pTq) {
+  void* pIter = NULL;
+
+  while (1) {
+    pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
+    if (pIter == NULL) {
+      break;
+    }
+
+    SStreamTask* pTask = *(SStreamTask**)pIter;
+    if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
+      continue;
+    }
+
+    if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
+      tqDebug("s-task:%s skip push data, since not ready, status %d", pTask->id.idStr, pTask->status.taskStatus);
+      continue;
+    }
+
+    char key[128] = {0};
+    createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
+
+    STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
+    if (pOffset == NULL) {
+      doSaveTaskOffset(pTq->pOffsetStore, key, pTask->chkInfo.version);
+    }
+  }
+}
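// note: same walk as above, but records the given WAL version for every source task that
// has no offset yet; used when a submit block could not be turned into a queue item.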
+void saveOffsetForAllTasks(STQ* pTq, int64_t ver) {
+  void* pIter = NULL;
+
+  while (1) {
+    pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
+    if (pIter == NULL) {
+      break;
+    }
+
+    SStreamTask* pTask = *(SStreamTask**)pIter;
+    if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
+      continue;
+    }
+
+    if (pTask->status.taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->status.taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
+      tqDebug("s-task:%s skip push data, not ready for processing, status %d", pTask->id.idStr,
+              pTask->status.taskStatus);
+      continue;
+    }
+
+    char key[128] = {0};
+    createStreamTaskOffsetKey(key, pTask->id.streamId, pTask->id.taskId);
+
+    STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, key);
+    if (pOffset == NULL) {
+      doSaveTaskOffset(pTq->pOffsetStore, key, ver);
+    }
+  }
+}
+
+void doSaveTaskOffset(STqOffsetStore* pOffsetStore, const char* pKey, int64_t ver) {
+  STqOffset offset = {0};
+  tqOffsetResetToLog(&offset.val, ver);
+
+  tstrncpy(offset.subKey, pKey, tListLen(offset.subKey));
+
+  // keep the offset info in the offset store
+  tqOffsetWrite(pOffsetStore, &offset);
+}
+static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t subType) {
+  pRsp->reqOffset = pReq->reqOffset;
+
+  pRsp->blockData = taosArrayInit(0, sizeof(void*));
+  pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
+
+  if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL) {
+    return -1;
+  }
+
+  pRsp->withTbName = 0;
+  pRsp->withSchema = false;
+  return 0;
+}
+
+static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
+  pRsp->reqOffset = pReq->reqOffset;
+
+  pRsp->withTbName = 1;
+  pRsp->withSchema = 1;
+  pRsp->blockData = taosArrayInit(0, sizeof(void*));
+  pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
+  pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
+  pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
+
+  if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
+    return -1;
+  }
+
+  return 0;
+}
+static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
+                                     SRpcMsg* pMsg, bool* pBlockReturned) {
+  uint64_t     consumerId = pRequest->consumerId;
+  STqOffsetVal reqOffset = pRequest->reqOffset;
+  STqOffset*   pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
+  int32_t      vgId = TD_VID(pTq->pVnode);
+
+  *pBlockReturned = false;
+
+  // In this vnode, data has been polled by consumer for this topic, so let's continue from the last offset value.
+  if (pOffset != NULL) {
+    *pOffsetVal = pOffset->val;
+
+    char formatBuf[80];
+    tFormatOffset(formatBuf, 80, pOffsetVal);
+    tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%"PRIx64,
+            consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
+    return 0;
+  } else {
+    // no poll occurs in this vnode for this topic, let's seek to the right offset value.
+    if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
+      if (pRequest->useSnapshot) {
+        tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot",
+                consumerId, pHandle->subKey, vgId);
+
+        if (pHandle->fetchMeta) {
+          tqOffsetResetToMeta(pOffsetVal, 0);
+        } else {
+          tqOffsetResetToData(pOffsetVal, 0, 0);
+        }
+      } else {
+        pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
+        if (pHandle->pRef == NULL) {
+          terrno = TSDB_CODE_OUT_OF_MEMORY;
+          return -1;
+        }
+
+        // offset set to previous version when init
+        tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer - 1);
+      }
+    } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
+      if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+        SMqDataRsp dataRsp = {0};
+        tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);
+
+        tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
+        tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, (latest) offset reset to %" PRId64, consumerId,
+                pHandle->subKey, vgId, dataRsp.rspOffset.version);
+        int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
+        tDeleteSMqDataRsp(&dataRsp);
+
+        *pBlockReturned = true;
+        return code;
+      } else {
+        STaosxRsp taosxRsp = {0};
+        tqInitTaosxRsp(&taosxRsp, pRequest);
+        tqOffsetResetToLog(&taosxRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
+        int32_t code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
+        tDeleteSTaosxRsp(&taosxRsp);
+
+        *pBlockReturned = true;
+        return code;
+      }
+    } else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
+      tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64 " in vg %d, subkey %s, reset none failed",
+              pHandle->subKey, consumerId, vgId, pRequest->subKey);
+      terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
+      return -1;
+    }
+  }
+
+  return 0;
+}
+static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
+                                                   SRpcMsg* pMsg, STqOffsetVal* pOffset) {
+  uint64_t consumerId = pRequest->consumerId;
+  int32_t  vgId = TD_VID(pTq->pVnode);
+
+  SMqDataRsp dataRsp = {0};
+  tqInitDataRsp(&dataRsp, pRequest, pHandle->execHandle.subType);
+
+  // lock
+  taosWLockLatch(&pTq->lock);
+
+  qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
+  int code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
+  if (code != 0) {
+    goto end;
+  }
+
+  // till now, all data has been transferred to consumer, new data needs to push client once arrived.
+  if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
+      dataRsp.reqOffset.version == dataRsp.rspOffset.version && pHandle->consumerId == pRequest->consumerId) {
+    code = tqRegisterPushHandle(pTq, pHandle, pRequest, pMsg, &dataRsp, TMQ_MSG_TYPE__POLL_RSP);
+    taosWUnLockLatch(&pTq->lock);
+    return code;
+  }
+
+  code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP);
+
+  // NOTE: this pHandle->consumerId may have been changed already.
+
+end:
+  {
+    char buf[80] = {0};
+    tFormatOffset(buf, 80, &dataRsp.rspOffset);
+    tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64 " code:%d",
+            consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
+    taosWUnLockLatch(&pTq->lock);
+    tDeleteSMqDataRsp(&dataRsp);
+  }
+  return code;
+}
+static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
+                                                  SRpcMsg* pMsg, STqOffsetVal* offset) {
+  int         code = 0;
+  int32_t     vgId = TD_VID(pTq->pVnode);
+  SWalCkHead* pCkHead = NULL;
+  SMqMetaRsp  metaRsp = {0};
+  STaosxRsp   taosxRsp = {0};
+  tqInitTaosxRsp(&taosxRsp, pRequest);
+
+  if (offset->type != TMQ_OFFSET__LOG) {
+    if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) {
+      return -1;
+    }
+
+    if (metaRsp.metaRspLen > 0) {
+      code = tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp);
+      tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64 ",ts:%" PRId64,
+              pRequest->consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.ts);
+      taosMemoryFree(metaRsp.metaRsp);
+      tDeleteSTaosxRsp(&taosxRsp);
+      return code;
+    }
+
+    tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
+            ",ts:%" PRId64, pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type,
+            taosxRsp.rspOffset.uid, taosxRsp.rspOffset.ts);
+    if (taosxRsp.blockNum > 0) {
+      code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
+      tDeleteSTaosxRsp(&taosxRsp);
+      return code;
+    } else {
+      *offset = taosxRsp.rspOffset;
+    }
+  }
+
+  if (offset->type == TMQ_OFFSET__LOG) {
+    int64_t fetchVer = offset->version + 1;
+    pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
+    if (pCkHead == NULL) {
+      tDeleteSTaosxRsp(&taosxRsp);
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      return -1;
+    }
+
+    walSetReaderCapacity(pHandle->pWalReader, 2048);
+    int totalRows = 0;
+    while (1) {
+      int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
+      if (savedEpoch > pRequest->epoch) {
+        tqWarn("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey:%s vgId:%d offset %" PRId64
+               ", found new consumer epoch %d, discard req epoch %d",
+               pRequest->consumerId, pRequest->epoch, pHandle->subKey, vgId, fetchVer, savedEpoch, pRequest->epoch);
+        break;
+      }
+
+      if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) {
+        tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+        code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
+        tDeleteSTaosxRsp(&taosxRsp);
+        taosMemoryFreeClear(pCkHead);
+        return code;
+      }
+
+      SWalCont* pHead = &pCkHead->head;
+      tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d",
+              pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType);
+
+      // process meta
+      if (pHead->msgType != TDMT_VND_SUBMIT) {
+        if (totalRows > 0) {
+          tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer - 1);
+          code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
+          tDeleteSTaosxRsp(&taosxRsp);
+          taosMemoryFreeClear(pCkHead);
+          return code;
+        }
+
+        tqDebug("fetch meta msg, ver:%" PRId64 ", type:%s", pHead->version, TMSG_INFO(pHead->msgType));
+        tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
+        metaRsp.resMsgType = pHead->msgType;
+        metaRsp.metaRspLen = pHead->bodyLen;
+        metaRsp.metaRsp = pHead->body;
+        if (tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp) < 0) {
+          code = -1;
+          taosMemoryFreeClear(pCkHead);
+          tDeleteSTaosxRsp(&taosxRsp);
+          return code;
+        }
+
+        code = 0;
+        taosMemoryFreeClear(pCkHead);
+        tDeleteSTaosxRsp(&taosxRsp);
+        return code;
+      }
+
+      // process data
+      SPackedData submit = {
+          .msgStr = POINTER_SHIFT(pHead->body, sizeof(SSubmitReq2Msg)),
+          .msgLen = pHead->bodyLen - sizeof(SSubmitReq2Msg),
+          .ver = pHead->version,
+      };
+
+      if (tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows) < 0) {
+        tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
+                pRequest->subKey);
+        taosMemoryFreeClear(pCkHead);
+        tDeleteSTaosxRsp(&taosxRsp);
+        return -1;
+      }
+
+      if (totalRows >= 4096 || taosxRsp.createTableNum > 0) {
+        tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+        code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
+        tDeleteSTaosxRsp(&taosxRsp);
+        taosMemoryFreeClear(pCkHead);
+        return code;
+      } else {
+        fetchVer++;
+      }
+    }
+  }
+
+  tDeleteSTaosxRsp(&taosxRsp);
+  taosMemoryFreeClear(pCkHead);
+  return 0;
+}
int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
|
||||
int32_t code = -1;
|
||||
STqOffsetVal offset = {0};
|
||||
STqOffsetVal reqOffset = pRequest->reqOffset;
|
||||
|
||||
// 1. reset the offset if needed
|
||||
if (IS_OFFSET_RESET_TYPE(reqOffset.type)) {
|
||||
// handle the reset offset cases, according to the consumer's choice.
|
||||
bool blockReturned = false;
|
||||
code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
|
||||
// empty block returned, quit
|
||||
if (blockReturned) {
|
||||
return 0;
|
||||
}
|
||||
} else { // use the consumer specified offset
|
||||
// the offset value can not be monotonious increase??
|
||||
offset = reqOffset;
|
||||
}
|
||||
|
||||
// this is a normal subscribe requirement
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
|
||||
}
|
||||
|
||||
// todo handle the case where re-balance occurs.
|
||||
// for taosx
|
||||
return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
|
||||
}
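/*
 * Editor's note: a minimal standalone sketch of the offset-resolution step above.
 * All names here (DemoResetPolicy, demoResolveStartOffset, firstVer/lastVer) are
 * hypothetical illustrations, not TDengine APIs: a poll request either carries a
 * concrete offset, or a reset policy that must be resolved against the WAL range
 * before scanning starts.
 */
typedef enum { DEMO_OFFSET_EARLIEST = -2, DEMO_OFFSET_LATEST = -1 } DemoResetPolicy;

static long long demoResolveStartOffset(long long requested, long long firstVer, long long lastVer) {
  if (requested == DEMO_OFFSET_EARLIEST) return firstVer;   // reset to the oldest WAL entry
  if (requested == DEMO_OFFSET_LATEST) return lastVer + 1;  // start after the newest entry
  return requested;                                         // consumer-specified offset, used as-is
}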

int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
  int32_t len = 0;
  int32_t code = 0;
  tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
  if (code < 0) {
    return -1;
  }
  int32_t tlen = sizeof(SMqRspHead) + len;
  void*   buf = rpcMallocCont(tlen);
  if (buf == NULL) {
    return -1;
  }

  ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_META_RSP;
  ((SMqRspHead*)buf)->epoch = pReq->epoch;
  ((SMqRspHead*)buf)->consumerId = pReq->consumerId;

  void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));

  SEncoder encoder = {0};
  tEncoderInit(&encoder, abuf, len);
  tEncodeSMqMetaRsp(&encoder, pRsp);
  tEncoderClear(&encoder);

  SRpcMsg resp = {
      .info = pMsg->info,
      .pCont = buf,
      .contLen = tlen,
      .code = 0,
  };
  tmsgSendRsp(&resp);

  tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) send rsp, res msg type %d, offset type:%d",
          TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);

  return 0;
}
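/*
 * Editor's note: the response built above is a single rpc buffer laid out as
 * [fixed SMqRspHead | tEncode'd payload]. A standalone sketch of the same
 * pattern with a hypothetical header type (DemoRspHead and demoBuildRsp are
 * illustrations, not TDengine types):
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int64_t consumerId; int32_t epoch; int32_t mqMsgType; } DemoRspHead;

static void *demoBuildRsp(const void *payload, int32_t payloadLen, int32_t *tlen) {
  *tlen = (int32_t)sizeof(DemoRspHead) + payloadLen;
  char *buf = malloc(*tlen);
  if (buf == NULL) return NULL;
  ((DemoRspHead *)buf)->consumerId = 0;  // filled from the poll request in the real code
  ((DemoRspHead *)buf)->epoch = 0;
  ((DemoRspHead *)buf)->mqMsgType = 0;
  memcpy(buf + sizeof(DemoRspHead), payload, payloadLen);  // encoded body follows the fixed head
  return buf;
}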

@@ -362,15 +362,6 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
          p->ts = pCol->ts;
          p->colVal = pCol->colVal;
          singleTableLastTs = pCol->ts;

          // only set value for last row query
          if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
            if (taosArrayGetSize(pTableUidList) == 0) {
              taosArrayPush(pTableUidList, &pKeyInfo->uid);
            } else {
              taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
            }
          }
        }
      } else {
        SLastCol* p = taosArrayGet(pLastCols, slotId);

@@ -417,6 +408,12 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
      }
    }

    if (taosArrayGetSize(pTableUidList) == 0) {
      taosArrayPush(pTableUidList, &pKeyInfo->uid);
    } else {
      taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
    }

    tsdbCacheRelease(lruCache, h);
  }

@@ -831,6 +831,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
    // this block belongs to a table that is not queried.
    STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, pBlockIdx->uid, pReader->idStr);
    if (pScanInfo == NULL) {
      tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle);
      return terrno;
    }

@@ -2088,7 +2089,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
      pBlockScanInfo->lastKey = tsLastBlock;
      return TSDB_CODE_SUCCESS;
    } else {
      int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
      code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }

@@ -2112,7 +2113,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
      }
    }
  } else {  // not merge block data
    int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
    code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
    if (code != TSDB_CODE_SUCCESS) {
      return code;
    }

@@ -2575,7 +2576,7 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
  SRow*      pTSRow = NULL;
  SRowMerger merge = {0};

  int32_t code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
  code = tsdbRowMergerInit(&merge, NULL, &fRow, pReader->pSchema);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }

@@ -3242,8 +3243,8 @@ static int32_t readRowsCountFromFiles(STsdbReader* pReader) {
  int32_t code = TSDB_CODE_SUCCESS;

  while (1) {
    bool hasNext = false;
    int32_t code = filesetIteratorNext(&pReader->status.fileIter, pReader, &hasNext);
    bool hasNext = false;
    code = filesetIteratorNext(&pReader->status.fileIter, pReader, &hasNext);
    if (code) {
      return code;
    }

@@ -3515,8 +3516,8 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_
  int64_t startVer = (pCond->startVersion == -1) ? 0 : pCond->startVersion;

  int64_t endVer = 0;
  if (pCond->endVersion ==
      -1) {  // user not specified end version, set current maximum version of vnode as the endVersion
  if (pCond->endVersion == -1) {
    // user not specified end version, set current maximum version of vnode as the endVersion
    endVer = pVnode->state.applied;
  } else {
    endVer = (pCond->endVersion > pVnode->state.applied) ? pVnode->state.applied : pCond->endVersion;

@@ -1259,6 +1259,11 @@ void tBlockDataReset(SBlockData *pBlockData) {
  pBlockData->suid = 0;
  pBlockData->uid = 0;
  pBlockData->nRow = 0;
  for (int32_t i = 0; i < pBlockData->nColData; i++) {
    tColDataDestroy(&pBlockData->aColData[i]);
  }
  pBlockData->nColData = 0;
  taosMemoryFreeClear(pBlockData->aColData);
}

void tBlockDataClear(SBlockData *pBlockData) {

@@ -57,6 +57,28 @@ int vnodeCheckCfg(const SVnodeCfg *pCfg) {
  return 0;
}

const char* vnodeRoleToStr(ESyncRole role) {
  switch (role) {
    case TAOS_SYNC_ROLE_VOTER:
      return "voter";
    case TAOS_SYNC_ROLE_LEARNER:
      return "learner";
    default:
      return "unknown";
  }
}

const ESyncRole vnodeStrToRole(char* str) {
  if(strcmp(str, "voter") == 0){
    return TAOS_SYNC_ROLE_VOTER;
  }
  if(strcmp(str, "learner") == 0){
    return TAOS_SYNC_ROLE_LEARNER;
  }

  return TAOS_SYNC_ROLE_ERROR;
}
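/*
 * Editor's note: the mapping above is intentionally asymmetric. A standalone
 * analog (DemoRole and the helpers are hypothetical, not TDengine code) showing
 * that every role stringifies, but only "voter"/"learner" parse back; the
 * default string "unknown" yields the error value:
 */
#include <assert.h>
#include <string.h>

typedef enum { DEMO_ROLE_VOTER, DEMO_ROLE_LEARNER, DEMO_ROLE_ERROR } DemoRole;

static const char *demoRoleToStr(DemoRole r) {
  return r == DEMO_ROLE_VOTER ? "voter" : r == DEMO_ROLE_LEARNER ? "learner" : "unknown";
}

static DemoRole demoStrToRole(const char *s) {
  if (strcmp(s, "voter") == 0) return DEMO_ROLE_VOTER;
  if (strcmp(s, "learner") == 0) return DEMO_ROLE_LEARNER;
  return DEMO_ROLE_ERROR;
}

int main(void) {
  assert(demoStrToRole(demoRoleToStr(DEMO_ROLE_VOTER)) == DEMO_ROLE_VOTER);  // round-trips
  assert(demoStrToRole("unknown") == DEMO_ROLE_ERROR);  // default string does not round-trip
  return 0;
}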

int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
  const SVnodeCfg *pCfg = (SVnodeCfg *)pObj;

@@ -117,6 +139,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
  if (tjsonAddIntegerToObject(pJson, "hashSuffix", pCfg->hashSuffix) < 0) return -1;

  if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
  if (tjsonAddIntegerToObject(pJson, "syncCfg.totalReplicaNum", pCfg->syncCfg.totalReplicaNum) < 0) return -1;
  if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;

  if (tjsonAddIntegerToObject(pJson, "vndStats.stables", pCfg->vndStats.numOfSTables) < 0) return -1;

@@ -128,9 +151,9 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
  SJson *nodeInfo = tjsonCreateArray();
  if (nodeInfo == NULL) return -1;
  if (tjsonAddItemToObject(pJson, "syncCfg.nodeInfo", nodeInfo) < 0) return -1;
  vDebug("vgId:%d, encode config, replicas:%d selfIndex:%d", pCfg->vgId, pCfg->syncCfg.replicaNum,
         pCfg->syncCfg.myIndex);
  for (int i = 0; i < pCfg->syncCfg.replicaNum; ++i) {
  vDebug("vgId:%d, encode config, replicas:%d totalReplicas:%d selfIndex:%d", pCfg->vgId, pCfg->syncCfg.replicaNum,
         pCfg->syncCfg.totalReplicaNum, pCfg->syncCfg.myIndex);
  for (int i = 0; i < pCfg->syncCfg.totalReplicaNum; ++i) {
    SJson *info = tjsonCreateObject();
    SNodeInfo *pNode = (SNodeInfo *)&pCfg->syncCfg.nodeInfo[i];
    if (info == NULL) return -1;

@@ -138,6 +161,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
    if (tjsonAddStringToObject(info, "nodeFqdn", pNode->nodeFqdn) < 0) return -1;
    if (tjsonAddIntegerToObject(info, "nodeId", pNode->nodeId) < 0) return -1;
    if (tjsonAddIntegerToObject(info, "clusterId", pNode->clusterId) < 0) return -1;
    if (tjsonAddStringToObject(info, "nodeRole", vnodeRoleToStr(pNode->nodeRole)) < 0) return -1;
    if (tjsonAddItemToArray(nodeInfo, info) < 0) return -1;
    vDebug("vgId:%d, encode config, replica:%d ep:%s:%u dnode:%d", pCfg->vgId, i, pNode->nodeFqdn, pNode->nodePort,
           pNode->nodeId);

@@ -235,6 +259,8 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {

  tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code);
  if (code < 0) return -1;
  tjsonGetNumberValue(pJson, "syncCfg.totalReplicaNum", pCfg->syncCfg.totalReplicaNum, code);
  if (code < 0) return -1;
  tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex, code);
  if (code < 0) return -1;

@@ -251,10 +277,13 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {

  SJson *nodeInfo = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo");
  int arraySize = tjsonGetArraySize(nodeInfo);
  if (arraySize != pCfg->syncCfg.replicaNum) return -1;
  if(pCfg->syncCfg.totalReplicaNum == 0 && pCfg->syncCfg.replicaNum > 0){
    pCfg->syncCfg.totalReplicaNum = pCfg->syncCfg.replicaNum;
  }
  if (arraySize != pCfg->syncCfg.totalReplicaNum) return -1;

  vDebug("vgId:%d, decode config, replicas:%d selfIndex:%d", pCfg->vgId, pCfg->syncCfg.replicaNum,
         pCfg->syncCfg.myIndex);
  vDebug("vgId:%d, decode config, replicas:%d totalReplicas:%d selfIndex:%d", pCfg->vgId, pCfg->syncCfg.replicaNum,
         pCfg->syncCfg.totalReplicaNum, pCfg->syncCfg.myIndex);
  for (int i = 0; i < arraySize; ++i) {
    SJson *info = tjsonGetArrayItem(nodeInfo, i);
    SNodeInfo *pNode = &pCfg->syncCfg.nodeInfo[i];

@@ -266,6 +295,15 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
    if (code < 0) return -1;
    tjsonGetNumberValue(info, "clusterId", pNode->clusterId, code);
    if (code < 0) return -1;
    char role[10] = {0};
    code = tjsonGetStringValue(info, "nodeRole", role);
    if (code < 0) return -1;
    if(strlen(role) != 0){
      pNode->nodeRole = vnodeStrToRole(role);
    }
    else{
      pNode->nodeRole = TAOS_SYNC_ROLE_VOTER;
    }
    vDebug("vgId:%d, decode config, replica:%d ep:%s:%u dnode:%d", pCfg->vgId, i, pNode->nodeFqdn, pNode->nodePort,
           pNode->nodeId);
  }
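/*
 * Editor's note: the decode path above stays compatible with config files written
 * before learners existed: such files carry neither "syncCfg.totalReplicaNum" nor
 * "nodeRole". A standalone restatement of the two defaulting rules (the helper and
 * its parameters are hypothetical; 0 stands in for TAOS_SYNC_ROLE_VOTER):
 */
static void demoApplyLegacyDefaults(int *totalReplicaNum, int replicaNum, int *nodeRole, int roleStrLen) {
  if (*totalReplicaNum == 0 && replicaNum > 0) *totalReplicaNum = replicaNum;  // pre-learner config file
  if (roleStrLen == 0) *nodeRole = 0;  // missing role string => every node is a voter
}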

@@ -143,22 +143,13 @@ _exit:
  return code;
}

void vnodeUpdCommitSched(SVnode *pVnode) {
  int64_t randNum = taosRand();
  pVnode->commitSched.commitMs = taosGetMonoTimestampMs();
  pVnode->commitSched.maxWaitMs = tsVndCommitMaxIntervalMs + (randNum % tsVndCommitMaxIntervalMs);
}

int vnodeShouldCommit(SVnode *pVnode) {
  SVCommitSched *pSched = &pVnode->commitSched;
  int64_t nowMs = taosGetMonoTimestampMs();
int vnodeShouldCommit(SVnode *pVnode, bool atExit) {
  bool diskAvail = osDataSpaceAvailable();
  bool needCommit = false;

  taosThreadMutexLock(&pVnode->mutex);
  if (pVnode->inUse && diskAvail) {
    needCommit =
        ((pVnode->inUse->size > pVnode->inUse->node.size) && (pSched->commitMs + SYNC_VND_COMMIT_MIN_MS < nowMs));
    needCommit = (pVnode->inUse->size > pVnode->inUse->node.size) || (pVnode->inUse->size > 0 && atExit);
  }
  taosThreadMutexUnlock(&pVnode->mutex);
  return needCommit;
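/*
 * Editor's note: the rewritten predicate above drops the old randomized time-based
 * schedule and keys purely off buffer-pool usage, plus an unconditional flush of any
 * dirty data at exit. A standalone restatement (names hypothetical):
 */
#include <stdbool.h>

static bool demoShouldCommit(long long used, long long capacity, bool diskAvail, bool atExit) {
  if (!diskAvail) return false;                    // never commit without disk space
  return used > capacity || (used > 0 && atExit);  // pool overflow, or dirty data on shutdown
}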

@@ -430,8 +421,6 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) {
  vInfo("vgId:%d, start to commit, commitId:%" PRId64 " version:%" PRId64 " term: %" PRId64, TD_VID(pVnode),
        pInfo->info.state.commitID, pInfo->info.state.committed, pInfo->info.state.commitTerm);

  vnodeUpdCommitSched(pVnode);

  // persist wal before starting
  if (walPersist(pVnode->pWal) < 0) {
    vError("vgId:%d, failed to persist wal since %s", TD_VID(pVnode), terrstr());

@@ -76,19 +76,41 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *p
  }

  SSyncCfg *pCfg = &info.config.syncCfg;
  pCfg->myIndex = pReq->selfIndex;
  pCfg->replicaNum = pReq->replica;

  pCfg->replicaNum = 0;
  pCfg->totalReplicaNum = 0;
  memset(&pCfg->nodeInfo, 0, sizeof(pCfg->nodeInfo));

  vInfo("vgId:%d, save config while alter, replicas:%d selfIndex:%d", pReq->vgId, pCfg->replicaNum, pCfg->myIndex);
  for (int i = 0; i < pReq->replica; ++i) {
    SNodeInfo *pNode = &pCfg->nodeInfo[i];
    pNode->nodeId = pReq->replicas[i].id;
    pNode->nodePort = pReq->replicas[i].port;
    tstrncpy(pNode->nodeFqdn, pReq->replicas[i].fqdn, sizeof(pNode->nodeFqdn));
    pNode->nodeRole = TAOS_SYNC_ROLE_VOTER;
    (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
    vInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId);
    pCfg->replicaNum++;
  }
  if(pReq->selfIndex != -1){
    pCfg->myIndex = pReq->selfIndex;
  }
  for (int i = pCfg->replicaNum; i < pReq->replica + pReq->learnerReplica; ++i) {
    SNodeInfo *pNode = &pCfg->nodeInfo[i];
    pNode->nodeId = pReq->learnerReplicas[pCfg->totalReplicaNum].id;
    pNode->nodePort = pReq->learnerReplicas[pCfg->totalReplicaNum].port;
    pNode->nodeRole = TAOS_SYNC_ROLE_LEARNER;
    tstrncpy(pNode->nodeFqdn, pReq->learnerReplicas[pCfg->totalReplicaNum].fqdn, sizeof(pNode->nodeFqdn));
    (void)tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
    vInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", pReq->vgId, i, pNode->nodeFqdn, pNode->nodePort, pNode->nodeId);
    pCfg->totalReplicaNum++;
  }
  pCfg->totalReplicaNum += pReq->replica;
  if(pReq->learnerSelfIndex != -1){
    pCfg->myIndex = pReq->replica + pReq->learnerSelfIndex;
  }

  vInfo("vgId:%d, save config while alter, replicas:%d totalReplicas:%d selfIndex:%d",
        pReq->vgId, pCfg->replicaNum, pCfg->totalReplicaNum, pCfg->myIndex);

  info.config.syncCfg = *pCfg;
  ret = vnodeSaveInfo(dir, &info);

@@ -181,6 +203,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
  SSyncCfg *pCfg = &info.config.syncCfg;
  pCfg->myIndex = 0;
  pCfg->replicaNum = 1;
  pCfg->totalReplicaNum = 1;
  memset(&pCfg->nodeInfo, 0, sizeof(pCfg->nodeInfo));

  vInfo("vgId:%d, alter vnode replicas to 1", pReq->srcVgId);

@@ -248,7 +271,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
  // save vnode info on dnode ep changed
  bool updated = false;
  SSyncCfg *pCfg = &info.config.syncCfg;
  for (int32_t i = 0; i < pCfg->replicaNum; ++i) {
  for (int32_t i = 0; i < pCfg->totalReplicaNum; ++i) {
    SNodeInfo *pNode = &pCfg->nodeInfo[i];
    if (tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort)) {
      updated = true;

@@ -286,8 +309,6 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
  taosThreadMutexInit(&pVnode->mutex, NULL);
  taosThreadCondInit(&pVnode->poolNotEmpty, NULL);

  vnodeUpdCommitSched(pVnode);

  int8_t rollback = vnodeShouldRollback(pVnode);

  // open buffer pool

@@ -406,6 +427,10 @@ void vnodeClose(SVnode *pVnode) {
// start the sync timer after the queue is ready
int32_t vnodeStart(SVnode *pVnode) { return vnodeSyncStart(pVnode); }

int32_t vnodeIsCatchUp(SVnode *pVnode){
  return syncIsCatchUp(pVnode->sync);
}

void vnodeStop(SVnode *pVnode) {}

int64_t vnodeGetSyncHandle(SVnode *pVnode) { return pVnode->sync; }

@@ -400,7 +400,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
      }
      break;
    case TDMT_STREAM_TASK_DEPLOY: {
      if (tqProcessTaskDeployReq(pVnode->pTq, version, pReq, len) < 0) {
      if (pVnode->restored && tqProcessTaskDeployReq(pVnode->pTq, version, pReq, len) < 0) {
        goto _err;
      }
    } break;

@@ -447,13 +447,11 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp

  walApplyVer(pVnode->pWal, version);

  /*vInfo("vgId:%d, push msg begin", pVnode->config.vgId);*/
  if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
    /*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/
    vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
    return -1;
  }
  /*vInfo("vgId:%d, push msg end", pVnode->config.vgId);*/

  // commit if need
  if (needCommit) {

@@ -541,13 +539,10 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
      return vnodeGetBatchMeta(pVnode, pMsg);
    case TDMT_VND_TMQ_CONSUME:
      return tqProcessPollReq(pVnode->pTq, pMsg);

    case TDMT_STREAM_TASK_RUN:
      return tqProcessTaskRunReq(pVnode->pTq, pMsg);
#if 1
    case TDMT_STREAM_TASK_DISPATCH:
      return tqProcessTaskDispatchReq(pVnode->pTq, pMsg, true);
#endif
    case TDMT_STREAM_TASK_CHECK:
      return tqProcessStreamTaskCheckReq(pVnode->pTq, pMsg);
    case TDMT_STREAM_TASK_DISPATCH_RSP:

@@ -1452,7 +1447,8 @@ int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen) {
}

static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
  vInfo("vgId:%d, alter replica confirm msg is processed", TD_VID(pVnode));
  vInfo("vgId:%d, vnode management handle msgType:alter-confirm, alter replica confirm msg is processed",
        TD_VID(pVnode));
  pRsp->msgType = TDMT_VND_ALTER_CONFIRM_RSP;
  pRsp->code = TSDB_CODE_SUCCESS;
  pRsp->pCont = NULL;

@@ -112,9 +112,6 @@ static int32_t inline vnodeProposeMsg(SVnode *pVnode, SRpcMsg *pMsg, bool isWeak
    pVnode->blocked = true;
    pVnode->blockSec = taosGetTimestampSec();
    pVnode->blockSeq = seq;
#if 0
    pVnode->blockInfo = pMsg->info;
#endif
  }
  taosThreadMutexUnlock(&pVnode->lock);

@@ -129,8 +126,8 @@ static int32_t inline vnodeProposeMsg(SVnode *pVnode, SRpcMsg *pMsg, bool isWeak
  return code;
}

void vnodeProposeCommitOnNeed(SVnode *pVnode) {
  if (!vnodeShouldCommit(pVnode)) {
void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit) {
  if (!vnodeShouldCommit(pVnode, atExit)) {
    return;
  }

@@ -145,18 +142,18 @@ void vnodeProposeCommitOnNeed(SVnode *pVnode) {
  rpcMsg.pCont = pHead;
  rpcMsg.info.noResp = 1;

  vInfo("vgId:%d, propose vnode commit", pVnode->config.vgId);
  bool isWeak = false;
  if (vnodeProposeMsg(pVnode, &rpcMsg, isWeak) < 0) {
    vTrace("vgId:%d, failed to propose vnode commit since %s", pVnode->config.vgId, terrstr());
    goto _out;

  if (!atExit) {
    if (vnodeProposeMsg(pVnode, &rpcMsg, isWeak) < 0) {
      vTrace("vgId:%d, failed to propose vnode commit since %s", pVnode->config.vgId, terrstr());
    }
    rpcFreeCont(rpcMsg.pCont);
    rpcMsg.pCont = NULL;
  } else {
    tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &rpcMsg);
  }

  vInfo("vgId:%d, proposed vnode commit", pVnode->config.vgId);

_out:
  vnodeUpdCommitSched(pVnode);
  rpcFreeCont(rpcMsg.pCont);
  rpcMsg.pCont = NULL;
}
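/*
 * Editor's note: the rewrite above creates two delivery paths with different
 * message-ownership rules. In the normal case the commit request goes through the
 * sync layer and the contents are freed locally; at exit it is enqueued directly to
 * the write queue, which then owns the message, so it must not be freed here. A
 * standalone sketch of that rule (all names hypothetical):
 */
#include <stdbool.h>
#include <stdlib.h>

typedef struct { void *pCont; } DemoMsg;

static void demoPropose(DemoMsg *m) { (void)m; }  // stand-in for the sync-layer propose
static void demoEnqueue(DemoMsg *m) { (void)m; }  // stand-in for the write queue; it frees pCont later

static void demoProposeCommit(DemoMsg *m, bool atExit) {
  if (!atExit) {
    demoPropose(m);
    free(m->pCont);  // caller keeps ownership on the propose path
    m->pCont = NULL;
  } else {
    demoEnqueue(m);  // queue takes ownership; no free here
  }
}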

#if BATCH_ENABLE

@@ -236,7 +233,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
      continue;
    }

    vnodeProposeCommitOnNeed(pVnode);
    bool atExit = false;
    vnodeProposeCommitOnNeed(pVnode, atExit);

    code = vnodePreProcessWriteMsg(pVnode, pMsg);
    if (code != 0) {

@@ -288,7 +286,8 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
      continue;
    }

    vnodeProposeCommitOnNeed(pVnode);
    bool atExit = false;
    vnodeProposeCommitOnNeed(pVnode, atExit);

    code = vnodePreProcessWriteMsg(pVnode, pMsg);
    if (code != 0) {

@@ -549,6 +548,9 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)

  pVnode->restored = true;
  vInfo("vgId:%d, sync restore finished", pVnode->config.vgId);

  // start to restore all stream tasks
  tqStartStreamTasks(pVnode->pTq);
}

static void vnodeBecomeFollower(const SSyncFSM *pFsm) {

@@ -564,6 +566,19 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
  taosThreadMutexUnlock(&pVnode->lock);
}

static void vnodeBecomeLearner(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vInfo("vgId:%d, become learner", pVnode->config.vgId);

  taosThreadMutexLock(&pVnode->lock);
  if (pVnode->blocked) {
    pVnode->blocked = false;
    vDebug("vgId:%d, become learner and post block", pVnode->config.vgId);
    tsem_post(&pVnode->syncSem);
  }
  taosThreadMutexUnlock(&pVnode->lock);
}

static void vnodeBecomeLeader(const SSyncFSM *pFsm) {
  SVnode *pVnode = pFsm->data;
  vDebug("vgId:%d, become leader", pVnode->config.vgId);

@@ -605,6 +620,7 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
  pFsm->FpApplyQueueItems = vnodeApplyQueueItems;
  pFsm->FpBecomeLeaderCb = vnodeBecomeLeader;
  pFsm->FpBecomeFollowerCb = vnodeBecomeFollower;
  pFsm->FpBecomeLearnerCb = vnodeBecomeLearner;
  pFsm->FpReConfigCb = NULL;
  pFsm->FpSnapshotStartRead = vnodeSnapshotStartRead;
  pFsm->FpSnapshotStopRead = vnodeSnapshotStopRead;

@@ -637,7 +653,7 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {

  SSyncCfg *pCfg = &syncInfo.syncCfg;
  vInfo("vgId:%d, start to open sync, replica:%d selfIndex:%d", pVnode->config.vgId, pCfg->replicaNum, pCfg->myIndex);
  for (int32_t i = 0; i < pCfg->replicaNum; ++i) {
  for (int32_t i = 0; i < pCfg->totalReplicaNum; ++i) {
    SNodeInfo *pNode = &pCfg->nodeInfo[i];
    vInfo("vgId:%d, index:%d ep:%s:%u dnode:%d cluster:%" PRId64, pVnode->config.vgId, i, pNode->nodeFqdn, pNode->nodePort,
          pNode->nodeId, pNode->clusterId);

@@ -127,14 +127,9 @@ enum {
};

typedef struct {
  // TODO remove prepareStatus
  // STqOffsetVal prepareStatus; // for tmq
  STqOffsetVal currentOffset;  // for tmq
  SMqMetaRsp metaRsp;          // for tmq fetching meta
  // int8_t returned;
  int64_t snapshotVer;
  // const SSubmitReq* pReq;

  STqOffsetVal currentOffset;  // for tmq
  SMqMetaRsp metaRsp;          // for tmq fetching meta
  int64_t snapshotVer;
  SPackedData submit;
  SSchemaWrapper* schema;
  char tbName[TSDB_TABLE_NAME_LEN];

@@ -144,6 +139,8 @@ typedef struct {
  int64_t fillHistoryVer1;
  int64_t fillHistoryVer2;
  SStreamState* pState;
  int64_t dataVersion;
  int64_t checkPointId;
} SStreamTaskInfo;

typedef struct {

@@ -191,7 +188,6 @@ enum {
  OP_OPENED = 0x1,
  OP_RES_TO_RETURN = 0x5,
  OP_EXEC_DONE = 0x9,
  // OP_EXEC_RECV = 0x11,
};

typedef struct SOperatorFpSet {

@@ -560,6 +556,7 @@ typedef struct SStreamIntervalOperatorInfo {
  uint64_t numOfDatapack;
  SArray* pUpdated;
  SSHashObj* pUpdatedMap;
  int64_t dataVersion;
} SStreamIntervalOperatorInfo;

typedef struct SDataGroupInfo {

@@ -609,6 +606,7 @@ typedef struct SStreamSessionAggOperatorInfo {
  bool ignoreExpiredDataSaved;
  SArray* pUpdated;
  SSHashObj* pStUpdated;
  int64_t dataVersion;
} SStreamSessionAggOperatorInfo;

typedef struct SStreamStateAggOperatorInfo {

@@ -627,6 +625,7 @@ typedef struct SStreamStateAggOperatorInfo {
  bool ignoreExpiredDataSaved;
  SArray* pUpdated;
  SSHashObj* pSeUpdated;
  int64_t dataVersion;
} SStreamStateAggOperatorInfo;

typedef struct SStreamPartitionOperatorInfo {

@@ -827,7 +826,7 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);

char* buildTaskId(uint64_t taskId, uint64_t queryId);
void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);

SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo);

@@ -45,13 +45,13 @@ static void destroyCacheScanOperator(void* param);
static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pColMatchInfo);

#define SCAN_ROW_TYPE(_t) ((_t)? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW)
#define SCAN_ROW_TYPE(_t) ((_t) ? CACHESCAN_RETRIEVE_LAST : CACHESCAN_RETRIEVE_LAST_ROW)

SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle,
                                           STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
  int32_t             code = TSDB_CODE_SUCCESS;
  SCacheRowsScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SCacheRowsScanInfo));
  SOperatorInfo*      pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  if (pInfo == NULL || pOperator == NULL) {
    code = TSDB_CODE_OUT_OF_MEMORY;
    tableListDestroy(pTableListInfo);

@@ -91,7 +91,8 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe

  uint64_t suid = tableListGetSuid(pTableListInfo);
  code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, totalTables,
                                 taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader, pTaskInfo->id.str);
                                 taosArrayGetSize(pInfo->matchInfo.pList), suid, &pInfo->pLastrowReader,
                                 pTaskInfo->id.str);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

@@ -114,7 +115,8 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
    p->pCtx = createSqlFunctionCtx(p->pExprInfo, p->numOfExprs, &p->rowEntryInfoOffset);
  }

  setOperatorInfo(pOperator, "CachedRowScanOperator", QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN, false, OP_NOT_OPENED, pInfo, pTaskInfo);
  setOperatorInfo(pOperator, "CachedRowScanOperator", QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN, false, OP_NOT_OPENED,
                  pInfo, pTaskInfo);
  pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);

  pOperator->fpSet =

@@ -123,7 +125,7 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
  pOperator->cost.openCost = 0;
  return pOperator;

_error:
  pTaskInfo->code = code;
  destroyCacheScanOperator(pInfo);
  taosMemoryFree(pOperator);

@@ -136,8 +138,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
  }

  SCacheRowsScanInfo* pInfo = pOperator->info;
  SExecTaskInfo*      pTaskInfo = pOperator->pTaskInfo;
  STableListInfo*     pTableList = pInfo->pTableList;

  uint64_t suid = tableListGetSuid(pTableList);
  int32_t  size = tableListGetSize(pTableList);

@@ -194,8 +196,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
      pRes->info.rows = 1;

      SExprSupp* pSup = &pInfo->pseudoExprSup;
      int32_t    code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pRes,
                                               pRes->info.rows, GET_TASKID(pTaskInfo), NULL);
      if (code != TSDB_CODE_SUCCESS) {
        pTaskInfo->code = code;
        return NULL;

@@ -217,7 +219,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
    }

    STableKeyInfo* pList = NULL;
    int32_t        num = 0;

    int32_t code = tableListGetGroupList(pTableList, pInfo->currentGroupIndex, &pList, &num);
    if (code != TSDB_CODE_SUCCESS) {

@@ -251,11 +253,9 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
      pInfo->pRes->info.id.groupId = pKeyInfo->groupId;

      if (taosArrayGetSize(pInfo->pUidList) > 0) {
        ASSERT((pInfo->retrieveType & CACHESCAN_RETRIEVE_LAST_ROW) == CACHESCAN_RETRIEVE_LAST_ROW);

        pInfo->pRes->info.id.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, 0);
        code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes, pInfo->pRes->info.rows,
                                      GET_TASKID(pTaskInfo), NULL);
        code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes,
                                      pInfo->pRes->info.rows, GET_TASKID(pTaskInfo), NULL);
        if (code != TSDB_CODE_SUCCESS) {
          pTaskInfo->code = code;
          return NULL;

@@ -325,7 +325,7 @@ int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pC
    return TSDB_CODE_SUCCESS;
  }

  size_t  size = taosArrayGetSize(pColMatchInfo->pList);
  SArray* pMatchInfo = taosArrayInit(size, sizeof(SColMatchItem));

  for (int32_t i = 0; i < size; ++i) {

@@ -127,7 +127,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
    pOperator->status = OP_NOT_OPENED;

    SStreamScanInfo* pInfo = pOperator->info;
    qDebug("task stream set total blocks:%d %s", (int32_t)numOfBlocks, id);
    qDebug("s-task set source blocks:%d %s", (int32_t)numOfBlocks, id);
    ASSERT(pInfo->validBlockIndex == 0 && taosArrayGetSize(pInfo->pBlockLists) == 0);

    if (type == STREAM_INPUT__MERGED_SUBMIT) {

@@ -173,9 +173,7 @@ void doSetTaskId(SOperatorInfo* pOperator) {
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId) {
  SExecTaskInfo* pTaskInfo = tinfo;
  pTaskInfo->id.queryId = queryId;

  taosMemoryFreeClear(pTaskInfo->id.str);
  pTaskInfo->id.str = buildTaskId(taskId, queryId);
  buildTaskId(taskId, queryId, pTaskInfo->id.str);

  // set the idstr for tsdbReader
  doSetTaskId(pTaskInfo->pRoot);

@@ -198,6 +196,13 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
  return code;
}

void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId) {
  SExecTaskInfo* pTaskInfo = tinfo;
  *dataVer = pTaskInfo->streamInfo.dataVersion;
  *ckId = pTaskInfo->streamInfo.checkPointId;
}

int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
  if (tinfo == NULL) {
    return TSDB_CODE_APP_ERROR;

@@ -363,27 +368,23 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
  return qa;
}

int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
  const char*    id = GET_TASKID(pTaskInfo);
  int32_t        code = 0;

  if (isAdd) {
    qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), pTaskInfo->id.str);
    qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), id);
  }

  // traverse to the stream scanner node to add this table id
  SOperatorInfo* pInfo = pTaskInfo->pRoot;
  while (pInfo->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
    pInfo = pInfo->pDownstream[0];
  }

  int32_t code = 0;
  SOperatorInfo* pInfo = extractOperatorInTree(pTaskInfo->pRoot, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id);
  SStreamScanInfo* pScanInfo = pInfo->info;

  if (isAdd) {  // add new table id
    SArray* qa = filterUnqualifiedTables(pScanInfo, tableIdList, GET_TASKID(pTaskInfo));
    int32_t numOfQualifiedTables = taosArrayGetSize(qa);

    qDebug(" %d qualified child tables added into stream scanner", numOfQualifiedTables);

    qDebug("%d qualified child tables added into stream scanner, %s", numOfQualifiedTables, id);
    code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);
    if (code != TSDB_CODE_SUCCESS) {
      taosArrayDestroy(qa);

@@ -424,19 +425,6 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
      }
    }

#if 0
    bool exists = false;
    for (int32_t k = 0; k < taosArrayGetSize(pListInfo->pTableList); ++k) {
      STableKeyInfo* pKeyInfo = taosArrayGet(pListInfo->pTableList, k);
      if (pKeyInfo->uid == keyInfo.uid) {
        qWarn("ignore duplicated query table uid:%" PRIu64 " added, %s", pKeyInfo->uid, pTaskInfo->id.str);
        exists = true;
      }
    }

    if (!exists) {
#endif

    tableListAddTableInfo(pTableListInfo, keyInfo.uid, keyInfo.groupId);
  }

@@ -447,7 +435,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo

    taosArrayDestroy(qa);
  } else {  // remove the table id in current list
    qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
    qDebug("%d remove child tables from the stream scanner, %s", (int32_t)taosArrayGetSize(tableIdList), id);
    taosWLockLatch(&pTaskInfo->lock);
    code = tqReaderRemoveTbUidList(pScanInfo->tqReader, tableIdList);
    taosWUnLockLatch(&pTaskInfo->lock);

@@ -1273,3 +1261,22 @@ void qProcessRspMsg(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
  rpcFreeCont(pMsg->pCont);
  destroySendMsgInfo(pSendInfo);
}

SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo) {
  SExecTaskInfo* pTaskInfo = tinfo;
  SArray* plist = getTableListInfo(pTaskInfo);

  // only extract the table list in the first element
  STableListInfo* pTableListInfo = taosArrayGetP(plist, 0);

  SArray* pUidList = taosArrayInit(10, sizeof(uint64_t));

  int32_t numOfTables = tableListGetSize(pTableListInfo);
  for (int32_t i = 0; i < numOfTables; ++i) {
    STableKeyInfo* pKeyInfo = tableListGetInfo(pTableListInfo, i);
    taosArrayPush(pUidList, &pKeyInfo->uid);
  }

  taosArrayDestroy(plist);
  return pUidList;
}

@@ -1151,8 +1151,8 @@ void cleanupExprSupp(SExprSupp* pSupp) {

void cleanupBasicInfo(SOptrBasicInfo* pInfo) { pInfo->pRes = blockDataDestroy(pInfo->pRes); }

char* buildTaskId(uint64_t taskId, uint64_t queryId) {
  char* p = taosMemoryMalloc(64);
void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst) {
  char* p = dst;

  int32_t offset = 6;
  memcpy(p, "TID:0x", offset);

@@ -1163,7 +1163,6 @@ char* buildTaskId(uint64_t taskId, uint64_t queryId) {
  offset += tintToHex(queryId, &p[offset]);

  p[offset] = 0;
  return p;
}

SExecTaskInfo* doCreateExecTaskInfo(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model,

@@ -1185,7 +1184,9 @@ SExecTaskInfo* doCreateExecTaskInfo(uint64_t queryId, uint64_t taskId, int32_t v
  taosInitRWLatch(&pTaskInfo->lock);
  pTaskInfo->id.vgId = vgId;
  pTaskInfo->id.queryId = queryId;
  pTaskInfo->id.str = buildTaskId(taskId, queryId);

  pTaskInfo->id.str = taosMemoryMalloc(64);
  buildTaskId(taskId, queryId, pTaskInfo->id.str);
  return pTaskInfo;
}
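/*
 * Editor's note: the buildTaskId() change above swaps a malloc-and-return API for a
 * caller-allocated destination buffer, so the 64-byte id string allocated once in
 * doCreateExecTaskInfo can simply be rewritten by qSetTaskId() instead of being freed
 * and reallocated. A standalone analog of the caller-allocates pattern (demoBuildId
 * and the exact id format are illustrative, not the real layout):
 */
#include <stdio.h>

static void demoBuildId(unsigned long long taskId, unsigned long long queryId, char *dst) {
  // contract: caller guarantees dst points at >= 64 writable bytes
  snprintf(dst, 64, "TID:0x%llx QID:0x%llx", taskId, queryId);
}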

@@ -2008,7 +2009,11 @@ void qStreamCloseTsdbReader(void* task) {
}

static void extractTableList(SArray* pList, const SOperatorInfo* pOperator) {
  if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
  if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
    SStreamScanInfo* pScanInfo = pOperator->info;
    STableScanInfo*  pTableScanInfo = pScanInfo->pTableScanOp->info;
    taosArrayPush(pList, &pTableScanInfo->base.pTableListInfo);
  } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
    STableScanInfo* pScanInfo = pOperator->info;
    taosArrayPush(pList, &pScanInfo->base.pTableListInfo);
  } else {

@@ -1637,7 +1637,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
  if (pTaskInfo->streamInfo.submit.msgStr != NULL) {
    if (pInfo->tqReader->msg2.msgStr == NULL) {
      SPackedData submit = pTaskInfo->streamInfo.submit;
      if (tqReaderSetSubmitReq2(pInfo->tqReader, submit.msgStr, submit.msgLen, submit.ver) < 0) {
      if (tqReaderSetSubmitMsg(pInfo->tqReader, submit.msgStr, submit.msgLen, submit.ver) < 0) {
        qError("submit msg messed up when initing stream submit block %p", submit.msgStr);
        return NULL;
      }

@@ -1646,7 +1646,7 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
    blockDataCleanup(pInfo->pRes);
    SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;

    while (tqNextDataBlock2(pInfo->tqReader)) {
    while (tqNextDataBlock(pInfo->tqReader)) {
      SSDataBlock block = {0};

      int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);

@@ -1812,7 +1812,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {

      /*resetTableScanInfo(pTSInfo, pWin);*/
      tsdbReaderClose(pTSInfo->base.dataReader);
      qDebug("4");

      pTSInfo->base.dataReader = NULL;
      pInfo->pTableScanOp->status = OP_OPENED;

@@ -1895,7 +1894,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
      pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
      STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
      tsdbReaderClose(pTSInfo->base.dataReader);
      qDebug("5");

      pTSInfo->base.dataReader = NULL;

@@ -1922,6 +1920,7 @@ FETCH_NEXT_BLOCK:
        if (pBlock->info.parTbName[0]) {
          streamStatePutParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, pBlock->info.parTbName);
        }

        // TODO move into scan
        pBlock->info.calWin.skey = INT64_MIN;
        pBlock->info.calWin.ekey = INT64_MAX;

@@ -2064,7 +2063,7 @@ FETCH_NEXT_BLOCK:

      int32_t      current = pInfo->validBlockIndex++;
      SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current);
      if (tqReaderSetSubmitReq2(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
      if (tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
        qError("submit msg messed up when initing stream submit block %p, current %d, total %d", pSubmit, current,
               totBlockNum);
        continue;

@@ -2073,7 +2072,7 @@ FETCH_NEXT_BLOCK:

      blockDataCleanup(pInfo->pRes);

      while (tqNextDataBlock2(pInfo->tqReader)) {
      while (tqNextDataBlock(pInfo->tqReader)) {
        SSDataBlock block = {0};

        int32_t code = tqRetrieveDataBlock2(&block, pInfo->tqReader, NULL);

@@ -2333,9 +2333,15 @@ static int32_t getNextQualifiedFinalWindow(SInterval* pInterval, STimeWindow* pN
  return startPos;
}

static void setStreamDataVersion(SExecTaskInfo* pTaskInfo, int64_t version, int64_t ckId) {
  pTaskInfo->streamInfo.dataVersion = version;
  pTaskInfo->streamInfo.checkPointId = ckId;
}

static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t groupId,
                                    SSHashObj* pUpdatedMap) {
  SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
  pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);

  SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
  SExecTaskInfo*  pTaskInfo = pOperatorInfo->pTaskInfo;

@@ -2432,10 +2438,8 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
    updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
    applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
                                    pSDataBlock->info.rows, numOfOutput);
    SWinKey key = {
        .ts = nextWin.skey,
        .groupId = groupId,
    };

    SWinKey key = { .ts = nextWin.skey, .groupId = groupId };
    saveOutputBuf(pInfo->pState, &key, pResult, pInfo->aggSup.resultRowSize);
    releaseOutputBuf(pInfo->pState, &key, pResult);
    if (pInfo->delKey.ts > key.ts) {

@@ -2503,6 +2507,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
      clearFunctionContext(&pOperator->exprSupp);
      // semi interval operator clear disk buffer
      clearStreamIntervalOperator(pInfo);
      setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
      qDebug("===stream===clear semi operator");
    } else {
      deleteIntervalDiscBuf(pInfo->pState, pInfo->pPullDataMap, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark,

@@ -2776,6 +2781,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
  pInfo->numOfDatapack = 0;
  pInfo->pUpdated = NULL;
  pInfo->pUpdatedMap = NULL;
  pInfo->dataVersion = 0;

  pOperator->operatorType = pPhyNode->type;
  pOperator->blocking = true;

@@ -3126,6 +3132,8 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
  int32_t rows = pSDataBlock->info.rows;
  int32_t winRows = 0;

  pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);

  SColumnInfoData* pStartTsCol = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
  TSKEY*           startTsCols = (int64_t*)pStartTsCol->pData;
  SColumnInfoData* pEndTsCol = NULL;

@@ -3589,6 +3597,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
  pInfo->ignoreExpiredDataSaved = false;
  pInfo->pUpdated = NULL;
  pInfo->pStUpdated = NULL;
  pInfo->dataVersion = 0;

  setOperatorInfo(pOperator, "StreamSessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
                  OP_NOT_OPENED, pInfo, pTaskInfo);

@@ -3899,6 +3908,9 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
  TSKEY*      tsCols = NULL;
  SResultRow* pResult = NULL;
  int32_t     winRows = 0;

  pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);

  if (pSDataBlock->pDataBlock != NULL) {
    SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
    tsCols = (int64_t*)pColDataInfo->pData;

@@ -4115,6 +4127,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
  pInfo->ignoreExpiredDataSaved = false;
  pInfo->pUpdated = NULL;
  pInfo->pSeUpdated = NULL;
  pInfo->dataVersion = 0;

  setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
                  pInfo, pTaskInfo);

@@ -4750,6 +4763,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
                          &pInfo->delKey);
    setOperatorCompleted(pOperator);
    streamStateCommit(pTaskInfo->streamInfo.pState);
    setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
    return NULL;
  }

@@ -4771,6 +4785,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
      pInfo->numOfDatapack = 0;
      break;
    }

    pInfo->numOfDatapack++;
    printDataBlock(pBlock, "single interval recv");

@@ -1933,14 +1933,35 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l
}

static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
  if (1 != LIST_LENGTH(pFunc->pParameterList)) {
  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
  int16_t resType = TSDB_DATA_TYPE_BIGINT;

  if (1 != numOfParams && 2 != numOfParams) {
    return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
  }

  if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
  uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
  if (!IS_STR_DATA_TYPE(para1Type)) {
    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
  }

  if (2 == numOfParams) {
    uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
    if (!IS_INTEGER_TYPE(para2Type)) {
      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
    }

    SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
    if (pValue->datum.i == 1) {
      resType = TSDB_DATA_TYPE_TIMESTAMP;
    } else if (pValue->datum.i == 0) {
      resType = TSDB_DATA_TYPE_BIGINT;
    } else {
      return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                             "TO_UNIXTIMESTAMP function second parameter should be 0/1");
    }
  }

  // add database precision as param
  uint8_t dbPrec = pFunc->node.resType.precision;
  int32_t code = addDbPrecisonParam(&pFunc->pParameterList, dbPrec);

@@ -1948,7 +1969,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int
    return code;
  }

  pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
  pFunc->node.resType = (SDataType){.bytes = tDataTypes[resType].bytes, .type = resType};
  return TSDB_CODE_SUCCESS;
}
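/*
 * Editor's note: a standalone restatement of the new result-type rule above
 * (the helper and its int encoding of BIGINT/TIMESTAMP are hypothetical):
 *   TO_UNIXTIMESTAMP(expr)      -> BIGINT
 *   TO_UNIXTIMESTAMP(expr, 0)   -> BIGINT
 *   TO_UNIXTIMESTAMP(expr, 1)   -> TIMESTAMP
 *   any other flag value        -> error
 */
static int demoResolveReturnType(int hasFlag, long long flag, int *isTimestamp) {
  if (!hasFlag || flag == 0) { *isTimestamp = 0; return 0; }  // default: BIGINT
  if (flag == 1) { *isTimestamp = 1; return 0; }              // opt in to TIMESTAMP
  return -1;  // "second parameter should be 0/1"
}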

@@ -3353,7 +3353,7 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
      goto _spread_over;
    }
    double tmin = 0.0, tmax = 0.0;
    if (IS_SIGNED_NUMERIC_TYPE(type)) {
    if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) {
      tmin = (double)GET_INT64_VAL(&pAgg->min);
      tmax = (double)GET_INT64_VAL(&pAgg->max);
    } else if (IS_FLOAT_TYPE(type)) {

@@ -174,6 +174,8 @@ bool fmIsSelectFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC

bool fmIsTimelineFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_TIMELINE_FUNC); }

bool fmIsDateTimeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_DATETIME_FUNC); }

bool fmIsPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_PSEUDO_COLUMN_FUNC); }

bool fmIsScanPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SCAN_PC_FUNC); }

@@ -184,6 +186,7 @@ bool fmIsWindowClauseFunc(int32_t funcId) { return fmIsAggFunc(funcId) || fmIsWi

bool fmIsIndefiniteRowsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_INDEFINITE_ROWS_FUNC); }

bool fmIsSpecialDataRequiredFunc(int32_t funcId) {
  return isSpecificClassifyFunc(funcId, FUNC_MGT_SPECIAL_DATA_REQUIRED);
}

@@ -343,7 +343,7 @@ typedef struct SUdfcFuncStub {
  char udfName[TSDB_FUNC_NAME_LEN + 1];
  UdfcFuncHandle handle;
  int32_t refCount;
  int64_t lastRefTime;
  int64_t createTime;
} SUdfcFuncStub;

typedef struct SUdfcProxy {

@@ -363,6 +363,7 @@ typedef struct SUdfcProxy {

  uv_mutex_t udfStubsMutex;
  SArray *udfStubs;         // SUdfcFuncStub
  SArray *expiredUdfStubs;  // SUdfcFuncStub

  uv_mutex_t udfcUvMutex;
  int8_t initialized;

@@ -959,7 +960,7 @@ int32_t udfcOpen();
int32_t udfcClose();

int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle);
void releaseUdfFuncHandle(char *udfName);
void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle);
int32_t cleanUpUdfs();

bool udfAggGetEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv);

@@ -967,6 +968,8 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pRes
int32_t udfAggProcess(struct SqlFunctionCtx *pCtx);
int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock);

void cleanupNotExpiredUdfs();
void cleanupExpiredUdfs();

int compareUdfcFuncSub(const void *elem1, const void *elem2) {
  SUdfcFuncStub *stub1 = (SUdfcFuncStub *)elem1;
  SUdfcFuncStub *stub2 = (SUdfcFuncStub *)elem2;

@@ -982,16 +985,24 @@ int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
  if (stubIndex != -1) {
    SUdfcFuncStub *foundStub = taosArrayGet(gUdfcProxy.udfStubs, stubIndex);
    UdfcFuncHandle handle = foundStub->handle;
    if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
      *pHandle = foundStub->handle;
      ++foundStub->refCount;
      foundStub->lastRefTime = taosGetTimestampUs();
      uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
      return 0;
    int64_t currUs = taosGetTimestampUs();
    bool expired = (currUs - foundStub->createTime) >= 10 * 1000 * 1000;
    if (!expired) {
      if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
        *pHandle = foundStub->handle;
        ++foundStub->refCount;
        uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
        return 0;
      } else {
        fnInfo("udf invalid handle for %s, refCount: %d, create time: %" PRId64 ". remove it from cache", udfName,
               foundStub->refCount, foundStub->createTime);
        taosArrayRemove(gUdfcProxy.udfStubs, stubIndex);
      }
    } else {
      fnInfo("invalid handle for %s, refCount: %d, last ref time: %" PRId64 ". remove it from cache", udfName,
             foundStub->refCount, foundStub->lastRefTime);
      fnInfo("udf handle expired for %s, will setup udf. move it to expired list", udfName);
      taosArrayRemove(gUdfcProxy.udfStubs, stubIndex);
      taosArrayPush(gUdfcProxy.expiredUdfStubs, foundStub);
      taosArraySort(gUdfcProxy.expiredUdfStubs, compareUdfcFuncSub);
    }
  }
  *pHandle = NULL;
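/*
 * Editor's note: the branch above retires cached UDF stubs on a time-to-live basis;
 * from the constant used, a stub older than 10 seconds is moved to the expired list
 * and re-setup on next use. Standalone restatement of the check (helper name is
 * hypothetical; timestamps are in microseconds, as in taosGetTimestampUs):
 */
#include <stdbool.h>
#include <stdint.h>

static bool demoStubExpired(int64_t nowUs, int64_t createUs) {
  return (nowUs - createUs) >= 10 * 1000 * 1000;  // 10 seconds TTL
}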
|
||||
|
@ -1001,7 +1012,7 @@ int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
|
|||
strncpy(stub.udfName, udfName, TSDB_FUNC_NAME_LEN);
|
||||
stub.handle = *pHandle;
|
||||
++stub.refCount;
|
||||
stub.lastRefTime = taosGetTimestampUs();
|
||||
stub.createTime = taosGetTimestampUs();
|
||||
taosArrayPush(gUdfcProxy.udfStubs, &stub);
|
||||
taosArraySort(gUdfcProxy.udfStubs, compareUdfcFuncSub);
|
||||
} else {
|
||||
|
@ -1012,21 +1023,75 @@ int32_t acquireUdfFuncHandle(char *udfName, UdfcFuncHandle *pHandle) {
|
|||
return code;
|
||||
}
|
||||
|
||||
void releaseUdfFuncHandle(char *udfName) {
|
||||
void releaseUdfFuncHandle(char *udfName, UdfcFuncHandle handle) {
|
||||
uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
|
||||
SUdfcFuncStub key = {0};
|
||||
strncpy(key.udfName, udfName, TSDB_FUNC_NAME_LEN);
|
||||
SUdfcFuncStub *foundStub = taosArraySearch(gUdfcProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ);
|
||||
if (!foundStub) {
|
||||
SUdfcFuncStub *expiredStub = taosArraySearch(gUdfcProxy.expiredUdfStubs, &key, compareUdfcFuncSub, TD_EQ);
|
||||
if (!foundStub && !expiredStub) {
|
||||
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
|
||||
return;
|
||||
}
|
||||
if (foundStub->refCount > 0) {
|
||||
if (foundStub != NULL && foundStub->handle == handle && foundStub->refCount > 0) {
|
||||
--foundStub->refCount;
|
||||
}
|
||||
if (expiredStub != NULL && expiredStub->handle == handle && expiredStub->refCount > 0) {
|
||||
--expiredStub->refCount;
|
||||
}
|
||||
uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
|
||||
}
|
||||
|
||||
void cleanupExpiredUdfs() {
|
||||
int32_t i = 0;
|
||||
SArray *expiredUdfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
|
||||
while (i < taosArrayGetSize(gUdfcProxy.expiredUdfStubs)) {
|
||||
SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.expiredUdfStubs, i);
|
||||
if (stub->refCount == 0) {
|
||||
fnInfo("tear down udf. expired. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
|
||||
doTeardownUdf(stub->handle);
|
||||
} else {
|
||||
fnInfo("udf still in use. expired. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", stub->udfName,
|
||||
stub->refCount, stub->createTime, stub->handle);
|
||||
UdfcFuncHandle handle = stub->handle;
|
||||
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
|
||||
taosArrayPush(expiredUdfStubs, stub);
|
||||
} else {
|
||||
fnInfo("udf invalid handle for %s, expired. refCount: %d, create time: %" PRId64 ". remove it from cache",
|
||||
stub->udfName, stub->refCount, stub->createTime);
|
||||
}
|
||||
}
|
||||
++i;
|
||||
}
|
||||
taosArrayDestroy(gUdfcProxy.expiredUdfStubs);
|
||||
gUdfcProxy.expiredUdfStubs = expiredUdfStubs;
|
||||
}
|
||||
|
||||
void cleanupNotExpiredUdfs() {
|
||||
SArray *udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
|
||||
int32_t i = 0;
|
||||
while (i < taosArrayGetSize(gUdfcProxy.udfStubs)) {
|
||||
SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.udfStubs, i);
|
||||
if (stub->refCount == 0) {
|
||||
fnInfo("tear down udf. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
|
||||
doTeardownUdf(stub->handle);
|
||||
} else {
|
||||
fnInfo("udf still in use. udf name: %s, ref count: %d, create time: %" PRId64 ", handle: %p", stub->udfName,
|
||||
stub->refCount, stub->createTime, stub->handle);
|
||||
UdfcFuncHandle handle = stub->handle;
|
||||
if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
|
||||
taosArrayPush(udfStubs, stub);
|
||||
} else {
|
||||
fnInfo("udf invalid handle for %s, refCount: %d, create time: %" PRId64 ". remove it from cache",
|
||||
stub->udfName, stub->refCount, stub->createTime);
|
||||
}
|
||||
}
|
||||
++i;
|
||||
}
|
||||
taosArrayDestroy(gUdfcProxy.udfStubs);
|
||||
gUdfcProxy.udfStubs = udfStubs;
|
||||
}

int32_t cleanUpUdfs() {
  int8_t initialized = atomic_load_8(&gUdfcProxy.initialized);
  if (!initialized) {

@ -1034,32 +1099,15 @@ int32_t cleanUpUdfs() {
  }

  uv_mutex_lock(&gUdfcProxy.udfStubsMutex);
  if (gUdfcProxy.udfStubs == NULL || taosArrayGetSize(gUdfcProxy.udfStubs) == 0) {
  if ((gUdfcProxy.udfStubs == NULL || taosArrayGetSize(gUdfcProxy.udfStubs) == 0) &&
      (gUdfcProxy.expiredUdfStubs == NULL || taosArrayGetSize(gUdfcProxy.expiredUdfStubs) == 0)) {
    uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
    return TSDB_CODE_SUCCESS;
  }
  SArray *udfStubs = taosArrayInit(16, sizeof(SUdfcFuncStub));
  int32_t i = 0;
  while (i < taosArrayGetSize(gUdfcProxy.udfStubs)) {
    SUdfcFuncStub *stub = taosArrayGet(gUdfcProxy.udfStubs, i);
    if (stub->refCount == 0) {
      fnInfo("tear down udf. udf name: %s, handle: %p, ref count: %d", stub->udfName, stub->handle, stub->refCount);
      doTeardownUdf(stub->handle);
    } else {
      fnInfo("udf still in use. udf name: %s, ref count: %d, last ref time: %" PRId64 ", handle: %p", stub->udfName,
             stub->refCount, stub->lastRefTime, stub->handle);
      UdfcFuncHandle handle = stub->handle;
      if (handle != NULL && ((SUdfcUvSession *)handle)->udfUvPipe != NULL) {
        taosArrayPush(udfStubs, stub);
      } else {
        fnInfo("udf invalid handle for %s, refCount: %d, last ref time: %" PRId64 ". remove it from cache",
               stub->udfName, stub->refCount, stub->lastRefTime);
      }
    }
    ++i;
  }
  taosArrayDestroy(gUdfcProxy.udfStubs);
  gUdfcProxy.udfStubs = udfStubs;

  cleanupNotExpiredUdfs();
  cleanupExpiredUdfs();

  uv_mutex_unlock(&gUdfcProxy.udfStubsMutex);
  return 0;
}
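The refactored `cleanUpUdfs` now short-circuits only when both the live and the expired stub lists are empty, then runs the two helpers under the same lock. A minimal sketch of that orchestration, using a pthread mutex and plain counters as hypothetical stand-ins for the libuv mutex and the two `taosArray` lists:

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for gUdfcProxy's mutex and its two stub lists. */
static pthread_mutex_t stubsMutex = PTHREAD_MUTEX_INITIALIZER;
static size_t nLive = 0, nExpired = 0;

static void cleanupNotExpired(void) { /* compact the live-stub list */ }
static void cleanupExpired(void) { /* compact the expired-stub list */ }

static int cleanUp(void) {
  pthread_mutex_lock(&stubsMutex);
  /* Early out only when BOTH lists are empty; checking just the live list
   * (as the code did before this change) would skip expired stubs. */
  if (nLive == 0 && nExpired == 0) {
    pthread_mutex_unlock(&stubsMutex);
    return 0;
  }
  cleanupNotExpired();
  cleanupExpired();
  pthread_mutex_unlock(&stubsMutex);
  return 0;
}

int main(void) {
  printf("%d\n", cleanUp());
  return 0;
}
```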

@ -1075,7 +1123,7 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,
  code = doCallUdfScalarFunc(handle, input, numOfCols, output);
  if (code != TSDB_CODE_SUCCESS) {
    fnError("udfc scalar function execution failure");
    releaseUdfFuncHandle(udfName);
    releaseUdfFuncHandle(udfName, handle);
    return code;
  }

@ -1089,7 +1137,7 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,
      code = TSDB_CODE_UDF_INVALID_OUTPUT_TYPE;
    }
  }
  releaseUdfFuncHandle(udfName);
  releaseUdfFuncHandle(udfName, handle);
  return code;
}

@ -1122,7 +1170,7 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResult
  SUdfInterBuf buf = {0};
  if ((udfCode = doCallUdfAggInit(handle, &buf)) != 0) {
    fnError("udfAggInit error. step doCallUdfAggInit. udf code: %d", udfCode);
    releaseUdfFuncHandle(pCtx->udfName);
    releaseUdfFuncHandle(pCtx->udfName, handle);
    return false;
  }
  if (buf.bufLen <= session->bufSize) {

@ -1131,10 +1179,10 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResult
    udfRes->interResNum = buf.numOfResult;
  } else {
    fnError("udfc inter buf size %d is greater than function bufSize %d", buf.bufLen, session->bufSize);
    releaseUdfFuncHandle(pCtx->udfName);
    releaseUdfFuncHandle(pCtx->udfName, handle);
    return false;
  }
  releaseUdfFuncHandle(pCtx->udfName);
  releaseUdfFuncHandle(pCtx->udfName, handle);
  freeUdfInterBuf(&buf);
  return true;
}

@ -1191,7 +1239,7 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) {
  taosArrayDestroy(pTempBlock->pDataBlock);
  taosMemoryFree(pTempBlock);

  releaseUdfFuncHandle(pCtx->udfName);
  releaseUdfFuncHandle(pCtx->udfName, handle);
  freeUdfInterBuf(&newState);
  return udfCode;
}

@ -1236,7 +1284,7 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock) {
  freeUdfInterBuf(&resultBuf);

  int32_t numOfResults = functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf);
  releaseUdfFuncHandle(pCtx->udfName);
  releaseUdfFuncHandle(pCtx->udfName, handle);
  return udfCallCode == 0 ? numOfResults : udfCallCode;
}

@ -1663,6 +1711,7 @@ int32_t udfcOpen() {
  uv_barrier_wait(&proxy->initBarrier);
  uv_mutex_init(&proxy->udfStubsMutex);
  proxy->udfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
  proxy->expiredUdfStubs = taosArrayInit(8, sizeof(SUdfcFuncStub));
  uv_mutex_init(&proxy->udfcUvMutex);
  fnInfo("udfc initialized");
  return 0;
}

@ -1679,6 +1728,7 @@ int32_t udfcClose() {
  uv_thread_join(&udfc->loopThread);
  uv_mutex_destroy(&udfc->taskQueueMutex);
  uv_barrier_destroy(&udfc->initBarrier);
  taosArrayDestroy(udfc->expiredUdfStubs);
  taosArrayDestroy(udfc->udfStubs);
  uv_mutex_destroy(&udfc->udfStubsMutex);
  uv_mutex_destroy(&udfc->udfcUvMutex);
@ -591,7 +591,7 @@ SUdf *udfdNewUdf(const char *udfName) {

SUdf *udfdGetOrCreateUdf(const char *udfName) {
  uv_mutex_lock(&global.udfsMutex);
  SUdf **pUdfHash = taosHashGet(global.udfsHash, udfName, strlen(udfName));
  int64_t currTime = taosGetTimestampSec();
  int64_t currTime = taosGetTimestampMs();
  bool expired = false;
  if (pUdfHash) {
    expired = currTime - (*pUdfHash)->lastFetchTime > 10 * 1000;  // 10s
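The unit fix matters here: the `10 * 1000` threshold is in milliseconds, so reading the clock in seconds (the old `taosGetTimestampSec`) stretched the intended 10s window to roughly 2.8 hours. A minimal sketch of the corrected check, assuming `lastFetchTime` is likewise recorded in milliseconds; `nowMs` is a coarse stand-in for `taosGetTimestampMs`:

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Coarse stand-in for taosGetTimestampMs(): wall clock in milliseconds. */
static long long nowMs(void) { return (long long)time(NULL) * 1000; }

/* Both sides of the subtraction must be in ms for the 10 * 1000 (10s)
 * threshold to mean what it says. */
static bool isExpired(long long lastFetchMs) {
  return nowMs() - lastFetchMs > 10 * 1000;
}

int main(void) {
  long long last = nowMs() - 11000;  /* fetched 11 seconds ago */
  printf("expired: %d\n", isExpired(last));  /* prints 1 */
  return 0;
}
```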

@ -688,6 +688,8 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
      output.colMeta.type = udf->outputType;
      output.colMeta.precision = 0;
      output.colMeta.scale = 0;
      udfColEnsureCapacity(&output, call->block.info.rows);

      SUdfDataBlock input = {0};
      convertDataBlockToUdfDataBlock(&call->block, &input);
      code = udf->scriptPlugin->udfScalarProcFunc(&input, &output, udf->scriptUdfCtx);
@ -136,6 +136,7 @@ static bool logicConditionNodeEqual(const SLogicConditionNode* a, const SLogicCo

static bool functionNodeEqual(const SFunctionNode* a, const SFunctionNode* b) {
  COMPARE_SCALAR_FIELD(funcId);
  COMPARE_STRING_FIELD(functionName);
  COMPARE_NODE_LIST_FIELD(pParameterList);
  return true;
}
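The added `COMPARE_STRING_FIELD(functionName)` means two function nodes no longer compare equal on `funcId` and parameters alone. A minimal sketch of the early-return comparison style follows; the real `COMPARE_*_FIELD` macros live elsewhere in TDengine's node code, so these expansions are assumptions that merely mimic the obvious shape:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical expansions of the comparison macros used above. */
#define COMPARE_SCALAR_FIELD(f) \
  do { if (a->f != b->f) return false; } while (0)
#define COMPARE_STRING_FIELD(f) \
  do { if (strcmp(a->f, b->f) != 0) return false; } while (0)

typedef struct {
  int  funcId;
  char functionName[64];
} Func;

/* funcId alone can collide; the name check closes that gap. */
static bool functionEqual(const Func *a, const Func *b) {
  COMPARE_SCALAR_FIELD(funcId);
  COMPARE_STRING_FIELD(functionName);
  return true;
}

int main(void) {
  Func x = {1, "to_unixtimestamp"}, y = {1, "to_char"};
  printf("%d\n", functionEqual(&x, &y));  /* 0 */
  return 0;
}
```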

@ -1047,8 +1047,8 @@ sliding_opt(A) ::= SLIDING NK_LP duration_literal(B) NK_RP.

fill_opt(A) ::= . { A = NULL; }
fill_opt(A) ::= FILL NK_LP fill_mode(B) NK_RP. { A = createFillNode(pCxt, B, NULL); }
fill_opt(A) ::= FILL NK_LP VALUE NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE_F NK_COMMA literal_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE NK_COMMA expression_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, B)); }
fill_opt(A) ::= FILL NK_LP VALUE_F NK_COMMA expression_list(B) NK_RP. { A = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, B)); }

%type fill_mode { EFillMode }
%destructor fill_mode { }
@ -1310,7 +1310,8 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
}

static EDealRes haveVectorFunction(SNode* pNode, void* pContext) {
  if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) || isInterpPseudoColumnFunc(pNode)) {
  if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) ||
      isInterpPseudoColumnFunc(pNode)) {
    *((bool*)pContext) = true;
    return DEAL_RES_END;
  }

@ -1569,6 +1570,21 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu
  return TSDB_CODE_SUCCESS;
}

static int32_t translateDateTimeFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
  if (!fmIsDateTimeFunc(pFunc->funcId)) {
    return TSDB_CODE_SUCCESS;
  }

  if (!isSelectStmt(pCxt->pCurrStmt)) {
    return TSDB_CODE_SUCCESS;
  }

  SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
  pFunc->node.resType.precision = pSelect->precision;

  return TSDB_CODE_SUCCESS;
}
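The new `translateDateTimeFunc` stamps a date/time function's result type with the precision the enclosing SELECT resolved from its database, since a function such as NOW() carries no precision of its own. A minimal sketch of that propagation, with deliberately tiny stand-in structs (the real `SFunctionNode`/`SSelectStmt` carry far more):

```c
#include <stdio.h>

/* Illustrative shapes only, not the real parser node layouts. */
typedef struct { int precision; } ResType;
typedef struct { ResType resType; } FuncNode;
typedef struct { int precision; } SelectStmt;

/* Copy the statement's resolved timestamp precision onto the function's
 * result type, as translateDateTimeFunc does above. */
static void propagatePrecision(FuncNode *pFunc, const SelectStmt *pSelect) {
  pFunc->resType.precision = pSelect->precision;
}

int main(void) {
  SelectStmt sel = {2};  /* e.g. a nanosecond-precision database */
  FuncNode now = {{0}};
  propagatePrecision(&now, &sel);
  printf("precision: %d\n", now.resType.precision);  /* 2 */
  return 0;
}
```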

static bool hasFillClause(SNode* pCurrStmt) {
  if (!isSelectStmt(pCurrStmt)) {
    return false;

@ -1708,6 +1724,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
  SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt;
  pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId);
  pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId);

  if (fmIsIndefiniteRowsFunc(pFunc->funcId)) {
    pSelect->hasIndefiniteRowsFunc = true;
    pSelect->returnRows = fmGetFuncReturnRows(pFunc);
@ -1856,6 +1873,9 @@ static int32_t translateNormalFunction(STranslateContext* pCxt, SFunctionNode* p
  if (TSDB_CODE_SUCCESS == code) {
    code = translateTimelineFunc(pCxt, pFunc);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = translateDateTimeFunc(pCxt, pFunc);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = translateBlockDistFunc(pCxt, pFunc);
  }

@ -2911,6 +2931,11 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
  if (TSDB_CODE_SUCCESS == code) {
    code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
  }
  if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
    code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant");
  } else if (TSDB_CODE_SUCCESS != code) {
    code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
  }
  return code;
}

@ -2927,9 +2952,9 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
    if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
    }
    if (TSDB_CODE_SUCCESS !=
        convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo)) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
    int32_t code = convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo);
    if (TSDB_CODE_SUCCESS != code) {
      return code;
    }
    ++fillNo;
  }
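The point of this pair of hunks: `convertFillValue` now produces its own specific diagnostic (including the "just a constant" case), and `checkFillValues` forwards that code instead of overwriting every failure with a generic "Filled data type mismatch". A minimal sketch of the propagate-don't-relabel pattern, with hypothetical error codes standing in for the `TSDB_CODE_PAR_*` constants:

```c
#include <stdio.h>

enum { OK = 0, ERR_NOT_CONSTANT = 1, ERR_TYPE_MISMATCH = 2 };

/* Stand-in for convertFillValue: it now reports its own failure reason. */
static int convertValue(int v) {
  if (v < 0) return ERR_NOT_CONSTANT;
  if (v > 100) return ERR_TYPE_MISMATCH;
  return OK;
}

/* Before the change the caller re-labelled every failure as a type mismatch;
 * now it forwards whatever the callee diagnosed. */
static int checkValues(const int *vals, int n) {
  for (int i = 0; i < n; ++i) {
    int code = convertValue(vals[i]);
    if (code != OK) return code;  /* propagate, don't overwrite */
  }
  return OK;
}

int main(void) {
  int vals[] = {1, -5, 200};
  printf("%d\n", checkValues(vals, 3));  /* 1: the real cause survives */
  return 0;
}
```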

@ -3010,12 +3035,13 @@ static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect
}

static int32_t translateHaving(STranslateContext* pCxt, SSelectStmt* pSelect) {
  if (NULL == pSelect->pGroupByList && NULL != pSelect->pHaving) {
  if (NULL == pSelect->pGroupByList && NULL == pSelect->pPartitionByList && NULL == pSelect->pWindow &&
      NULL != pSelect->pHaving) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_LACK_EXPRESSION);
  }
  pCxt->currClause = SQL_CLAUSE_HAVING;
  int32_t code = translateExpr(pCxt, &pSelect->pHaving);
  if (TSDB_CODE_SUCCESS == code) {
  if (TSDB_CODE_SUCCESS == code && (NULL != pSelect->pGroupByList || NULL != pSelect->pWindow)) {
    code = checkExprForGroupBy(pCxt, &pSelect->pHaving);
  }
  return code;
@ -5179,26 +5205,32 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
  }

  if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "the only tag cannot be dropped");
  }

  int32_t tagsLen = 0;
  for (int32_t i = 0; i < pTableMeta->tableInfo.numOfTags; ++i) {
    tagsLen += pTagsSchema[i].bytes;
  }

  if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
      TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) {
      TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType || TSDB_ALTER_TABLE_DROP_COLUMN == pStmt->alterType ||
      TSDB_ALTER_TABLE_DROP_TAG == pStmt->alterType) {
    if (TSDB_SUPER_TABLE != pTableMeta->tableType) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Table is not super table");
    }

    const SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName);
    if (NULL == pSchema) {
      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName);
    } else if (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type ||
               pSchema->bytes >= calcTypeBytes(pStmt->dataType)) {
      return generateSyntaxErrMsg(
          &pCxt->msgBuf,
          (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType || TSDB_ALTER_TABLE_DROP_COLUMN == pStmt->alterType)
              ? TSDB_CODE_PAR_INVALID_COLUMN
              : TSDB_CODE_PAR_INVALID_TAG_NAME,
          pStmt->colName);
    }

    if ((TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
         TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) &&
        (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type ||
         pSchema->bytes >= calcTypeBytes(pStmt->dataType))) {
      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
    }

@ -5243,6 +5275,10 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
    }
  }

  if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "the only tag cannot be dropped");
  }

  return TSDB_CODE_SUCCESS;
}

@ -5715,6 +5751,14 @@ static int32_t translateDropCGroup(STranslateContext* pCxt, SDropCGroupStmt* pSt

static int32_t translateAlterLocal(STranslateContext* pCxt, SAlterLocalStmt* pStmt) {
  // The statement is executed directly on the client without constructing a message.
  if ('\0' != pStmt->value[0]) {
    return TSDB_CODE_SUCCESS;
  }
  char* p = strchr(pStmt->config, ' ');
  if (NULL != p) {
    *p = 0;
    strcpy(pStmt->value, p + 1);
  }
  return TSDB_CODE_SUCCESS;
}
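`translateAlterLocal` now splits a combined "key value" string on its first space when no explicit value was supplied. A minimal standalone sketch of that split; the function name, buffer sizes, and the `snprintf` bound (the original uses `strcpy`) are illustrative choices:

```c
#include <stdio.h>
#include <string.h>

/* Split "key value" at the first space: terminate the key in place and
 * copy the remainder out as the value, mirroring the strchr logic above. */
static void splitConfig(char *config, char *value, size_t valueSize) {
  if (value[0] != '\0') return;  /* an explicitly supplied value wins */
  char *p = strchr(config, ' ');
  if (p != NULL) {
    *p = '\0';
    snprintf(value, valueSize, "%s", p + 1);
  }
}

int main(void) {
  char config[64] = "debugFlag 135";
  char value[64] = "";
  splitConfig(config, value, sizeof(value));
  printf("key=%s value=%s\n", config, value);  /* key=debugFlag value=135 */
  return 0;
}
```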

@ -8293,10 +8337,6 @@ static void destoryAlterTbReq(SVAlterTbReq* pReq) {

static int32_t rewriteAlterTableImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
                                     SQuery* pQuery) {
  if (getNumOfTags(pTableMeta) == 1 && pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "the only tag cannot be dropped");
  }

  if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
    return TSDB_CODE_SUCCESS;
  } else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) {

File diff suppressed because it is too large.
@ -740,6 +740,13 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm
    code = createColumnByRewriteExprs(pWindow->pFuncs, &pWindow->node.pTargets);
  }

  if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pHaving) {
    pWindow->node.pConditions = nodesCloneNode(pSelect->pHaving);
    if (NULL == pWindow->node.pConditions) {
      code = TSDB_CODE_OUT_OF_MEMORY;
    }
  }

  pSelect->hasAggFuncs = false;

  if (TSDB_CODE_SUCCESS == code) {

@ -1132,6 +1139,14 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
    }
  }

  if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pHaving && !pSelect->hasAggFuncs && NULL == pSelect->pGroupByList &&
      NULL == pSelect->pWindow) {
    pPartition->node.pConditions = nodesCloneNode(pSelect->pHaving);
    if (NULL == pPartition->node.pConditions) {
      code = TSDB_CODE_OUT_OF_MEMORY;
    }
  }

  if (TSDB_CODE_SUCCESS == code) {
    *pLogicNode = (SLogicNode*)pPartition;
  } else {

Some files were not shown because too many files have changed in this diff.