Merge branch '3.0' of github.com:taosdata/TDengine into szhou/feature/user-tags
This commit is contained in:
commit
de1ca5b68f
|
@ -21,4 +21,4 @@
|
||||||
url = https://github.com/taosdata/taosadapter.git
|
url = https://github.com/taosdata/taosadapter.git
|
||||||
[submodule "tools/taosws-rs"]
|
[submodule "tools/taosws-rs"]
|
||||||
path = tools/taosws-rs
|
path = tools/taosws-rs
|
||||||
url = https://github.com/taosdata/taosws-rs.git
|
url = https://github.com/taosdata/taosws-rs
|
||||||
|
|
|
@ -6,9 +6,9 @@ title: Deployment
|
||||||
|
|
||||||
### Step 1
|
### Step 1
|
||||||
|
|
||||||
The FQDN of all hosts must be setup properly. For e.g. FQDNs may have to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host. For e.g. you can do this by using the `ping` command.
|
The FQDN of all hosts must be set up properly. All FQDNs need to be configured in the /etc/hosts file on each host. You must confirm that each FQDN can be accessed from any other host; you can verify this by using the `ping` command.
|
||||||
|
|
||||||
To get the hostname on any host, the command `hostname -f` can be executed. `ping <FQDN>` command can be executed on each host to check whether any other host is accessible from it. If any host is not accessible, the network configuration, like /etc/hosts or DNS configuration, needs to be checked and revised, to make any two hosts accessible to each other.
|
The command `hostname -f` can be executed to get the hostname of any host. The `ping <FQDN>` command can be executed on each host to check whether any other host is reachable from it. If any host is not reachable, the network configuration, such as /etc/hosts or the DNS configuration, needs to be checked and revised to make any two hosts accessible to each other.
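A minimal illustration of these checks, assuming two hosts named h1.taosdata.com and h2.taosdata.com (the host names and IP addresses below are placeholders):

```
# /etc/hosts on every host in the cluster (IP addresses are examples)
192.168.0.1   h1.taosdata.com
192.168.0.2   h2.taosdata.com

# verify the local FQDN
$ hostname -f
h1.taosdata.com

# verify that every other host is reachable by its FQDN
$ ping h2.taosdata.com
```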
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
|
@ -20,7 +20,7 @@ To get the hostname on any host, the command `hostname -f` can be executed. `pin
|
||||||
|
|
||||||
### Step 2
|
### Step 2
|
||||||
|
|
||||||
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/\*` assuming the `dataDir` is configured as `/var/lib/taos`.
|
If any previous version of TDengine has been installed and configured on any host, the installation needs to be removed and the data needs to be cleaned up. For details about uninstalling, please refer to [Install and Uninstall](/operation/pkg-install). To clean up the data, please use `rm -rf /var/lib/taos/*`, assuming the `dataDir` is configured as `/var/lib/taos`.
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
|
@ -54,22 +54,12 @@ serverPort 6030
|
||||||
|
|
||||||
For all the dnodes in a TDengine cluster, the below parameters must be configured exactly the same, any node whose configuration is different from dnodes already in the cluster can't join the cluster.
|
For all the dnodes in a TDengine cluster, the following parameters must be configured identically; any dnode whose configuration differs from the dnodes already in the cluster cannot join it (a sample `taos.cfg` excerpt follows the table).
|
||||||
|
|
||||||
| **#** | **Parameter** | **Definition** |
|
| **#** | **Parameter** | **Definition** |
|
||||||
| ----- | ------------------ | --------------------------------------------------------------------------------- |
|
| ----- | -------------- | ------------------------------------------------------------- |
|
||||||
| 1 | numOfMnodes | The number of management nodes in the cluster |
|
| 1 | statusInterval | The interval at which each dnode reports its status to the mnode |
|
||||||
| 2 | mnodeEqualVnodeNum | The ratio of resource consuming of mnode to vnode |
|
| 2 | timezone | Time zone where the server is located |
|
||||||
| 3 | offlineThreshold | The threshold of dnode offline, once it's reached the dnode is considered as down |
|
| 3 | locale | System locale information and encoding format |
|
||||||
| 4 | statusInterval | The interval by which dnode reports its status to mnode |
|
| 4 | charset | Character set of the system |
|
||||||
| 5 | arbitrator | End point of the arbitrator component in the cluster |
|
|
||||||
| 6 | timezone | Timezone |
|
|
||||||
| 7 | balance | Enable load balance automatically |
|
|
||||||
| 8 | maxTablesPerVnode | Maximum number of tables that can be created in each vnode |
|
|
||||||
| 9 | maxVgroupsPerDb | Maximum number vgroups that can be used by each DB |
|
|
||||||
|
|
||||||
:::note
|
|
||||||
Prior to version 2.0.19.0, besides the above parameters, `locale` and `charset` must also be configured the same for each dnode.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
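As a hedged illustration, the corresponding entries in `taos.cfg` might look like the sketch below. The values shown are examples only; what matters is that they are identical on every dnode in the cluster:

```c
// these parameters must be configured exactly the same on every dnode
statusInterval   1             // interval (in seconds) at which a dnode reports its status to mnode
timezone         UTC-8         // time zone where the server is located
locale           en_US.UTF-8   // system locale
charset          UTF-8         // system character set
```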
## Start Cluster
|
## Start Cluster
|
||||||
|
|
||||||
|
@ -77,19 +67,19 @@ In the following example we assume that first dnode has FQDN h1.taosdata.com and
|
||||||
|
|
||||||
### Start The First DNODE
|
### Start The First DNODE
|
||||||
|
|
||||||
The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
|
Start the first dnode following the instructions in [Get Started](/get-started/). Then launch TDengine CLI `taos` and execute the command `show dnodes`; the output will look similar to the following:
|
||||||
|
|
||||||
```
|
```
|
||||||
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
|
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||||
|
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||||
|
|
||||||
|
Server is Enterprise trial Edition, ver:3.0.0.0 and will never expire.
|
||||||
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
|
|
||||||
|
|
||||||
taos> show dnodes;
|
taos> show dnodes;
|
||||||
id | end_point | vnodes | cores | status | role | create_time |
|
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||||
=====================================================================================
|
============================================================================================================================================
|
||||||
1 | h1.taosdata.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
|
1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
|
||||||
Query OK, 1 row(s) in set (0.006385s)
|
Query OK, 1 rows affected (0.007984s)
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
```
|
```
|
||||||
|
@ -100,7 +90,7 @@ From the above output, it is shown that the end point of the started dnode is "h
|
||||||
|
|
||||||
There are a few steps necessary to add other dnodes in the cluster.
|
There are a few steps necessary to add other dnodes in the cluster.
|
||||||
|
|
||||||
Let's assume we are starting the second dnode with FQDN, h2.taosdata.com. First we make sure the configuration is correct.
|
Let's assume we are starting the second dnode with FQDN h2.taosdata.com. First, we make sure the configuration is correct.
|
||||||
|
|
||||||
```c
|
```c
|
||||||
// firstEp is the end point to connect to when any dnode starts
|
// firstEp is the end point to connect to when any dnode starts
|
||||||
|
@ -114,7 +104,7 @@ serverPort 6030
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Second, we can start `taosd` as instructed in [Get Started](/get-started/).
|
Second, we start `taosd` as instructed in [Get Started](/get-started/).
|
||||||
|
|
||||||
Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the dnode in the cluster. In the command "fqdn:port" should be quoted using double quotes.
|
Then, on the first dnode, i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command to add the end point of the new dnode to the cluster. In the command, "fqdn:port" must be enclosed in double quotes.
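For example, for the second dnode described above (FQDN h2.taosdata.com, default `serverPort` 6030), the statement would be:

```sql
CREATE DNODE "h2.taosdata.com:6030";
```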
|
||||||
|
|
||||||
|
|
|
@ -39,31 +39,25 @@ USE SOME_DATABASE;
|
||||||
SHOW VGROUPS;
|
SHOW VGROUPS;
|
||||||
```
|
```
|
||||||
|
|
||||||
The example output is below:
|
The output is similar to the following:
|
||||||
|
|
||||||
```
|
|
||||||
taos> show dnodes;
|
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
|
||||||
======================================================================================================================================
|
|
||||||
1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
|
||||||
Query OK, 1 row(s) in set (0.008298s)
|
|
||||||
|
|
||||||
taos> use db;
|
taos> use db;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show vgroups;
|
taos> show vgroups;
|
||||||
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
|
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
|
||||||
==========================================================================================
|
==========================================================================================
|
||||||
14 | 38000 | ready | 1 | 1 | leader | 0 |
|
14 | 38000 | ready | 1 | 1 | leader | 0 |
|
||||||
15 | 38000 | ready | 1 | 1 | leader | 0 |
|
15 | 38000 | ready | 1 | 1 | leader | 0 |
|
||||||
16 | 38000 | ready | 1 | 1 | leader | 0 |
|
16 | 38000 | ready | 1 | 1 | leader | 0 |
|
||||||
17 | 38000 | ready | 1 | 1 | leader | 0 |
|
17 | 38000 | ready | 1 | 1 | leader | 0 |
|
||||||
18 | 37001 | ready | 1 | 1 | leader | 0 |
|
18 | 37001 | ready | 1 | 1 | leader | 0 |
|
||||||
19 | 37000 | ready | 1 | 1 | leader | 0 |
|
19 | 37000 | ready | 1 | 1 | leader | 0 |
|
||||||
20 | 37000 | ready | 1 | 1 | leader | 0 |
|
20 | 37000 | ready | 1 | 1 | leader | 0 |
|
||||||
21 | 37000 | ready | 1 | 1 | leader | 0 |
|
21 | 37000 | ready | 1 | 1 | leader | 0 |
|
||||||
Query OK, 8 row(s) in set (0.001154s)
|
Query OK, 8 row(s) in set (0.001154s)
|
||||||
```
|
|
||||||
|
```
|
||||||
|
|
||||||
## Add DNODE
|
## Add DNODE
|
||||||
|
|
||||||
|
@ -71,7 +65,7 @@ Launch TDengine CLI `taos` and execute the command below to add the end point of
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE DNODE "fqdn:port";
|
CREATE DNODE "fqdn:port";
|
||||||
```
|
```
|
||||||
|
|
||||||
The example output is as below:
|
The example output is as below:
|
||||||
|
|
||||||
|
@ -142,72 +136,3 @@ In the above example, when `show dnodes` is executed the first time, two dnodes
|
||||||
- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.
|
- dnodeID is allocated automatically and can't be manually modified. dnodeID is generated in ascending order without duplication.
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## Move VNODE
|
|
||||||
|
|
||||||
A vnode can be manually moved from one dnode to another.
|
|
||||||
|
|
||||||
Launch TDengine CLI `taos` and execute below command:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
|
|
||||||
```
|
|
||||||
|
|
||||||
In the above command, `source-dnodeId` is the original dnodeId where the vnode resides, `dest-dnodeId` specifies the target dnode. vgId (vgroup ID) can be shown by `SHOW VGROUPS `.
|
|
||||||
|
|
||||||
First `show vgroups` is executed to show the vgroup distribution.
|
|
||||||
|
|
||||||
```
|
|
||||||
taos> show vgroups;
|
|
||||||
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
|
|
||||||
==========================================================================================
|
|
||||||
14 | 38000 | ready | 1 | 3 | leader | 0 |
|
|
||||||
15 | 38000 | ready | 1 | 3 | leader | 0 |
|
|
||||||
16 | 38000 | ready | 1 | 3 | leader | 0 |
|
|
||||||
17 | 38000 | ready | 1 | 3 | leader | 0 |
|
|
||||||
18 | 37001 | ready | 1 | 3 | leader | 0 |
|
|
||||||
19 | 37000 | ready | 1 | 1 | leader | 0 |
|
|
||||||
20 | 37000 | ready | 1 | 1 | leader | 0 |
|
|
||||||
21 | 37000 | ready | 1 | 1 | leader | 0 |
|
|
||||||
Query OK, 8 row(s) in set (0.001314s)
|
|
||||||
```
|
|
||||||
|
|
||||||
It can be seen that there are 5 vgroups in dnode 3 and 3 vgroups in node 1, now we want to move vgId 18 from dnode 3 to dnode 1. Execute the below command in `taos`
|
|
||||||
|
|
||||||
```
|
|
||||||
taos> alter dnode 3 balance "vnode:18-dnode:1";
|
|
||||||
|
|
||||||
DB error: Balance already enabled (0.00755
|
|
||||||
```
|
|
||||||
|
|
||||||
However, the operation fails with error message show above, which means automatic load balancing has been enabled in the current database so manual load balance can't be performed.
|
|
||||||
|
|
||||||
Shutdown the cluster, configure `balance` parameter in all the dnodes to 0, then restart the cluster, and execute `alter dnode` and `show vgroups` as below.
|
|
||||||
|
|
||||||
```
|
|
||||||
taos> alter dnode 3 balance "vnode:18-dnode:1";
|
|
||||||
Query OK, 0 row(s) in set (0.000575s)
|
|
||||||
|
|
||||||
taos> show vgroups;
|
|
||||||
vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
|
|
||||||
=================================================================================================================
|
|
||||||
14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
|
|
||||||
15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
|
|
||||||
16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
|
|
||||||
17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
|
|
||||||
18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 |
|
|
||||||
19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
|
|
||||||
20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
|
|
||||||
21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
|
|
||||||
Query OK, 8 row(s) in set (0.001242s)
|
|
||||||
```
|
|
||||||
|
|
||||||
It can be seen from above output that vgId 18 has been moved from dnode 3 to dnode 1.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
|
|
||||||
- Only a vnode in normal state, i.e. leader or follower, can be moved. vnode can't be moved when its in status offline, unsynced or syncing.
|
|
||||||
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
|
@ -1,81 +0,0 @@
|
||||||
---
|
|
||||||
sidebar_label: HA & LB
|
|
||||||
title: High Availability and Load Balancing
|
|
||||||
---
|
|
||||||
|
|
||||||
## High Availability of Vnode
|
|
||||||
|
|
||||||
High availability of vnode and mnode can be achieved through replicas in TDengine.
|
|
||||||
|
|
||||||
A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE DATABASE demo replica 3;
|
|
||||||
```
|
|
||||||
|
|
||||||
The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data.
|
|
||||||
|
|
||||||
There may be data for multiple DBs in a dnode. When a dnode is down, multiple DBs may be affected. While in theory, the cluster will provide data access for reading or inserting data if over half the vnodes in vgroups are online, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly if over half of the dnodes are online.
|
|
||||||
|
|
||||||
## High Availability of Mnode
|
|
||||||
|
|
||||||
Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured using system parameter `numOfMNodes`. The valid range for `numOfMnodes` is [1,3]. To ensure data consistency between mnodes, data replication between mnodes is performed synchronously.
|
|
||||||
|
|
||||||
There may be multiple dnodes in a cluster, but only one mnode can be started in each dnode. Which one or ones of the dnodes will be designated as mnodes is automatically determined by TDengine according to the cluster configuration and system resources. The command `show mnodes` can be executed in TDengine `taos` to show the mnodes in the cluster.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW MNODES;
|
|
||||||
```
|
|
||||||
|
|
||||||
The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
|
|
||||||
|
|
||||||
For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
If high availability is important for your system, both vnode and mnode must be configured to have multiple replicas.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Load Balancing
|
|
||||||
|
|
||||||
Load balancing will be triggered in 3 cases without manual intervention.
|
|
||||||
|
|
||||||
- When a new dnode joins the cluster, automatic load balancing may be triggered. Some data from other dnodes may be transferred to the new dnode automatically.
|
|
||||||
- When a dnode is removed from the cluster, the data from this dnode will be transferred to other dnodes automatically.
|
|
||||||
- When a dnode is too hot, i.e. too much data has been stored in it, automatic load balancing may be triggered to migrate some vnodes from this dnode to other dnodes.
|
|
||||||
|
|
||||||
:::tip
|
|
||||||
Automatic load balancing is controlled by the parameter `balance`, 0 means disabled and 1 means enabled. This is set in the file [taos.cfg](https://docs.tdengine.com/reference/config/#balance).
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Dnode Offline
|
|
||||||
|
|
||||||
When a dnode is offline, it can be detected by the TDengine cluster. There are two cases:
|
|
||||||
|
|
||||||
- The dnode comes online before the threshold configured in `offlineThreshold` is reached. The dnode is still in the cluster and data replication is started automatically. The dnode can work properly after the data sync is finished.
|
|
||||||
|
|
||||||
- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the leader node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Arbitrator
|
|
||||||
|
|
||||||
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
|
|
||||||
|
|
||||||
To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally.
|
|
||||||
|
|
||||||
Normally, it's prudent to configure the replica number for each DB or system parameter `numOfMNodes` to be an odd number. However, if a user is very sensitive to storage space, a replica number of 2 plus arbitrator component can be used to achieve both lower cost of storage space and high availability.
|
|
||||||
|
|
||||||
Arbitrator component is installed with the server package. For details about how to install, please refer to [Install](/operation/pkg-install). The `-p` parameter of `tarbitrator` can be used to specify the port on which it provides service.
|
|
||||||
|
|
||||||
In the configuration file `taos.cfg` of each dnode, parameter `arbitrator` needs to be configured to the end point of the `tarbitrator` process. Arbitrator component will be used automatically if the replica is configured to an even number and will be ignored if the replica is configured to an odd number.
|
|
||||||
|
|
||||||
Arbitrator can be shown by executing command in TDengine CLI `taos` with its role shown as "arb".
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW DNODES;
|
|
||||||
```
|
|
|
@ -0,0 +1,30 @@
|
||||||
|
---
|
||||||
|
sidebar_label: High Availability
|
||||||
|
title: High Availability
|
||||||
|
---
|
||||||
|
|
||||||
|
## High Availability of Vnode
|
||||||
|
|
||||||
|
High availability of vnode can be achieved through replicas in TDengine.
|
||||||
|
|
||||||
|
A TDengine cluster can have multiple databases. Each database has a number of vnodes associated with it. A different number of replicas can be configured for each DB. When creating a database, the parameter `replica` is used to specify the number of replicas. The default value for `replica` is 1. Naturally, a single replica cannot guarantee high availability since if one node is down, the data service is unavailable. Note that the number of dnodes in the cluster must NOT be lower than the number of replicas set for any DB, otherwise the `create table` operation will fail with error "more dnodes are needed". The SQL statement below is used to create a database named "demo" with 3 replicas.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE DATABASE demo replica 3;
|
||||||
|
```
|
||||||
|
|
||||||
|
The data in a DB is divided into multiple shards and stored in multiple vgroups. The number of vnodes in each vgroup is determined by the number of replicas set for the DB. The vnodes in each vgroup store exactly the same data. For the purpose of high availability, the vnodes in a vgroup must be located in different dnodes on different hosts. As long as over half of the vnodes in a vgroup are in an online state, the vgroup is able to provide data access. Otherwise the vgroup can't provide data access for reading or inserting data.
|
||||||
|
|
||||||
|
There may be data from multiple DBs in a single dnode, so when a dnode goes down, multiple DBs may be affected. In theory the cluster can still serve reads and writes as long as over half of the vnodes in each vgroup are online; however, because of the possibly complex mapping between vnodes and dnodes, it is difficult to guarantee that the cluster will work properly simply because over half of the dnodes are online.
|
||||||
|
|
||||||
|
## High Availability of Mnode
|
||||||
|
|
||||||
|
Each TDengine cluster is managed by `mnode`, which is a module of `taosd`. For the high availability of mnode, multiple mnodes can be configured in the system. When a TDengine cluster is started from scratch, there is only one `mnode`; you can then use the `create mnode` command, and start the corresponding dnode, to add more mnodes.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW MNODES;
|
||||||
|
```
|
||||||
|
|
||||||
|
The end point and role/status (leader, follower, candidate, offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work.
|
||||||
|
|
||||||
|
Starting from TDengine 3.0.0, the RAFT protocol is used to guarantee high availability, so the number of mnodes should be either 1 or 3.
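A hedged sketch of growing the mnode set from one to three; the `ON DNODE` form and the dnode IDs 2 and 3 are assumptions used only for illustration:

```sql
CREATE MNODE ON DNODE 2;
CREATE MNODE ON DNODE 3;
SHOW MNODES;
```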
|
|
@ -0,0 +1,20 @@
|
||||||
|
---
|
||||||
|
sidebar_label: Load Balance
|
||||||
|
title: Load Balance
|
||||||
|
---
|
||||||
|
|
||||||
|
Load balancing in TDengine is mainly about distributing the processing of time series data. TDengine employs a built-in hash algorithm to distribute all the tables and subtables of a database, together with their data, across all the vgroups that belong to the database. Each table or subtable can only be handled by a single vgroup, while each vgroup can process multiple tables or subtables.
|
||||||
|
|
||||||
|
The number of vgroups can be specified when creating a database, using the parameter `vgroups`.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create database db0 vgroups 100;
|
||||||
|
```
|
||||||
|
|
||||||
|
The proper value of `vgroups` depends on the available system resources. Assuming there is only one database to be created in the system, the number of `vgroups` is determined by the resources available across all dnodes. In principle, more vgroups can be created if you have more CPU and memory. Disk I/O is another important factor to consider: once disk I/O becomes the bottleneck, adding more vgroups may degrade system performance significantly. If multiple databases are to be created in the system, the total number of `vgroups` across all databases depends on the available system resources. Be careful when distributing vgroups among these databases; you need to consider the number of tables, the data writing frequency, and the size of each data row for each database. A recommended practice is to first choose a starting number for `vgroups`, for example double the number of CPU cores, then adjust and optimize the system configuration to find the best setting for `vgroups`, and finally distribute these vgroups among the databases.
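As an illustration of the starting-point rule above, assume the dnodes of a cluster provide 16 CPU cores in total; doubling that suggests about 32 vgroups as an initial value to tune from (the database name and the number are examples only):

```sql
-- starting point: roughly 2 x 16 CPU cores = 32 vgroups, to be refined by testing
create database db1 vgroups 32;
```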
|
||||||
|
|
||||||
|
Furthermore, TDengine distributes the vgroups of each database evenly among all dnodes. With replica 3, the distribution is even more complex, and TDengine tries its best to prevent any dnode from becoming a bottleneck.
|
||||||
|
|
||||||
|
TDengine uses the above mechanisms to achieve load balancing in a cluster and, in turn, higher throughput.
|
||||||
|
|
||||||
|
Once load balance has been achieved, operations such as deleting tables or dropping databases may make the load across dnodes imbalanced; a rebalancing method will be provided in later versions. However, even without explicit rebalancing, TDengine will try its best to restore balance without manual intervention when a new database is created.
|
|
@ -10,7 +10,7 @@ title: 集群部署
|
||||||
|
|
||||||
### 第一步
|
### 第一步
|
||||||
|
|
||||||
如果搭建集群的物理节点中,存有之前的测试数据、装过 1.X 的版本,或者装过其他版本的 TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。
|
如果搭建集群的物理节点中,存有之前的测试数据,或者装过其他版本的 TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine 多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html)。
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
因为 FQDN 的信息会写进文件,如果之前没有配置或者更改 FQDN,且启动了 TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/\*);
|
因为 FQDN 的信息会写进文件,如果之前没有配置或者更改 FQDN,且启动了 TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(rm -rf /var/lib/taos/\*);
|
||||||
|
@ -54,30 +54,16 @@ fqdn h1.taosdata.com
|
||||||
// 配置本数据节点的端口号,缺省是 6030
|
// 配置本数据节点的端口号,缺省是 6030
|
||||||
serverPort 6030
|
serverPort 6030
|
||||||
|
|
||||||
// 副本数为偶数的时候,需要配置,请参考《Arbitrator 的使用》的部分
|
|
||||||
arbitrator ha.taosdata.com:6042
|
|
||||||
```
|
|
||||||
|
|
||||||
一定要修改的参数是 firstEp 和 fqdn。在每个数据节点,firstEp 需全部配置成一样,但 fqdn 一定要配置成其所在数据节点的值。其他参数可不做任何修改,除非你很清楚为什么要修改。
|
一定要修改的参数是 firstEp 和 fqdn。在每个数据节点,firstEp 需全部配置成一样,但 fqdn 一定要配置成其所在数据节点的值。其他参数可不做任何修改,除非你很清楚为什么要修改。
|
||||||
|
|
||||||
加入到集群中的数据节点 dnode,涉及集群相关的下表 9 项参数必须完全相同,否则不能成功加入到集群中。
|
加入到集群中的数据节点 dnode,下表中涉及集群相关的参数必须完全相同,否则不能成功加入到集群中。
|
||||||
|
|
||||||
| **#** | **配置参数名称** | **含义** |
|
| **#** | **配置参数名称** | **含义** |
|
||||||
| ----- | ------------------ | ------------------------------------------- |
|
| ----- | ------------------ | ------------------------------------------- |
|
||||||
| 1 | numOfMnodes | 系统中管理节点个数 |
|
| 1 | statusInterval | dnode 向 mnode 报告状态时长 |
|
||||||
| 2 | mnodeEqualVnodeNum | 一个 mnode 等同于 vnode 消耗的个数 |
|
| 2 | timezone | 时区 |
|
||||||
| 3 | offlineThreshold | dnode 离线阈值,超过该时间将导致 Dnode 离线 |
|
| 3 | locale | 系统区位信息及编码格式 |
|
||||||
| 4 | statusInterval | dnode 向 mnode 报告状态时长 |
|
| 4 | charset | 字符集编码 |
|
||||||
| 5 | arbitrator | 系统中裁决器的 End Point |
|
|
||||||
| 6 | timezone | 时区 |
|
|
||||||
| 7 | balance | 是否启动负载均衡 |
|
|
||||||
| 8 | maxTablesPerVnode | 每个 vnode 中能够创建的最大表个数 |
|
|
||||||
| 9 | maxVgroupsPerDb | 每个 DB 中能够使用的最大 vgroup 个数 |
|
|
||||||
|
|
||||||
:::note
|
|
||||||
在 2.0.19.0 及更早的版本中,除以上 9 项参数外,dnode 加入集群时,还会要求 locale 和 charset 参数的取值也一致。
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## 启动集群
|
## 启动集群
|
||||||
|
|
||||||
|
@ -86,19 +72,22 @@ arbitrator ha.taosdata.com:6042
|
||||||
按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示:
|
按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示:
|
||||||
|
|
||||||
```
|
```
|
||||||
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
|
Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
|
||||||
|
Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
|
||||||
|
|
||||||
|
Server is Enterprise trial Edition, ver:3.0.0.0 and will never expire.
|
||||||
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
|
|
||||||
|
|
||||||
taos> show dnodes;
|
taos> show dnodes;
|
||||||
id | end_point | vnodes | cores | status | role | create_time |
|
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||||
=====================================================================================
|
============================================================================================================================================
|
||||||
1 | h1.taos.com:6030 | 0 | 2 | ready | any | 2020-07-31 03:49:29.202 |
|
1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
|
||||||
Query OK, 1 row(s) in set (0.006385s)
|
Query OK, 1 rows affected (0.007984s)
|
||||||
|
|
||||||
taos>
|
taos>
|
||||||
```
|
|
||||||
|
taos>
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
上述命令里,可以看到刚启动的数据节点的 End Point 是:h1.taos.com:6030,就是这个新集群的 firstEp。
|
上述命令里,可以看到刚启动的数据节点的 End Point 是:h1.taosdata.com:6030,就是这个新集群的 firstEp。
|
||||||
|
|
||||||
|
@ -112,7 +101,7 @@ taos>
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE DNODE "h2.taos.com:6030";
|
CREATE DNODE "h2.taos.com:6030";
|
||||||
```
|
```
|
||||||
|
|
||||||
将新数据节点的 End Point(准备工作中第四步获知的)添加进集群的 EP 列表。“fqdn:port”需要用双引号引起来,否则出错。请注意将示例的“h2.taos.com:6030” 替换为这个新数据节点的 End Point。
|
将新数据节点的 End Point(准备工作中第四步获知的)添加进集群的 EP 列表。“fqdn:port”需要用双引号引起来,否则出错。请注意将示例的“h2.taos.com:6030” 替换为这个新数据节点的 End Point。
|
||||||
|
|
||||||
|
|
|
@ -24,15 +24,15 @@ SHOW DNODES;
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show dnodes;
|
taos> show dnodes;
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||||
======================================================================================================================================
|
============================================================================================================================================
|
||||||
1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
1 | trd01:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
|
||||||
Query OK, 1 row(s) in set (0.008298s)
|
Query OK, 1 rows affected (0.006684s)
|
||||||
```
|
```
|
||||||
|
|
||||||
## 查看虚拟节点组
|
## 查看虚拟节点组
|
||||||
|
|
||||||
为充分利用多核技术,并提供 scalability,数据需要分片处理。因此 TDengine 会将一个 DB 的数据切分成多份,存放在多个 vnode 里。这些 vnode 可能分布在多个数据节点 dnode 里,这样就实现了水平扩展。一个 vnode 仅仅属于一个 DB,但一个 DB 可以有多个 vnode。vnode 所在的数据节点是 mnode 根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
为充分利用多核技术,并提供横向扩展能力,数据需要分片处理。因此 TDengine 会将一个 DB 的数据切分成多份,存放在多个 vnode 里。这些 vnode 可能分布在多个数据节点 dnode 里,这样就实现了水平扩展。一个 vnode 仅仅属于一个 DB,但一个 DB 可以有多个 vnode。vnode 所在的数据节点是 mnode 根据当前系统资源的情况,自动进行分配的,无需任何人工干预。
|
||||||
|
|
||||||
启动 CLI 程序 taos,然后执行:
|
启动 CLI 程序 taos,然后执行:
|
||||||
|
|
||||||
|
@ -44,26 +44,15 @@ SHOW VGROUPS;
|
||||||
输出如下(具体内容仅供参考,取决于实际的集群配置)
|
输出如下(具体内容仅供参考,取决于实际的集群配置)
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show dnodes;
|
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
|
||||||
======================================================================================================================================
|
|
||||||
1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
|
||||||
Query OK, 1 row(s) in set (0.008298s)
|
|
||||||
|
|
||||||
taos> use db;
|
taos> use db;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> show vgroups;
|
taos> show vgroups;
|
||||||
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
|
vgroup_id | db_name | tables | v1_dnode | v1_status | v2_dnode | v2_status | v3_dnode | v3_status | status | nfiles | file_size | tsma |
|
||||||
==========================================================================================
|
================================================================================================================================================================================================
|
||||||
14 | 38000 | ready | 1 | 1 | master | 0 |
|
2 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
|
||||||
15 | 38000 | ready | 1 | 1 | master | 0 |
|
3 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
|
||||||
16 | 38000 | ready | 1 | 1 | master | 0 |
|
4 | db | 0 | 1 | leader | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 0 |
|
||||||
17 | 38000 | ready | 1 | 1 | master | 0 |
|
|
||||||
18 | 37001 | ready | 1 | 1 | master | 0 |
|
|
||||||
19 | 37000 | ready | 1 | 1 | master | 0 |
|
|
||||||
20 | 37000 | ready | 1 | 1 | master | 0 |
|
|
||||||
21 | 37000 | ready | 1 | 1 | master | 0 |
|
|
||||||
Query OK, 8 row(s) in set (0.001154s)
|
Query OK, 8 row(s) in set (0.001154s)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -77,67 +66,35 @@ CREATE DNODE "fqdn:port";
|
||||||
|
|
||||||
将新数据节点的 End Point 添加进集群的 EP 列表。“fqdn:port“需要用双引号引起来,否则出错。一个数据节点对外服务的 fqdn 和 port 可以通过配置文件 taos.cfg 进行配置,缺省是自动获取。【强烈不建议用自动获取方式来配置 FQDN,可能导致生成的数据节点的 End Point 不是所期望的】
|
将新数据节点的 End Point 添加进集群的 EP 列表。“fqdn:port“需要用双引号引起来,否则出错。一个数据节点对外服务的 fqdn 和 port 可以通过配置文件 taos.cfg 进行配置,缺省是自动获取。【强烈不建议用自动获取方式来配置 FQDN,可能导致生成的数据节点的 End Point 不是所期望的】
|
||||||
|
|
||||||
示例如下:
|
然后启动新加入的数据节点的 taosd 进程,再通过 taos 查看数据节点状态:
|
||||||
```
|
|
||||||
taos> create dnode "localhost:7030";
|
|
||||||
Query OK, 0 of 0 row(s) in database (0.008203s)
|
|
||||||
|
|
||||||
taos> show dnodes;
|
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
|
||||||
======================================================================================================================================
|
|
||||||
1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
|
||||||
2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received |
|
|
||||||
Query OK, 2 row(s) in set (0.001017s)
|
|
||||||
```
|
|
||||||
|
|
||||||
在上面的示例中可以看到新创建的 dnode 的状态为 offline,待该 dnode 被启动并连接上配置文件中指定的 firstEp后再次查看,得到如下结果(示例)
|
|
||||||
|
|
||||||
```
|
```
|
||||||
taos> show dnodes;
|
taos> show dnodes;
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||||
======================================================================================================================================
|
============================================================================================================================================
|
||||||
1 | localhost:6030 | 3 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
1 | localhost:6030 | 100 | 1024 | ready | 2022-07-15 16:47:47.726 | |
|
||||||
2 | localhost:7030 | 6 | 8 | ready | any | 2022-04-19 08:14:59.165 | |
|
2 | localhost:7030 | 0 | 1024 | ready | 2022-07-15 16:56:13.670 | |
|
||||||
Query OK, 2 row(s) in set (0.001316s)
|
Query OK, 2 rows affected (0.007031s)
|
||||||
```
|
```
|
||||||
|
|
||||||
从中可以看到两个 dnode 状态都为 ready
|
从中可以看到两个 dnode 状态都为 ready
|
||||||
|
|
||||||
|
|
||||||
## 删除数据节点
|
## 删除数据节点
|
||||||
|
|
||||||
启动 CLI 程序 taos,然后执行:
|
先停止要删除的数据节点的 taosd 进程,然后启动 CLI 程序 taos,执行:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
DROP DNODE "fqdn:port";
|
DROP DNODE "fqdn:port";
|
||||||
```
|
```
|
||||||
|
|
||||||
或者
|
或者
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
DROP DNODE dnodeId;
|
DROP DNODE dnodeId;
|
||||||
```
|
```
|
||||||
|
|
||||||
通过 “fqdn:port” 或 dnodeID 来指定一个具体的节点都是可以的。其中 fqdn 是被删除的节点的 FQDN,port 是其对外服务器的端口号;dnodeID 可以通过 SHOW DNODES 获得。
|
通过 “fqdn:port” 或 dnodeID 来指定一个具体的节点都是可以的。其中 fqdn 是被删除的节点的 FQDN,port 是其对外服务器的端口号;dnodeID 可以通过 SHOW DNODES 获得。
|
||||||
|
|
||||||
示例如下:
|
|
||||||
```
|
|
||||||
taos> show dnodes;
|
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
|
||||||
======================================================================================================================================
|
|
||||||
1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
|
||||||
2 | localhost:7030 | 0 | 0 | offline | any | 2022-04-19 08:11:42.158 | status not received |
|
|
||||||
Query OK, 2 row(s) in set (0.001017s)
|
|
||||||
|
|
||||||
taos> drop dnode 2;
|
|
||||||
Query OK, 0 of 0 row(s) in database (0.000518s)
|
|
||||||
|
|
||||||
taos> show dnodes;
|
|
||||||
id | end_point | vnodes | cores | status | role | create_time | offline reason |
|
|
||||||
======================================================================================================================================
|
|
||||||
1 | localhost:6030 | 9 | 8 | ready | any | 2022-04-15 08:27:09.359 | |
|
|
||||||
Query OK, 1 row(s) in set (0.001137s)
|
|
||||||
```
|
|
||||||
|
|
||||||
上面的示例中,初次执行 `show dnodes` 列出了两个 dnode, 执行 `drop dnode 2` 删除其中 ID 为 2 的 dnode 之后再次执行 `show dnodes`,可以看到只剩下 ID 为 1 的 dnode 。
|
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
|
|
||||||
数据节点一旦被 drop 之后,不能重新加入集群。需要将此节点重新部署(清空数据文件夹)。集群在完成 `drop dnode` 操作之前,会将该 dnode 的数据迁移走。
|
数据节点一旦被 drop 之后,不能重新加入集群。需要将此节点重新部署(清空数据文件夹)。集群在完成 `drop dnode` 操作之前,会将该 dnode 的数据迁移走。
|
||||||
|
@ -146,71 +103,3 @@ Query OK, 1 row(s) in set (0.001137s)
|
||||||
dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。
|
dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## 手动迁移数据节点
|
|
||||||
|
|
||||||
手动将某个 vnode 迁移到指定的 dnode。
|
|
||||||
|
|
||||||
启动 CLI 程序 taos,然后执行:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
ALTER DNODE <source-dnodeId> BALANCE "VNODE:<vgId>-DNODE:<dest-dnodeId>";
|
|
||||||
```
|
|
||||||
|
|
||||||
其中:source-dnodeId 是源 dnodeId,也就是待迁移的 vnode 所在的 dnodeID;vgId 可以通过 SHOW VGROUPS 获得,列表的第一列;dest-dnodeId 是目标 dnodeId。
|
|
||||||
|
|
||||||
首先执行 `show vgroups` 查看 vgroup 的分布情况
|
|
||||||
```
|
|
||||||
taos> show vgroups;
|
|
||||||
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
|
|
||||||
==========================================================================================
|
|
||||||
14 | 38000 | ready | 1 | 3 | master | 0 |
|
|
||||||
15 | 38000 | ready | 1 | 3 | master | 0 |
|
|
||||||
16 | 38000 | ready | 1 | 3 | master | 0 |
|
|
||||||
17 | 38000 | ready | 1 | 3 | master | 0 |
|
|
||||||
18 | 37001 | ready | 1 | 3 | master | 0 |
|
|
||||||
19 | 37000 | ready | 1 | 1 | master | 0 |
|
|
||||||
20 | 37000 | ready | 1 | 1 | master | 0 |
|
|
||||||
21 | 37000 | ready | 1 | 1 | master | 0 |
|
|
||||||
Query OK, 8 row(s) in set (0.001314s)
|
|
||||||
```
|
|
||||||
|
|
||||||
从中可以看到在 dnode 3 中有5个 vgroup,而 dnode 1 有 3 个 vgroup,假定我们想将其中 vgId 为18 的 vgroup 从 dnode 3 迁移到 dnode 1
|
|
||||||
|
|
||||||
```
|
|
||||||
taos> alter dnode 3 balance "vnode:18-dnode:1";
|
|
||||||
|
|
||||||
DB error: Balance already enabled (0.00755
|
|
||||||
```
|
|
||||||
|
|
||||||
上面的结果表明目前所在数据库已经启动了 balance 选项,所以无法进行手动迁移。
|
|
||||||
|
|
||||||
停止整个集群,将两个 dnode 的配置文件中的 balance 都设置为 0 (默认为1)之后,重新启动集群,再次执行 ` alter dnode` 和 `show vgroups` 命令如下
|
|
||||||
```
|
|
||||||
taos> alter dnode 3 balance "vnode:18-dnode:1";
|
|
||||||
Query OK, 0 row(s) in set (0.000575s)
|
|
||||||
|
|
||||||
taos> show vgroups;
|
|
||||||
vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
|
|
||||||
=================================================================================================================
|
|
||||||
14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
|
|
||||||
15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
|
|
||||||
16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
|
|
||||||
17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
|
|
||||||
18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 |
|
|
||||||
19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
|
|
||||||
20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
|
|
||||||
21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
|
|
||||||
Query OK, 8 row(s) in set (0.001242s)
|
|
||||||
```
|
|
||||||
|
|
||||||
从上面的输出可以看到 vgId 为 18 的 vnode 被从 dnode 3 迁移到了 dnode 1。
|
|
||||||
|
|
||||||
:::warning
|
|
||||||
|
|
||||||
只有在集群的自动负载均衡选项关闭时(balance 设置为 0),才允许手动迁移。
|
|
||||||
只有处于正常工作状态的 vnode 才能被迁移:master/slave;当处于 offline/unsynced/syncing 状态时,是不能迁移的。
|
|
||||||
迁移前,务必核实目标 dnode 的资源足够:CPU、内存、硬盘。
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
|
|
|
@ -1,87 +0,0 @@
|
||||||
---
|
|
||||||
title: 高可用与负载均衡
|
|
||||||
---
|
|
||||||
|
|
||||||
## Vnode 的高可用性
|
|
||||||
|
|
||||||
TDengine 通过多副本的机制来提供系统的高可用性,包括 vnode 和 mnode 的高可用性。
|
|
||||||
|
|
||||||
vnode 的副本数是与 DB 关联的,一个集群里可以有多个 DB,根据运营的需求,每个 DB 可以配置不同的副本数。创建数据库时,通过参数 replica 指定副本数(缺省为 1)。如果副本数为 1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed”。比如下面的命令将创建副本数为 3 的数据库 demo:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE DATABASE demo replica 3;
|
|
||||||
```
|
|
||||||
|
|
||||||
一个 DB 里的数据会被切片分到多个 vnode group,vnode group 里的 vnode 数目就是 DB 的副本数,同一个 vnode group 里各 vnode 的数据是完全一致的。为保证高可用性,vnode group 里的 vnode 一定要分布在不同的数据节点 dnode 里(实际部署时,需要在不同的物理机上),只要一个 vnode group 里超过半数的 vnode 处于工作状态,这个 vnode group 就能正常的对外服务。
|
|
||||||
|
|
||||||
一个数据节点 dnode 里可能有多个 DB 的数据,因此一个 dnode 离线时,可能会影响到多个 DB。如果一个 vnode group 里的一半或一半以上的 vnode 不工作,那么该 vnode group 就无法对外服务,无法插入或读取数据,这样会影响到它所属的 DB 的一部分表的读写操作。
|
|
||||||
|
|
||||||
因为 vnode 的引入,无法简单地给出结论:“集群中过半数据节点 dnode 工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为 3,只有三个 dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。
|
|
||||||
|
|
||||||
## Mnode 的高可用性
|
|
||||||
|
|
||||||
TDengine 集群是由 mnode(taosd 的一个模块,管理节点)负责管理的,为保证 mnode 的高可用,可以配置多个 mnode 副本,副本数由系统配置参数 numOfMnodes 决定,有效范围为 1-3。为保证元数据的强一致性,mnode 副本之间是通过同步的方式进行数据复制的。
|
|
||||||
|
|
||||||
一个集群有多个数据节点 dnode,但一个 dnode 至多运行一个 mnode 实例。多个 dnode 情况下,哪个 dnode 可以作为 mnode 呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过 CLI 程序 taos,在 TDengine 的 console 里,执行如下命令:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW MNODES;
|
|
||||||
```
|
|
||||||
|
|
||||||
来查看 mnode 列表,该列表将列出 mnode 所处的 dnode 的 End Point 和角色(master,slave,unsynced 或 offline)。当集群中第一个数据节点启动时,该数据节点一定会运行一个 mnode 实例,否则该数据节点 dnode 无法正常工作,因为一个系统是必须有至少一个 mnode 的。如果 numOfMnodes 配置为 2,启动第二个 dnode 时,该 dnode 也将运行一个 mnode 实例。
|
|
||||||
|
|
||||||
为保证 mnode 服务的高可用性,numOfMnodes 必须设置为 2 或更大。因为 mnode 保存的元数据必须是强一致的,如果 numOfMnodes 大于 2,复制参数 quorum 自动设为 2,也就是说,至少要保证有两个副本写入数据成功,才通知客户端应用写入成功。
|
|
||||||
|
|
||||||
:::note
|
|
||||||
一个 TDengine 高可用系统,无论是 vnode 还是 mnode,都必须配置多个副本。
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## 负载均衡
|
|
||||||
|
|
||||||
有三种情况,将触发负载均衡,而且都无需人工干预。
|
|
||||||
|
|
||||||
当一个新数据节点添加进集群时,系统将自动触发负载均衡,一些节点上的数据将被自动转移到新数据节点上,无需任何人工干预。
|
|
||||||
当一个数据节点从集群中移除时,系统将自动把该数据节点上的数据转移到其他数据节点,无需任何人工干预。
|
|
||||||
如果一个数据节点过热(数据量过大),系统将自动进行负载均衡,将该数据节点的一些 vnode 自动挪到其他节点。
|
|
||||||
当上述三种情况发生时,系统将启动各个数据节点的负载计算,从而决定如何挪动。
|
|
||||||
|
|
||||||
:::tip
|
|
||||||
负载均衡由参数 balance 控制,它决定是否启动自动负载均衡,0 表示禁用,1 表示启用自动负载均衡。
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## 数据节点离线处理
|
|
||||||
|
|
||||||
如果一个数据节点离线,TDengine 集群将自动检测到。有如下两种情况:
|
|
||||||
|
|
||||||
该数据节点离线超过一定时间(taos.cfg 里配置参数 offlineThreshold 控制时长),系统将自动把该数据节点删除,产生系统报警信息,触发负载均衡流程。如果该被删除的数据节点重新上线时,它将无法加入集群,需要系统管理员重新将其添加进集群才会开始工作。
|
|
||||||
|
|
||||||
离线后,在 offlineThreshold 的时长内重新上线,系统将自动启动数据恢复流程,等数据完全恢复后,该节点将开始正常工作。
|
|
||||||
|
|
||||||
:::note
|
|
||||||
如果一个虚拟节点组(包括 mnode 组)里所归属的每个数据节点都处于离线或 unsynced 状态,必须等该虚拟节点组里的所有数据节点都上线、都能交换状态信息后,才能选出 Master,该虚拟节点组才能对外提供服务。比如整个集群有 3 个数据节点,副本数为 3,如果 3 个数据节点都宕机,然后 2 个数据节点重启,是无法工作的,只有等 3 个数据节点都重启成功,才能对外服务。
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Arbitrator 的使用
|
|
||||||
|
|
||||||
如果副本数为偶数,当一个 vnode group 里一半或超过一半的 vnode 不工作时,是无法从中选出 master 的。同理,一半或超过一半的 mnode 不工作时,是无法选出 mnode 的 master 的,因为存在“split brain”问题。
|
|
||||||
|
|
||||||
为解决这个问题,TDengine 引入了 Arbitrator 的概念。Arbitrator 模拟一个 vnode 或 mnode 在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含 Arbitrator 在内,超过半数的 vnode 或 mnode 工作,那么该 vnode group 或 mnode 组就可以正常的提供数据插入或查询服务。比如对于副本数为 2 的情形,如果一个节点 A 离线,但另外一个节点 B 正常,而且能连接到 Arbitrator,那么节点 B 就能正常工作。
|
|
||||||
|
|
||||||
总之,在目前版本下,TDengine 建议在双副本环境要配置 Arbitrator,以提升系统的可用性。
|
|
||||||
|
|
||||||
Arbitrator 的执行程序名为 tarbitrator。该程序对系统资源几乎没有要求,只需要保证有网络连接,找任何一台 Linux 服务器运行它即可。以下简要描述安装配置的步骤:
|
|
||||||
|
|
||||||
请点击 安装包下载,在 TDengine Arbitrator Linux 一节中,选择合适的版本下载并安装。
|
|
||||||
该应用的命令行参数 -p 可以指定其对外服务的端口号,缺省是 6042。
|
|
||||||
|
|
||||||
修改每个 taosd 实例的配置文件,在 taos.cfg 里将参数 arbitrator 设置为 tarbitrator 程序所对应的 End Point。(如果该参数配置了,当副本数为偶数时,系统将自动连接配置的 Arbitrator。如果副本数为奇数,即使配置了 Arbitrator,系统也不会去建立连接。)
|
|
||||||
|
|
||||||
在配置文件中配置了的 Arbitrator,会出现在 SHOW DNODES 指令的返回结果中,对应的 role 列的值会是“arb”。
|
|
||||||
查看集群 Arbitrator 的状态【2.0.14.0 以后支持】
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW DNODES;
|
|
||||||
```
|
|
|
@ -0,0 +1,33 @@
|
||||||
|
---
|
||||||
|
title: 高可用
|
||||||
|
---
|
||||||
|
|
||||||
|
## Vnode 的高可用性
|
||||||
|
|
||||||
|
TDengine 通过多副本的机制来提供系统的高可用性,包括 vnode 和 mnode 的高可用性。
|
||||||
|
|
||||||
|
vnode 的副本数是与 DB 关联的,一个集群里可以有多个 DB,根据运营的需求,每个 DB 可以配置不同的副本数。创建数据库时,通过参数 replica 指定副本数(缺省为 1)。如果副本数为 1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed”。比如下面的命令将创建副本数为 3 的数据库 demo:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE DATABASE demo replica 3;
|
||||||
|
```
|
||||||
|
|
||||||
|
一个 DB 里的数据会被切片分到多个 vnode group,vnode group 里的 vnode 数目就是 DB 的副本数,同一个 vnode group 里各 vnode 的数据是完全一致的。为保证高可用性,vnode group 里的 vnode 一定要分布在不同的数据节点 dnode 里(实际部署时,需要在不同的物理机上),只要一个 vnode group 里超过半数的 vnode 处于工作状态,这个 vnode group 就能正常的对外服务。
|
||||||
|
|
||||||
|
一个数据节点 dnode 里可能有多个 DB 的数据,因此一个 dnode 离线时,可能会影响到多个 DB。如果一个 vnode group 里的一半或一半以上的 vnode 不工作,那么该 vnode group 就无法对外服务,无法插入或读取数据,这样会影响到它所属的 DB 的一部分表的读写操作。
|
||||||
|
|
||||||
|
因为 vnode 的引入,无法简单地给出结论:“集群中过半数据节点 dnode 工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为 3,只有三个 dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。
|
||||||
|
|
||||||
|
## Mnode 的高可用性
|
||||||
|
|
||||||
|
TDengine 集群是由 mnode(taosd 的一个模块,管理节点)负责管理的,为保证 mnode 的高可用,可以配置多个 mnode 副本,在集群启动时只有一个 mnode,用户可以通过 `create mnode` 来增加新的 mnode。用户可以通过该命令自主决定哪几个 dnode 会承担 mnode 的角色。为保证元数据的强一致性,在有多个 mnode 时,mnode 副本之间是通过同步的方式进行数据复制的。
|
||||||
|
|
||||||
|
一个集群有多个数据节点 dnode,但一个 dnode 至多运行一个 mnode 实例。用户可通过 CLI 程序 taos,在 TDengine 的 console 里,执行如下命令:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW MNODES;
|
||||||
|
```
|
||||||
|
|
||||||
|
来查看 mnode 列表,该列表将列出 mnode 所处的 dnode 的 End Point 和角色(leader, follower, candidate, offline)。当集群中第一个数据节点启动时,该数据节点一定会运行一个 mnode 实例,否则该数据节点 dnode 无法正常工作,因为一个系统是必须有至少一个 mnode 的。
|
||||||
|
|
||||||
|
在 TDengine 3.0 及以后的版本中,数据同步采用 RAFT 协议,所以 mnode 的数量应该被设置为 1 个或者 3 个。
|
|
@ -0,0 +1,19 @@
|
||||||
|
---
|
||||||
|
title: 负载均衡
|
||||||
|
---
|
||||||
|
|
||||||
|
TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TDengine 采用 Hash 一致性算法将一个数据库中的所有表和子表的数据均衡分散在属于该数据库的所有 vgroup 中,每张表或子表只能由一个 vgroup 处理,一个 vgroup 可能负责处理多个表或子表。
|
||||||
|
|
||||||
|
创建数据库时可以指定其中的 vgroup 的数量:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
create database db0 vgroups 100;
|
||||||
|
```
|
||||||
|
|
||||||
|
如何指定合适的 vgroup 的数量,这取决于系统资源。假定系统中只计划建立一个数据库,则 vgroup 数量由集群中所有 dnode 所能使用的资源决定。原则上可用的 CPU 和 Memory 越多,可建立的 vgroup 也越多。但也要考虑到磁盘性能,过多的 vgroup 在磁盘性能达到上限后反而会拖累整个系统的性能。假如系统中会建立多个数据库,则多个数据库的 vgroup 之和取决于系统中可用资源的数量。要综合考虑多个数据库之间表的数量、写入频率、数据量等多个因素在多个数据库之间分配 vgroup。实际中建议首先根据系统资源配置选择一个初始的 vgroup 数量,比如 CPU 总核数的 2 倍,以此为起点通过测试找到最佳的 vgroup 数量配置,此为系统中的 vgroup 总数。如果有多个数据库的话,再根据各个数据库的表数和数据量对 vgroup 进行分配。
|
||||||
|
|
||||||
|
此外,对于任意数据库的 vgroup,TDengine 都是尽可能将其均衡分散在多个 dnode 上。在多副本情况下(replica 3),这种均衡分布尤其复杂,TDengine 的分布策略会尽量避免任意一个 dnode 成为写入的瓶颈。
|
||||||
|
|
||||||
|
通过以上措施可以最大限度地在整个 TDengine 集群中实现负载均衡,负载均衡也能反过来提升系统总的数据处理能力。
|
||||||
|
|
||||||
|
在初始的负载均衡建立起来之后,如果由于删库、删表等动作,特别是删库动作会导致属于它的 vnode 都被删除,这有可能会造成一定程度的负载失衡,在后续版本中会提供重新平衡的方法。但如果有新的数据库建立,TDengine 也能够一定程度自我再平衡而无须人工干预。
|
|
@ -57,7 +57,7 @@ enum {
|
||||||
// STREAM_INPUT__TABLE_SCAN,
|
// STREAM_INPUT__TABLE_SCAN,
|
||||||
STREAM_INPUT__TQ_SCAN,
|
STREAM_INPUT__TQ_SCAN,
|
||||||
STREAM_INPUT__DATA_RETRIEVE,
|
STREAM_INPUT__DATA_RETRIEVE,
|
||||||
STREAM_INPUT__TRIGGER,
|
STREAM_INPUT__GET_RES,
|
||||||
STREAM_INPUT__CHECKPOINT,
|
STREAM_INPUT__CHECKPOINT,
|
||||||
STREAM_INPUT__DROP,
|
STREAM_INPUT__DROP,
|
||||||
};
|
};
|
||||||
|
@ -155,10 +155,10 @@ typedef struct SQueryTableDataCond {
|
||||||
int32_t numOfCols;
|
int32_t numOfCols;
|
||||||
SColumnInfo* colList;
|
SColumnInfo* colList;
|
||||||
int32_t type; // data block load type:
|
int32_t type; // data block load type:
|
||||||
// int32_t numOfTWindows;
|
// int32_t numOfTWindows;
|
||||||
STimeWindow twindows;
|
STimeWindow twindows;
|
||||||
int64_t startVersion;
|
int64_t startVersion;
|
||||||
int64_t endVersion;
|
int64_t endVersion;
|
||||||
} SQueryTableDataCond;
|
} SQueryTableDataCond;
|
||||||
|
|
||||||
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
|
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
|
||||||
|
|
|
@ -75,13 +75,18 @@ typedef uint16_t tmsg_t;
|
||||||
#define TSDB_IE_TYPE_DNODE_EXT 6
|
#define TSDB_IE_TYPE_DNODE_EXT 6
|
||||||
#define TSDB_IE_TYPE_DNODE_STATE 7
|
#define TSDB_IE_TYPE_DNODE_STATE 7
|
||||||
|
|
||||||
enum { CONN_TYPE__QUERY = 1, CONN_TYPE__TMQ, CONN_TYPE__UDFD, CONN_TYPE__MAX };
|
enum {
|
||||||
|
CONN_TYPE__QUERY = 1,
|
||||||
|
CONN_TYPE__TMQ,
|
||||||
|
CONN_TYPE__UDFD,
|
||||||
|
CONN_TYPE__MAX,
|
||||||
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
HEARTBEAT_KEY_USER_AUTHINFO = 1,
|
HEARTBEAT_KEY_USER_AUTHINFO = 1,
|
||||||
HEARTBEAT_KEY_DBINFO,
|
HEARTBEAT_KEY_DBINFO,
|
||||||
HEARTBEAT_KEY_STBINFO,
|
HEARTBEAT_KEY_STBINFO,
|
||||||
HEARTBEAT_KEY_MQ_TMP,
|
HEARTBEAT_KEY_TMQ,
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef enum _mgmt_table {
|
typedef enum _mgmt_table {
|
||||||
|
@ -526,6 +531,7 @@ typedef struct {
|
||||||
int8_t superUser;
|
int8_t superUser;
|
||||||
int8_t connType;
|
int8_t connType;
|
||||||
SEpSet epSet;
|
SEpSet epSet;
|
||||||
|
int32_t svrTimestamp;
|
||||||
char sVer[TSDB_VERSION_LEN];
|
char sVer[TSDB_VERSION_LEN];
|
||||||
char sDetailVer[128];
|
char sDetailVer[128];
|
||||||
} SConnectRsp;
|
} SConnectRsp;
|
||||||
|
@ -1969,7 +1975,7 @@ typedef struct SVCreateTbReq {
|
||||||
int8_t type;
|
int8_t type;
|
||||||
union {
|
union {
|
||||||
struct {
|
struct {
|
||||||
char* name; // super table name
|
char* name; // super table name
|
||||||
tb_uid_t suid;
|
tb_uid_t suid;
|
||||||
SArray* tagName;
|
SArray* tagName;
|
||||||
uint8_t* pTag;
|
uint8_t* pTag;
|
||||||
|
@ -2146,6 +2152,15 @@ typedef struct {
|
||||||
char cgroup[TSDB_CGROUP_LEN];
|
char cgroup[TSDB_CGROUP_LEN];
|
||||||
} SMqAskEpReq;
|
} SMqAskEpReq;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
+  int64_t consumerId;
+  int32_t epoch;
+} SMqHbReq;
+
+typedef struct {
+  int8_t reserved;
+} SMqHbRsp;
+
 typedef struct {
   int32_t key;
   int32_t valueLen;

@@ -2234,6 +2249,7 @@ typedef struct {
 typedef struct {
   int64_t reqId;
   int64_t rspId;
+  int32_t svrTimestamp;
   SArray* rsps;  // SArray<SClientHbRsp>
 } SClientHbBatchRsp;
 

@@ -2334,29 +2350,30 @@ static FORCE_INLINE int32_t tDecodeSClientHbKey(SDecoder* pDecoder, SClientHbKey
   return 0;
 }
 
-typedef struct SMqHbVgInfo {
+typedef struct {
   int32_t vgId;
-} SMqHbVgInfo;
+  // TODO stas
+} SMqReportVgInfo;
 
-static FORCE_INLINE int32_t taosEncodeSMqVgInfo(void** buf, const SMqHbVgInfo* pVgInfo) {
+static FORCE_INLINE int32_t taosEncodeSMqVgInfo(void** buf, const SMqReportVgInfo* pVgInfo) {
   int32_t tlen = 0;
   tlen += taosEncodeFixedI32(buf, pVgInfo->vgId);
   return tlen;
 }
 
-static FORCE_INLINE void* taosDecodeSMqVgInfo(void* buf, SMqHbVgInfo* pVgInfo) {
+static FORCE_INLINE void* taosDecodeSMqVgInfo(void* buf, SMqReportVgInfo* pVgInfo) {
   buf = taosDecodeFixedI32(buf, &pVgInfo->vgId);
   return buf;
 }
 
-typedef struct SMqHbTopicInfo {
+typedef struct {
   int32_t epoch;
   int64_t topicUid;
   char    name[TSDB_TOPIC_FNAME_LEN];
   SArray* pVgInfo;  // SArray<SMqHbVgInfo>
-} SMqHbTopicInfo;
+} SMqTopicInfo;
 
-static FORCE_INLINE int32_t taosEncodeSMqHbTopicInfoMsg(void** buf, const SMqHbTopicInfo* pTopicInfo) {
+static FORCE_INLINE int32_t taosEncodeSMqTopicInfoMsg(void** buf, const SMqTopicInfo* pTopicInfo) {
   int32_t tlen = 0;
   tlen += taosEncodeFixedI32(buf, pTopicInfo->epoch);
   tlen += taosEncodeFixedI64(buf, pTopicInfo->topicUid);

@@ -2364,35 +2381,35 @@ static FORCE_INLINE int32_t taosEncodeSMqHbTopicInfoMsg(void** buf, const SMqHbT
   int32_t sz = taosArrayGetSize(pTopicInfo->pVgInfo);
   tlen += taosEncodeFixedI32(buf, sz);
   for (int32_t i = 0; i < sz; i++) {
-    SMqHbVgInfo* pVgInfo = (SMqHbVgInfo*)taosArrayGet(pTopicInfo->pVgInfo, i);
+    SMqReportVgInfo* pVgInfo = (SMqReportVgInfo*)taosArrayGet(pTopicInfo->pVgInfo, i);
     tlen += taosEncodeSMqVgInfo(buf, pVgInfo);
   }
   return tlen;
 }
 
-static FORCE_INLINE void* taosDecodeSMqHbTopicInfoMsg(void* buf, SMqHbTopicInfo* pTopicInfo) {
+static FORCE_INLINE void* taosDecodeSMqTopicInfoMsg(void* buf, SMqTopicInfo* pTopicInfo) {
   buf = taosDecodeFixedI32(buf, &pTopicInfo->epoch);
   buf = taosDecodeFixedI64(buf, &pTopicInfo->topicUid);
   buf = taosDecodeStringTo(buf, pTopicInfo->name);
   int32_t sz;
   buf = taosDecodeFixedI32(buf, &sz);
-  pTopicInfo->pVgInfo = taosArrayInit(sz, sizeof(SMqHbVgInfo));
+  pTopicInfo->pVgInfo = taosArrayInit(sz, sizeof(SMqReportVgInfo));
   for (int32_t i = 0; i < sz; i++) {
-    SMqHbVgInfo vgInfo;
+    SMqReportVgInfo vgInfo;
     buf = taosDecodeSMqVgInfo(buf, &vgInfo);
     taosArrayPush(pTopicInfo->pVgInfo, &vgInfo);
   }
   return buf;
 }
 
-typedef struct SMqHbMsg {
+typedef struct {
   int32_t status;  // ask hb endpoint
   int32_t epoch;
   int64_t consumerId;
   SArray* pTopics;  // SArray<SMqHbTopicInfo>
-} SMqHbMsg;
+} SMqReportReq;
 
-static FORCE_INLINE int32_t taosEncodeSMqMsg(void** buf, const SMqHbMsg* pMsg) {
+static FORCE_INLINE int32_t taosEncodeSMqReportMsg(void** buf, const SMqReportReq* pMsg) {
   int32_t tlen = 0;
   tlen += taosEncodeFixedI32(buf, pMsg->status);
   tlen += taosEncodeFixedI32(buf, pMsg->epoch);

@@ -2400,22 +2417,22 @@ static FORCE_INLINE int32_t taosEncodeSMqMsg(void** buf, const SMqHbMsg* pMsg) {
   int32_t sz = taosArrayGetSize(pMsg->pTopics);
   tlen += taosEncodeFixedI32(buf, sz);
   for (int32_t i = 0; i < sz; i++) {
-    SMqHbTopicInfo* topicInfo = (SMqHbTopicInfo*)taosArrayGet(pMsg->pTopics, i);
-    tlen += taosEncodeSMqHbTopicInfoMsg(buf, topicInfo);
+    SMqTopicInfo* topicInfo = (SMqTopicInfo*)taosArrayGet(pMsg->pTopics, i);
+    tlen += taosEncodeSMqTopicInfoMsg(buf, topicInfo);
   }
   return tlen;
 }
 
-static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) {
+static FORCE_INLINE void* taosDecodeSMqReportMsg(void* buf, SMqReportReq* pMsg) {
   buf = taosDecodeFixedI32(buf, &pMsg->status);
   buf = taosDecodeFixedI32(buf, &pMsg->epoch);
   buf = taosDecodeFixedI64(buf, &pMsg->consumerId);
   int32_t sz;
   buf = taosDecodeFixedI32(buf, &sz);
-  pMsg->pTopics = taosArrayInit(sz, sizeof(SMqHbTopicInfo));
+  pMsg->pTopics = taosArrayInit(sz, sizeof(SMqTopicInfo));
   for (int32_t i = 0; i < sz; i++) {
-    SMqHbTopicInfo topicInfo;
-    buf = taosDecodeSMqHbTopicInfoMsg(buf, &topicInfo);
+    SMqTopicInfo topicInfo;
+    buf = taosDecodeSMqTopicInfoMsg(buf, &topicInfo);
     taosArrayPush(pMsg->pTopics, &topicInfo);
   }
   return buf;

@@ -2438,9 +2455,6 @@ typedef struct {
   int8_t igNotExists;
 } SMDropStreamReq;
 
-int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
-int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
-
 typedef struct {
   int8_t reserved;
 } SMDropStreamRsp;

@@ -2455,6 +2469,27 @@ typedef struct {
   int8_t reserved;
 } SVDropStreamTaskRsp;
 
+int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
+int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
+
+typedef struct {
+  char   name[TSDB_STREAM_FNAME_LEN];
+  int8_t igNotExists;
+} SMRecoverStreamReq;
+
+typedef struct {
+  int8_t reserved;
+} SMRecoverStreamRsp;
+
+typedef struct {
+  int64_t recoverObjUid;
+  int32_t taskId;
+  int32_t hasCheckPoint;
+} SMVStreamGatherInfoReq;
+
+int32_t tSerializeSMRecoverStreamReq(void* buf, int32_t bufLen, const SMRecoverStreamReq* pReq);
+int32_t tDeserializeSMRecoverStreamReq(void* buf, int32_t bufLen, SMRecoverStreamReq* pReq);
+
 typedef struct {
   int64_t leftForVer;
   int32_t vgId;

@@ -2877,7 +2912,8 @@ static FORCE_INLINE int32_t tEncodeSMqMetaRsp(void** buf, const SMqMetaRsp* pRsp
 }
 
 static FORCE_INLINE void* tDecodeSMqMetaRsp(const void* buf, SMqMetaRsp* pRsp) {
-  buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
+  buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
+  buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
   buf = taosDecodeFixedI16(buf, &pRsp->resMsgType);
   buf = taosDecodeFixedI32(buf, &pRsp->metaRspLen);
   buf = taosDecodeBinary(buf, &pRsp->metaRsp, pRsp->metaRspLen);

@@ -2901,89 +2937,6 @@ typedef struct {
 int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
 int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
 
-#if 0
-typedef struct {
-  SMqRspHead head;
-  int64_t    reqOffset;
-  int64_t    rspOffset;
-  int32_t    skipLogNum;
-  int32_t    blockNum;
-  int8_t     withTbName;
-  int8_t     withSchema;
-  SArray*    blockDataLen;  // SArray<int32_t>
-  SArray*    blockData;     // SArray<SRetrieveTableRsp*>
-  SArray*    blockTbName;   // SArray<char*>
-  SArray*    blockSchema;   // SArray<SSchemaWrapper>
-} SMqDataBlkRsp;
-
-static FORCE_INLINE int32_t tEncodeSMqDataBlkRsp(void** buf, const SMqDataBlkRsp* pRsp) {
-  int32_t tlen = 0;
-  tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
-  tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
-  tlen += taosEncodeFixedI32(buf, pRsp->skipLogNum);
-  tlen += taosEncodeFixedI32(buf, pRsp->blockNum);
-  if (pRsp->blockNum != 0) {
-    tlen += taosEncodeFixedI8(buf, pRsp->withTbName);
-    tlen += taosEncodeFixedI8(buf, pRsp->withSchema);
-
-    for (int32_t i = 0; i < pRsp->blockNum; i++) {
-      int32_t bLen = *(int32_t*)taosArrayGet(pRsp->blockDataLen, i);
-      void*   data = taosArrayGetP(pRsp->blockData, i);
-      tlen += taosEncodeFixedI32(buf, bLen);
-      tlen += taosEncodeBinary(buf, data, bLen);
-      if (pRsp->withSchema) {
-        SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRsp->blockSchema, i);
-        tlen += taosEncodeSSchemaWrapper(buf, pSW);
-      }
-      if (pRsp->withTbName) {
-        char* tbName = (char*)taosArrayGetP(pRsp->blockTbName, i);
-        tlen += taosEncodeString(buf, tbName);
-      }
-    }
-  }
-  return tlen;
-}
-
-static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* pRsp) {
-  buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
-  buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
-  buf = taosDecodeFixedI32(buf, &pRsp->skipLogNum);
-  buf = taosDecodeFixedI32(buf, &pRsp->blockNum);
-  if (pRsp->blockNum != 0) {
-    pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void*));
-    pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
-    buf = taosDecodeFixedI8(buf, &pRsp->withTbName);
-    buf = taosDecodeFixedI8(buf, &pRsp->withSchema);
-    if (pRsp->withTbName) {
-      pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
-    }
-    if (pRsp->withSchema) {
-      pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
-    }
-
-    for (int32_t i = 0; i < pRsp->blockNum; i++) {
-      int32_t bLen = 0;
-      void*   data = NULL;
-      buf = taosDecodeFixedI32(buf, &bLen);
-      buf = taosDecodeBinary(buf, &data, bLen);
-      taosArrayPush(pRsp->blockDataLen, &bLen);
-      taosArrayPush(pRsp->blockData, &data);
-      if (pRsp->withSchema) {
-        SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper));
-        buf = taosDecodeSSchemaWrapper(buf, pSW);
-        taosArrayPush(pRsp->blockSchema, &pSW);
-      }
-      if (pRsp->withTbName) {
-        char* name = NULL;
-        buf = taosDecodeString(buf, &name);
-        taosArrayPush(pRsp->blockTbName, &name);
-      }
-    }
-  }
-  return (void*)buf;
-}
-#endif
-
 typedef struct {
   SMqRspHead head;
   char       cgroup[TSDB_CGROUP_LEN];
@@ -131,6 +131,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_STREAM, "create-stream", SCMCreateStreamReq, SCMCreateStreamRsp)
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_STREAM, "alter-stream", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_DROP_STREAM, "drop-stream", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_RECOVER_STREAM, "recover-stream", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_INDEX, "create-index", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "drop-index", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "get-index", NULL, NULL)

@@ -143,6 +144,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp)
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_MQ_HB, "consumer-hb", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
   TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mnode-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp)
@@ -79,8 +79,8 @@
 #define TK_NOT 61
 #define TK_EXISTS 62
 #define TK_BUFFER 63
-#define TK_CACHELAST 64
-#define TK_CACHELASTSIZE 65
+#define TK_CACHEMODEL 64
+#define TK_CACHESIZE 65
 #define TK_COMP 66
 #define TK_DURATION 67
 #define TK_NK_VARIABLE 68
@@ -192,6 +192,8 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);
 
 int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
 
+int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer);
+
 #ifdef __cplusplus
 }
 #endif
@@ -190,14 +190,13 @@ bool fmIsUserDefinedFunc(int32_t funcId);
 bool fmIsDistExecFunc(int32_t funcId);
 bool fmIsForbidFillFunc(int32_t funcId);
 bool fmIsForbidStreamFunc(int32_t funcId);
-bool fmIsForbidWindowFunc(int32_t funcId);
-bool fmIsForbidGroupByFunc(int32_t funcId);
 bool fmIsIntervalInterpoFunc(int32_t funcId);
 bool fmIsInterpFunc(int32_t funcId);
 bool fmIsLastRowFunc(int32_t funcId);
 bool fmIsSystemInfoFunc(int32_t funcId);
 bool fmIsImplicitTsFunc(int32_t funcId);
 bool fmIsClientPseudoColumnFunc(int32_t funcId);
+bool fmIsMultiRowsFunc(int32_t funcId);
 
 int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
 
@@ -51,7 +51,8 @@ extern "C" {
 typedef struct SDatabaseOptions {
   ENodeType type;
   int32_t   buffer;
-  int8_t    cacheLast;
+  char      cacheModelStr[TSDB_CACHE_MODEL_STR_LEN];
+  int8_t    cacheModel;
   int32_t   cacheLastSize;
   int8_t    compressionLevel;
   int32_t   daysPerFile;

@@ -66,6 +67,7 @@ typedef struct SDatabaseOptions {
   char    precisionStr[3];
   int8_t  precision;
   int8_t  replica;
+  char    strictStr[TSDB_DB_STRICT_STR_LEN];
   int8_t  strict;
   int8_t  walLevel;
   int32_t numOfVgroups;
@@ -258,6 +258,7 @@ typedef struct SSelectStmt {
   bool hasAggFuncs;
   bool hasRepeatScanFuncs;
   bool hasIndefiniteRowsFunc;
+  bool hasMultiRowsFunc;
   bool hasSelectFunc;
   bool hasSelectValFunc;
   bool hasOtherVectorFunc;
@@ -104,6 +104,16 @@ int32_t maxScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *
 int32_t avgScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
 int32_t stddevScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
 int32_t leastSQRScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t percentileScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t apercentileScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t spreadScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t derivativeScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t irateScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t twaScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t mavgScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t hllScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t csumScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
+int32_t diffScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
 
 #ifdef __cplusplus
 }
@@ -31,9 +31,18 @@ extern "C" {
 
 typedef struct SStreamTask SStreamTask;
 
+enum {
+  STREAM_STATUS__NORMAL = 0,
+  STREAM_STATUS__RECOVER,
+};
+
 enum {
   TASK_STATUS__NORMAL = 0,
   TASK_STATUS__DROPPING,
+  TASK_STATUS__FAIL,
+  TASK_STATUS__STOP,
+  TASK_STATUS__PREPARE_RECOVER,
+  TASK_STATUS__RECOVERING,
 };
 
 enum {

@@ -72,6 +81,7 @@ typedef struct {
   int8_t type;
 
   int32_t srcVgId;
+  int32_t childId;
   int64_t sourceVer;
 
   SArray* blocks;  // SArray<SSDataBlock*>

@@ -222,6 +232,8 @@ typedef struct {
   int32_t nodeId;
   int32_t childId;
   int32_t taskId;
+  int64_t checkpointVer;
+  int64_t processedVer;
   SEpSet  epSet;
 } SStreamChildEpInfo;
 

@@ -232,6 +244,7 @@ typedef struct SStreamTask {
   int8_t execType;
   int8_t sinkType;
   int8_t dispatchType;
+  int8_t isStreamDistributed;
   int16_t dispatchMsgType;
 
   int8_t taskStatus;

@@ -242,6 +255,13 @@ typedef struct SStreamTask {
   int32_t nodeId;
   SEpSet  epSet;
 
+  // used for semi or single task,
+  // while final task should have processedVer for each child
+  int64_t recoverSnapVer;
+  int64_t startVer;
+  int64_t checkpointVer;
+  int64_t processedVer;
+
   // children info
   SArray* childEpInfo;  // SArray<SStreamChildEpInfo*>
 

@@ -316,12 +336,12 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
   } else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
     taosWriteQitem(pTask->inputQueue->queue, pItem);
     // qStreamInput(pTask->exec.executor, pItem);
-  } else if (pItem->type == STREAM_INPUT__TRIGGER) {
+  } else if (pItem->type == STREAM_INPUT__GET_RES) {
     taosWriteQitem(pTask->inputQueue->queue, pItem);
     // qStreamInput(pTask->exec.executor, pItem);
   }
 
-  if (pItem->type != STREAM_INPUT__TRIGGER && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
+  if (pItem->type != STREAM_INPUT__GET_RES && pItem->type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
     atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__IN_ACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
   }
 

@@ -420,6 +440,36 @@ typedef struct {
   int8_t inputStatus;
 } SStreamTaskRecoverRsp;
 
+int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecoverReq* pReq);
+int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* pReq);
+
+int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecoverRsp* pRsp);
+int32_t tDecodeStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamTaskRecoverRsp* pRsp);
+
+typedef struct {
+  int64_t streamId;
+  int32_t taskId;
+} SMStreamTaskRecoverReq;
+
+typedef struct {
+  int64_t streamId;
+  int32_t taskId;
+} SMStreamTaskRecoverRsp;
+
+int32_t tEncodeSMStreamTaskRecoverReq(SEncoder* pEncoder, const SMStreamTaskRecoverReq* pReq);
+int32_t tDecodeSMStreamTaskRecoverReq(SDecoder* pDecoder, SMStreamTaskRecoverReq* pReq);
+
+int32_t tEncodeSMStreamTaskRecoverRsp(SEncoder* pEncoder, const SMStreamTaskRecoverRsp* pRsp);
+int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp* pRsp);
+
+typedef struct {
+  int64_t streamId;
+} SPStreamTaskRecoverReq;
+
+typedef struct {
+  int8_t reserved;
+} SPStreamTaskRecoverRsp;
+
 int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
 int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
 
@@ -135,7 +135,7 @@ void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg
 int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
 void    rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
 void    rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
-int64_t rpcAllocHandle();
+void*   rpcAllocHandle();
 
 #ifdef __cplusplus
 }
@@ -73,6 +73,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_MSG_DECODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0031)
 #define TSDB_CODE_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0032)
 #define TSDB_CODE_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0033)
+#define TSDB_CODE_TIME_UNSYNCED TAOS_DEF_ERROR_CODE(0, 0x0034)
 
 #define TSDB_CODE_REF_NO_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0040)
 #define TSDB_CODE_REF_FULL TAOS_DEF_ERROR_CODE(0, 0x0041)

@@ -122,7 +123,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_DUP_COL_NAMES TAOS_DEF_ERROR_CODE(0, 0x021D)
 #define TSDB_CODE_TSC_INVALID_TAG_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021E)
 #define TSDB_CODE_TSC_INVALID_COLUMN_LENGTH TAOS_DEF_ERROR_CODE(0, 0x021F)
-#define TSDB_CODE_TSC_DUP_TAG_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220)
+#define TSDB_CODE_TSC_DUP_NAMES TAOS_DEF_ERROR_CODE(0, 0x0220)
 #define TSDB_CODE_TSC_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x0221)
 #define TSDB_CODE_TSC_INVALID_JSON_TYPE TAOS_DEF_ERROR_CODE(0, 0x0222)
 #define TSDB_CODE_TSC_VALUE_OUT_OF_RANGE TAOS_DEF_ERROR_CODE(0, 0x0223)

@@ -331,7 +332,6 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
 #define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
 #define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
-#define TSDB_CODE_VND_READ_END TAOS_DEF_ERROR_CODE(0, 0x051c)
 
 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)

@@ -615,6 +615,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_SML_INVALID_PRECISION_TYPE TAOS_DEF_ERROR_CODE(0, 0x3001)
 #define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002)
 #define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003)
+#define TSDB_CODE_SML_NOT_SAME_TYPE TAOS_DEF_ERROR_CODE(0, 0x3004)
 
 //tsma
 #define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100)
@@ -53,7 +53,7 @@ extern const int32_t TYPE_BYTES[16];
 #define TSDB_DATA_BIGINT_NULL 0x8000000000000000LL
 #define TSDB_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL
 
 #define TSDB_DATA_FLOAT_NULL 0x7FF00000  // it is an NAN
 #define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL  // an NAN
 #define TSDB_DATA_NCHAR_NULL 0xFFFFFFFF
 #define TSDB_DATA_BINARY_NULL 0xFF

@@ -107,9 +107,10 @@ extern const int32_t TYPE_BYTES[16];
 
 #define TSDB_INS_USER_STABLES_DBNAME_COLID 2
 
 #define TSDB_TICK_PER_SECOND(precision) \
-  ((int64_t)((precision) == TSDB_TIME_PRECISION_MILLI ? 1000LL \
-             : ((precision) == TSDB_TIME_PRECISION_MICRO ? 1000000LL : 1000000000LL)))
+  ((int64_t)((precision) == TSDB_TIME_PRECISION_MILLI \
+                 ? 1000LL \
+                 : ((precision) == TSDB_TIME_PRECISION_MICRO ? 1000000LL : 1000000000LL)))
 
 #define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
 #define T_APPEND_MEMBER(dst, ptr, type, member) \

@@ -328,15 +329,25 @@ typedef enum ELogicConditionType {
 #define TSDB_MIN_DB_REPLICA 1
 #define TSDB_MAX_DB_REPLICA 3
 #define TSDB_DEFAULT_DB_REPLICA 1
+#define TSDB_DB_STRICT_STR_LEN sizeof(TSDB_DB_STRICT_OFF_STR)
+#define TSDB_DB_STRICT_OFF_STR "off"
+#define TSDB_DB_STRICT_ON_STR "on"
 #define TSDB_DB_STRICT_OFF 0
 #define TSDB_DB_STRICT_ON 1
-#define TSDB_DEFAULT_DB_STRICT 0
-#define TSDB_MIN_DB_CACHE_LAST 0
-#define TSDB_MAX_DB_CACHE_LAST 3
-#define TSDB_DEFAULT_CACHE_LAST 0
-#define TSDB_MIN_DB_CACHE_LAST_SIZE 1 // MB
-#define TSDB_MAX_DB_CACHE_LAST_SIZE 65536
-#define TSDB_DEFAULT_CACHE_LAST_SIZE 1
+#define TSDB_DEFAULT_DB_STRICT TSDB_DB_STRICT_OFF
+#define TSDB_CACHE_MODEL_STR_LEN sizeof(TSDB_CACHE_MODEL_LAST_VALUE_STR)
+#define TSDB_CACHE_MODEL_NONE_STR "none"
+#define TSDB_CACHE_MODEL_LAST_ROW_STR "last_row"
+#define TSDB_CACHE_MODEL_LAST_VALUE_STR "last_value"
+#define TSDB_CACHE_MODEL_BOTH_STR "both"
+#define TSDB_CACHE_MODEL_NONE 0
+#define TSDB_CACHE_MODEL_LAST_ROW 1
+#define TSDB_CACHE_MODEL_LAST_VALUE 2
+#define TSDB_CACHE_MODEL_BOTH 3
+#define TSDB_DEFAULT_CACHE_MODEL TSDB_CACHE_MODEL_NONE
+#define TSDB_MIN_DB_CACHE_SIZE 1 // MB
+#define TSDB_MAX_DB_CACHE_SIZE 65536
+#define TSDB_DEFAULT_CACHE_SIZE 1
 #define TSDB_DB_STREAM_MODE_OFF 0
 #define TSDB_DB_STREAM_MODE_ON 1
 #define TSDB_DEFAULT_DB_STREAM_MODE 0
@@ -286,7 +286,7 @@ static FORCE_INLINE SReqResultInfo* tscGetCurResInfo(TAOS_RES* res) {
 extern SAppInfo appInfo;
 extern int32_t  clientReqRefPool;
 extern int32_t  clientConnRefPool;
-extern void*    tscQhandle;
+extern int32_t  timestampDeltaLimit;
 
 __async_send_cb_fn_t getMsgRspHandle(int32_t msgType);
 
@@ -161,7 +161,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTableNam
  * Signature: (JJLcom/taosdata/jdbc/TSDBResultSetBlockData;ILjava/util/List;)I
  */
 JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(JNIEnv *, jobject, jlong, jlong,
-                                                                                jobject, jint, jobject);
+                                                                                jobject, jobject);
 
 #ifdef __cplusplus
 }
@@ -278,7 +278,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTableNam
 }
 
 JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(JNIEnv *env, jobject jobj, jlong con,
-                                                                                jlong res, jobject rowobj, jint flag,
+                                                                                jlong res, jobject rowobj,
                                                                                 jobject arrayListObj) {
   TAOS *tscon = (TAOS *)con;
   int32_t code = check_for_params(jobj, con, res);
@@ -309,16 +309,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(
 
   TAOS_FIELD *fields = taos_fetch_fields(tres);
   jniDebug("jobj:%p, conn:%p, resultset:%p, fields size is %d", jobj, tscon, tres, numOfFields);
-  if (flag) {
-    for (int i = 0; i < numOfFields; ++i) {
-      jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp);
-      (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type);
-      (*env)->SetIntField(env, metadataObj, g_metadataColsizeField, fields[i].bytes);
-      (*env)->SetIntField(env, metadataObj, g_metadataColindexField, i);
-      jstring metadataObjColname = (*env)->NewStringUTF(env, fields[i].name);
-      (*env)->SetObjectField(env, metadataObj, g_metadataColnameField, metadataObjColname);
-      (*env)->CallBooleanMethod(env, arrayListObj, g_arrayListAddFp, metadataObj);
-    }
+  for (int i = 0; i < numOfFields; ++i) {
+    jobject metadataObj = (*env)->NewObject(env, g_metadataClass, g_metadataConstructFp);
+    (*env)->SetIntField(env, metadataObj, g_metadataColtypeField, fields[i].type);
+    (*env)->SetIntField(env, metadataObj, g_metadataColsizeField, fields[i].bytes);
+    (*env)->SetIntField(env, metadataObj, g_metadataColindexField, i);
+    jstring metadataObjColname = (*env)->NewStringUTF(env, fields[i].name);
+    (*env)->SetObjectField(env, metadataObj, g_metadataColnameField, metadataObjColname);
+    (*env)->CallBooleanMethod(env, arrayListObj, g_arrayListAddFp, metadataObj);
   }
 
   (*env)->CallVoidMethod(env, rowobj, g_blockdataSetNumOfRowsFp, (jint)numOfRows);
@@ -35,6 +35,8 @@ SAppInfo appInfo;
 int32_t clientReqRefPool = -1;
 int32_t clientConnRefPool = -1;
 
+int32_t timestampDeltaLimit = 900;  // s
+
 static TdThreadOnce tscinit = PTHREAD_ONCE_INIT;
 volatile int32_t tscInitRes = 0;
 
@@ -181,7 +183,7 @@ void destroyTscObj(void *pObj) {
 
   destroyAllRequests(pTscObj->pRequests);
   taosHashCleanup(pTscObj->pRequests);
 
   schedulerStopQueryHb(pTscObj->pAppInfo->pTransporter);
   tscDebug("connObj 0x%" PRIx64 " p:%p destroyed, remain inst totalConn:%" PRId64, pTscObj->id, pTscObj,
            pTscObj->pAppInfo->numOfConns);
@@ -70,7 +70,7 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog
   if (NULL == vgInfo) {
     return TSDB_CODE_TSC_OUT_OF_MEMORY;
   }
 
   vgInfo->vgVersion = rsp->vgVersion;
   vgInfo->hashMethod = rsp->hashMethod;
   vgInfo->vgHash = taosHashInit(rsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);

@@ -156,21 +156,22 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
   STscObj *pTscObj = (STscObj *)acquireTscObj(pRsp->connKey.tscRid);
   if (NULL == pTscObj) {
     tscDebug("tscObj rid %" PRIx64 " not exist", pRsp->connKey.tscRid);
   } else {
     if (pRsp->query->totalDnodes > 1 && !isEpsetEqual(&pTscObj->pAppInfo->mgmtEp.epSet, &pRsp->query->epSet)) {
-      SEpSet* pOrig = &pTscObj->pAppInfo->mgmtEp.epSet;
-      SEp* pOrigEp = &pOrig->eps[pOrig->inUse];
-      SEp* pNewEp = &pRsp->query->epSet.eps[pRsp->query->epSet.inUse];
-      tscDebug("mnode epset updated from %d/%d=>%s:%d to %d/%d=>%s:%d in hb",
-               pOrig->inUse, pOrig->numOfEps, pOrigEp->fqdn, pOrigEp->port,
-               pRsp->query->epSet.inUse, pRsp->query->epSet.numOfEps, pNewEp->fqdn, pNewEp->port);
+      SEpSet *pOrig = &pTscObj->pAppInfo->mgmtEp.epSet;
+      SEp *pOrigEp = &pOrig->eps[pOrig->inUse];
+      SEp *pNewEp = &pRsp->query->epSet.eps[pRsp->query->epSet.inUse];
+      tscDebug("mnode epset updated from %d/%d=>%s:%d to %d/%d=>%s:%d in hb", pOrig->inUse, pOrig->numOfEps,
+               pOrigEp->fqdn, pOrigEp->port, pRsp->query->epSet.inUse, pRsp->query->epSet.numOfEps, pNewEp->fqdn,
+               pNewEp->port);
 
       updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &pRsp->query->epSet);
     }
 
     pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
     pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
     pTscObj->connId = pRsp->query->connId;
+    tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes, pTscObj->pAppInfo->totalDnodes);
 
     if (pRsp->query->killRid) {
       tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);

@@ -263,13 +264,20 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
 }
 
 static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
   static int32_t emptyRspNum = 0;
   char *key = (char *)param;
   SClientHbBatchRsp pRsp = {0};
   if (TSDB_CODE_SUCCESS == code) {
     tDeserializeSClientHbBatchRsp(pMsg->pData, pMsg->len, &pRsp);
   }
 
+  int32_t now = taosGetTimestampSec();
+  int32_t delta = abs(now - pRsp.svrTimestamp);
+  if (delta > timestampDeltaLimit) {
+    code = TSDB_CODE_TIME_UNSYNCED;
+    tscError("time diff: %ds is too big", delta);
+  }
+
   int32_t rspNum = taosArrayGetSize(pRsp.rsps);
 
   taosThreadMutexLock(&appInfo.mutex);

@@ -286,7 +294,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
   taosMemoryFreeClear(param);
 
   if (code != 0) {
-    (*pInst)->onlineDnodes = 0;
+    (*pInst)->onlineDnodes = ((*pInst)->totalDnodes ? 0 : -1);
+    tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes, (*pInst)->totalDnodes);
   }
 
   if (rspNum) {

@@ -373,7 +382,7 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
     releaseTscObj(connKey->tscRid);
     return TSDB_CODE_QRY_OUT_OF_MEMORY;
   }
 
   hbBasic->connId = pTscObj->connId;
 
   int32_t numOfQueries = pTscObj->pRequests ? taosHashGetSize(pTscObj->pRequests) : 0;

@@ -392,7 +401,6 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
     return TSDB_CODE_QRY_OUT_OF_MEMORY;
   }
 
-
   int32_t code = hbBuildQueryDesc(hbBasic, pTscObj);
   if (code) {
     releaseTscObj(connKey->tscRid);

@@ -436,13 +444,12 @@ int32_t hbGetExpiredUserInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, S
   if (NULL == req->info) {
     req->info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);
   }
 
   taosHashPut(req->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv));
 
   return TSDB_CODE_SUCCESS;
 }
 
-
 int32_t hbGetExpiredDBInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SClientHbReq *req) {
   SDbVgVersion *dbs = NULL;
   uint32_t dbNum = 0;

@@ -483,8 +490,8 @@ int32_t hbGetExpiredDBInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SCl
 
 int32_t hbGetExpiredStbInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SClientHbReq *req) {
   SSTableVersion *stbs = NULL;
   uint32_t stbNum = 0;
   int32_t code = 0;
 
   code = catalogGetExpiredSTables(pCatalog, &stbs, &stbNum);
   if (TSDB_CODE_SUCCESS != code) {

@@ -521,20 +528,19 @@ int32_t hbGetExpiredStbInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SC
 }
 
 int32_t hbGetAppInfo(int64_t clusterId, SClientHbReq *req) {
-  SAppHbReq* pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
+  SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
   if (NULL != pApp) {
     memcpy(&req->app, pApp, sizeof(*pApp));
   } else {
     memset(&req->app.summary, 0, sizeof(req->app.summary));
     req->app.pid = taosGetPId();
     req->app.appId = clientHbMgr.appId;
     taosGetAppName(req->app.name, NULL);
   }
 
   return TSDB_CODE_SUCCESS;
 }
 
-
 int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req) {
   int64_t *clusterId = (int64_t *)param;
   struct SCatalog *pCatalog = NULL;

@@ -567,7 +573,8 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
   return TSDB_CODE_SUCCESS;
 }
 
-void hbMgrInitMqHbHandle() {
+static FORCE_INLINE void hbMgrInitHandle() {
+  // init all handle
   clientHbMgr.reqHandle[CONN_TYPE__QUERY] = hbQueryHbReqHandle;
   clientHbMgr.reqHandle[CONN_TYPE__TMQ] = hbMqHbReqHandle;
 

@@ -575,11 +582,6 @@ void hbMgrInitMqHbHandle() {
   clientHbMgr.rspHandle[CONN_TYPE__TMQ] = hbMqHbRspHandle;
 }
 
-static FORCE_INLINE void hbMgrInitHandle() {
-  // init all handle
-  hbMgrInitMqHbHandle();
-}
-
 SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
   SClientHbBatchReq *pBatchReq = taosMemoryCalloc(1, sizeof(SClientHbBatchReq));
   if (pBatchReq == NULL) {

@@ -602,7 +604,7 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
       continue;
     }
 
-    //hbClearClientHbReq(pOneReq);
+    // hbClearClientHbReq(pOneReq);
 
     pIter = taosHashIterate(pAppHbMgr->activeInfo, pIter);
   }

@@ -615,11 +617,9 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
   return pBatchReq;
 }
 
-void hbThreadFuncUnexpectedStopped(void) {
-  atomic_store_8(&clientHbMgr.threadStop, 2);
-}
+void hbThreadFuncUnexpectedStopped(void) { atomic_store_8(&clientHbMgr.threadStop, 2); }
 
-void hbMergeSummary(SAppClusterSummary* dst, SAppClusterSummary* src) {
+void hbMergeSummary(SAppClusterSummary *dst, SAppClusterSummary *src) {
   dst->numOfInsertsReq += src->numOfInsertsReq;
   dst->numOfInsertRows += src->numOfInsertRows;
   dst->insertElapsedTime += src->insertElapsedTime;

@@ -633,7 +633,7 @@ void hbMergeSummary(SAppClusterSummary* dst, SAppClusterSummary* src) {
 
 int32_t hbGatherAppInfo(void) {
   SAppHbReq req = {0};
   int sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
   if (sz > 0) {
     req.pid = taosGetPId();
     req.appId = clientHbMgr.appId;

@@ -641,11 +641,11 @@ int32_t hbGatherAppInfo(void) {
   }
 
   taosHashClear(clientHbMgr.appSummary);
 
   for (int32_t i = 0; i < sz; ++i) {
     SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
     uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId;
-    SAppHbReq* pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
+    SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
     if (NULL == pApp) {
       memcpy(&req.summary, &pAppHbMgr->pAppInstInfo->summary, sizeof(req.summary));
       req.startTime = pAppHbMgr->startTime;

@@ -654,7 +654,7 @@ int32_t hbGatherAppInfo(void) {
       if (pAppHbMgr->startTime < pApp->startTime) {
         pApp->startTime = pAppHbMgr->startTime;
       }
 
       hbMergeSummary(&pApp->summary, &pAppHbMgr->pAppInstInfo->summary);
     }
   }

@@ -662,7 +662,6 @@ int32_t hbGatherAppInfo(void) {
   return TSDB_CODE_SUCCESS;
 }
 
-
 static void *hbThreadFunc(void *param) {
   setThreadName("hb");
 #ifdef WINDOWS

@@ -681,7 +680,7 @@ static void *hbThreadFunc(void *param) {
     if (sz > 0) {
       hbGatherAppInfo();
     }
 
     for (int i = 0; i < sz; i++) {
       SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
 

@@ -698,7 +697,7 @@ static void *hbThreadFunc(void *param) {
       if (buf == NULL) {
         terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
         tFreeClientHbBatchReq(pReq);
-        //hbClearReqInfo(pAppHbMgr);
+        // hbClearReqInfo(pAppHbMgr);
         break;
       }
 

@@ -708,7 +707,7 @@ static void *hbThreadFunc(void *param) {
       if (pInfo == NULL) {
         terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
         tFreeClientHbBatchReq(pReq);
-        //hbClearReqInfo(pAppHbMgr);
+        // hbClearReqInfo(pAppHbMgr);
         taosMemoryFree(buf);
         break;
       }

@@ -725,7 +724,7 @@ static void *hbThreadFunc(void *param) {
       SEpSet epSet = getEpSet_s(&pAppInstInfo->mgmtEp);
       asyncSendMsgToServer(pAppInstInfo->pTransporter, &epSet, &transporterId, pInfo);
       tFreeClientHbBatchReq(pReq);
-      //hbClearReqInfo(pAppHbMgr);
+      // hbClearReqInfo(pAppHbMgr);
 
       atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1);
     }

@@ -759,7 +758,7 @@ static void hbStopThread() {
     return;
   }
 
   taosThreadJoin(clientHbMgr.thread, NULL);
 
   tscDebug("hb thread stopped");
 }

@@ -808,7 +807,7 @@ void hbFreeAppHbMgr(SAppHbMgr *pTarget) {
   }
   taosHashCleanup(pTarget->activeInfo);
   pTarget->activeInfo = NULL;
 
   taosMemoryFree(pTarget->key);
   taosMemoryFree(pTarget);
 }

@@ -843,7 +842,7 @@ int hbMgrInit() {
 
   clientHbMgr.appId = tGenIdPI64();
   tscDebug("app %" PRIx64 " initialized", clientHbMgr.appId);
 
   clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
   clientHbMgr.appHbMgrs = taosArrayInit(0, sizeof(void *));
   taosThreadMutexInit(&clientHbMgr.lock, NULL);

@@ -881,7 +880,7 @@ int hbRegisterConnImpl(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, int64_t clust
   SClientHbReq hbReq = {0};
   hbReq.connKey = connKey;
   hbReq.clusterId = clusterId;
-  //hbReq.info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);
+  // hbReq.info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);
 
   taosHashPut(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey), &hbReq, sizeof(SClientHbReq));
 

@@ -920,4 +919,3 @@ void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
 
   atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
 }
-
@@ -1276,7 +1276,12 @@ int32_t doProcessMsgFromServer(void* param) {
   assert(pMsg->info.ahandle != NULL);
   STscObj* pTscObj = NULL;
 
-  tscDebug("processMsgFromServer message: %s, code: %s", TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code));
+  STraceId* trace = &pMsg->info.traceId;
+  char tbuf[40] = {0};
+  TRACE_TO_STR(trace, tbuf);
+
+  tscDebug("processMsgFromServer handle %p, message: %s, code: %s, gtid: %s", pMsg->info.handle, TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code),
+           tbuf);
 
   if (pSendInfo->requestObjRefId != 0) {
     SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
@@ -142,6 +142,7 @@ void taos_close(TAOS *taos) {
 
 int taos_errno(TAOS_RES *res) {
   if (res == NULL || TD_RES_TMQ_META(res)) {
+    if (terrno == TSDB_CODE_RPC_REDIRECT) terrno = TSDB_CODE_RPC_NETWORK_UNAVAIL;
     return terrno;
   }
 

@@ -149,11 +150,13 @@ int taos_errno(TAOS_RES *res) {
     return 0;
   }
 
-  return ((SRequestObj *)res)->code;
+  return ((SRequestObj *)res)->code == TSDB_CODE_RPC_REDIRECT ? TSDB_CODE_RPC_NETWORK_UNAVAIL
+                                                              : ((SRequestObj *)res)->code;
 }
 
 const char *taos_errstr(TAOS_RES *res) {
   if (res == NULL || TD_RES_TMQ_META(res)) {
+    if (terrno == TSDB_CODE_RPC_REDIRECT) terrno = TSDB_CODE_RPC_NETWORK_UNAVAIL;
     return (const char *)tstrerror(terrno);
   }
 

@@ -165,7 +168,8 @@ const char *taos_errstr(TAOS_RES *res) {
   if (NULL != pRequest->msgBuf && (strlen(pRequest->msgBuf) > 0 || pRequest->code == TSDB_CODE_RPC_FQDN_ERROR)) {
     return pRequest->msgBuf;
   } else {
-    return (const char *)tstrerror(pRequest->code);
+    return pRequest->code == TSDB_CODE_RPC_REDIRECT ? (const char *)tstrerror(TSDB_CODE_RPC_NETWORK_UNAVAIL)
+                                                    : (const char *)tstrerror(pRequest->code);
   }
 }
 
@@ -52,6 +52,18 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {

  SConnectRsp connectRsp = {0};
  tDeserializeSConnectRsp(pMsg->pData, pMsg->len, &connectRsp);

+ int32_t now = taosGetTimestampSec();
+ int32_t delta = abs(now - connectRsp.svrTimestamp);
+ if (delta > timestampDeltaLimit) {
+   code = TSDB_CODE_TIME_UNSYNCED;
+   tscError("time diff:%ds is too big", delta);
+   taosMemoryFree(pMsg->pData);
+   setErrno(pRequest, code);
+   tsem_post(&pRequest->body.rspSem);
+   return code;
+ }
+
  /*assert(connectRsp.epSet.numOfEps > 0);*/
  if (connectRsp.epSet.numOfEps == 0) {
    taosMemoryFree(pMsg->pData);
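The hunk above rejects a new connection when the client and server wall clocks differ by more than a configured limit. A minimal sketch of the same check; the 900-second limit is an assumption for illustration, not the value of the real timestampDeltaLimit:

/* clock_skew_sketch.c - standalone illustration of the connect-time skew check */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int check_clock_skew(int32_t client_now, int32_t server_ts, int32_t limit) {
  int32_t delta = abs(client_now - server_ts);
  if (delta > limit) {
    fprintf(stderr, "time diff:%ds is too big\n", delta);
    return -1;  /* reject the connection */
  }
  return 0;
}

int main(void) {
  int32_t now = (int32_t)time(NULL);
  printf("in sync: %d\n", check_clock_skew(now, now + 10, 900));    /* 0  */
  printf("skewed:  %d\n", check_clock_skew(now, now + 3600, 900));  /* -1 */
  return 0;
}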
@@ -274,11 +274,16 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
  return 0;
}

-static int32_t smlFindNearestPowerOf2(int32_t length) {
+static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
  int32_t result = 1;
  while (result <= length) {
    result *= 2;
  }
+ if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
+   result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
+ } else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+   result = (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ }
  return result;
}
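The helper above rounds a string length up to the next power of two (so column widths grow in steps rather than on every longer value) and now also caps the result at the per-type maximum. A standalone sketch of that "round up, then cap" behaviour; the 16384 cap stands in for the real per-type limit and is an illustrative value only:

/* pow2_cap_sketch.c */
#include <stdint.h>
#include <stdio.h>

static int32_t next_pow2_capped(int32_t length, int32_t cap) {
  int32_t result = 1;
  while (result <= length) {
    result *= 2;            /* smallest power of two strictly greater than length */
  }
  return result > cap ? cap : result;
}

int main(void) {
  printf("%d\n", next_pow2_capped(5, 16384));      /* 8     */
  printf("%d\n", next_pow2_capped(100, 16384));    /* 128   */
  printf("%d\n", next_pow2_capped(20000, 16384));  /* 16384 (capped) */
  return 0;
}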
@@ -287,7 +292,7 @@ static int32_t smlBuildColumnDescription(SSmlKv *field, char *buf, int32_t bufSi
  char tname[TSDB_TABLE_NAME_LEN] = {0};
  memcpy(tname, field->key, field->keyLen);
  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-   int32_t bytes = smlFindNearestPowerOf2(field->length);
+   int32_t bytes = smlFindNearestPowerOf2(field->length, type);
    int out = snprintf(buf, bufSize, "`%s` %s(%d)", tname, tDataTypes[field->type].name, bytes);
    *outBytes = out;
  } else {
@@ -834,7 +839,7 @@ static int32_t smlParseTS(SSmlHandle *info, const char *data, int32_t len, SArra
    ASSERT(0);
  }

- if (ts == -1) return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+ if (ts == -1) return TSDB_CODE_INVALID_TIMESTAMP;

  // add ts to
  SSmlKv *kv = (SSmlKv *)taosMemoryCalloc(sizeof(SSmlKv), 1);
@@ -851,35 +856,41 @@ static int32_t smlParseTS(SSmlHandle *info, const char *data, int32_t len, SArra
  return TSDB_CODE_SUCCESS;
}

-static bool smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
+static int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
  // binary
  if (smlIsBinary(pVal->value, pVal->length)) {
    pVal->type = TSDB_DATA_TYPE_BINARY;
    pVal->length -= BINARY_ADD_LEN;
+   if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
+     return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
+   }
    pVal->value += (BINARY_ADD_LEN - 1);
-   return true;
+   return TSDB_CODE_SUCCESS;
  }
  // nchar
  if (smlIsNchar(pVal->value, pVal->length)) {
    pVal->type = TSDB_DATA_TYPE_NCHAR;
    pVal->length -= NCHAR_ADD_LEN;
+   if (pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+     return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
+   }
    pVal->value += (NCHAR_ADD_LEN - 1);
-   return true;
+   return TSDB_CODE_SUCCESS;
  }

  // bool
  if (smlParseBool(pVal)) {
    pVal->type = TSDB_DATA_TYPE_BOOL;
    pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
-   return true;
+   return TSDB_CODE_SUCCESS;
  }
  // number
  if (smlParseNumber(pVal, msg)) {
    pVal->length = (int16_t)tDataTypes[pVal->type].bytes;
-   return true;
+   return TSDB_CODE_SUCCESS;
  }

- return false;
+ return TSDB_CODE_TSC_INVALID_VALUE;
}

static int32_t smlParseInfluxString(const char *sql, SSmlLineInfo *elements, SSmlMsgBuf *msg) {
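The change above converts smlParseValue from a bool into an int32_t error code, so callers can propagate the specific failure (value too long, unrecognized value, and so on) instead of collapsing everything into one generic "invalid data" code. A minimal sketch of the new calling convention; the codes and the toy parse logic are illustrative, not TDengine's:

/* parse_value_convention_sketch.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { CODE_SUCCESS = 0, CODE_VALUE_TOO_LONG = 1, CODE_INVALID_VALUE = 2 };

static int32_t parse_value(const char *val, size_t max_len) {
  if (strlen(val) > max_len) return CODE_VALUE_TOO_LONG;
  if (val[0] == '\0') return CODE_INVALID_VALUE;
  return CODE_SUCCESS;
}

static int32_t parse_line(const char *val) {
  int32_t ret = parse_value(val, 8);
  if (ret != CODE_SUCCESS) {
    return ret;  /* bubble up the precise reason instead of a generic failure */
  }
  return CODE_SUCCESS;
}

int main(void) {
  printf("%d %d %d\n", parse_line("ok"), parse_line(""), parse_line("way_too_long_value"));
  return 0;
}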
@@ -906,7 +917,7 @@ static int32_t smlParseInfluxString(const char *sql, SSmlLineInfo *elements, SSm
  elements->measureLen = sql - elements->measure;
  if (IS_INVALID_TABLE_LEN(elements->measureLen)) {
    smlBuildInvalidDataMsg(msg, "measure is empty or too large than 192", NULL);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
  }

  // parse tag
@@ -1001,11 +1012,11 @@ static int32_t smlParseTelnetTags(const char *data, SArray *cols, char *childTab

  if (IS_INVALID_COL_LEN(keyLen)) {
    smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
  }
  if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) {
    smlBuildInvalidDataMsg(msg, "dumplicate key", key);
-   return TSDB_CODE_TSC_DUP_TAG_NAMES;
+   return TSDB_CODE_TSC_DUP_NAMES;
  }

  // parse value

@@ -1026,7 +1037,7 @@ static int32_t smlParseTelnetTags(const char *data, SArray *cols, char *childTab

  if (valueLen == 0) {
    smlBuildInvalidDataMsg(msg, "invalid value", value);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_TSC_INVALID_VALUE;
  }

  // handle child table name
@@ -1059,7 +1070,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char *sql, SSmlTable
  smlParseTelnetElement(&sql, &tinfo->sTableName, &tinfo->sTableNameLen);
  if (!(tinfo->sTableName) || IS_INVALID_TABLE_LEN(tinfo->sTableNameLen)) {
    smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
  }

  // parse timestamp

@@ -1074,7 +1085,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char *sql, SSmlTable
  int32_t ret = smlParseTS(info, timestamp, tLen, cols);
  if (ret != TSDB_CODE_SUCCESS) {
    smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return ret;
  }

  // parse value

@@ -1083,7 +1094,7 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char *sql, SSmlTable
  smlParseTelnetElement(&sql, &value, &valueLen);
  if (!value || valueLen == 0) {
    smlBuildInvalidDataMsg(&info->msgBuf, "invalid value", sql);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_TSC_INVALID_VALUE;
  }

  SSmlKv *kv = (SSmlKv *)taosMemoryCalloc(sizeof(SSmlKv), 1);
@@ -1093,15 +1104,15 @@ static int32_t smlParseTelnetString(SSmlHandle *info, const char *sql, SSmlTable
  kv->keyLen = VALUE_LEN;
  kv->value = value;
  kv->length = valueLen;
- if (!smlParseValue(kv, &info->msgBuf)) {
-   return TSDB_CODE_SML_INVALID_DATA;
+ if ((ret = smlParseValue(kv, &info->msgBuf)) != TSDB_CODE_SUCCESS) {
+   return ret;
  }

  // parse tags
  ret = smlParseTelnetTags(sql, tinfo->tags, tinfo->childTableName, info->dumplicateKey, &info->msgBuf);
  if (ret != TSDB_CODE_SUCCESS) {
    smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", sql);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return ret;
  }

  return TSDB_CODE_SUCCESS;
@@ -1135,11 +1146,11 @@ static int32_t smlParseCols(const char *data, int32_t len, SArray *cols, char *c

  if (IS_INVALID_COL_LEN(keyLen)) {
    smlBuildInvalidDataMsg(msg, "invalid key or key is too long than 64", key);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
  }
  if (smlCheckDuplicateKey(key, keyLen, dumplicateKey)) {
    smlBuildInvalidDataMsg(msg, "dumplicate key", key);
-   return TSDB_CODE_TSC_DUP_TAG_NAMES;
+   return TSDB_CODE_TSC_DUP_NAMES;
  }

  // parse value

@@ -1195,8 +1206,9 @@ static int32_t smlParseCols(const char *data, int32_t len, SArray *cols, char *c
    if (isTag) {
      kv->type = TSDB_DATA_TYPE_NCHAR;
    } else {
-     if (!smlParseValue(kv, msg)) {
-       return TSDB_CODE_SML_INVALID_DATA;
+     int32_t ret = smlParseValue(kv, msg);
+     if (ret != TSDB_CODE_SUCCESS) {
+       return ret;
      }
    }
  }
@@ -1204,8 +1216,8 @@ static int32_t smlParseCols(const char *data, int32_t len, SArray *cols, char *c
  return TSDB_CODE_SUCCESS;
}

-static bool smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols, SSmlMsgBuf *msg) {
- for (int i = 0; i < taosArrayGetSize(cols); ++i) {  // jump timestamp
+static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols, SSmlMsgBuf *msg) {
+ for (int i = 0; i < taosArrayGetSize(cols); ++i) {
    SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i);

    int16_t *index = (int16_t *)taosHashGet(metaHash, kv->key, kv->keyLen);

@@ -1213,7 +1225,7 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols
    SSmlKv **value = (SSmlKv **)taosArrayGet(metaArray, *index);
    if (kv->type != (*value)->type) {
      smlBuildInvalidDataMsg(msg, "the type is not the same like before", kv->key);
-     return false;
+     return TSDB_CODE_SML_NOT_SAME_TYPE;
    } else {
      if (IS_VAR_DATA_TYPE(kv->type)) {  // update string len, if bigger
        if (kv->length > (*value)->length) {

@@ -1230,7 +1242,7 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols
    }
  }

- return true;
+ return TSDB_CODE_SUCCESS;
}

static void smlInsertMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols) {
@@ -1564,10 +1576,16 @@ static int32_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int64_t *tsV
  double timeDouble = value->valuedouble;
  if (smlDoubleToInt64OverFlow(timeDouble)) {
    smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-   return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+   return TSDB_CODE_INVALID_TIMESTAMP;
  }
- if (timeDouble <= 0) {
-   return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+
+ if (timeDouble == 0) {
+   *tsVal = taosGetTimestampNs();
+   return TSDB_CODE_SUCCESS;
+ }
+
+ if (timeDouble < 0) {
+   return TSDB_CODE_INVALID_TIMESTAMP;
  }

  *tsVal = timeDouble;

@@ -1578,7 +1596,7 @@ static int32_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int64_t *tsV
    timeDouble = timeDouble * NANOSECOND_PER_SEC;
    if (smlDoubleToInt64OverFlow(timeDouble)) {
      smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-     return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+     return TSDB_CODE_INVALID_TIMESTAMP;
    }
  } else if (typeLen == 2 && (type->valuestring[1] == 's' || type->valuestring[1] == 'S')) {
    switch (type->valuestring[0]) {

@@ -1589,7 +1607,7 @@ static int32_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int64_t *tsV
        timeDouble = timeDouble * NANOSECOND_PER_MSEC;
        if (smlDoubleToInt64OverFlow(timeDouble)) {
          smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-         return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+         return TSDB_CODE_INVALID_TIMESTAMP;
        }
        break;
      case 'u':

@@ -1599,7 +1617,7 @@ static int32_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int64_t *tsV
        timeDouble = timeDouble * NANOSECOND_PER_USEC;
        if (smlDoubleToInt64OverFlow(timeDouble)) {
          smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-         return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+         return TSDB_CODE_INVALID_TIMESTAMP;
        }
        break;
      case 'n':
@@ -1634,11 +1652,11 @@ static int32_t smlParseTSFromJSON(SSmlHandle *info, cJSON *root, SArray *cols) {
  double timeDouble = timestamp->valuedouble;
  if (smlDoubleToInt64OverFlow(timeDouble)) {
    smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-   return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+   return TSDB_CODE_INVALID_TIMESTAMP;
  }

  if (timeDouble < 0) {
-   return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+   return TSDB_CODE_INVALID_TIMESTAMP;
  }

  uint8_t tsLen = smlGetTimestampLen((int64_t)timeDouble);

@@ -1648,19 +1666,19 @@ static int32_t smlParseTSFromJSON(SSmlHandle *info, cJSON *root, SArray *cols) {
    timeDouble = timeDouble * NANOSECOND_PER_SEC;
    if (smlDoubleToInt64OverFlow(timeDouble)) {
      smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-     return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+     return TSDB_CODE_INVALID_TIMESTAMP;
    }
  } else if (tsLen == TSDB_TIME_PRECISION_MILLI_DIGITS) {
    tsVal = tsVal * NANOSECOND_PER_MSEC;
    timeDouble = timeDouble * NANOSECOND_PER_MSEC;
    if (smlDoubleToInt64OverFlow(timeDouble)) {
      smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-     return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+     return TSDB_CODE_INVALID_TIMESTAMP;
    }
  } else if (timeDouble == 0) {
    tsVal = taosGetTimestampNs();
  } else {
-   return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+   return TSDB_CODE_INVALID_TIMESTAMP;
  }
} else if (cJSON_IsObject(timestamp)) {
  int32_t ret = smlParseTSFromJSONObj(info, timestamp, &tsVal);
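The JSON timestamp hunks above normalize second, millisecond, and microsecond inputs to nanoseconds and reject values that would overflow a 64-bit integer. A standalone sketch of that scaling-with-overflow-guard step; the factors below are plain constants rather than the NANOSECOND_PER_* macros used in the real code:

/* ts_scale_sketch.c */
#include <stdint.h>
#include <stdio.h>

static int scale_to_ns(double ts, int64_t factor, int64_t *out) {
  double scaled = ts * (double)factor;
  if (scaled >= (double)INT64_MAX || scaled <= (double)INT64_MIN) {
    return -1;  /* would overflow int64 nanoseconds */
  }
  *out = (int64_t)scaled;
  return 0;
}

int main(void) {
  int64_t ns = 0;
  scale_to_ns(1657000000.0, 1000000000LL, &ns);           /* seconds -> ns */
  printf("%lld\n", (long long)ns);
  printf("%d\n", scale_to_ns(1e30, 1000000000LL, &ns));   /* -1: overflow rejected */
  return 0;
}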
@@ -1779,6 +1797,14 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
    return TSDB_CODE_TSC_INVALID_JSON_TYPE;
  }
  pVal->length = (int16_t)strlen(value->valuestring);

+ if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
+   return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
+ }
+ if (pVal->type == TSDB_DATA_TYPE_NCHAR && pVal->length > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+   return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
+ }

  return smlJsonCreateSring(&pVal->value, value->valuestring, pVal->length);
}
@@ -1913,7 +1939,7 @@ static int32_t smlParseTagsFromJSON(cJSON *root, SArray *pKVs, char *childTableN
  }
  // check duplicate keys
  if (smlCheckDuplicateKey(tag->string, keyLen, dumplicateKey)) {
-   return TSDB_CODE_TSC_DUP_TAG_NAMES;
+   return TSDB_CODE_TSC_DUP_NAMES;
  }

  // handle child table name

@@ -2033,7 +2059,7 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
  }
  if (taosArrayGetSize(cols) > TSDB_MAX_COLUMNS) {
    smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
  }

  bool hasTable = true;

@@ -2065,7 +2091,7 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {

  if (taosArrayGetSize((*oneTable)->tags) > TSDB_MAX_TAGS) {
    smlBuildInvalidDataMsg(&info->msgBuf, "too many tags than 128", NULL);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_PAR_INVALID_TAGS_NUM;
  }

  (*oneTable)->sTableName = elements.measure;
@@ -2084,12 +2110,12 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
  SSmlSTableMeta **tableMeta = (SSmlSTableMeta **)taosHashGet(info->superTables, elements.measure, elements.measureLen);
  if (tableMeta) {  // update meta
    ret = smlUpdateMeta((*tableMeta)->colHash, (*tableMeta)->cols, cols, &info->msgBuf);
-   if (!hasTable && ret) {
+   if (!hasTable && ret == TSDB_CODE_SUCCESS) {
      ret = smlUpdateMeta((*tableMeta)->tagHash, (*tableMeta)->tags, (*oneTable)->tags, &info->msgBuf);
    }
-   if (!ret) {
+   if (ret != TSDB_CODE_SUCCESS) {
      uError("SML:0x%" PRIx64 " smlUpdateMeta failed", info->id);
-     return TSDB_CODE_SML_INVALID_DATA;
+     return ret;
    }
  } else {
    SSmlSTableMeta *meta = smlBuildSTableMeta();

@@ -2138,7 +2164,7 @@ static int32_t smlParseTelnetLine(SSmlHandle *info, void *data) {
    smlDestroyTableInfo(info, tinfo);
    smlDestroyCols(cols);
    taosArrayDestroy(cols);
-   return TSDB_CODE_SML_INVALID_DATA;
+   return TSDB_CODE_PAR_INVALID_TAGS_NUM;
  }
  taosHashClear(info->dumplicateKey);

@@ -2169,9 +2195,9 @@ static int32_t smlParseTelnetLine(SSmlHandle *info, void *data) {
    if (!hasTable && ret) {
      ret = smlUpdateMeta((*tableMeta)->tagHash, (*tableMeta)->tags, (*oneTable)->tags, &info->msgBuf);
    }
-   if (!ret) {
+   if (ret != TSDB_CODE_SUCCESS) {
      uError("SML:0x%" PRIx64 " smlUpdateMeta failed", info->id);
-     return TSDB_CODE_SML_INVALID_DATA;
+     return ret;
    }
  } else {
    SSmlSTableMeta *meta = smlBuildSTableMeta();
(File diff suppressed because it is too large)
(File diff suppressed because one or more lines are too long)
@@ -76,7 +76,7 @@ static const SSysDbTableSchema userDBSchema[] = {
    {.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
    {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
    {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
-   {.name = "strict", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+   {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},

@@ -87,9 +87,9 @@ static const SSysDbTableSchema userDBSchema[] = {
    {.name = "wal", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
    {.name = "fsync", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
    {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
-   {.name = "cache_model", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
+   {.name = "cacheModel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
-   {.name = "single_stable_model", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
+   {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
    {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    // {.name = "schemaless", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
    {.name = "retention", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -1752,7 +1752,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
  int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
  int32_t rows = pDataBlock->info.rows;
  int32_t len = 0;
- len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld\n", flag,
+ len += snprintf(dumpBuf + len, size - len, "===stream===%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld|\n", flag,
                  (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId,
                  pDataBlock->info.uid);
  if (len >= size - 1) return dumpBuf;
@@ -453,6 +453,7 @@ int32_t tSerializeSClientHbBatchRsp(void *buf, int32_t bufLen, const SClientHbBa
  if (tStartEncode(&encoder) < 0) return -1;
  if (tEncodeI64(&encoder, pBatchRsp->reqId) < 0) return -1;
  if (tEncodeI64(&encoder, pBatchRsp->rspId) < 0) return -1;
+ if (tEncodeI32(&encoder, pBatchRsp->svrTimestamp) < 0) return -1;

  int32_t rspNum = taosArrayGetSize(pBatchRsp->rsps);
  if (tEncodeI32(&encoder, rspNum) < 0) return -1;

@@ -474,6 +475,7 @@ int32_t tDeserializeSClientHbBatchRsp(void *buf, int32_t bufLen, SClientHbBatchR
  if (tStartDecode(&decoder) < 0) return -1;
  if (tDecodeI64(&decoder, &pBatchRsp->reqId) < 0) return -1;
  if (tDecodeI64(&decoder, &pBatchRsp->rspId) < 0) return -1;
+ if (tDecodeI32(&decoder, &pBatchRsp->svrTimestamp) < 0) return -1;

  int32_t rspNum = 0;
  if (tDecodeI32(&decoder, &rspNum) < 0) return -1;

@@ -3613,6 +3615,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
  if (tEncodeI8(&encoder, pRsp->superUser) < 0) return -1;
  if (tEncodeI8(&encoder, pRsp->connType) < 0) return -1;
  if (tEncodeSEpSet(&encoder, &pRsp->epSet) < 0) return -1;
+ if (tEncodeI32(&encoder, pRsp->svrTimestamp) < 0) return -1;
  if (tEncodeCStr(&encoder, pRsp->sVer) < 0) return -1;
  if (tEncodeCStr(&encoder, pRsp->sDetailVer) < 0) return -1;
  tEndEncode(&encoder);

@@ -3634,6 +3637,7 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
  if (tDecodeI8(&decoder, &pRsp->superUser) < 0) return -1;
  if (tDecodeI8(&decoder, &pRsp->connType) < 0) return -1;
  if (tDecodeSEpSet(&decoder, &pRsp->epSet) < 0) return -1;
+ if (tDecodeI32(&decoder, &pRsp->svrTimestamp) < 0) return -1;
  if (tDecodeCStrTo(&decoder, pRsp->sVer) < 0) return -1;
  if (tDecodeCStrTo(&decoder, pRsp->sDetailVer) < 0) return -1;
  tEndDecode(&decoder);
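The svrTimestamp field is inserted into both the encoder and the decoder at exactly the same position, which is what keeps the two sides of the wire format in lockstep. A toy illustration of that encode/decode discipline, using a fixed-width layout rather than TDengine's SEncoder/SDecoder:

/* wire_field_sketch.c - fields must be written and read in the same order */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { int64_t reqId, rspId; int32_t svrTimestamp; } Msg;

static size_t encode(const Msg *m, uint8_t *buf) {
  size_t pos = 0;
  memcpy(buf + pos, &m->reqId, sizeof m->reqId); pos += sizeof m->reqId;
  memcpy(buf + pos, &m->rspId, sizeof m->rspId); pos += sizeof m->rspId;
  memcpy(buf + pos, &m->svrTimestamp, sizeof m->svrTimestamp); pos += sizeof m->svrTimestamp;
  return pos;
}

static void decode(const uint8_t *buf, Msg *m) {
  size_t pos = 0;
  memcpy(&m->reqId, buf + pos, sizeof m->reqId); pos += sizeof m->reqId;
  memcpy(&m->rspId, buf + pos, sizeof m->rspId); pos += sizeof m->rspId;
  memcpy(&m->svrTimestamp, buf + pos, sizeof m->svrTimestamp); pos += sizeof m->svrTimestamp;
}

int main(void) {
  uint8_t buf[64];
  Msg in = {1, 2, 1657000000}, out = {0, 0, 0};
  encode(&in, buf);
  decode(buf, &out);
  printf("%d\n", out.svrTimestamp);  /* 1657000000 */
  return 0;
}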
@@ -4823,6 +4827,35 @@ int32_t tDeserializeSMDropStreamReq(void *buf, int32_t bufLen, SMDropStreamReq *
  return 0;
}

+int32_t tSerializeSMRecoverStreamReq(void *buf, int32_t bufLen, const SMRecoverStreamReq *pReq) {
+  SEncoder encoder = {0};
+  tEncoderInit(&encoder, buf, bufLen);
+
+  if (tStartEncode(&encoder) < 0) return -1;
+  if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
+  if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
+
+  tEndEncode(&encoder);
+
+  int32_t tlen = encoder.pos;
+  tEncoderClear(&encoder);
+  return tlen;
+}
+
+int32_t tDeserializeSMRecoverStreamReq(void *buf, int32_t bufLen, SMRecoverStreamReq *pReq) {
+  SDecoder decoder = {0};
+  tDecoderInit(&decoder, buf, bufLen);
+
+  if (tStartDecode(&decoder) < 0) return -1;
+  if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
+  if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
+
+  tEndDecode(&decoder);
+
+  tDecoderClear(&decoder);
+  return 0;
+}
+
void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
  taosMemoryFreeClear(pReq->sql);
  taosMemoryFreeClear(pReq->ast);
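The new serializer pair follows the usual request shape: serialize returns the encoded length and deserialize rebuilds the struct from the buffer. A hypothetical round trip for a similar "name + flag" request, written against a toy layout rather than TDengine's actual encoder API:

/* recover_req_roundtrip_sketch.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  char   name[64];
  int8_t igNotExists;
} RecoverReq;

/* returns encoded length, -1 on a too-small buffer; NULL buf is a sizing pass */
static int32_t serializeRecoverReq(const RecoverReq *req, uint8_t *buf, int32_t bufLen) {
  int32_t need = (int32_t)strlen(req->name) + 1 /* NUL */ + 1 /* flag */;
  if (buf == NULL) return need;
  if (bufLen < need) return -1;
  memcpy(buf, req->name, strlen(req->name) + 1);
  buf[need - 1] = (uint8_t)req->igNotExists;
  return need;
}

static int32_t deserializeRecoverReq(const uint8_t *buf, int32_t len, RecoverReq *req) {
  size_t n = strlen((const char *)buf);  /* assumes a NUL terminator inside buf */
  if ((int32_t)n + 2 > len || n >= sizeof(req->name)) return -1;
  memcpy(req->name, buf, n + 1);
  req->igNotExists = (int8_t)buf[n + 1];
  return 0;
}

int main(void) {
  RecoverReq in = {"stream1", 1}, out = {{0}, 0};
  uint8_t    buf[128];
  int32_t    len = serializeRecoverReq(&in, buf, (int32_t)sizeof(buf));
  deserializeRecoverReq(buf, len, &out);
  printf("%s %d\n", out.name, out.igNotExists);  /* stream1 1 */
  return 0;
}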
@@ -4945,8 +4978,8 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
    if (tEncodeTag(pCoder, (const STag *)pReq->ctb.pTag) < 0) return -1;
    int32_t len = taosArrayGetSize(pReq->ctb.tagName);
    if (tEncodeI32(pCoder, len) < 0) return -1;
-   for (int32_t i = 0; i < len; i++){
-     char* name = taosArrayGet(pReq->ctb.tagName, i);
+   for (int32_t i = 0; i < len; i++) {
+     char *name = taosArrayGet(pReq->ctb.tagName, i);
      if (tEncodeCStr(pCoder, name) < 0) return -1;
    }
  } else if (pReq->type == TSDB_NORMAL_TABLE) {

@@ -4982,9 +5015,9 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
    int32_t len = 0;
    if (tDecodeI32(pCoder, &len) < 0) return -1;
    pReq->ctb.tagName = taosArrayInit(len, TSDB_COL_NAME_LEN);
-   if(pReq->ctb.tagName == NULL) return -1;
-   for (int32_t i = 0; i < len; i++){
+   if (pReq->ctb.tagName == NULL) return -1;
+   for (int32_t i = 0; i < len; i++) {
      char  name[TSDB_COL_NAME_LEN] = {0};
      char *tmp = NULL;
      if (tDecodeCStr(pCoder, &tmp) < 0) return -1;
      strcpy(name, tmp);
@@ -196,6 +196,7 @@ SArray *mmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_TOPIC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_SUBSCRIBE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_ASK_EP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_HB, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_COMMIT_OFFSET, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

@@ -148,9 +148,9 @@ static int32_t mmStart(SMnodeMgmt *pMgmt) {

static void mmStop(SMnodeMgmt *pMgmt) {
  dDebug("mnode-mgmt start to stop");
+ mndPreClose(pMgmt->pMnode);
  taosThreadRwlockWrlock(&pMgmt->lock);
  pMgmt->stopped = 1;
- mndPreClose(pMgmt->pMnode);
  taosThreadRwlockUnlock(&pMgmt->lock);

  mndStop(pMgmt->pMnode);
@@ -81,6 +81,7 @@ int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
      taosWriteQitem(pMgmt->queryWorker.queue, pMsg);
      return 0;
    case READ_QUEUE:
+   case FETCH_QUEUE:
      dTrace("msg:%p, is created and will put into qnode-fetch queue", pMsg);
      taosWriteQitem(pMgmt->fetchWorker.queue, pMsg);
      return 0;
@@ -221,11 +221,11 @@ int32_t dmInitMsgHandle(SDnode *pDnode) {

static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
  SDnode *pDnode = dmInstance();
- if (pDnode->status != DND_STAT_RUNNING) {
+ if (pDnode->status != DND_STAT_RUNNING && pMsg->msgType < TDMT_SYNC_MSG) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
    terrno = TSDB_CODE_NODE_OFFLINE;
-   dError("failed to send rpc msg since %s, handle:%p", terrstr(), pMsg->info.handle);
+   dError("failed to send rpc msg:%s since %s, handle:%p", TMSG_INFO(pMsg->msgType), terrstr(), pMsg->info.handle);
    return -1;
  } else {
    rpcSendRequest(pDnode->trans.clientRpc, pEpSet, pMsg, NULL);
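The hunk above loosens the send gate: when the dnode is not yet RUNNING, message types at or above TDMT_SYNC_MSG are still allowed out, while everything below that threshold is refused. A sketch of the same gating predicate; the numeric ranges are illustrative, and whether the allowed range corresponds exactly to sync traffic is an assumption here:

/* send_gate_sketch.c */
#include <stdbool.h>
#include <stdio.h>

enum NodeStatus { STAT_INIT, STAT_RUNNING, STAT_STOPPED };
enum { MSG_QUERY = 100, MSG_SYNC_FIRST = 1000, MSG_SYNC_HEARTBEAT = 1001 };

static bool may_send(enum NodeStatus status, int msgType) {
  /* non-sync traffic is dropped unless the node is fully running */
  return status == STAT_RUNNING || msgType >= MSG_SYNC_FIRST;
}

int main(void) {
  printf("%d\n", may_send(STAT_INIT, MSG_QUERY));           /* 0 */
  printf("%d\n", may_send(STAT_INIT, MSG_SYNC_HEARTBEAT));  /* 1 */
  printf("%d\n", may_send(STAT_RUNNING, MSG_QUERY));        /* 1 */
  return 0;
}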
@@ -559,6 +559,7 @@ typedef struct {
  // info
  int64_t uid;
  int8_t  status;
+ int8_t  isDistributed;
  // config
  int8_t  igExpired;
  int8_t  trigger;

@@ -586,6 +587,23 @@ typedef struct {
int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj);

+typedef struct {
+  char    streamName[TSDB_STREAM_FNAME_LEN];
+  int64_t uid;
+  int64_t streamUid;
+  SArray* childInfo;  // SArray<SStreamChildEpInfo>
+} SStreamCheckpointObj;
+
+#if 0
+typedef struct {
+  int64_t uid;
+  int64_t streamId;
+  int8_t  isDistributed;
+  int8_t  status;
+  int8_t  stage;
+} SStreamRecoverObj;
+#endif
+
#ifdef __cplusplus
}
#endif
@@ -50,8 +50,8 @@ extern "C" {
// clang-format on

#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_DB_NAME_LEN    ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_COL_NAME_LEN   ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)

typedef int32_t (*MndMsgFp)(SRpcMsg *pMsg);
typedef int32_t (*MndInitFp)(SMnode *pMnode);

@@ -61,7 +61,7 @@ typedef void (*ShowFreeIterFp)(SMnode *pMnode, void *pIter);
typedef struct SQWorker SQHandle;

typedef struct {
- const char * name;
+ const char  *name;
  MndInitFp    initFp;
  MndCleanupFp cleanupFp;
} SMnodeStep;

@@ -70,7 +70,7 @@ typedef struct {
  int64_t        showId;
  ShowRetrieveFp retrieveFps[TSDB_MGMT_TABLE_MAX];
  ShowFreeIterFp freeIterFps[TSDB_MGMT_TABLE_MAX];
- SCacheObj *    cache;
+ SCacheObj     *cache;
} SShowMgmt;

typedef struct {
@@ -84,12 +84,13 @@ typedef struct {
} STelemMgmt;

typedef struct {
  tsem_t   syncSem;
  int64_t  sync;
  bool     standby;
  SReplica replica;
  int32_t  errCode;
  int32_t  transId;
+ int8_t   leaderTransferFinish;
} SSyncMgmt;

typedef struct {

@@ -107,14 +108,14 @@ typedef struct SMnode {
  bool          stopped;
  bool          restored;
  bool          deploy;
- char *        path;
+ char         *path;
  int64_t       checkTime;
- SSdb *        pSdb;
- SArray *      pSteps;
- SQHandle *    pQuery;
- SHashObj *    infosMeta;
- SHashObj *    perfsMeta;
- SWal *        pWal;
+ SSdb         *pSdb;
+ SArray       *pSteps;
+ SQHandle     *pQuery;
+ SHashObj     *infosMeta;
+ SHashObj     *perfsMeta;
+ SWal         *pWal;
  SShowMgmt     showMgmt;
  SProfileMgmt  profileMgmt;
  STelemMgmt    telemMgmt;
@@ -27,6 +27,7 @@ typedef enum {
  TRANS_STOP_FUNC_TEST = 2,
  TRANS_START_FUNC_MQ_REB = 3,
  TRANS_STOP_FUNC_MQ_REB = 4,
+ TRANS_FUNC_RECOVER_STREAM_STEP_NEXT = 5,
} ETrnFunc;

typedef enum {
@@ -48,6 +48,7 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter);

static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg);
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg);
+static int32_t mndProcessMqHbReq(SRpcMsg *pMsg);
static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg);
static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg);
static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg);

@@ -62,6 +63,7 @@ int32_t mndInitConsumer(SMnode *pMnode) {
      .deleteFp = (SdbDeleteFp)mndConsumerActionDelete};

  mndSetMsgHandle(pMnode, TDMT_MND_SUBSCRIBE, mndProcessSubscribeReq);
+ mndSetMsgHandle(pMnode, TDMT_MND_MQ_HB, mndProcessMqHbReq);
  mndSetMsgHandle(pMnode, TDMT_MND_MQ_ASK_EP, mndProcessAskEpReq);
  mndSetMsgHandle(pMnode, TDMT_MND_MQ_TIMER, mndProcessMqTimerMsg);
  mndSetMsgHandle(pMnode, TDMT_MND_MQ_CONSUMER_LOST, mndProcessConsumerLostMsg);
@@ -255,24 +257,15 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
  return 0;
}

-static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
-  SMnode      *pMnode = pMsg->info.node;
-  SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
-  SMqAskEpRsp  rsp = {0};
-  int64_t      consumerId = be64toh(pReq->consumerId);
-  int32_t      epoch = ntohl(pReq->epoch);
+static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
+  SMnode   *pMnode = pMsg->info.node;
+  SMqHbReq *pReq = (SMqHbReq *)pMsg->pCont;
+  int64_t   consumerId = be64toh(pReq->consumerId);

-  SMqConsumerObj *pConsumer = mndAcquireConsumer(pMsg->info.node, consumerId);
-  if (pConsumer == NULL) {
-    terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
-    return -1;
-  }
-
-  ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
-  /*int32_t hbStatus = atomic_load_32(&pConsumer->hbStatus);*/
+  SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
  atomic_store_32(&pConsumer->hbStatus, 0);

-  // 1. check consumer status
  int32_t status = atomic_load_32(&pConsumer->status);

  if (status == MQ_CONSUMER_STATUS__LOST_REBD) {

@@ -286,6 +279,46 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
    tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
  }

+  mndReleaseConsumer(pMnode, pConsumer);
+
+  return 0;
+}
+
+static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
+  SMnode      *pMnode = pMsg->info.node;
+  SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
+  SMqAskEpRsp  rsp = {0};
+  int64_t      consumerId = be64toh(pReq->consumerId);
+  int32_t      epoch = ntohl(pReq->epoch);
+
+  SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
+  if (pConsumer == NULL) {
+    terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
+    return -1;
+  }
+
+  ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
+
+#if 1
+  atomic_store_32(&pConsumer->hbStatus, 0);
+#endif
+
+  // 1. check consumer status
+  int32_t status = atomic_load_32(&pConsumer->status);
+
+#if 1
+  if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
+    SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
+
+    pRecoverMsg->consumerId = consumerId;
+    SRpcMsg *pRpcMsg = taosMemoryCalloc(1, sizeof(SRpcMsg));
+    pRpcMsg->msgType = TDMT_MND_MQ_CONSUMER_RECOVER;
+    pRpcMsg->pCont = pRecoverMsg;
+    pRpcMsg->contLen = sizeof(SMqConsumerRecoverMsg);
+    tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
+  }
+#endif
+
  if (status != MQ_CONSUMER_STATUS__READY) {
    terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
    return -1;
@@ -293,7 +293,7 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
  if (pCfg->buffer < TSDB_MIN_BUFFER_PER_VNODE || pCfg->buffer > TSDB_MAX_BUFFER_PER_VNODE) return -1;
  if (pCfg->pageSize < TSDB_MIN_PAGESIZE_PER_VNODE || pCfg->pageSize > TSDB_MAX_PAGESIZE_PER_VNODE) return -1;
  if (pCfg->pages < TSDB_MIN_PAGES_PER_VNODE || pCfg->pages > TSDB_MAX_PAGES_PER_VNODE) return -1;
- if (pCfg->cacheLastSize < TSDB_MIN_DB_CACHE_LAST_SIZE || pCfg->cacheLastSize > TSDB_MAX_DB_CACHE_LAST_SIZE) return -1;
+ if (pCfg->cacheLastSize < TSDB_MIN_DB_CACHE_SIZE || pCfg->cacheLastSize > TSDB_MAX_DB_CACHE_SIZE) return -1;
  if (pCfg->daysPerFile < TSDB_MIN_DAYS_PER_FILE || pCfg->daysPerFile > TSDB_MAX_DAYS_PER_FILE) return -1;
  if (pCfg->daysToKeep0 < TSDB_MIN_KEEP || pCfg->daysToKeep0 > TSDB_MAX_KEEP) return -1;
  if (pCfg->daysToKeep1 < TSDB_MIN_KEEP || pCfg->daysToKeep1 > TSDB_MAX_KEEP) return -1;

@@ -312,7 +312,7 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
  if (pCfg->replications != 1 && pCfg->replications != 3) return -1;
  if (pCfg->strict < TSDB_DB_STRICT_OFF || pCfg->strict > TSDB_DB_STRICT_ON) return -1;
  if (pCfg->schemaless < TSDB_DB_SCHEMALESS_OFF || pCfg->schemaless > TSDB_DB_SCHEMALESS_ON) return -1;
- if (pCfg->cacheLast < TSDB_MIN_DB_CACHE_LAST || pCfg->cacheLast > TSDB_MAX_DB_CACHE_LAST) return -1;
+ if (pCfg->cacheLast < TSDB_CACHE_MODEL_NONE || pCfg->cacheLast > TSDB_CACHE_MODEL_BOTH) return -1;
  if (pCfg->hashMethod != 1) return -1;
  if (pCfg->replications > mndGetDnodeSize(pMnode)) {
    terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES;

@@ -341,8 +341,8 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
  if (pCfg->compression < 0) pCfg->compression = TSDB_DEFAULT_COMP_LEVEL;
  if (pCfg->replications < 0) pCfg->replications = TSDB_DEFAULT_DB_REPLICA;
  if (pCfg->strict < 0) pCfg->strict = TSDB_DEFAULT_DB_STRICT;
- if (pCfg->cacheLast < 0) pCfg->cacheLast = TSDB_DEFAULT_CACHE_LAST;
- if (pCfg->cacheLastSize <= 0) pCfg->cacheLastSize = TSDB_DEFAULT_CACHE_LAST_SIZE;
+ if (pCfg->cacheLast < 0) pCfg->cacheLast = TSDB_DEFAULT_CACHE_MODEL;
+ if (pCfg->cacheLastSize <= 0) pCfg->cacheLastSize = TSDB_DEFAULT_CACHE_SIZE;
  if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0;
  if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF;
}
@@ -1443,6 +1443,22 @@ char *buildRetension(SArray *pRetension) {
  return p1;
}

+static const char *getCacheModelStr(int8_t cacheModel) {
+  switch (cacheModel) {
+    case TSDB_CACHE_MODEL_NONE:
+      return TSDB_CACHE_MODEL_NONE_STR;
+    case TSDB_CACHE_MODEL_LAST_ROW:
+      return TSDB_CACHE_MODEL_LAST_ROW_STR;
+    case TSDB_CACHE_MODEL_LAST_VALUE:
+      return TSDB_CACHE_MODEL_LAST_VALUE_STR;
+    case TSDB_CACHE_MODEL_BOTH:
+      return TSDB_CACHE_MODEL_BOTH_STR;
+    default:
+      break;
+  }
+  return "unknown";
+}
+
static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, int32_t rows, int64_t numOfTables,
                           bool sysDb, ESdbStatus objStatus, bool sysinfo) {
  int32_t cols = 0;

@@ -1491,7 +1507,7 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
  colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.replications, false);

- const char *strictStr = pDb->cfg.strict ? "strict" : "no_strict";
+ const char *strictStr = pDb->cfg.strict ? "on" : "off";
  char        strictVstr[24] = {0};
  STR_WITH_SIZE_TO_VARSTR(strictVstr, strictStr, strlen(strictStr));
  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);

@@ -1539,8 +1555,11 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
  colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.compression, false);

+ const char *cacheModelStr = getCacheModelStr(pDb->cfg.cacheLast);
+ char        cacheModelVstr[24] = {0};
+ STR_WITH_SIZE_TO_VARSTR(cacheModelVstr, cacheModelStr, strlen(cacheModelStr));
  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.cacheLast, false);
+ colDataAppend(pColInfo, rows, (const char *)cacheModelVstr, false);

  const char *precStr = NULL;
  switch (pDb->cfg.precision) {
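With getCacheModelStr, SHOW DATABASES now reports the cache model as a readable label instead of a raw tinyint. A standalone sketch of the same enum-to-label mapping; the label strings ("none", "last_row", "last_value", "both") are assumed display values, not verified constants:

/* cache_model_label_sketch.c */
#include <stdint.h>
#include <stdio.h>

static const char *cache_model_str(int8_t model) {
  switch (model) {
    case 0: return "none";
    case 1: return "last_row";
    case 2: return "last_value";
    case 3: return "both";
    default: return "unknown";
  }
}

int main(void) {
  for (int8_t m = 0; m <= 4; ++m) {
    printf("%d -> %s\n", m, cache_model_str(m));
  }
  return 0;
}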
@@ -27,6 +27,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {

  if (tEncodeI64(pEncoder, pObj->uid) < 0) return -1;
  if (tEncodeI8(pEncoder, pObj->status) < 0) return -1;
+ if (tEncodeI8(pEncoder, pObj->isDistributed) < 0) return -1;

  if (tEncodeI8(pEncoder, pObj->igExpired) < 0) return -1;
  if (tEncodeI8(pEncoder, pObj->trigger) < 0) return -1;

@@ -72,6 +73,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) {

  if (tDecodeI64(pDecoder, &pObj->uid) < 0) return -1;
  if (tDecodeI8(pDecoder, &pObj->status) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pObj->isDistributed) < 0) return -1;

  if (tDecodeI8(pDecoder, &pObj->igExpired) < 0) return -1;
  if (tDecodeI8(pDecoder, &pObj->trigger) < 0) return -1;
@@ -368,7 +368,18 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {

void mndPreClose(SMnode *pMnode) {
  if (pMnode != NULL) {
+   atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 0);
    syncLeaderTransfer(pMnode->syncMgmt.sync);
+
+   /*
+   mDebug("vgId:1, mnode start leader transfer");
+   // wait for leader transfer finish
+   while (!atomic_load_8(&(pMnode->syncMgmt.leaderTransferFinish))) {
+     taosMsleep(10);
+     mDebug("vgId:1, mnode waiting for leader transfer");
+   }
+   mDebug("vgId:1, mnode finish leader transfer");
+   */
  }
}
@@ -218,7 +218,6 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) {
 }

 void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
-#if 0
   SSdb   *pSdb = pMnode->pSdb;
   int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE);
   void   *pIter = NULL;

@@ -238,9 +237,10 @@ void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
     addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
     sdbRelease(pSdb, pObj);
   }
-#else
-  syncGetRetryEpSet(pMnode->syncMgmt.sync, pEpSet);
-#endif
+  if (pEpSet->numOfEps == 0) {
+    syncGetRetryEpSet(pMnode->syncMgmt.sync, pEpSet);
+  }
 }

 static int32_t mndSetCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) {
@@ -15,10 +15,10 @@

 #define _DEFAULT_SOURCE
 #include "mndProfile.h"
-#include "mndPrivilege.h"
 #include "mndDb.h"
 #include "mndDnode.h"
 #include "mndMnode.h"
+#include "mndPrivilege.h"
 #include "mndQnode.h"
 #include "mndShow.h"
 #include "mndStb.h"

@@ -274,6 +274,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
   connectRsp.connId = pConn->id;
   connectRsp.connType = connReq.connType;
   connectRsp.dnodeNum = mndGetDnodeSize(pMnode);
+  connectRsp.svrTimestamp = taosGetTimestampSec();

   strcpy(connectRsp.sVer, version);
   snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version, buildinfo,
@@ -387,67 +388,7 @@ static void mndCancelGetNextApp(SMnode *pMnode, void *pIter) {
 }

 static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
-#if 0
-  SClientHbRsp* pRsp = taosMemoryMalloc(sizeof(SClientHbRsp));
-  if (pRsp == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return NULL;
-  }
-  pRsp->connKey = pReq->connKey;
-  SMqHbBatchRsp batchRsp;
-  batchRsp.batchRsps = taosArrayInit(0, sizeof(SMqHbRsp));
-  if (batchRsp.batchRsps == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return NULL;
-  }
-  SClientHbKey connKey = pReq->connKey;
-  SHashObj* pObj = pReq->info;
-  SKv* pKv = taosHashGet(pObj, "mq-tmp", strlen("mq-tmp") + 1);
-  if (pKv == NULL) {
-    taosMemoryFree(pRsp);
-    return NULL;
-  }
-  SMqHbMsg mqHb;
-  taosDecodeSMqMsg(pKv->value, &mqHb);
-  /*int64_t clientUid = htonl(pKv->value);*/
-  /*if (mqHb.epoch )*/
-  int sz = taosArrayGetSize(mqHb.pTopics);
-  SMqConsumerObj* pConsumer = mndAcquireConsumer(pMnode, mqHb.consumerId);
-  for (int i = 0; i < sz; i++) {
-    SMqHbOneTopicBatchRsp innerBatchRsp;
-    innerBatchRsp.rsps = taosArrayInit(sz, sizeof(SMqHbRsp));
-    if (innerBatchRsp.rsps == NULL) {
-      //TODO
-      terrno = TSDB_CODE_OUT_OF_MEMORY;
-      return NULL;
-    }
-    SMqHbTopicInfo* topicInfo = taosArrayGet(mqHb.pTopics, i);
-    SMqConsumerTopic* pConsumerTopic = taosHashGet(pConsumer->topicHash, topicInfo->name, strlen(topicInfo->name)+1);
-    if (pConsumerTopic->epoch != topicInfo->epoch) {
-      //add new vgids into rsp
-      int vgSz = taosArrayGetSize(topicInfo->pVgInfo);
-      for (int j = 0; j < vgSz; j++) {
-        SMqHbRsp innerRsp;
-        SMqHbVgInfo* pVgInfo = taosArrayGet(topicInfo->pVgInfo, i);
-        SVgObj* pVgObj = mndAcquireVgroup(pMnode, pVgInfo->vgId);
-        innerRsp.epSet = mndGetVgroupEpset(pMnode, pVgObj);
-        taosArrayPush(innerBatchRsp.rsps, &innerRsp);
-      }
-    }
-    taosArrayPush(batchRsp.batchRsps, &innerBatchRsp);
-  }
-  int32_t tlen = taosEncodeSMqHbBatchRsp(NULL, &batchRsp);
-  void* buf = taosMemoryMalloc(tlen);
-  if (buf == NULL) {
-    //TODO
-    return NULL;
-  }
-  void* abuf = buf;
-  taosEncodeSMqHbBatchRsp(&abuf, &batchRsp);
-  pRsp->body = buf;
-  pRsp->bodyLen = tlen;
-  return pRsp;
-#endif
+  //
   return NULL;
 }

@@ -623,6 +564,7 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) {
   }

   SClientHbBatchRsp batchRsp = {0};
+  batchRsp.svrTimestamp = taosGetTimestampSec();
   batchRsp.rsps = taosArrayInit(0, sizeof(SClientHbRsp));

   int32_t sz = taosArrayGetSize(batchReq.reqs);
@@ -319,6 +319,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
   int32_t totLevel = LIST_LENGTH(pPlan->pSubplans);
   ASSERT(totLevel <= 2);
   pStream->tasks = taosArrayInit(totLevel, sizeof(void*));
+  pStream->isDistributed = totLevel == 2;

   bool hasExtraSink = false;
   bool externalTargetDB = strcmp(pStream->sourceDb, pStream->targetDb) != 0;

@@ -709,7 +709,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {

 _OVER:
   if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
-    mError("sma:%s, failed to create since %s", createReq.name, terrstr(terrno));
+    mError("sma:%s, failed to create since %s", createReq.name, terrstr());
   }

   mndReleaseStb(pMnode, pStb);
@@ -1689,6 +1689,9 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p
 _OVER:
   taosMemoryFreeClear(stbObj.pTags);
   taosMemoryFreeClear(stbObj.pColumns);
+  if (pAlter->commentLen > 0) {
+    taosMemoryFreeClear(stbObj.comment);
+  }
   return code;
 }

@@ -1733,7 +1736,7 @@ _OVER:

   mndReleaseStb(pMnode, pStb);
   mndReleaseDb(pMnode, pDb);
-  taosArrayDestroy(alterReq.pFields);
+  tFreeSMAltertbReq(&alterReq);

   return code;
 }
@@ -36,7 +36,7 @@ static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
 static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj *pNewStream);
 static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq);
 static int32_t mndProcessDropStreamReq(SRpcMsg *pReq);
-/*static int32_t mndProcessDropStreamInRsp(SRpcMsg *pRsp);*/
+static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);
 static int32_t mndProcessStreamMetaReq(SRpcMsg *pReq);
 static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta);
 static int32_t mndRetrieveStream(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);

@@ -55,6 +55,7 @@ int32_t mndInitStream(SMnode *pMnode) {

   mndSetMsgHandle(pMnode, TDMT_MND_CREATE_STREAM, mndProcessCreateStreamReq);
   mndSetMsgHandle(pMnode, TDMT_MND_DROP_STREAM, mndProcessDropStreamReq);
+  mndSetMsgHandle(pMnode, TDMT_MND_RECOVER_STREAM, mndProcessRecoverStreamReq);

   mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DEPLOY_RSP, mndTransProcessRsp);
   mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DROP_RSP, mndTransProcessRsp);

@@ -176,7 +177,7 @@ static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStream

   taosWLockLatch(&pOldStream->lock);

-  // TODO handle update
+  pOldStream->status = pNewStream->status;

   taosWUnLockLatch(&pOldStream->lock);
   return 0;

@@ -394,6 +395,20 @@ int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStr
   return 0;
 }

+static int32_t mndSetStreamRecover(SMnode *pMnode, STrans *pTrans, const SStreamObj *pStream) {
+  SStreamObj streamObj = {0};
+  memcpy(streamObj.name, pStream->name, TSDB_STREAM_FNAME_LEN);
+  streamObj.status = STREAM_STATUS__RECOVER;
+
+  SSdbRaw *pCommitRaw = mndStreamActionEncode(&streamObj);
+  if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
+    mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
+  return 0;
+}
+
 static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStreamObj *pStream, const char *user) {
   SStbObj *pStb = NULL;
   SDbObj  *pDb = NULL;
@@ -491,6 +506,76 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
   return 0;
 }

+static int32_t mndPersistTaskRecoverReq(STrans *pTrans, SStreamTask *pTask) {
+  SMStreamTaskRecoverReq *pReq = taosMemoryCalloc(1, sizeof(SMStreamTaskRecoverReq));
+  if (pReq == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return -1;
+  }
+  pReq->streamId = pTask->streamId;
+  pReq->taskId = pTask->taskId;
+  int32_t len;
+  int32_t code;
+  tEncodeSize(tEncodeSMStreamTaskRecoverReq, pReq, len, code);
+  if (code != 0) {
+    return -1;
+  }
+  void *buf = taosMemoryCalloc(1, sizeof(SMsgHead) + len);
+  if (buf == NULL) {
+    return -1;
+  }
+  void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
+  SEncoder encoder;
+  tEncoderInit(&encoder, abuf, len);
+  tEncodeSMStreamTaskRecoverReq(&encoder, pReq);
+  ((SMsgHead *)buf)->vgId = pTask->nodeId;
+
+  STransAction action = {0};
+  memcpy(&action.epSet, &pTask->epSet, sizeof(SEpSet));
+  action.pCont = buf;
+  action.contLen = sizeof(SMsgHead) + len;
+  action.msgType = TDMT_STREAM_TASK_RECOVER;
+  if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+    taosMemoryFree(buf);
+    return -1;
+  }
+  return 0;
+}
+
+int32_t mndRecoverStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
+  if (pStream->isDistributed) {
+    int32_t lv = taosArrayGetSize(pStream->tasks);
+    for (int32_t i = 0; i < lv; i++) {
+      SArray      *pTasks = taosArrayGetP(pStream->tasks, i);
+      int32_t      sz = taosArrayGetSize(pTasks);
+      SStreamTask *pTask = taosArrayGetP(pTasks, 0);
+      if (!pTask->isDataScan && pTask->execType != TASK_EXEC__NONE) {
+        ASSERT(sz == 1);
+        if (mndPersistTaskRecoverReq(pTrans, pTask) < 0) {
+          return -1;
+        }
+      } else {
+        continue;
+      }
+    }
+  } else {
+    int32_t lv = taosArrayGetSize(pStream->tasks);
+    for (int32_t i = 0; i < lv; i++) {
+      SArray *pTasks = taosArrayGetP(pStream->tasks, i);
+      int32_t sz = taosArrayGetSize(pTasks);
+      for (int32_t j = 0; j < sz; j++) {
+        SStreamTask *pTask = taosArrayGetP(pTasks, j);
+        if (!pTask->isDataScan) break;
+        ASSERT(pTask->execType != TASK_EXEC__NONE);
+        if (mndPersistTaskRecoverReq(pTrans, pTask) < 0) {
+          return -1;
+        }
+      }
+    }
+  }
+  return 0;
+}
+
 int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
   int32_t lv = taosArrayGetSize(pStream->tasks);
   for (int32_t i = 0; i < lv; i++) {
@@ -672,6 +757,69 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
   return TSDB_CODE_ACTION_IN_PROGRESS;
 }

+static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq) {
+  SMnode     *pMnode = pReq->info.node;
+  SStreamObj *pStream = NULL;
+  /*SDbObj     *pDb = NULL;*/
+  /*SUserObj   *pUser = NULL;*/
+
+  SMRecoverStreamReq recoverReq = {0};
+  if (tDeserializeSMRecoverStreamReq(pReq->pCont, pReq->contLen, &recoverReq) < 0) {
+    ASSERT(0);
+    terrno = TSDB_CODE_INVALID_MSG;
+    return -1;
+  }
+
+  pStream = mndAcquireStream(pMnode, recoverReq.name);
+
+  if (pStream == NULL) {
+    if (recoverReq.igNotExists) {
+      mDebug("stream:%s, not exist, ignore not exist is set", recoverReq.name);
+      sdbRelease(pMnode->pSdb, pStream);
+      return 0;
+    } else {
+      terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
+      return -1;
+    }
+  }
+
+  if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
+    return -1;
+  }
+
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
+  if (pTrans == NULL) {
+    mError("stream:%s, failed to recover since %s", recoverReq.name, terrstr());
+    sdbRelease(pMnode->pSdb, pStream);
+    return -1;
+  }
+  mDebug("trans:%d, used to drop stream:%s", pTrans->id, recoverReq.name);
+
+  // broadcast to recover all tasks
+  if (mndRecoverStreamTasks(pMnode, pTrans, pStream) < 0) {
+    mError("stream:%s, failed to recover task since %s", recoverReq.name, terrstr());
+    sdbRelease(pMnode->pSdb, pStream);
+    return -1;
+  }
+
+  // update stream status
+  if (mndSetStreamRecover(pMnode, pTrans, pStream) < 0) {
+    sdbRelease(pMnode->pSdb, pStream);
+    return -1;
+  }
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare recover stream trans since %s", pTrans->id, terrstr());
+    sdbRelease(pMnode->pSdb, pStream);
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  sdbRelease(pMnode->pSdb, pStream);
+
+  return TSDB_CODE_ACTION_IN_PROGRESS;
+}
+
 int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
   SSdb *pSdb = pMnode->pSdb;
   void *pIter = NULL;
@@ -56,23 +56,24 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
     sdbSetApplyInfo(pMnode->pSdb, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex);
   }

-  if (pMgmt->transId == transId && transId != 0) {
+  if (transId <= 0) {
+    mError("trans:%d, invalid commit msg", transId);
+  } else if (transId == pMgmt->transId) {
     if (pMgmt->errCode != 0) {
       mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
     }
     pMgmt->transId = 0;
     tsem_post(&pMgmt->syncSem);
   } else {
-#if 1
-    mError("trans:%d, invalid commit msg since trandId not match with %d", transId, pMgmt->transId);
-#else
     STrans *pTrans = mndAcquireTrans(pMnode, transId);
     if (pTrans != NULL) {
+      mDebug("trans:%d, execute in mnode which not leader", transId);
       mndTransExecute(pMnode, pTrans);
       mndReleaseTrans(pMnode, pTrans);
+      // sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
+    } else {
+      mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
     }
-    // sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA);
-#endif
   }
 }

@@ -153,6 +154,12 @@ int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int
   return sdbDoWrite(pMnode->pSdb, pWriter, pBuf, len);
 }

+void mndLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
+  SMnode *pMnode = pFsm->data;
+  atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 1);
+  mDebug("vgId:1, mnd leader transfer finish");
+}
+
 SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
   SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
   pFsm->data = pMnode;

@@ -160,6 +167,7 @@ SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
   pFsm->FpPreCommitCb = NULL;
   pFsm->FpRollBackCb = NULL;
   pFsm->FpRestoreFinishCb = mndRestoreFinish;
+  pFsm->FpLeaderTransferCb = mndLeaderTransfer;
   pFsm->FpReConfigCb = mndReConfig;
   pFsm->FpGetSnapshot = mndSyncGetSnapshot;
   pFsm->FpGetSnapshotInfo = mndSyncGetSnapshotInfo;
@@ -639,6 +639,7 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
     if (pShow->pIter == NULL) break;

     if (pDb != NULL && pVgroup->dbUid != pDb->uid) {
+      sdbRelease(pSdb, pVgroup);
       continue;
     }
@@ -137,17 +137,18 @@ typedef enum {
   SDB_USER = 7,
   SDB_AUTH = 8,
   SDB_ACCT = 9,
-  SDB_STREAM = 10,
-  SDB_OFFSET = 11,
-  SDB_SUBSCRIBE = 12,
-  SDB_CONSUMER = 13,
-  SDB_TOPIC = 14,
-  SDB_VGROUP = 15,
-  SDB_SMA = 16,
-  SDB_STB = 17,
-  SDB_DB = 18,
-  SDB_FUNC = 19,
-  SDB_MAX = 20
+  SDB_STREAM_CK = 10,
+  SDB_STREAM = 11,
+  SDB_OFFSET = 12,
+  SDB_SUBSCRIBE = 13,
+  SDB_CONSUMER = 14,
+  SDB_TOPIC = 15,
+  SDB_VGROUP = 16,
+  SDB_SMA = 17,
+  SDB_STB = 18,
+  SDB_DB = 19,
+  SDB_FUNC = 20,
+  SDB_MAX = 21
 } ESdbType;

 typedef struct SSdbRaw {

@@ -308,7 +309,7 @@ void sdbRelease(SSdb *pSdb, void *pObj);
 * @return void* The next iterator of the table.
 */
 void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj);
-void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) ;
+void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status);

 /**
 * @brief Cancel a traversal
@@ -30,15 +30,6 @@
 extern "C" {
 #endif

-enum {
-  STREAM_STATUS__RUNNING = 1,
-  STREAM_STATUS__STOPPED,
-  STREAM_STATUS__CREATING,
-  STREAM_STATUS__STOPING,
-  STREAM_STATUS__RESTORING,
-  STREAM_STATUS__DELETING,
-};
-
 typedef struct {
   SHashObj* pHash;  // taskId -> SStreamTask
 } SStreamMeta;
@@ -138,7 +138,7 @@ void *tsdbGetIdx(SMeta *pMeta);
 void *tsdbGetIvtIdx(SMeta *pMeta);

 int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
-int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray* pTableUids);
+int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
 int32_t tsdbLastrowReaderClose(void *pReader);
 int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
@@ -32,6 +32,8 @@ extern "C" {
 #define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
 // clang-format on

+#define RSMA_TASK_INFO_HASH_SLOT 8
+
 typedef struct SSmaEnv SSmaEnv;
 typedef struct SSmaStat SSmaStat;
 typedef struct STSmaStat STSmaStat;

@@ -41,7 +43,7 @@ typedef struct SRSmaInfo SRSmaInfo;
 typedef struct SRSmaInfoItem SRSmaInfoItem;

 struct SSmaEnv {
-  TdThreadRwlock lock;
+  SRWLatch  lock;
   int8_t    type;
   SSmaStat *pStat;
 };

@@ -52,7 +54,7 @@ typedef struct {
   void *tmrHandle;  // shared by all fetch tasks
 } SSmaMgmt;

-#define SMA_ENV_LOCK(env) ((env)->lock)
+#define SMA_ENV_LOCK(env) (&(env)->lock)
 #define SMA_ENV_TYPE(env) ((env)->type)
 #define SMA_ENV_STAT(env) ((env)->pStat)

@@ -64,10 +66,14 @@ struct STSmaStat {

 struct SRSmaStat {
   SSma     *pSma;
-  int64_t   submitVer;
-  int64_t   refId;         // shared by fetch tasks
-  int8_t    triggerStat;   // shared by fetch tasks
-  SHashObj *rsmaInfoHash;  // key: stbUid, value: SRSmaInfo;
+  int64_t   commitAppliedVer;  // vnode applied version for async commit
+  int64_t   commitSubmitVer;   // rsma submit version for async commit
+  int64_t   submitVer;         // latest submit version
+  int64_t   refId;             // shared by fetch tasks
+  int8_t    triggerStat;       // shared by fetch tasks
+  int8_t    commitStat;        // 0 not in committing, 1 in committing
+  SHashObj *rsmaInfoHash;      // key: stbUid, value: SRSmaInfo;
+  SHashObj *iRsmaInfoHash;     // key: stbUid, value: SRSmaInfo; immutable rsmaInfoHash
 };

 struct SSmaStat {
@@ -78,12 +84,29 @@ struct SSmaStat {
   T_REF_DECLARE()
 };

 #define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
 #define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
 #define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
-#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
-#define RSMA_REF_ID(r) ((r)->refId)
-#define RSMA_SUBMIT_VER(r) ((r)->submitVer)
+#define RSMA_IMU_INFO_HASH(r) ((r)->iRsmaInfoHash)
+#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
+#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
+#define RSMA_REF_ID(r) ((r)->refId)
+#define RSMA_SUBMIT_VER(r) ((r)->submitVer)
+
+struct SRSmaInfoItem {
+  void   *taskInfo;  // qTaskInfo_t
+  int64_t refId;
+  tmr_h   tmrId;
+  int32_t maxDelay;
+  int8_t  level;
+  int8_t  triggerStat;
+};
+
+struct SRSmaInfo {
+  STSchema *pTSchema;
+  int64_t   suid;
+  SRSmaInfoItem items[TSDB_RETENTION_L2];
+};

 enum {
   TASK_TRIGGER_STAT_INIT = 0,

@@ -94,6 +117,14 @@ enum {
   TASK_TRIGGER_STAT_DROPPED = 5,
 };

+enum {
+  RSMA_ROLE_CREATE = 0,
+  RSMA_ROLE_DROP = 1,
+  RSMA_ROLE_FETCH = 2,
+  RSMA_ROLE_SUBMIT = 3,
+  RSMA_ROLE_ITERATE = 4,
+};
+
 void  tdDestroySmaEnv(SSmaEnv *pSmaEnv);
 void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
@@ -112,33 +143,6 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
 int32_t tdLockSma(SSma *pSma);
 int32_t tdUnLockSma(SSma *pSma);

-static FORCE_INLINE int32_t tdRLockSmaEnv(SSmaEnv *pEnv) {
-  int code = taosThreadRwlockRdlock(&(pEnv->lock));
-  if (code != 0) {
-    terrno = TAOS_SYSTEM_ERROR(code);
-    return -1;
-  }
-  return 0;
-}
-
-static FORCE_INLINE int32_t tdWLockSmaEnv(SSmaEnv *pEnv) {
-  int code = taosThreadRwlockWrlock(&(pEnv->lock));
-  if (code != 0) {
-    terrno = TAOS_SYSTEM_ERROR(code);
-    return -1;
-  }
-  return 0;
-}
-
-static FORCE_INLINE int32_t tdUnLockSmaEnv(SSmaEnv *pEnv) {
-  int code = taosThreadRwlockUnlock(&(pEnv->lock));
-  if (code != 0) {
-    terrno = TAOS_SYSTEM_ERROR(code);
-    return -1;
-  }
-  return 0;
-}
-
 static FORCE_INLINE int8_t tdSmaStat(STSmaStat *pTStat) {
   if (pTStat) {
     return atomic_load_8(&pTStat->state);

@@ -184,10 +188,12 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
   }
 }

+int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc);
+void    tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
 static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
 void   *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
-void   *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
-int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat);
+void   *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
+int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);

 int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
 int32_t tdProcessRSmaRestoreImpl(SSma *pSma);
@@ -24,12 +24,12 @@ extern "C" {

 // tsdbDebug ================
 // clang-format off
-#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TSDB FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
-#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TSDB ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
-#define tsdbWarn(...)  do { if (tsdbDebugFlag & DEBUG_WARN)  { taosPrintLog("TSDB WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
-#define tsdbInfo(...)  do { if (tsdbDebugFlag & DEBUG_INFO)  { taosPrintLog("TSDB ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
-#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSDB ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
-#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSDB ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
+#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TSD FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
+#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TSD ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
+#define tsdbWarn(...)  do { if (tsdbDebugFlag & DEBUG_WARN)  { taosPrintLog("TSD WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
+#define tsdbInfo(...)  do { if (tsdbDebugFlag & DEBUG_INFO)  { taosPrintLog("TSD ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
+#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSD ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
+#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSD ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
 // clang-format on

 typedef struct TSDBROW TSDBROW;
@@ -115,7 +115,6 @@ int32_t tGetBlock(uint8_t *p, void *ph);
 int32_t tBlockCmprFn(const void *p1, const void *p2);
 bool    tBlockHasSma(SBlock *pBlock);
 // SBlockIdx
-void    tBlockIdxReset(SBlockIdx *pBlockIdx);
 int32_t tPutBlockIdx(uint8_t *p, void *ph);
 int32_t tGetBlockIdx(uint8_t *p, void *ph);
 int32_t tCmprBlockIdx(void const *lhs, void const *rhs);

@@ -126,6 +125,8 @@ void tColDataClear(void *ph);
 int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
 int32_t tColDataGetValue(SColData *pColData, int32_t iRow, SColVal *pColVal);
 int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
+int32_t tPutColData(uint8_t *p, SColData *pColData);
+int32_t tGetColData(uint8_t *p, SColData *pColData);
 // SBlockData
 #define tBlockDataFirstRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, 0)
 #define tBlockDataLastRow(PBLOCKDATA)  tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)

@@ -134,14 +135,17 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
 int32_t tBlockDataInit(SBlockData *pBlockData);
 void    tBlockDataReset(SBlockData *pBlockData);
 int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema);
+int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
 void    tBlockDataClearData(SBlockData *pBlockData);
-void    tBlockDataClear(SBlockData *pBlockData);
+void    tBlockDataClear(SBlockData *pBlockData, int8_t deepClear);
 int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData);
 int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema);
 int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
 int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest);
 SColData *tBlockDataGetColDataByIdx(SBlockData *pBlockData, int32_t idx);
 void      tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData);
+int32_t   tPutBlockData(uint8_t *p, SBlockData *pBlockData);
+int32_t   tGetBlockData(uint8_t *p, SBlockData *pBlockData);
 // SDelIdx
 int32_t tPutDelIdx(uint8_t *p, void *ph);
 int32_t tGetDelIdx(uint8_t *p, void *ph);

@@ -202,7 +206,7 @@ int32_t tsdbFSStateUpsertDelFile(STsdbFSState *pState, SDelFile *pDelFile);
 int32_t tsdbFSStateUpsertDFileSet(STsdbFSState *pState, SDFileSet *pSet);
 void    tsdbFSStateDeleteDFileSet(STsdbFSState *pState, int32_t fid);
 SDelFile  *tsdbFSStateGetDelFile(STsdbFSState *pState);
-SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid);
+SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid, int32_t flag);
 // tsdbReaderWriter.c ==============================================================================================
 // SDataFWriter
 int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet);
@@ -357,10 +361,6 @@ struct TSDBROW {
 struct SBlockIdx {
   int64_t suid;
   int64_t uid;
-  TSKEY   minKey;
-  TSKEY   maxKey;
-  int64_t minVersion;
-  int64_t maxVersion;
   int64_t offset;
   int64_t size;
 };
@@ -164,9 +164,12 @@ void smaCleanUp();
 int32_t smaOpen(SVnode* pVnode);
 int32_t smaClose(SSma* pSma);
 int32_t smaBegin(SSma* pSma);
-int32_t smaPreCommit(SSma* pSma);
-int32_t smaCommit(SSma* pSma);
-int32_t smaPostCommit(SSma* pSma);
+int32_t smaSyncPreCommit(SSma* pSma);
+int32_t smaSyncCommit(SSma* pSma);
+int32_t smaSyncPostCommit(SSma* pSma);
+int32_t smaAsyncPreCommit(SSma* pSma);
+int32_t smaAsyncCommit(SSma* pSma);
+int32_t smaAsyncPostCommit(SSma* pSma);

 int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
 int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);

@@ -309,6 +312,7 @@ void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);

 struct SSnapDataHdr {
   int8_t  type;
+  int64_t index;
   int64_t size;
   uint8_t data[];
 };
@@ -26,60 +26,72 @@ struct SMetaSnapReader {
 int32_t metaSnapReaderOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapReader** ppReader) {
   int32_t code = 0;
   int32_t c = 0;
-  SMetaSnapReader* pMetaSnapReader = NULL;
+  SMetaSnapReader* pReader = NULL;

   // alloc
-  pMetaSnapReader = (SMetaSnapReader*)taosMemoryCalloc(1, sizeof(*pMetaSnapReader));
-  if (pMetaSnapReader == NULL) {
+  pReader = (SMetaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
+  if (pReader == NULL) {
     code = TSDB_CODE_OUT_OF_MEMORY;
     goto _err;
   }
-  pMetaSnapReader->pMeta = pMeta;
-  pMetaSnapReader->sver = sver;
-  pMetaSnapReader->ever = ever;
+  pReader->pMeta = pMeta;
+  pReader->sver = sver;
+  pReader->ever = ever;

   // impl
-  code = tdbTbcOpen(pMeta->pTbDb, &pMetaSnapReader->pTbc, NULL);
+  code = tdbTbcOpen(pMeta->pTbDb, &pReader->pTbc, NULL);
   if (code) {
+    taosMemoryFree(pReader);
     goto _err;
   }

-  code = tdbTbcMoveTo(pMetaSnapReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
+  code = tdbTbcMoveTo(pReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
   if (code) {
+    taosMemoryFree(pReader);
     goto _err;
   }

-  *ppReader = pMetaSnapReader;
+  metaInfo("vgId:%d vnode snapshot meta reader opened", TD_VID(pMeta->pVnode));
+
+  *ppReader = pReader;
   return code;

 _err:
-  metaError("vgId:%d meta snap reader open failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
+  metaError("vgId:%d vnode snapshot meta reader open failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
   *ppReader = NULL;
   return code;
 }

 int32_t metaSnapReaderClose(SMetaSnapReader** ppReader) {
+  int32_t code = 0;
+
   tdbTbcClose((*ppReader)->pTbc);
   taosMemoryFree(*ppReader);
   *ppReader = NULL;
-  return 0;
+
+  return code;
 }

 int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
+  int32_t     code = 0;
   const void* pKey = NULL;
   const void* pData = NULL;
   int32_t     nKey = 0;
   int32_t     nData = 0;
-  int32_t     code = 0;
+  STbDbKey    key;

+  *ppData = NULL;
   for (;;) {
-    code = tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData);
-    if (code || ((STbDbKey*)pData)->version > pReader->ever) {
-      code = TSDB_CODE_VND_READ_END;
+    if (tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData)) {
       goto _exit;
     }

-    if (((STbDbKey*)pData)->version < pReader->sver) {
+    key = ((STbDbKey*)pKey)[0];
+    if (key.version > pReader->ever) {
+      goto _exit;
+    }
+
+    if (key.version < pReader->sver) {
       tdbTbcMoveToNext(pReader->pTbc);
       continue;
     }

@@ -88,17 +100,28 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
     break;
   }

-  // copy the data
-  if (tRealloc(ppData, sizeof(SSnapDataHdr) + nData) < 0) {
+  ASSERT(pData && nData);
+
+  *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + nData);
+  if (*ppData == NULL) {
     code = TSDB_CODE_OUT_OF_MEMORY;
-    return code;
+    goto _err;
   }
-  ((SSnapDataHdr*)(*ppData))->type = 0;  // TODO: use macro
-  ((SSnapDataHdr*)(*ppData))->size = nData;
-  memcpy(((SSnapDataHdr*)(*ppData))->data, pData, nData);
+
+  SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
+  pHdr->type = 0;  // TODO: use macro
+  pHdr->size = nData;
+  memcpy(pHdr->data, pData, nData);
+
+  metaInfo("vgId:%d vnode snapshot meta read data, version:%" PRId64 " uid:%" PRId64 " nData:%d",
+           TD_VID(pReader->pMeta->pVnode), key.version, key.uid, nData);

 _exit:
   return code;
+
+_err:
+  metaError("vgId:%d vnode snapshot meta read data failed since %s", TD_VID(pReader->pMeta->pVnode), tstrerror(code));
+  return code;
 }

 // SMetaSnapWriter ========================================

@@ -108,18 +131,6 @@ struct SMetaSnapWriter {
   int64_t ever;
 };

-static int32_t metaSnapRollback(SMetaSnapWriter* pWriter) {
-  int32_t code = 0;
-  // TODO
-  return code;
-}
-
-static int32_t metaSnapCommit(SMetaSnapWriter* pWriter) {
-  int32_t code = 0;
-  // TODO
-  return code;
-}
-
 int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWriter** ppWriter) {
   int32_t code = 0;
   SMetaSnapWriter* pWriter;

@@ -148,10 +159,9 @@ int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback) {
   SMetaSnapWriter* pWriter = *ppWriter;

   if (rollback) {
-    code = metaSnapRollback(pWriter);
-    if (code) goto _err;
+    ASSERT(0);
   } else {
-    code = metaSnapCommit(pWriter);
+    code = metaCommit(pWriter->pMeta);
     if (code) goto _err;
   }
   taosMemoryFree(pWriter);

@@ -170,15 +180,16 @@ int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
   SMetaEntry metaEntry = {0};
   SDecoder*  pDecoder = &(SDecoder){0};

-  tDecoderInit(pDecoder, pData, nData);
+  tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
   metaDecodeEntry(pDecoder, &metaEntry);

   code = metaHandleEntry(pMeta, &metaEntry);
   if (code) goto _err;

+  tDecoderClear(pDecoder);
   return code;

 _err:
-  metaError("vgId:%d meta snapshot write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
+  metaError("vgId:%d vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
   return code;
 }
@@ -15,9 +15,13 @@

 #include "sma.h"

-static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma);
-static int32_t tdProcessRSmaCommitImpl(SSma *pSma);
-static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma);
+static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma);
+static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma);
+static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma);
+static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma);
+static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma);
+static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma);
+static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);

 /**
  * @brief Only applicable to Rollup SMA

@@ -25,7 +29,7 @@ static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma);
  * @param pSma
  * @return int32_t
  */
-int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); }
+int32_t smaSyncPreCommit(SSma *pSma) { return tdProcessRSmaSyncPreCommitImpl(pSma); }

 /**
  * @brief Only applicable to Rollup SMA

@@ -33,7 +37,7 @@ int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); }
  * @param pSma
  * @return int32_t
  */
-int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); }
+int32_t smaSyncCommit(SSma *pSma) { return tdProcessRSmaSyncCommitImpl(pSma); }

 /**
  * @brief Only applicable to Rollup SMA

@@ -41,7 +45,31 @@ int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); }
  * @param pSma
  * @return int32_t
  */
-int32_t smaPostCommit(SSma *pSma) { return tdProcessRSmaPostCommitImpl(pSma); }
+int32_t smaSyncPostCommit(SSma *pSma) { return tdProcessRSmaSyncPostCommitImpl(pSma); }
+
+/**
+ * @brief Only applicable to Rollup SMA
+ *
+ * @param pSma
+ * @return int32_t
+ */
+int32_t smaAsyncPreCommit(SSma *pSma) { return tdProcessRSmaAsyncPreCommitImpl(pSma); }
+
+/**
+ * @brief Only applicable to Rollup SMA
+ *
+ * @param pSma
+ * @return int32_t
+ */
+int32_t smaAsyncCommit(SSma *pSma) { return tdProcessRSmaAsyncCommitImpl(pSma); }
+
+/**
+ * @brief Only applicable to Rollup SMA
+ *
+ * @param pSma
+ * @return int32_t
+ */
+int32_t smaAsyncPostCommit(SSma *pSma) { return tdProcessRSmaAsyncPostCommitImpl(pSma); }

 /**
  * @brief set rsma trigger stat active
@@ -62,18 +90,17 @@ int32_t smaBegin(SSma *pSma) {
       atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE);
   switch (rsmaTriggerStat) {
     case TASK_TRIGGER_STAT_PAUSED: {
-      smaDebug("vgId:%d rsma trigger stat from paused to active", SMA_VID(pSma));
+      smaDebug("vgId:%d, rsma trigger stat from paused to active", SMA_VID(pSma));
       break;
     }
    case TASK_TRIGGER_STAT_INIT: {
      atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE);
-      smaDebug("vgId:%d rsma trigger stat from init to active", SMA_VID(pSma));
+      smaDebug("vgId:%d, rsma trigger stat from init to active", SMA_VID(pSma));
      break;
    }
    default: {
      atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE);
-      smaWarn("vgId:%d rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat);
-      ASSERT(0);
+      smaError("vgId:%d, rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat);
      break;
    }
  }

@@ -81,7 +108,7 @@ int32_t smaBegin(SSma *pSma) {
 }

 /**
- * @brief pre-commit for rollup sma.
+ * @brief pre-commit for rollup sma(sync commit).
  *  1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED.
  *  2) wait all triggered fetch tasks finished
  *  3) perform persist task for qTaskInfo

@@ -89,7 +116,7 @@ int32_t smaBegin(SSma *pSma) {
  * @param pSma
  * @return int32_t
  */
-static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
+static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) {
   SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
   if (!pSmaEnv) {
     return TSDB_CODE_SUCCESS;

@@ -98,8 +125,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
   SSmaStat  *pStat = SMA_ENV_STAT(pSmaEnv);
   SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);

-  // step 1: set persistence task paused
+  // step 1: set rsma stat paused
   atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);

   // step 2: wait all triggered fetch tasks finished

@@ -119,7 +145,9 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
   }

   // step 3: perform persist task for qTaskInfo
-  tdRSmaPersistExecImpl(pRSmaStat);
+  pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
+  pRSmaStat->commitSubmitVer = pRSmaStat->submitVer;
+  tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat));

   smaDebug("vgId:%d, rsma pre commit success", SMA_VID(pSma));

@@ -132,7 +160,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
  * @param pSma
  * @return int32_t
  */
-static int32_t tdProcessRSmaCommitImpl(SSma *pSma) {
+static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma) {
   SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
   if (!pSmaEnv) {
     return TSDB_CODE_SUCCESS;
@@ -140,21 +168,9 @@ static int32_t tdProcessRSmaCommitImpl(SSma *pSma) {
     return TSDB_CODE_SUCCESS;
   }

-/**
- * @brief post-commit for rollup sma
- *  1) clean up the outdated qtaskinfo files
- *
- * @param pSma
- * @return int32_t
- */
-static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) {
-  SVnode *pVnode = pSma->pVnode;
-
-  if (!VND_IS_RSMA(pVnode)) {
-    return TSDB_CODE_SUCCESS;
-  }
-
-  int64_t committed = pVnode->state.committed;
+static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
+  SVnode *pVnode = pSma->pVnode;
+  int64_t committed = pRSmaStat->commitAppliedVer;
   TdDirPtr      pDir = NULL;
   TdDirEntryPtr pDirEntry = NULL;
   char          dir[TSDB_FILENAME_LEN];

@@ -222,5 +238,159 @@ static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) {

   taosCloseDir(&pDir);
   regfree(&regex);

+  return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief post-commit for rollup sma
+ *  1) clean up the outdated qtaskinfo files
+ *
+ * @param pSma
+ * @return int32_t
+ */
+static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
+  SVnode *pVnode = pSma->pVnode;
+  if (!VND_IS_RSMA(pVnode)) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  SSmaEnv   *pSmaEnv = SMA_RSMA_ENV(pSma);
+  SRSmaStat *pRSmaStat = SMA_RSMA_STAT(SMA_ENV_STAT(pSmaEnv));
+
+  // cleanup outdated qtaskinfo files
+  tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
+
+  return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Rsma async commit implementation
+ *  1) set rsma stat TASK_TRIGGER_STAT_PAUSED
+ *  2) Wait all running fetch task finish to fetch and put submitMsg into level 2/3 wQueue(blocking level 1 write)
+ *  3)
+ *
+ * @param pSma
+ * @return int32_t
+ */
+static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
+  SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
+  if (!pSmaEnv) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  SSmaStat  *pStat = SMA_ENV_STAT(pSmaEnv);
+  SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+
+  // step 1: set rsma stat
+  atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
+  atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+
+  // step 2: wait all triggered fetch tasks finished
+  int32_t nLoops = 0;
+  while (1) {
+    if (T_REF_VAL_GET(pStat) == 0) {
+      smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma));
+      break;
+    } else {
+      smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma));
+    }
+    ++nLoops;
+    if (nLoops > 1000) {
+      sched_yield();
+      nLoops = 0;
+    }
+  }
+
+  // step 3: swap rsmaInfoHash and iRsmaInfoHash
+  ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat));
+  ASSERT(RSMA_INFO_HASH(pRSmaStat));
+
+  RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat);
+  RSMA_INFO_HASH(pRSmaStat) =
+      taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
+
+  if (!RSMA_INFO_HASH(pRSmaStat)) {
+    smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr());
+    return TSDB_CODE_FAILED;
+  }
+
+  // step 4: others
+  pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
+  pRSmaStat->commitSubmitVer = pRSmaStat->submitVer;
+
+  return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief commit for rollup sma
+ *
+ * @param pSma
+ * @return int32_t
+ */
+static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) {
+  SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
+  if (!pSmaEnv) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  SSmaStat  *pStat = SMA_ENV_STAT(pSmaEnv);
+  SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+
+  // perform persist task for qTaskInfo
+  tdRSmaPersistExecImpl(pRSmaStat, RSMA_IMU_INFO_HASH(pRSmaStat));
+
+  return TSDB_CODE_SUCCESS;
+}
+
+/**
+ * @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsmaInfoHash not empty.
+ *
+ * @param pSma
+ * @return int32_t
+ */
+static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||||
|
if (!pSmaEnv) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
|
||||||
|
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
|
||||||
|
|
||||||
|
// step 1: merge rsmaInfoHash and iRsmaInfoHash
|
||||||
|
taosWLockLatch(SMA_ENV_LOCK(pSmaEnv));
|
||||||
|
|
||||||
|
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
|
||||||
|
// TODO: optimization - just switch the hash pointer if rsmaInfoHash is empty
|
||||||
|
}
|
||||||
|
|
||||||
|
void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL);
|
||||||
|
while (pIter) {
|
||||||
|
tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
|
||||||
|
|
||||||
|
if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) {
|
||||||
|
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
|
||||||
|
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma),
|
||||||
|
*pSuid);
|
||||||
|
} else {
|
||||||
|
// free the resources
|
||||||
|
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
|
||||||
|
tdFreeRSmaInfo(pSma, pRSmaInfo, false);
|
||||||
|
smaDebug("vgId:%d, rsma async post commit, free rsma info since already COW for table:%" PRIi64, SMA_VID(pSma),
|
||||||
|
*pSuid);
|
||||||
|
}
|
||||||
|
|
||||||
|
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
|
||||||
|
}
|
||||||
|
|
||||||
|
taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat));
|
||||||
|
RSMA_IMU_INFO_HASH(pRSmaStat) = NULL;
|
||||||
|
|
||||||
|
taosWUnLockLatch(SMA_ENV_LOCK(pSmaEnv));
|
||||||
|
|
||||||
|
// step 2: cleanup outdated qtaskinfo files
|
||||||
|
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
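The async pre-commit added above pauses the rsma triggers, spins until all running fetch tasks drain, and then retires the active info hash as an immutable snapshot while installing a fresh one. The following stand-alone sketch illustrates that pause, drain, and swap sequence; every name and type in it is invented for illustration and is not a TDengine API.

// Simplified sketch of the pause -> drain -> swap sequence used by the async
// pre-commit above. Types and names here are illustrative, not TDengine APIs.
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  atomic_int refCount;      // readers currently executing fetch tasks
  atomic_int triggerStat;   // 0 = active, 1 = paused
  void      *activeHash;    // hash consulted by writers
  void      *immutableHash; // snapshot consumed by the committer
} DemoRSmaStat;

static void demoAsyncPreCommit(DemoRSmaStat *stat, void *freshHash) {
  // step 1: stop new fetch tasks from being triggered
  atomic_store(&stat->triggerStat, 1);

  // step 2: wait for in-flight fetch tasks to finish, yielding occasionally
  int nLoops = 0;
  while (atomic_load(&stat->refCount) != 0) {
    if (++nLoops > 1000) {
      sched_yield();
      nLoops = 0;
    }
  }

  // step 3: retire the active hash as an immutable snapshot and install a
  // fresh one so ingestion can continue while the snapshot is persisted
  stat->immutableHash = stat->activeHash;
  stat->activeHash = freshHash;
}

int main(void) {
  DemoRSmaStat stat = {0};
  int fresh = 0, old = 0;
  stat.activeHash = &old;
  demoAsyncPreCommit(&stat, &fresh);
  printf("swapped: immutable=%p active=%p\n", stat.immutableHash, stat.activeHash);
  return 0;
}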
@@ -17,7 +17,6 @@

typedef struct SSmaStat SSmaStat;

-#define RSMA_TASK_INFO_HASH_SLOT 8
#define SMA_MGMT_REF_NUM 10240

extern SSmaMgmt smaMgmt;

@@ -109,12 +108,7 @@ static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path)

  SMA_ENV_TYPE(pEnv) = smaType;

-  int code = taosThreadRwlockInit(&(pEnv->lock), NULL);
-  if (code) {
-    terrno = TAOS_SYSTEM_ERROR(code);
-    taosMemoryFree(pEnv);
-    return NULL;
-  }
+  taosInitRWLatch(&(pEnv->lock));

  if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) {
    tdFreeSmaEnv(pEnv);

@@ -148,7 +142,6 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEn
void tdDestroySmaEnv(SSmaEnv *pSmaEnv) {
  if (pSmaEnv) {
    pSmaEnv->pStat = tdFreeSmaState(pSmaEnv->pStat, SMA_ENV_TYPE(pSmaEnv));
-    taosThreadRwlockDestroy(&(pSmaEnv->lock));
  }
}

@@ -260,7 +253,7 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
      void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
      while (infoHash) {
        SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash;
-        tdFreeRSmaInfo(pSma, pSmaInfo);
+        tdFreeRSmaInfo(pSma, pSmaInfo, true);
        infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
      }
    }

@@ -311,7 +304,6 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
    if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {
      smaError("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " failed since %s", SMA_VID(pRSmaStat->pSma),
               RSMA_REF_ID(pRSmaStat), smaMgmt.rsetId, terrstr());
-      ASSERT(0);
    } else {
      smaDebug("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " succeed", SMA_VID(pRSmaStat->pSma),
               RSMA_REF_ID(pRSmaStat), smaMgmt.rsetId);
@@ -48,20 +48,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed);
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed);

-struct SRSmaInfoItem {
-  void   *taskInfo;  // qTaskInfo_t
-  int64_t refId;
-  tmr_h   tmrId;
-  int32_t maxDelay;
-  int8_t  level;
-  int8_t  triggerStat;
-};
-
-struct SRSmaInfo {
-  STSchema     *pTSchema;
-  int64_t       suid;
-  SRSmaInfoItem items[TSDB_RETENTION_L2];
-};
-
static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) {
  // adapt accordingly if definition of SRSmaInfo update

@@ -102,8 +89,9 @@ static FORCE_INLINE int32_t tdRSmaQTaskInfoContLen(int32_t lenWithHead) {

static FORCE_INLINE void tdRSmaQTaskInfoIterDestroy(SRSmaQTaskInfoIter *pIter) { taosMemoryFreeClear(pIter->pBuf); }

-static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
+void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
  // Note: free/kill may in RC
+  if (!taskHandle) return;
  qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
  if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
    smaDebug("vgId:%d, free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);

@@ -111,25 +99,36 @@ static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId,
  } else {
    smaDebug("vgId:%d, not free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);
  }
+  // TODO: clear files related to qTaskInfo?
}

-void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
+/**
+ * @brief general function to free rsmaInfo
+ *
+ * @param pSma
+ * @param pInfo
+ * @param isDeepFree Only stop tmrId and free pTSchema for deep free
+ * @return void*
+ */
+void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
  if (pInfo) {
    for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
      SRSmaInfoItem *pItem = &pInfo->items[i];
      if (pItem->taskInfo) {
-        if (pItem->tmrId) {
+        if (isDeepFree && pItem->tmrId) {
          smaDebug("vgId:%d, table %" PRIi64 " stop fetch timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId,
                   i + 1);
          taosTmrStopA(&pItem->tmrId);
        }
-        tdFreeTaskHandle(&pItem->taskInfo, SMA_VID(pSma), i + 1);
+        tdFreeQTaskInfo(&pItem->taskInfo, SMA_VID(pSma), i + 1);
      } else {
        smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma),
                 pInfo->suid, i + 1);
      }
    }
-    taosMemoryFree(pInfo->pTSchema);
+    if (isDeepFree) {
+      taosMemoryFree(pInfo->pTSchema);
+    }
    taosMemoryFree(pInfo);
  }
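tdFreeRSmaInfo now takes an isDeepFree flag so one routine serves both a full teardown and the release of a shallow copy whose timer and schema are still owned by the original object. Below is a minimal, self-contained sketch of that deep/shallow destructor pattern; the types are invented for the example and are not TDengine code.

// Illustrative only: a deep/shallow destructor flag, mirroring the isDeepFree
// idea above. None of these types are TDengine types.
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
  void *schema;   // shared with the original on a shallow copy
  void *taskInfo; // always owned by this instance
} DemoInfo;

static void demoFreeInfo(DemoInfo *pInfo, bool isDeepFree) {
  if (!pInfo) return;
  free(pInfo->taskInfo);   // owned either way
  if (isDeepFree) {
    free(pInfo->schema);   // only the original owns the schema
  }
  free(pInfo);
}

int main(void) {
  DemoInfo *orig = calloc(1, sizeof(*orig));
  orig->schema = malloc(16);
  orig->taskInfo = malloc(16);

  DemoInfo *copy = calloc(1, sizeof(*copy));
  copy->schema = orig->schema;  // shared, not duplicated
  copy->taskInfo = malloc(16);

  demoFreeInfo(copy, false);    // shallow: keep the shared schema alive
  demoFreeInfo(orig, true);     // deep: release everything
  return 0;
}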
@@ -151,7 +150,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)

  if (!suid || !tbUids) {
    terrno = TSDB_CODE_INVALID_PTR;
-    smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+    smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
    return TSDB_CODE_FAILED;
  }

@@ -165,7 +164,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)

  if (pRSmaInfo->items[0].taskInfo) {
    if ((qUpdateQualifiedTableId(pRSmaInfo->items[0].taskInfo, tbUids, true) < 0)) {
-      smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+      smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
      return TSDB_CODE_FAILED;
    } else {
      smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),

@@ -175,7 +174,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)

  if (pRSmaInfo->items[1].taskInfo) {
    if ((qUpdateQualifiedTableId(pRSmaInfo->items[1].taskInfo, tbUids, true) < 0)) {
-      smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
+      smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
      return TSDB_CODE_FAILED;
    } else {
      smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),

@@ -257,22 +256,22 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui

static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
                                       int8_t idx) {
-  SRetention *pRetention = SMA_RETENTION(pSma);
-  STsdbCfg   *pTsdbCfg = SMA_TSDB_CFG(pSma);
-
-  SReadHandle handle = {
-      .meta = pSma->pVnode->pMeta,
-      .vnode = pSma->pVnode,
-      .initTqReader = 1,
-  };
-
-  if (param->qmsg[idx]) {
+  if ((param->qmsgLen > 0) && param->qmsg[idx]) {
+    SRetention *pRetention = SMA_RETENTION(pSma);
+    STsdbCfg   *pTsdbCfg = SMA_TSDB_CFG(pSma);
+    SVnode     *pVnode = pSma->pVnode;
+    SReadHandle handle = {
+        .meta = pVnode->pMeta,
+        .vnode = pVnode,
+        .initTqReader = 1,
+    };
+
    SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
    pItem->refId = RSMA_REF_ID(pStat);
    pItem->taskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
    if (!pItem->taskInfo) {
      terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
-      goto _err;
+      return TSDB_CODE_FAILED;
    }
    pItem->triggerStat = TASK_TRIGGER_STAT_INACTIVE;
    if (param->maxdelay[idx] < TSDB_MIN_ROLLUP_MAX_DELAY) {

@@ -286,13 +285,11 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
      pItem->maxDelay = TSDB_MAX_ROLLUP_MAX_DELAY;
    }
    pItem->level = idx == 0 ? TSDB_RETENTION_L1 : TSDB_RETENTION_L2;
-    smaInfo("vgId:%d table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
+    smaInfo("vgId:%d, table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
            ", finally maxdelay:%" PRIi32,
-            SMA_VID(pSma), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
+            TD_VID(pVnode), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
  }
  return TSDB_CODE_SUCCESS;
-_err:
-  return TSDB_CODE_FAILED;
}

/**

@@ -357,7 +354,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con

  return TSDB_CODE_SUCCESS;
_err:
-  tdFreeRSmaInfo(pSma, pRSmaInfo);
+  tdFreeRSmaInfo(pSma, pRSmaInfo, true);
  return TSDB_CODE_FAILED;
}

@@ -562,7 +559,9 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
    SSDataBlock *output = NULL;
    uint64_t     ts;
    if (qExecTask(pItem->taskInfo, &output, &ts) < 0) {
-      ASSERT(false);
+      smaError("vgId:%d, qExecTask for rsma table %" PRIi64 "l evel %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
+               pItem->level, terrstr());
+      goto _err;
    }
    if (!output) {
      break;

@@ -572,7 +571,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
      pResult = taosArrayInit(1, sizeof(SSDataBlock));
      if (!pResult) {
        terrno = TSDB_CODE_OUT_OF_MEMORY;
-        return TSDB_CODE_FAILED;
+        goto _err;
      }
    }

@@ -649,9 +648,18 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
  return TSDB_CODE_SUCCESS;
}

+/**
+ * @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied.
+ *
+ * @param pSma
+ * @param suid
+ * @return SRSmaInfo*
+ */
static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
  SSmaEnv   *pEnv = SMA_RSMA_ENV(pSma);
  SRSmaStat *pStat = NULL;
+  SRSmaInfo *pRSmaInfo = NULL;
+
  if (!pEnv) {
    // only applicable when rsma env exists
    return NULL;

@@ -662,11 +670,37 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
    return NULL;
  }

-  SRSmaInfo *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
-  if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
+  pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
+  if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
+    return pRSmaInfo;
+  }
+
+  if (RSMA_COMMIT_STAT(pStat) == 0) {
    return NULL;
  }
-  return pRSmaInfo;
+
+  // clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
+  SRSmaInfo *pCowRSmaInfo = NULL;
+  // lock
+  taosWLockLatch(SMA_ENV_LOCK(pEnv));
+  void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
+  if (iRSmaInfo) {
+    SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
+    if (pIRSmaInfo) {
+      if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) {
+        taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+        smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
+        return NULL;
+      }
+      if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
+        taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+        return NULL;
+      }
+    }
+  }
+  // unlock
+  taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+  return pCowRSmaInfo;
}
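With the change above, tdGetRSmaInfoBySuid first probes the active hash and, only while a commit is in flight, falls back to the immutable snapshot, cloning the entry back into the active hash under a write lock. The simplified, self-contained sketch below illustrates that copy-on-write lookup; every name in it is made up for the example.

// Simplified sketch of the copy-on-write lookup described above: reads hit the
// active table first; during a commit, a miss falls back to the immutable
// snapshot, clones the entry under a write lock, and installs the clone in the
// active table. All names/types below are invented for the example.
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SLOTS 8

typedef struct { int64_t suid; char payload[32]; int used; } DemoEntry;
typedef struct {
  DemoEntry        active[DEMO_SLOTS];
  DemoEntry        snapshot[DEMO_SLOTS];
  int              committing;
  pthread_rwlock_t lock;
} DemoStat;

static DemoEntry *demoFind(DemoEntry *tbl, int64_t suid) {
  for (int i = 0; i < DEMO_SLOTS; ++i)
    if (tbl[i].used && tbl[i].suid == suid) return &tbl[i];
  return NULL;
}

static DemoEntry *demoGet(DemoStat *stat, int64_t suid) {
  DemoEntry *e = demoFind(stat->active, suid);
  if (e) return e;                     // fast path: already in the active table
  if (!stat->committing) return NULL;  // nothing to clone from outside a commit

  pthread_rwlock_wrlock(&stat->lock);
  DemoEntry *src = demoFind(stat->snapshot, suid);
  if (src) {
    for (int i = 0; i < DEMO_SLOTS; ++i) {
      if (!stat->active[i].used) {     // clone into the first free slot
        stat->active[i] = *src;
        e = &stat->active[i];
        break;
      }
    }
  }
  pthread_rwlock_unlock(&stat->lock);
  return e;
}

int main(void) {
  DemoStat stat = {.committing = 1};
  pthread_rwlock_init(&stat.lock, NULL);
  stat.snapshot[0] = (DemoEntry){.suid = 42, .payload = "from snapshot", .used = 1};

  DemoEntry *e = demoGet(&stat, 42);
  printf("clone: %s\n", e ? e->payload : "(miss)");
  pthread_rwlock_destroy(&stat.lock);
  return 0;
}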
static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {

@@ -891,10 +925,17 @@ int32_t tdProcessRSmaRestoreImpl(SSma *pSma) {

  return TSDB_CODE_SUCCESS;
_err:
-  smaError("vgId:%d failed to restore rsma task since %s", SMA_VID(pSma), terrstr());
+  smaError("vgId:%d, failed to restore rsma task since %s", SMA_VID(pSma), terrstr());
  return TSDB_CODE_FAILED;
}

+/**
+ * @brief Restore from SRSmaQTaskInfoItem
+ *
+ * @param pSma
+ * @param pItem
+ * @return int32_t
+ */
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *pItem) {
  SRSmaInfo *pRSmaInfo = NULL;
  void      *qTaskInfo = NULL;

@@ -920,7 +961,7 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *

  if (qDeserializeTaskStatus(qTaskInfo, pItem->qTaskInfo, pItem->len) < 0) {
    smaError("vgId:%d, restore rsma task failed for table:%" PRIi64 " level %d since %s", SMA_VID(pSma), pItem->suid,
-             pItem->type, terrstr(terrno));
+             pItem->type, terrstr());
    return TSDB_CODE_FAILED;
  }
  smaDebug("vgId:%d, restore rsma task success for table:%" PRIi64 " level %d", SMA_VID(pSma), pItem->suid,

@@ -1067,26 +1108,27 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter) {
  return TSDB_CODE_SUCCESS;
}

-int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
+int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
  SSma   *pSma = pRSmaStat->pSma;
  SVnode *pVnode = pSma->pVnode;
  int32_t vid = SMA_VID(pSma);
  int64_t toffset = 0;
  bool    isFileCreated = false;

-  if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
+  if (taosHashGetSize(pInfoHash) <= 0) {
    return TSDB_CODE_SUCCESS;
  }

-  void *infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
+  void *infoHash = taosHashIterate(pInfoHash, NULL);
  if (!infoHash) {
    return TSDB_CODE_SUCCESS;
  }

  STFile tFile = {0};
-  if (RSMA_SUBMIT_VER(pRSmaStat) > 0) {
+#if 0
+  if (pRSmaStat->commitAppliedVer > 0) {
    char qTaskInfoFName[TSDB_FILENAME_LEN];
-    tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName);
+    tdRSmaQTaskInfoGetFName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
    if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
      smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
      goto _err;

@@ -1099,6 +1141,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {

    isFileCreated = true;
  }
+#endif

  while (infoHash) {
    SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash;

@@ -1114,7 +1157,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
      int8_t type = (int8_t)(i + 1);
      if (qSerializeTaskStatus(taskInfo, &pOutput, &len) < 0) {
        smaError("vgId:%d, rsma, table %" PRIi64 " level %d serialize qTaskInfo failed since %s", vid, pRSmaInfo->suid,
-                 i + 1, terrstr(terrno));
+                 i + 1, terrstr());
        goto _err;
      }
      if (!pOutput || len <= 0) {

@@ -1130,7 +1173,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {

    if (!isFileCreated) {
      char qTaskInfoFName[TSDB_FILENAME_LEN];
-      tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName);
+      tdRSmaQTaskInfoGetFName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
      if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
        smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
        goto _err;

@@ -1163,11 +1206,11 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
      taosMemoryFree(pOutput);
    }

-    infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), infoHash);
+    infoHash = taosHashIterate(pInfoHash, infoHash);
  }

  if (isFileCreated) {
-    tFile.info.qTaskInfo.submitVer = atomic_load_64(&pRSmaStat->submitVer);
+    tFile.info.qTaskInfo.submitVer = atomic_load_64(&pRSmaStat->commitSubmitVer);
    if (tdUpdateTFileHeader(&tFile) < 0) {
      smaError("vgId:%d, rsma, failed to update tfile %s header since %s", vid, TD_TFILE_FULL_NAME(&tFile),
               tstrerror(terrno));

@@ -1217,6 +1260,10 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
      smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data since stat is %" PRIi8 ", rsetId rsetId:%" PRIi64
               " refId:%d",
               SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pItem->refId);
+      if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
+        taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle,
+                     &pItem->tmrId);
+      }
      return;
    }
    default:
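tdRSmaPersistExecImpl keeps its lazy file creation: the qtaskinfo file, now named after commitAppliedVer, is only created once a task actually yields serialized state, and nothing is written if no task has state to persist. The rough, stand-alone sketch below illustrates that lazy-create-then-append loop; the file name and serializer are invented for the example and do not reflect the real qtaskinfo format.

// Rough sketch of the lazy-create-then-append persistence loop used by the
// qTaskInfo persist step above. File layout and names here are invented.
#include <stdint.h>
#include <stdio.h>

// stand-in for a task-state serializer: writes some bytes, returns the length
static int demoSerializeTask(int taskId, char *buf, int cap) {
  return snprintf(buf, cap, "task-%d-state;", taskId);
}

static int demoPersist(int64_t commitVer, const int *taskIds, int nTasks) {
  char  fname[64];
  FILE *fp = NULL;

  for (int i = 0; i < nTasks; ++i) {
    char buf[64];
    int  len = demoSerializeTask(taskIds[i], buf, sizeof(buf));
    if (len <= 0) continue;  // nothing to persist for this task

    if (!fp) {               // create the file lazily, only once it is needed
      snprintf(fname, sizeof(fname), "qtaskinfo.v%lld", (long long)commitVer);
      fp = fopen(fname, "wb");
      if (!fp) return -1;
    }
    fwrite(buf, 1, (size_t)len, fp);
  }

  if (fp) fclose(fp);        // no serialized tasks means no file at all
  return 0;
}

int main(void) {
  int tasks[] = {1, 2};
  return demoPersist(7, tasks, 2);
}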
@@ -313,4 +313,99 @@ int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t

  return TSDB_CODE_SUCCESS;
}

+static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
+                                tb_uid_t suid, int8_t idx) {
+  SVnode *pVnode = pSma->pVnode;
+  char   *pOutput = NULL;
+  int32_t len = 0;
+
+  if (qSerializeTaskStatus(srcTaskInfo, &pOutput, &len) < 0) {
+    smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
+             terrstr());
+    goto _err;
+  }
+
+  SReadHandle handle = {
+      .meta = pVnode->pMeta,
+      .vnode = pVnode,
+      .initTqReader = 1,
+  };
+  ASSERT(!dstTaskInfo);
+  dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
+  if (!dstTaskInfo) {
+    terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
+    goto _err;
+  }
+
+  if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
+    smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
+             terrstr());
+    goto _err;
+  }
+
+  smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
+
+  taosMemoryFreeClear(pOutput);
+  return TSDB_CODE_SUCCESS;
+_err:
+  taosMemoryFreeClear(pOutput);
+  tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1);
+  return TSDB_CODE_FAILED;
+}
+
+/**
+ * @brief pTSchema is shared
+ *
+ * @param pSma
+ * @param pDest
+ * @param pSrc
+ * @return int32_t
+ */
+int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) {
+  SVnode     *pVnode = pSma->pVnode;
+  SRSmaParam *param = NULL;
+  if (!pSrc) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  if (!pDest) {
+    pDest = taosMemoryCalloc(1, sizeof(SRSmaInfo));
+    if (!pDest) {
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      return TSDB_CODE_FAILED;
+    }
+  }
+
+  memcpy(pDest, pSrc, sizeof(SRSmaInfo));
+
+  SMetaReader mr = {0};
+  metaReaderInit(&mr, SMA_META(pSma), 0);
+  smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid);
+  if (metaGetTableEntryByUid(&mr, pSrc->suid) < 0) {
+    smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid,
+             terrstr());
+    goto _err;
+  }
+  ASSERT(mr.me.type == TSDB_SUPER_TABLE);
+  ASSERT(mr.me.uid == pSrc->suid);
+  if (TABLE_IS_ROLLUP(mr.me.flags)) {
+    param = &mr.me.stbEntry.rsmaParam;
+    for (int i = 0; i < TSDB_RETENTION_L2; ++i) {
+      SRSmaInfoItem *pItem = &pSrc->items[i];
+      if (pItem->taskInfo) {
+        tdCloneQTaskInfo(pSma, pDest->items[i].taskInfo, pItem->taskInfo, param, pSrc->suid, i);
+      }
+    }
+    smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid);
+  }
+
+  metaReaderClear(&mr);
+  return TSDB_CODE_SUCCESS;
+_err:
+  metaReaderClear(&mr);
+  tdFreeRSmaInfo(pSma, pDest, false);
+  return TSDB_CODE_FAILED;
+}
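tdCloneQTaskInfo above clones a stream task by serializing the source task's status and deserializing it into a freshly created task rather than copying internals field by field. The sketch below shows the same round-trip idea on a toy struct; the struct, serializer, and names are assumptions made for the example and are not the real qTaskInfo API.

// Minimal illustration of the clone-by-round-trip idea used above.
#include <stdio.h>
#include <stdlib.h>

typedef struct { int progress; char name[16]; } DemoTask;

static int demoSerialize(const DemoTask *t, char *buf, int cap) {
  return snprintf(buf, cap, "%d|%s", t->progress, t->name);
}

static int demoDeserialize(DemoTask *t, const char *buf) {
  return sscanf(buf, "%d|%15[^|]", &t->progress, t->name) == 2 ? 0 : -1;
}

static DemoTask *demoClone(const DemoTask *src) {
  char buf[64];
  if (demoSerialize(src, buf, sizeof(buf)) <= 0) return NULL;

  DemoTask *dst = calloc(1, sizeof(*dst));  // "create" the destination task
  if (!dst) return NULL;
  if (demoDeserialize(dst, buf) < 0) {      // restore state from the snapshot
    free(dst);
    return NULL;
  }
  return dst;
}

int main(void) {
  DemoTask  src = {.progress = 42, .name = "level-2"};
  DemoTask *dst = demoClone(&src);
  if (dst) printf("cloned: %s at %d\n", dst->name, dst->progress);
  free(dst);
  return 0;
}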
// ...

@@ -476,9 +476,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
    if (--state->iFileSet >= 0) {
      pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
    } else {
-      // tBlockDataClear(&state->blockData);
+      // tBlockDataClear(&state->blockData, 1);
      if (state->pBlockData) {
-        tBlockDataClear(state->pBlockData);
+        tBlockDataClear(state->pBlockData, 1);
        state->pBlockData = NULL;
      }

@@ -500,9 +500,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
      if (code) goto _err;

      /* if (state->pBlockIdx) { */
-      /* tBlockIdxReset(state->blockIdx); */
      /* } */
-      /* tBlockIdxReset(state->blockIdx); */
      /* code = tMapDataSearch(&state->blockIdxMap, state->pBlockIdxExp, tGetBlockIdx, tCmprBlockIdx,
       * &state->blockIdx);
       */

@@ -582,8 +580,8 @@ _err:
    state->aBlockIdx = NULL;
  }
  if (state->pBlockData) {
-    // tBlockDataClear(&state->blockData);
-    tBlockDataClear(state->pBlockData);
+    // tBlockDataClear(&state->blockData, 1);
+    tBlockDataClear(state->pBlockData, 1);
    state->pBlockData = NULL;
  }

@@ -609,8 +607,8 @@ int32_t clearNextRowFromFS(void *iter) {
    state->aBlockIdx = NULL;
  }
  if (state->pBlockData) {
-    // tBlockDataClear(&state->blockData);
-    tBlockDataClear(state->pBlockData);
+    // tBlockDataClear(&state->blockData, 1);
+    tBlockDataClear(state->pBlockData, 1);
    state->pBlockData = NULL;
  }
@@ -263,7 +263,7 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
  taosArrayClear(pCommitter->aBlockIdx);
  tMapDataReset(&pCommitter->oBlockMap);
  tBlockDataReset(&pCommitter->oBlockData);
-  pRSet = tsdbFSStateGetDFileSet(pTsdb->fs->nState, pCommitter->commitFid);
+  pRSet = tsdbFSStateGetDFileSet(pTsdb->fs->nState, pCommitter->commitFid, TD_EQ);
  if (pRSet) {
    code = tsdbDataFReaderOpen(&pCommitter->pReader, pTsdb, pRSet);
    if (code) goto _err;

@@ -284,16 +284,7 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
                       .fLast = {.commitID = pCommitter->commitID, .size = 0},
                       .fSma = pRSet->fSma};
  } else {
-    STfs   *pTfs = pTsdb->pVnode->pTfs;
-    SDiskID did = {.level = 0, .id = 0};
-
-    // TODO: alloc a new disk
-    // tfsAllocDisk(pTfs, 0, &did);
-
-    // create the directory
-    tfsMkdirRecurAt(pTfs, pTsdb->path, did);
-
-    wSet = (SDFileSet){.diskId = did,
+    wSet = (SDFileSet){.diskId = (SDiskID){.level = 0, .id = 0},
                       .fid = pCommitter->commitFid,
                       .fHead = {.commitID = pCommitter->commitID, .offset = 0, .size = 0},
                       .fData = {.commitID = pCommitter->commitID, .size = 0},

@@ -1001,10 +992,10 @@ _exit:
static void tsdbCommitDataEnd(SCommitter *pCommitter) {
  taosArrayDestroy(pCommitter->aBlockIdx);
  tMapDataClear(&pCommitter->oBlockMap);
-  tBlockDataClear(&pCommitter->oBlockData);
+  tBlockDataClear(&pCommitter->oBlockData, 1);
  taosArrayDestroy(pCommitter->aBlockIdxN);
  tMapDataClear(&pCommitter->nBlockMap);
-  tBlockDataClear(&pCommitter->nBlockData);
+  tBlockDataClear(&pCommitter->nBlockData, 1);
  tTSchemaDestroy(pCommitter->skmTable.pTSchema);
  tTSchemaDestroy(pCommitter->skmRow.pTSchema);
}
@@ -698,6 +698,6 @@ void tsdbFSStateDeleteDFileSet(STsdbFSState *pState, int32_t fid) {

SDelFile *tsdbFSStateGetDelFile(STsdbFSState *pState) { return pState->pDelFile; }

-SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid) {
-  return (SDFileSet *)taosArraySearch(pState->aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
+SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid, int32_t flag) {
+  return (SDFileSet *)taosArraySearch(pState->aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, flag);
}
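tsdbFSStateGetDFileSet now forwards a comparison flag to the sorted-array search instead of hard-coding TD_EQ, so callers can ask for an exact fid or for the first file set at or after it. A small self-contained illustration of that flag-driven search follows; DEMO_EQ and DEMO_GE are stand-ins for the real flags, and the array here is just plain ints.

// Exact-match vs. lower-bound lookup over a sorted array, selected by a flag.
#include <stdio.h>

enum { DEMO_EQ = 0, DEMO_GE = 1 };

// returns an index into a sorted int array, or -1 if no match
static int demoSearch(const int *arr, int n, int key, int flag) {
  int lo = 0, hi = n;                  // lower_bound over [lo, hi)
  while (lo < hi) {
    int mid = (lo + hi) / 2;
    if (arr[mid] < key) lo = mid + 1;
    else hi = mid;
  }
  if (flag == DEMO_EQ) return (lo < n && arr[lo] == key) ? lo : -1;
  return (lo < n) ? lo : -1;           // DEMO_GE: first element >= key
}

int main(void) {
  int fids[] = {10, 20, 30};
  printf("EQ 20 -> %d\n", demoSearch(fids, 3, 20, DEMO_EQ));  // 1
  printf("GE 25 -> %d\n", demoSearch(fids, 3, 25, DEMO_GE));  // 2
  printf("EQ 25 -> %d\n", demoSearch(fids, 3, 25, DEMO_EQ));  // -1
  return 0;
}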
@@ -63,10 +63,10 @@ typedef struct SBlockLoadSuppInfo {
} SBlockLoadSuppInfo;

typedef struct SFilesetIter {
  int32_t numOfFiles;  // number of total files
  int32_t index;       // current accessed index in the list
  SArray* pFileList;   // data file list
  int32_t order;
} SFilesetIter;

typedef struct SFileDataBlockInfo {

@@ -830,8 +830,8 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
  SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
  SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;

  int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols,
                                 pBlockData, NULL, NULL);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

@@ -1991,7 +1991,7 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead

static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
  SReaderStatus* pStatus = &pReader->status;
  SArray*        pIndexList = taosArrayInit(4, sizeof(SBlockIdx));

  while (1) {
    bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);

@@ -3006,14 +3006,14 @@ SArray* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {

    code = doLoadFileBlockData(pReader, &pStatus->blockIter, pBlockScanInfo, &pStatus->fileBlockData);
    if (code != TSDB_CODE_SUCCESS) {
-      tBlockDataClear(&pStatus->fileBlockData);
+      tBlockDataClear(&pStatus->fileBlockData, 1);

      terrno = code;
      return NULL;
    }

    copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
-    tBlockDataClear(&pStatus->fileBlockData);
+    tBlockDataClear(&pStatus->fileBlockData, 1);
    return pReader->pResBlock->pDataBlock;
  }

@@ -3132,8 +3132,8 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
      hasNext = (pBlockIter->numOfBlocks > 0);
    }

    // tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables,
    //           pReader->pFileGroup->fid, pReader->idStr);
  }

  return code;

@@ -3204,4 +3204,3 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6

  return TSDB_CODE_SUCCESS;
}
@@ -979,21 +979,21 @@ int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBl

      code = tBlockDataCopy(pBlockData, pBlockData2);
      if (code) {
-        tBlockDataClear(pBlockData1);
-        tBlockDataClear(pBlockData2);
+        tBlockDataClear(pBlockData1, 1);
+        tBlockDataClear(pBlockData2, 1);
        goto _err;
      }

      code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData);
      if (code) {
-        tBlockDataClear(pBlockData1);
-        tBlockDataClear(pBlockData2);
+        tBlockDataClear(pBlockData1, 1);
+        tBlockDataClear(pBlockData2, 1);
        goto _err;
      }
    }

-    tBlockDataClear(pBlockData1);
-    tBlockDataClear(pBlockData2);
+    tBlockDataClear(pBlockData1, 1);
+    tBlockDataClear(pBlockData2, 1);
  }

  tFree(pBuf1);

@@ -1115,29 +1115,29 @@ int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *p
    for (iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
      code = tsdbReadSubBlockData(pReader, pBlockIdx, pBlock, iSubBlock, pBlockData1, ppBuf1, ppBuf2);
      if (code) {
-        tBlockDataClear(pBlockData1);
-        tBlockDataClear(pBlockData2);
+        tBlockDataClear(pBlockData1, 1);
+        tBlockDataClear(pBlockData2, 1);
        goto _err;
      }

      code = tBlockDataCopy(pBlockData, pBlockData2);
      if (code) {
-        tBlockDataClear(pBlockData1);
-        tBlockDataClear(pBlockData2);
+        tBlockDataClear(pBlockData1, 1);
+        tBlockDataClear(pBlockData2, 1);
        goto _err;
      }

      // merge two block data
      code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData);
      if (code) {
-        tBlockDataClear(pBlockData1);
-        tBlockDataClear(pBlockData2);
+        tBlockDataClear(pBlockData1, 1);
+        tBlockDataClear(pBlockData2, 1);
        goto _err;
      }
    }

-    tBlockDataClear(pBlockData1);
-    tBlockDataClear(pBlockData2);
+    tBlockDataClear(pBlockData1, 1);
+    tBlockDataClear(pBlockData2, 1);
  }

  ASSERT(pBlock->nRow == pBlockData->nRow);

(File diff suppressed because it is too large)
@@ -36,15 +36,15 @@ int32_t tMapDataPutItem(SMapData *pMapData, void *pItem, int32_t (*tPutItemFn)(u

  // alloc
  code = tRealloc((uint8_t **)&pMapData->aOffset, sizeof(int32_t) * pMapData->nItem);
-  if (code) goto _err;
+  if (code) goto _exit;
  code = tRealloc(&pMapData->pData, pMapData->nData);
-  if (code) goto _err;
+  if (code) goto _exit;

  // put
  pMapData->aOffset[nItem] = offset;
  tPutItemFn(pMapData->pData + offset, pItem);

-_err:
+_exit:
  return code;
}

@@ -189,25 +189,12 @@ static FORCE_INLINE int32_t tGetTSDBKEY(uint8_t *p, TSDBKEY *pKey) {
}

// SBlockIdx ======================================================
-void tBlockIdxReset(SBlockIdx *pBlockIdx) {
-  pBlockIdx->minKey = TSKEY_MAX;
-  pBlockIdx->maxKey = TSKEY_MIN;
-  pBlockIdx->minVersion = VERSION_MAX;
-  pBlockIdx->maxVersion = VERSION_MIN;
-  pBlockIdx->offset = -1;
-  pBlockIdx->size = -1;
-}
-
int32_t tPutBlockIdx(uint8_t *p, void *ph) {
  int32_t    n = 0;
  SBlockIdx *pBlockIdx = (SBlockIdx *)ph;

  n += tPutI64(p ? p + n : p, pBlockIdx->suid);
  n += tPutI64(p ? p + n : p, pBlockIdx->uid);
-  n += tPutI64(p ? p + n : p, pBlockIdx->minKey);
-  n += tPutI64(p ? p + n : p, pBlockIdx->maxKey);
-  n += tPutI64v(p ? p + n : p, pBlockIdx->minVersion);
-  n += tPutI64v(p ? p + n : p, pBlockIdx->maxVersion);
  n += tPutI64v(p ? p + n : p, pBlockIdx->offset);
  n += tPutI64v(p ? p + n : p, pBlockIdx->size);

@@ -220,10 +207,6 @@ int32_t tGetBlockIdx(uint8_t *p, void *ph) {

  n += tGetI64(p + n, &pBlockIdx->suid);
  n += tGetI64(p + n, &pBlockIdx->uid);
-  n += tGetI64(p + n, &pBlockIdx->minKey);
-  n += tGetI64(p + n, &pBlockIdx->maxKey);
-  n += tGetI64v(p + n, &pBlockIdx->minVersion);
-  n += tGetI64v(p + n, &pBlockIdx->maxVersion);
  n += tGetI64v(p + n, &pBlockIdx->offset);
  n += tGetI64v(p + n, &pBlockIdx->size);

@@ -921,6 +904,76 @@ _exit:
  return code;
}
+int32_t tPutColData(uint8_t *p, SColData *pColData) {
+  int32_t n = 0;
+
+  n += tPutI16v(p ? p + n : p, pColData->cid);
+  n += tPutI8(p ? p + n : p, pColData->type);
+  n += tPutI8(p ? p + n : p, pColData->smaOn);
+  n += tPutI32v(p ? p + n : p, pColData->nVal);
+  n += tPutU8(p ? p + n : p, pColData->flag);
+
+  if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit;
+  if (pColData->flag != HAS_VALUE) {
+    // bitmap
+    int32_t size = BIT2_SIZE(pColData->nVal);
+    if (p) {
+      memcpy(p + n, pColData->pBitMap, size);
+    }
+    n += size;
+  }
+  if (IS_VAR_DATA_TYPE(pColData->type)) {
+    // offset
+    int32_t size = sizeof(int32_t) * pColData->nVal;
+    if (p) {
+      memcpy(p + n, pColData->aOffset, size);
+    }
+    n += size;
+  }
+  n += tPutI32v(p ? p + n : p, pColData->nData);
+  if (p) {
+    memcpy(p + n, pColData->pData, pColData->nData);
+  }
+  n += pColData->nData;
+
+_exit:
+  return n;
+}
+
+int32_t tGetColData(uint8_t *p, SColData *pColData) {
+  int32_t n = 0;
+
+  n += tGetI16v(p + n, &pColData->cid);
+  n += tGetI8(p + n, &pColData->type);
+  n += tGetI8(p + n, &pColData->smaOn);
+  n += tGetI32v(p + n, &pColData->nVal);
+  n += tGetU8(p + n, &pColData->flag);
+
+  if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit;
+  if (pColData->flag != HAS_VALUE) {
+    // bitmap
+    int32_t size = BIT2_SIZE(pColData->nVal);
+    pColData->pBitMap = p + n;
+    n += size;
+  }
+  if (IS_VAR_DATA_TYPE(pColData->type)) {
+    // offset
+    int32_t size = sizeof(int32_t) * pColData->nVal;
+    pColData->aOffset = (int32_t *)(p + n);
+    n += size;
+  }
+  n += tGetI32v(p + n, &pColData->nData);
+  pColData->pData = p + n;
+  n += pColData->nData;
+
+_exit:
+  return n;
+}
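The new tPutColData and tGetColData follow the same two-pass convention as the existing tPut*/tGet* helpers: calling the writer with a NULL buffer only measures the encoded size (the `p ? p + n : p` idiom), and a second call with a real buffer performs the write, producing the same byte count. A tiny self-contained version of that convention, using an invented record rather than the real encoders:

// Measure-then-write serialization, as used by the tPut*/tGet* helpers above.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int demoPutI32(uint8_t *p, int32_t v) {
  if (p) memcpy(p, &v, sizeof(v));   // write only when a buffer is provided
  return (int)sizeof(v);
}

static int demoPutRecord(uint8_t *p, int32_t a, int32_t b) {
  int n = 0;
  n += demoPutI32(p ? p + n : p, a);
  n += demoPutI32(p ? p + n : p, b);
  return n;                          // same size whether measuring or writing
}

int main(void) {
  int size = demoPutRecord(NULL, 7, 9);   // pass 1: measure
  uint8_t *buf = malloc((size_t)size);
  demoPutRecord(buf, 7, 9);               // pass 2: encode
  printf("encoded %d bytes\n", size);
  free(buf);
  return 0;
}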
static FORCE_INLINE int32_t tColDataCmprFn(const void *p1, const void *p2) {
  SColData *pColData1 = (SColData *)p1;
  SColData *pColData2 = (SColData *)p2;

@@ -962,11 +1015,11 @@ void tBlockDataReset(SBlockData *pBlockData) {
  taosArrayClear(pBlockData->aIdx);
}

-void tBlockDataClear(SBlockData *pBlockData) {
+void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear) {
  tFree((uint8_t *)pBlockData->aVersion);
  tFree((uint8_t *)pBlockData->aTSKEY);
  taosArrayDestroy(pBlockData->aIdx);
-  taosArrayDestroyEx(pBlockData->aColData, tColDataClear);
+  taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL);
}
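The new deepClear argument of tBlockDataClear decides whether the per-column destructor runs, which appears to matter once tGetBlockData points column data at borrowed buffers that the block does not own. A simplified stand-alone sketch of that deep/shallow clear choice, with types invented for the example:

// Container clear with an optional per-element destructor, like deepClear.
#include <stdio.h>
#include <stdlib.h>

typedef struct { int *owned; } DemoCol;  // owned == NULL means "borrowed view"

static void demoColClear(DemoCol *c) { free(c->owned); }

static void demoBlockClear(DemoCol *cols, int nCols, int deepClear) {
  if (deepClear) {
    for (int i = 0; i < nCols; ++i) demoColClear(&cols[i]);
  }
  // with deepClear == 0 only the container bookkeeping would be released,
  // leaving borrowed column memory to its real owner
}

int main(void) {
  DemoCol owning[1] = {{.owned = malloc(sizeof(int))}};
  DemoCol borrowed[1] = {{.owned = NULL}};

  demoBlockClear(owning, 1, 1);    // deep: frees what the block allocated
  demoBlockClear(borrowed, 1, 0);  // shallow: nothing to free here
  printf("cleared\n");
  return 0;
}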
int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema) {

@@ -1079,6 +1132,46 @@ _err:
  return code;
}

+int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom) {
+  int32_t code = 0;
+
+  int32_t iColData = 0;
+  for (int32_t iColDataFrom = 0; iColDataFrom < taosArrayGetSize(pBlockDataFrom->aIdx); iColDataFrom++) {
+    SColData *pColDataFrom = tBlockDataGetColDataByIdx(pBlockDataFrom, iColDataFrom);
+
+    while (true) {
+      SColData *pColData;
+      if (iColData < taosArrayGetSize(pBlockData->aIdx)) {
+        pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
+      } else {
+        pColData = NULL;
+      }
+
+      if (pColData == NULL || pColData->cid > pColDataFrom->cid) {
+        code = tBlockDataAddColData(pBlockData, iColData, &pColData);
+        if (code) goto _exit;
+
+        tColDataInit(pColData, pColDataFrom->cid, pColDataFrom->type, pColDataFrom->smaOn);
+        for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
+          code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type));
+          if (code) goto _exit;
+        }
+
+        iColData++;
+        break;
+      } else if (pColData->cid == pColDataFrom->cid) {
+        iColData++;
+        break;
+      } else {
+        iColData++;
+      }
+    }
+  }
+
+_exit:
+  return code;
+}
+
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData) {
  int32_t code = 0;

@@ -1239,6 +1332,52 @@ void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColD
  *ppColData = NULL;
}

+int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData) {
+  int32_t n = 0;
+
+  n += tPutI32v(p ? p + n : p, pBlockData->nRow);
+  if (p) {
+    memcpy(p + n, pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow);
+  }
+  n = n + sizeof(int64_t) * pBlockData->nRow;
+  if (p) {
+    memcpy(p + n, pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow);
+  }
+  n = n + sizeof(TSKEY) * pBlockData->nRow;
+
+  int32_t nCol = taosArrayGetSize(pBlockData->aIdx);
+  n += tPutI32v(p ? p + n : p, nCol);
+  for (int32_t iCol = 0; iCol < nCol; iCol++) {
+    SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iCol);
+    n += tPutColData(p ? p + n : p, pColData);
+  }
+
+  return n;
+}
+
+int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData) {
+  int32_t n = 0;
+
+  tBlockDataReset(pBlockData);
+
+  n += tGetI32v(p + n, &pBlockData->nRow);
+  pBlockData->aVersion = (int64_t *)(p + n);
+  n = n + sizeof(int64_t) * pBlockData->nRow;
+  pBlockData->aTSKEY = (TSKEY *)(p + n);
+  n = n + sizeof(TSKEY) * pBlockData->nRow;
+
+  int32_t nCol;
+  n += tGetI32v(p + n, &nCol);
+  for (int32_t iCol = 0; iCol < nCol; iCol++) {
+    SColData *pColData;
+
+    if (tBlockDataAddColData(pBlockData, iCol, &pColData)) return -1;
+    n += tGetColData(p + n, pColData);
+  }
+
+  return n;
+}
+
// ALGORITHM ==============================
void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
  SColVal colVal;
@@ -233,7 +233,8 @@ int vnodeCommit(SVnode *pVnode) {
  walBeginSnapshot(pVnode->pWal, pVnode->state.applied);

  // preCommit
-  smaPreCommit(pVnode->pSma);
+  // smaSyncPreCommit(pVnode->pSma);
+  smaAsyncPreCommit(pVnode->pSma);

  // commit each sub-system
  if (metaCommit(pVnode->pMeta) < 0) {

@@ -242,6 +243,8 @@ int vnodeCommit(SVnode *pVnode) {
  }

  if (VND_IS_RSMA(pVnode)) {
+    smaAsyncCommit(pVnode->pSma);
+
    if (tsdbCommit(VND_RSMA0(pVnode)) < 0) {
      ASSERT(0);
      return -1;

@@ -276,7 +279,8 @@ int vnodeCommit(SVnode *pVnode) {
  pVnode->state.committed = info.state.committed;

  // postCommit
-  smaPostCommit(pVnode->pSma);
+  // smaSyncPostCommit(pVnode->pSma);
+  smaAsyncPostCommit(pVnode->pSma);

  // apply the commit (TODO)
  walEndSnapshot(pVnode->pWal);
|
|
@@ -20,13 +20,13 @@ struct SVSnapReader {
   SVnode *pVnode;
   int64_t sver;
   int64_t ever;
+  int64_t index;
   // meta
   int8_t metaDone;
   SMetaSnapReader *pMetaReader;
   // tsdb
   int8_t tsdbDone;
   STsdbSnapReader *pTsdbReader;
-  uint8_t *pData;
 };

 int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
@@ -42,12 +42,7 @@ int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
   pReader->sver = sver;
   pReader->ever = ever;

-  code = metaSnapReaderOpen(pVnode->pMeta, sver, ever, &pReader->pMetaReader);
-  if (code) goto _err;
-
-  code = tsdbSnapReaderOpen(pVnode->pTsdb, sver, ever, &pReader->pTsdbReader);
-  if (code) goto _err;
+  vInfo("vgId:%d vnode snapshot reader opened, sver:%" PRId64 " ever:%" PRId64, TD_VID(pVnode), sver, ever);

   *ppReader = pReader;
   return code;
@@ -60,54 +55,85 @@ _err:
 int32_t vnodeSnapReaderClose(SVSnapReader *pReader) {
   int32_t code = 0;

-  tFree(pReader->pData);
-  if (pReader->pTsdbReader) tsdbSnapReaderClose(&pReader->pTsdbReader);
-  if (pReader->pMetaReader) metaSnapReaderClose(&pReader->pMetaReader);
-  taosMemoryFree(pReader);
+  if (pReader->pTsdbReader) {
+    tsdbSnapReaderClose(&pReader->pTsdbReader);
+  }
+
+  if (pReader->pMetaReader) {
+    metaSnapReaderClose(&pReader->pMetaReader);
+  }
+
+  vInfo("vgId:%d vnode snapshot reader closed", TD_VID(pReader->pVnode));
+  taosMemoryFree(pReader);
   return code;
 }

 int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) {
   int32_t code = 0;

+  // META ==============
   if (!pReader->metaDone) {
-    code = metaSnapRead(pReader->pMetaReader, &pReader->pData);
+    // open reader if not
+    if (pReader->pMetaReader == NULL) {
+      code = metaSnapReaderOpen(pReader->pVnode->pMeta, pReader->sver, pReader->ever, &pReader->pMetaReader);
+      if (code) goto _err;
+    }
+
+    code = metaSnapRead(pReader->pMetaReader, ppData);
     if (code) {
-      if (code == TSDB_CODE_VND_READ_END) {
-        pReader->metaDone = 1;
-      } else {
-        goto _err;
-      }
+      goto _err;
     } else {
-      *ppData = pReader->pData;
-      *nData = sizeof(SSnapDataHdr) + ((SSnapDataHdr *)pReader->pData)->size;
-      goto _exit;
+      if (*ppData) {
+        goto _exit;
+      } else {
+        pReader->metaDone = 1;
+        code = metaSnapReaderClose(&pReader->pMetaReader);
+        if (code) goto _err;
+      }
     }
   }

+  // TSDB ==============
   if (!pReader->tsdbDone) {
-    code = tsdbSnapRead(pReader->pTsdbReader, &pReader->pData);
+    // open if not
+    if (pReader->pTsdbReader == NULL) {
+      code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, &pReader->pTsdbReader);
+      if (code) goto _err;
+    }
+
+    code = tsdbSnapRead(pReader->pTsdbReader, ppData);
     if (code) {
-      if (code == TSDB_CODE_VND_READ_END) {
-        pReader->tsdbDone = 1;
-      } else {
-        goto _err;
-      }
+      goto _err;
     } else {
-      *ppData = pReader->pData;
-      *nData = sizeof(SSnapDataHdr) + ((SSnapDataHdr *)pReader->pData)->size;
-      goto _exit;
+      if (*ppData) {
+        goto _exit;
+      } else {
+        pReader->tsdbDone = 1;
+        code = tsdbSnapReaderClose(&pReader->pTsdbReader);
+        if (code) goto _err;
+      }
     }
   }

-  code = TSDB_CODE_VND_READ_END;
+  *ppData = NULL;
+  *nData = 0;

 _exit:
+  if (*ppData) {
+    SSnapDataHdr *pHdr = (SSnapDataHdr *)(*ppData);
+
+    pReader->index++;
+    *nData = sizeof(SSnapDataHdr) + pHdr->size;
+    pHdr->index = pReader->index;
+    vInfo("vgId:%d vnode snapshot read data,index:%" PRId64 " type:%d nData:%d ", TD_VID(pReader->pVnode),
+          pReader->index, pHdr->type, *nData);
+  } else {
+    vInfo("vgId:%d vnode snapshot read data end, index:%" PRId64, TD_VID(pReader->pVnode), pReader->index);
+  }
   return code;

 _err:
-  vError("vgId:% snapshot read failed since %s", TD_VID(pReader->pVnode), tstrerror(code));
+  vError("vgId:% vnode snapshot read failed since %s", TD_VID(pReader->pVnode), tstrerror(code));
   return code;
 }

@@ -116,24 +142,13 @@ struct SVSnapWriter {
   SVnode *pVnode;
   int64_t sver;
   int64_t ever;
+  int64_t index;
   // meta
   SMetaSnapWriter *pMetaSnapWriter;
   // tsdb
   STsdbSnapWriter *pTsdbSnapWriter;
 };

-static int32_t vnodeSnapRollback(SVSnapWriter *pWriter) {
-  int32_t code = 0;
-  // TODO
-  return code;
-}
-
-static int32_t vnodeSnapCommit(SVSnapWriter *pWriter) {
-  int32_t code = 0;
-  // TODO
-  return code;
-}
-
 int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
   int32_t code = 0;
   SVSnapWriter *pWriter = NULL;
@@ -148,62 +163,78 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
   pWriter->sver = sver;
   pWriter->ever = ever;

+  vInfo("vgId:%d vnode snapshot writer opened", TD_VID(pVnode));
+  *ppWriter = pWriter;
   return code;

 _err:
   vError("vgId:%d vnode snapshot writer open failed since %s", TD_VID(pVnode), tstrerror(code));
+  *ppWriter = NULL;
   return code;
 }

 int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback) {
   int32_t code = 0;

-  if (rollback) {
-    code = vnodeSnapRollback(pWriter);
-    if (code) goto _err;
-  } else {
-    code = vnodeSnapCommit(pWriter);
+  if (pWriter->pMetaSnapWriter) {
+    code = metaSnapWriterClose(&pWriter->pMetaSnapWriter, rollback);
     if (code) goto _err;
   }

+  if (pWriter->pTsdbSnapWriter) {
+    code = tsdbSnapWriterClose(&pWriter->pTsdbSnapWriter, rollback);
+    if (code) goto _err;
+  }
+
+_exit:
+  vInfo("vgId:%d vnode snapshot writer closed, rollback:%d", TD_VID(pWriter->pVnode), rollback);
   taosMemoryFree(pWriter);
   return code;

 _err:
-  vError("vgId:%d vnode snapshow writer close failed since %s", TD_VID(pWriter->pVnode), tstrerror(code));
+  vError("vgId:%d vnode snapshot writer close failed since %s", TD_VID(pWriter->pVnode), tstrerror(code));
   return code;
 }

 int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
   int32_t code = 0;
-  SSnapDataHdr *pSnapDataHdr = (SSnapDataHdr *)pData;
+  SSnapDataHdr *pHdr = (SSnapDataHdr *)pData;
   SVnode *pVnode = pWriter->pVnode;

-  ASSERT(pSnapDataHdr->size + sizeof(SSnapDataHdr) == nData);
+  ASSERT(pHdr->size + sizeof(SSnapDataHdr) == nData);
+  ASSERT(pHdr->index == pWriter->index + 1);
+  pWriter->index = pHdr->index;

-  if (pSnapDataHdr->type == 0) {
+  vInfo("vgId:%d vnode snapshot write data, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), pHdr->index,
+        pHdr->type, nData);
+
+  if (pHdr->type == 0) {
     // meta

     if (pWriter->pMetaSnapWriter == NULL) {
       code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
       if (code) goto _err;
     }

-    code = metaSnapWrite(pWriter->pMetaSnapWriter, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
+    code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
     if (code) goto _err;
   } else {
     // tsdb

     if (pWriter->pTsdbSnapWriter == NULL) {
       code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
       if (code) goto _err;
     }

-    code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
+    code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
     if (code) goto _err;
   }

+_exit:
   return code;

 _err:
-  vError("vgId:%d vnode snapshot write failed since %s", TD_VID(pVnode), tstrerror(code));
+  vError("vgId:%d vnode snapshot write failed since %s, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode),
+         tstrerror(code), pHdr->index, pHdr->type, nData);
   return code;
 }
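Note on the reworked snapshot reader: sub-readers are now opened lazily on the first read, and the end of the snapshot is reported by returning a NULL block instead of the old TSDB_CODE_VND_READ_END error, with each block stamped with an increasing index in its SSnapDataHdr. A toy, self-contained C sketch of a consumer driving a reader with that contract (all names here are illustrative, not the vnode API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A toy reader with the same contract the reworked vnodeSnapRead exposes:
 * each call yields the next block, and a NULL block (with nData == 0) means "end of snapshot". */
typedef struct {
  int     next;    /* next block to hand out */
  int     total;   /* how many blocks exist */
  uint8_t buf[16];
} ToyReader;

static int toy_read(ToyReader *r, uint8_t **ppData, uint32_t *nData) {
  if (r->next >= r->total) { /* drained: signal end with NULL, not an error code */
    *ppData = NULL;
    *nData = 0;
    return 0;
  }
  snprintf((char *)r->buf, sizeof(r->buf), "block-%d", r->next++);
  *ppData = r->buf;
  *nData = (uint32_t)strlen((char *)r->buf) + 1;
  return 0;
}

int main(void) {
  ToyReader r = {.next = 0, .total = 3};
  uint8_t  *data;
  uint32_t  n;
  for (;;) {
    if (toy_read(&r, &data, &n) != 0) return 1; /* hard error */
    if (data == NULL) break;                    /* end of snapshot */
    printf("got %u bytes: %s\n", n, data);
  }
  return 0;
}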
@@ -689,6 +689,7 @@ _exit:
   tEncoderInit(&encoder, pRsp->pCont, pRsp->contLen);
   tEncodeSVDropTbBatchRsp(&encoder, &rsp);
   tEncoderClear(&encoder);
+  taosArrayDestroy(rsp.pArray);
   return 0;
 }

@@ -900,7 +901,7 @@ static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *
 _err:
   tDecoderClear(&coder);
   vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s",
-         TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr(terrno));
+         TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr());
   return -1;
 }
@@ -536,6 +536,10 @@ static int32_t vnodeSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *
 #endif
 }

+static void vnodeLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
+  SVnode *pVnode = pFsm->data;
+}
+
 static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
   SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
   pFsm->data = pVnode;
@@ -544,6 +548,7 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
   pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
   pFsm->FpGetSnapshotInfo = vnodeSyncGetSnapshot;
   pFsm->FpRestoreFinishCb = NULL;
+  pFsm->FpLeaderTransferCb = vnodeLeaderTransfer;
   pFsm->FpReConfigCb = vnodeSyncReconfig;
   pFsm->FpSnapshotStartRead = vnodeSnapshotStartRead;
   pFsm->FpSnapshotStopRead = vnodeSnapshotStopRead;
@@ -579,8 +584,8 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
   }

   setPingTimerMS(pVnode->sync, 5000);
-  setElectTimerMS(pVnode->sync, 500);
-  setHeartbeatTimerMS(pVnode->sync, 100);
+  setElectTimerMS(pVnode->sync, 1300);
+  setHeartbeatTimerMS(pVnode->sync, 900);
   return 0;
 }
@@ -163,7 +163,7 @@ int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {

   taosArrayPush(pJob->pTasks, &task);

-  qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
+  qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));

   return TSDB_CODE_SUCCESS;
 }
@@ -178,7 +178,7 @@ int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {

   taosArrayPush(pJob->pTasks, &task);

-  qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
+  qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));

   return TSDB_CODE_SUCCESS;
 }
@@ -264,7 +264,7 @@ int32_t ctgInitGetSvrVerTask(SCtgJob *pJob, int32_t taskIdx, void* param) {

   taosArrayPush(pJob->pTasks, &task);

-  qDebug("QID:0x%" PRIx64 " [%dth] task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
+  qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));

   return TSDB_CODE_SUCCESS;
 }
@@ -15,10 +15,10 @@

 #include "command.h"
 #include "catalog.h"
-#include "tdatablock.h"
-#include "tglobal.h"
 #include "commandInt.h"
 #include "scheduler.h"
+#include "tdatablock.h"
+#include "tglobal.h"

 extern SConfig* tsCfg;

@@ -222,7 +222,7 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbFName, S
   char* retentions = buildRetension(pCfg->pRetensions);

   len += sprintf(buf2 + VARSTR_HEADER_SIZE,
-                 "CREATE DATABASE `%s` BUFFER %d CACHELAST %d COMP %d DURATION %dm "
+                 "CREATE DATABASE `%s` BUFFER %d CACHEMODEL %d COMP %d DURATION %dm "
                  "FSYNC %d MAXROWS %d MINROWS %d KEEP %dm,%dm,%dm PAGES %d PAGESIZE %d PRECISION '%s' REPLICA %d "
                  "STRICT %d WAL %d VGROUPS %d SINGLE_STABLE %d",
                  dbFName, pCfg->buffer, pCfg->cacheLast, pCfg->compression, pCfg->daysPerFile, pCfg->fsyncPeriod,
@@ -483,7 +483,7 @@ static int32_t execShowCreateSTable(SShowCreateTableStmt* pStmt, SRetrieveTableR

 static int32_t execAlterCmd(char* cmd, char* value, bool* processed) {
   int32_t code = 0;

   if (0 == strcasecmp(cmd, COMMAND_RESET_LOG)) {
     taosResetLog();
     cfgDumpCfg(tsCfg, 0, false);
@@ -502,13 +502,13 @@ _return:
   if (code) {
     terrno = code;
   }

   return code;
 }

 static int32_t execAlterLocal(SAlterLocalStmt* pStmt) {
   bool processed = false;

   if (execAlterCmd(pStmt->config, pStmt->value, &processed)) {
     return terrno;
   }
@@ -516,7 +516,7 @@ static int32_t execAlterLocal(SAlterLocalStmt* pStmt) {
   if (processed) {
     goto _return;
   }

   if (cfgSetItem(tsCfg, pStmt->config, pStmt->value, CFG_STYPE_ALTER_CMD)) {
     return terrno;
   }
@@ -139,6 +139,12 @@ typedef struct STaskIdInfo {
   char* str;
 } STaskIdInfo;

+enum {
+  STREAM_RECOVER_STEP__NONE = 0,
+  STREAM_RECOVER_STEP__PREPARE,
+  STREAM_RECOVER_STEP__SCAN,
+};
+
 typedef struct {
   //TODO remove prepareStatus
   STqOffsetVal prepareStatus; // for tmq
@@ -147,6 +153,10 @@ typedef struct {
   SSDataBlock* pullOverBlk; // for streaming
   SWalFilterCond cond;
   int64_t lastScanUid;
+  int8_t recoverStep;
+  SQueryTableDataCond tableCond;
+  int64_t recoverStartVer;
+  int64_t recoverEndVer;
 } SStreamTaskInfo;

 typedef struct SExecTaskInfo {
@@ -361,6 +371,13 @@ typedef struct SessionWindowSupporter {
   uint8_t parentType;
 } SessionWindowSupporter;

+typedef struct STimeWindowSupp {
+  int8_t calTrigger;
+  int64_t waterMark;
+  TSKEY maxTs;
+  SColumnInfoData timeWindowData; // query time window info for scalar function execution.
+} STimeWindowAggSupp;
+
 typedef struct SStreamScanInfo {
   uint64_t tableUid; // queried super table uid
   SExprInfo* pPseudoExpr;
@@ -397,6 +414,7 @@ typedef struct SStreamScanInfo {
   SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
   int32_t deleteDataIndex;
   STimeWindow updateWin;
+  STimeWindowAggSupp twAggSup;

   // status for tmq
   // SSchemaWrapper schema;
@@ -442,13 +460,6 @@ typedef struct SAggSupporter {
   int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
 } SAggSupporter;

-typedef struct STimeWindowSupp {
-  int8_t calTrigger;
-  int64_t waterMark;
-  TSKEY maxTs;
-  SColumnInfoData timeWindowData; // query time window info for scalar function execution.
-} STimeWindowAggSupp;
-
 typedef struct SIntervalAggOperatorInfo {
   // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
   SOptrBasicInfo binfo; // basic info
@@ -942,6 +953,7 @@ bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
 int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs,
                                 TSKEY* pEndTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted);
 bool functionNeedToExecute(SqlFunctionCtx* pCtx);
+bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);

 int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
     SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
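Note on moving STimeWindowAggSupp above SStreamScanInfo in the header: the new twAggSup member embeds the struct by value, and C needs the complete definition before the point of embedding; only pointer members can get away with a forward declaration. A tiny standalone illustration (names are made up, not the executor's):

/* Why the struct had to move: embedding a struct by value requires its complete
 * definition to appear first. */
typedef struct Supp {
  int watermark;
} Supp;

typedef struct Scanner {
  Supp supp;        /* by-value member: 'Supp' must already be fully defined here */
  struct Other *p;  /* a pointer member, by contrast, only needs an incomplete type */
} Scanner;

int main(void) {
  Scanner s = {.supp = {.watermark = 5}, .p = 0};
  return s.supp.watermark == 5 ? 0 : 1;
}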
@@ -139,18 +139,29 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
         colDataAppend(pDst, 0, p, isNull);
       }

+      pInfo->pRes->info.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes);
+      pInfo->pRes->info.rows = 1;
+
       if (pInfo->pseudoExprSup.numOfExprs > 0) {
         SExprSupp* pSup = &pInfo->pseudoExprSup;
-        addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes,
+        int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes,
                                GET_TASKID(pTaskInfo));
+        if (code != TSDB_CODE_SUCCESS) {
+          pTaskInfo->code = code;
+          return NULL;
+        }
       }

-      pInfo->pRes->info.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes);
+      if (pTableList->map != NULL) {
         int64_t* groupId = taosHashGet(pTableList->map, &pInfo->pRes->info.uid, sizeof(int64_t));
         pInfo->pRes->info.groupId = *groupId;
+      } else {
+        ASSERT(taosArrayGetSize(pTableList->pTableList) == 1);
+        STableKeyInfo* pKeyInfo = taosArrayGet(pTableList->pTableList, 0);
+        pInfo->pRes->info.groupId = pKeyInfo->groupId;
+      }

       pInfo->indexOfBufferedRes += 1;
-      pInfo->pRes->info.rows = 1;
       return pInfo->pRes;
     } else {
       doSetOperatorCompleted(pOperator);
@@ -182,8 +193,12 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
       STableKeyInfo* pKeyInfo = taosArrayGet(pGroupTableList, 0);
       pInfo->pRes->info.groupId = pKeyInfo->groupId;

-      addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes,
+      code = addTagPseudoColumnData(&pInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pInfo->pRes,
                              GET_TASKID(pTaskInfo));
+      if (code != TSDB_CODE_SUCCESS) {
+        pTaskInfo->code = code;
+        return NULL;
+      }
     }

     tsdbLastrowReaderClose(pInfo->pLastrowReader);
@@ -65,7 +65,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
   }

   rowSize +=
-      (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
+      (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
   return rowSize;
 }

@@ -115,9 +115,6 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
     p->groupId = *(uint64_t*)key;
     p->pos = *(SResultRowPosition*)pData;
     memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t));
-#ifdef BUF_PAGE_DEBUG
-    qDebug("page_groupRes, groupId:%" PRIu64 ",pageId:%d,offset:%d\n", p->groupId, p->pos.pageId, p->pos.offset);
-#endif
     taosArrayPush(pGroupResInfo->pRows, &p);
   }
@@ -261,6 +261,15 @@ int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) {
 }
 #endif

+int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer) {
+  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+  ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
+  pTaskInfo->streamInfo.recoverStartVer = startVer;
+  pTaskInfo->streamInfo.recoverEndVer = endVer;
+  pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE;
+  return 0;
+}
+
 void* qExtractReaderFromStreamScanner(void* scanner) {
   SStreamScanInfo* pInfo = scanner;
   return (void*)pInfo->tqReader;
@@ -1506,15 +1506,11 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
                            int32_t numOfExprs) {
   int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
   int32_t start = pGroupResInfo->index;
-#ifdef BUF_PAGE_DEBUG
-  qDebug("\npage_copytoblock rows:%d", numOfRows);
-#endif
   for (int32_t i = start; i < numOfRows; i += 1) {
     SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
     SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
-#ifdef BUF_PAGE_DEBUG
-    qDebug("page_copytoblock pos pageId:%d, offset:%d", pPos->pos.pageId, pPos->pos.offset);
-#endif
     SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);

     doUpdateNumOfRows(pRow, numOfExprs, rowCellOffset);
@@ -1980,6 +1976,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
     qDebug("%s fetch rsp received, index:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfRows);
   } else {
     pSourceDataInfo->code = code;
+    qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
   }

   pSourceDataInfo->status = EX_SOURCE_DATA_READY;
@@ -2221,6 +2218,8 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
     if (completed == totalSources) {
       return setAllSourcesCompleted(pOperator, startTs);
     }
+
+    sched_yield();
   }

 _error:
@@ -3747,7 +3746,7 @@ void doDestroyExchangeOperatorInfo(void* param) {
   taosArrayDestroy(pExInfo->pSources);
   taosArrayDestroy(pExInfo->pSourceDataInfo);
   if (pExInfo->pResult != NULL) {
-    blockDataDestroy(pExInfo->pResult);
+    pExInfo->pResult = blockDataDestroy(pExInfo->pResult);
   }

   tsem_destroy(&pExInfo->ready);
@@ -29,7 +29,7 @@
 static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
 static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
 static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
-                                       int32_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
+                                       uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);

 static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
   SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;
@@ -264,7 +264,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
     }

     len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
-    int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, 0, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
+    int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
     if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
       longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
     }
@@ -282,7 +282,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
   len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
   int32_t ret =
       setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len,
-                              0, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
+                              pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
   if (ret != TSDB_CODE_SUCCESS) {
     longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
   }
@@ -293,6 +293,29 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
   }
 }

+static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
+  SGroupbyOperatorInfo* pInfo = pOperator->info;
+
+  SSDataBlock* pRes = pInfo->binfo.pRes;
+  while(1) {
+    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
+    doFilter(pInfo->pCondition, pRes);
+
+    bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
+    if (!hasRemain) {
+      doSetOperatorCompleted(pOperator);
+      break;
+    }
+
+    if (pRes->info.rows > 0) {
+      break;
+    }
+  }
+
+  pOperator->resultInfo.totalRows += pRes->info.rows;
+  return (pRes->info.rows == 0)? NULL:pRes;
+}
+
 static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
   if (pOperator->status == OP_EXEC_DONE) {
     return NULL;
@@ -304,22 +327,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
   SSDataBlock* pRes = pInfo->binfo.pRes;

   if (pOperator->status == OP_RES_TO_RETURN) {
-    while(1) {
-      doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
-      doFilter(pInfo->pCondition, pRes);
-
-      bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
-      if (!hasRemain) {
-        doSetOperatorCompleted(pOperator);
-        break;
-      }
-
-      if (pRes->info.rows > 0) {
-        break;
-      }
-    }
-    pOperator->resultInfo.totalRows += pRes->info.rows;
-    return (pRes->info.rows == 0)? NULL:pRes;
+    return buildGroupResultDataBlock(pOperator);
   }

   int32_t order = TSDB_ORDER_ASC;
@@ -373,26 +381,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
   initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, 0);

   pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
-
-  while(1) {
-    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
-    doFilter(pInfo->pCondition, pRes);
-
-    bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
-    if (!hasRemain) {
-      doSetOperatorCompleted(pOperator);
-      break;
-    }
-
-    if (pRes->info.rows > 0) {
-      break;
-    }
-  }
-
-  size_t rows = pRes->info.rows;
-  pOperator->resultInfo.totalRows += rows;
-
-  return (rows == 0)? NULL:pRes;
+  return buildGroupResultDataBlock(pOperator);
 }

 SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
@@ -800,7 +789,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
 }

 int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
-                                int32_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
+                                uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
   SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
   SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
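Note on widening the setGroupResultOutputBuf groupId parameter from int32_t to uint64_t: block group ids are 64-bit, so squeezing them through a 32-bit parameter can make distinct groups collide. A small standalone demonstration of the truncation hazard (helper names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Group ids are 64-bit; passing them through an int32_t parameter truncates them. */
static int32_t  as_i32(int32_t groupId)  { return groupId; }
static uint64_t as_u64(uint64_t groupId) { return groupId; }

int main(void) {
  uint64_t groupId = 0x100000001ULL;  /* two distinct groups could collide after truncation */
  printf("int32_t param : %" PRId32 "\n", as_i32((int32_t)(uint32_t)groupId)); /* prints 1 */
  printf("uint64_t param: %" PRIu64 "\n", as_u64(groupId));                    /* full value kept */
  return 0;
}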
@@ -516,10 +516,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
     }

     SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);

     tsdbReaderClose(pInfo->dataReader);

     int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader,
                                   GET_TASKID(pTaskInfo));
+    if (code != TSDB_CODE_SUCCESS) {
+      longjmp(pTaskInfo->env, code);
+      return NULL;
+    }
   }

   SSDataBlock* result = doTableScanGroup(pOperator);
@@ -798,7 +803,12 @@ static bool isStateWindow(SStreamScanInfo* pInfo) {

 static bool isIntervalWindow(SStreamScanInfo* pInfo) {
   return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
-         pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL;
+         pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
+         pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
+}
+
+static bool isSignleIntervalWindow(SStreamScanInfo* pInfo) {
+  return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
 }

 static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
@@ -871,6 +881,7 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
   }

   resetTableScanInfo(pInfo->pTableScanOp->info, &win);
+  pInfo->pTableScanOp->status = OP_OPENED;
   return true;
 }

@@ -1125,9 +1136,14 @@ static void setUpdateData(SStreamScanInfo* pInfo, SSDataBlock* pBlock, SSDataBlo
 static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, bool out) {
   SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
   ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
-  TSKEY* ts = (TSKEY*)pColDataInfo->pData;
+  TSKEY* tsCol = (TSKEY*)pColDataInfo->pData;
   for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
-    if (updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, ts[rowId]) && out) {
+    SResultRowInfo dumyInfo;
+    dumyInfo.cur.pageId = -1;
+    STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
+    // must check update info first.
+    bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
+    if ( (update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup)) ) && out) {
       taosArrayPush(pInfo->tsArray, &rowId);
     }
   }
@@ -1193,8 +1209,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
     }
   }

-  ASSERT(pInfo->pRes->pDataBlock != NULL);
-
   // currently only the tbname pseudo column
   if (pInfo->numOfPseudoExpr > 0) {
     int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes,
@@ -1259,6 +1273,24 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
     return NULL;
   }

+  if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
+    STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+    memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
+    pTSInfo->scanTimes = 0;
+    pTSInfo->currentGroupId = -1;
+    pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__SCAN;
+  }
+
+  if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__SCAN) {
+    SSDataBlock* pBlock = doTableScan(pInfo->pTableScanOp);
+    if (pBlock != NULL) {
+      return pBlock;
+    }
+    // TODO fill in bloom filter
+    pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
+    return NULL;
+  }
+
   size_t total = taosArrayGetSize(pInfo->pBlockLists);
   // TODO: refactor
   if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
@@ -1392,6 +1424,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
       pInfo->tsArrayIndex = 0;
       checkUpdateData(pInfo, true, pInfo->pRes, true);
       setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
+      pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlockInfo->window.ekey);
      if (pInfo->pUpdateRes->info.rows > 0) {
        if (pInfo->pUpdateRes->info.type == STREAM_CLEAR) {
          pInfo->updateResIndex = 0;
@@ -1551,6 +1584,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
       goto _error;
     }
     taosArrayDestroy(tableIdList);
+    memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->cond, sizeof(SQueryTableDataCond));
   }

   // create the pseduo columns info
@@ -1562,13 +1596,14 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
   pInfo->pUpdateRes = createResDataBlock(pDescNode);
   pInfo->pCondition = pScanPhyNode->node.pConditions;
   pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
-  pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1};
+  pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
   pInfo->groupId = 0;
   pInfo->pPullDataRes = createPullDataBlock();
   pInfo->pStreamScanOp = pOperator;
   pInfo->deleteDataIndex = 0;
   pInfo->pDeleteDataRes = createPullDataBlock();
   pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
+  pInfo->twAggSup = *pTwSup;

   pOperator->name = "StreamScanOperator";
   pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
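Note on the recover-step logic added to doStreamScan: the task walks a small state machine where PREPARE resets the table-scan cursor from the saved table condition, SCAN replays historical blocks until the scan is drained, and the step then falls back to NONE so normal streaming resumes. A self-contained C sketch of that control flow under those assumptions (enum and helper names are illustrative, not the executor's):

#include <stdio.h>

enum { RECOVER_NONE = 0, RECOVER_PREPARE, RECOVER_SCAN };

static int step = RECOVER_PREPARE;
static int remainingBlocks = 0;   /* stand-in for the historical data to replay */

static const char *next_block(void) {
  if (step == RECOVER_PREPARE) {  /* one-time setup, then fall through to scanning */
    remainingBlocks = 2;
    step = RECOVER_SCAN;
  }
  if (step == RECOVER_SCAN) {
    if (remainingBlocks > 0) {
      remainingBlocks--;
      return "recovered block";
    }
    step = RECOVER_NONE;          /* recovery finished */
    return NULL;
  }
  return "live block";            /* normal streaming path */
}

int main(void) {
  for (int i = 0; i < 4; i++) {
    const char *b = next_block();
    printf("%d: %s\n", i, b ? b : "(recovery done)");
  }
  return 0;
}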
@@ -234,9 +234,9 @@ void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
   SSortOperatorInfo* pInfo = (SSortOperatorInfo*)param;
   pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);

+  tsortDestroySortHandle(pInfo->pSortHandle);
   taosArrayDestroy(pInfo->pSortInfo);
   taosArrayDestroy(pInfo->pColMatchInfo);

   taosMemoryFreeClear(param);
 }

@@ -539,14 +539,12 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
   }

   pInfo->startTs = taosGetTimestampUs();
   int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
   pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
                                              pInfo->pInputBlock, pTaskInfo->id.str);

   tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);
   tsortSetCompareGroupId(pInfo->pSortHandle, pInfo->groupSort);

   for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
@@ -556,7 +554,6 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
   }

   int32_t code = tsortOpen(pInfo->pSortHandle);
   if (code != TSDB_CODE_SUCCESS) {
     longjmp(pTaskInfo->env, terrno);
   }
@@ -674,6 +671,7 @@ void destroyMultiwayMergeOperatorInfo(void* param, int32_t numOfOutput) {
   pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
   pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock);

+  tsortDestroySortHandle(pInfo->pSortHandle);
   taosArrayDestroy(pInfo->pSortInfo);
   taosArrayDestroy(pInfo->pColMatchInfo);
@@ -652,8 +652,8 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
 }

 void printDataBlock(SSDataBlock* pBlock, const char* flag) {
-  if (pBlock == NULL) {
-    qDebug("======printDataBlock Block is Null");
+  if (!pBlock || pBlock->info.rows == 0) {
+    qDebug("======printDataBlock: Block is Null or Empty");
     return;
   }
   char* pBuf = NULL;
@@ -1353,13 +1353,13 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
     if (chIds && pPullDataMap) {
       SArray* chAy = *(SArray**)chIds;
       int32_t size = taosArrayGetSize(chAy);
-      qDebug("window %" PRId64 " wait child size:%d", win.skey, size);
+      qDebug("===stream===window %" PRId64 " wait child size:%d", win.skey, size);
       for (int32_t i = 0; i < size; i++) {
-        qDebug("window %" PRId64 " wait chid id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
+        qDebug("===stream===window %" PRId64 " wait child id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
       }
       continue;
     } else if (pPullDataMap) {
-      qDebug("close window %" PRId64, win.skey);
+      qDebug("===stream===close window %" PRId64, win.skey);
     }
     SResultRowPosition* pPos = (SResultRowPosition*)pIte;
     if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
@@ -1626,6 +1626,12 @@ SSDataBlock* createDeleteBlock() {
   return pBlock;
 }

+void initIntervalDownStream(SOperatorInfo* downstream, uint8_t type) {
+  ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
+  SStreamScanInfo* pScanInfo = downstream->info;
+  pScanInfo->sessionSup.parentType = type;
+}
+
 SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                           SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
                                           STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode,
@@ -1701,6 +1707,10 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
   pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL,
                                          destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);

+  if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL) {
+    initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL);
+  }
+
   code = appendDownstream(pOperator, &downstream, 1);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
@@ -2472,22 +2482,26 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
       SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId};
       // add pull data request
       taosArrayPush(pInfo->pPullWins, &pull);
-      addPullWindow(pInfo->pPullDataMap, &winRes, taosArrayGetSize(pInfo->pChildren));
+      int32_t size = taosArrayGetSize(pInfo->pChildren);
+      addPullWindow(pInfo->pPullDataMap, &winRes, size);
+      qDebug("===stream===prepare retrive %" PRId64 ", size:%d", winRes.ts, size);
     } else {
       int32_t index = -1;
       SArray* chArray = NULL;
+      int32_t chId = 0;
       if (chIds) {
         chArray = *(void**)chIds;
-        int32_t chId = getChildIndex(pSDataBlock);
+        chId = getChildIndex(pSDataBlock);
         index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
       }
-      if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
-        taosArrayRemove(chArray, index);
-        if (taosArrayGetSize(chArray) == 0) {
-          // pull data is over
-          taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
-        }
-      }
+      // if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
+      //   qDebug("===stream===delete child id %d", chId);
+      //   taosArrayRemove(chArray, index);
+      //   if (taosArrayGetSize(chArray) == 0) {
+      //     // pull data is over
+      //     taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
+      //   }
+      // }
       if (index == -1 || pSDataBlock->info.type == STREAM_PULL_DATA) {
         ignore = false;
       }
@@ -2611,6 +2625,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
     SArray* chArray = *(SArray**)chIds;
     int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
     if (index != -1) {
+      qDebug("===stream===window %" PRId64 " delete child id %d", winRes.ts, chId);
      taosArrayRemove(chArray, index);
      if (taosArrayGetSize(chArray) == 0) {
        // pull data is over
@@ -2629,7 +2644,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {

   SExprSupp* pSup = &pOperator->exprSupp;

   qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");

   if (pOperator->status == OP_EXEC_DONE) {
     return NULL;
@@ -2645,18 +2660,18 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
       }
       return NULL;
     }
     printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
     return pInfo->binfo.pRes;
   } else {
     doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
     if (pInfo->binfo.pRes->info.rows != 0) {
       printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
       return pInfo->binfo.pRes;
     }
     if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
       pInfo->returnUpdate = false;
       ASSERT(!IS_FINAL_OP(pInfo));
       printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
       // process the rest of the data
       return pInfo->pUpdateRes;
     }
@@ -2664,13 +2679,13 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
     if (pInfo->pPullDataRes->info.rows != 0) {
       // process the rest of the data
       ASSERT(IS_FINAL_OP(pInfo));
       printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
       return pInfo->pPullDataRes;
     }
     doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
     if (pInfo->pDelRes->info.rows != 0) {
       // process the rest of the data
       printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
       return pInfo->pDelRes;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2681,10 +2696,10 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
clearSpecialDataBlock(pInfo->pUpdateRes);
|
clearSpecialDataBlock(pInfo->pUpdateRes);
|
||||||
removeDeleteResults(pUpdated, pInfo->pDelWins);
|
removeDeleteResults(pUpdated, pInfo->pDelWins);
|
||||||
pOperator->status = OP_RES_TO_RETURN;
|
pOperator->status = OP_RES_TO_RETURN;
|
||||||
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
|
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
|
||||||
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
|
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
|
||||||
|
|
||||||
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA ||
|
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA ||
|
||||||
|
@ -2759,6 +2774,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info;
|
SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info;
|
||||||
pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
|
pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
|
||||||
taosArrayPush(pInfo->pChildren, &pChildOp);
|
taosArrayPush(pInfo->pChildren, &pChildOp);
|
||||||
|
qDebug("===stream===add child, id:%d", chIndex);
|
||||||
}
|
}
|
||||||
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
|
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
|
||||||
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
|
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
|
||||||
|
@ -2783,14 +2799,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
|
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
|
||||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||||
return pInfo->binfo.pRes;
|
return pInfo->binfo.pRes;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
|
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
|
||||||
pInfo->returnUpdate = false;
|
pInfo->returnUpdate = false;
|
||||||
ASSERT(!IS_FINAL_OP(pInfo));
|
ASSERT(!IS_FINAL_OP(pInfo));
|
||||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||||
// process the rest of the data
|
// process the rest of the data
|
||||||
return pInfo->pUpdateRes;
|
return pInfo->pUpdateRes;
|
||||||
}
|
}
|
||||||
|
@ -2799,14 +2815,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
if (pInfo->pPullDataRes->info.rows != 0) {
|
if (pInfo->pPullDataRes->info.rows != 0) {
|
||||||
// process the rest of the data
|
// process the rest of the data
|
||||||
ASSERT(IS_FINAL_OP(pInfo));
|
ASSERT(IS_FINAL_OP(pInfo));
|
||||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||||
return pInfo->pPullDataRes;
|
return pInfo->pPullDataRes;
|
||||||
}
|
}
|
||||||
|
|
||||||
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
|
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
|
||||||
if (pInfo->pDelRes->info.rows != 0) {
|
if (pInfo->pDelRes->info.rows != 0) {
|
||||||
// process the rest of the data
|
// process the rest of the data
|
||||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||||
return pInfo->pDelRes;
|
return pInfo->pDelRes;
|
||||||
}
|
}
|
||||||
// ASSERT(false);
|
// ASSERT(false);
|
||||||
|
@ -3010,6 +3026,7 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num
|
||||||
pDummy[i].functionId = pCtx[i].functionId;
|
pDummy[i].functionId = pCtx[i].functionId;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark,
|
void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark,
|
||||||
uint8_t type) {
|
uint8_t type) {
|
||||||
ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
|
ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
|
||||||
|
|
|
@@ -109,6 +109,10 @@ static int32_t sortComparClearup(SMsortComparParam* cmpParam) {
 }
 
 void tsortDestroySortHandle(SSortHandle* pSortHandle) {
+  if (pSortHandle == NULL) {
+    return;
+  }
+
   tsortClose(pSortHandle);
   if (pSortHandle->pMergeTree != NULL) {
     tMergeTreeDestroy(pSortHandle->pMergeTree);

@@ -119,7 +123,6 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) {
   blockDataDestroy(pSortHandle->pDataBlock);
   for (size_t i = 0; i < taosArrayGetSize(pSortHandle->pOrderedSource); i++){
     SSortSource** pSource = taosArrayGet(pSortHandle->pOrderedSource, i);
-    blockDataDestroy((*pSource)->src.pBlock);
     taosMemoryFreeClear(*pSource);
   }
   taosArrayDestroy(pSortHandle->pOrderedSource);

@@ -621,7 +624,7 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
     pHandle->sortElapsed += el;
 
     // All sorted data can fit in memory, external memory sort is not needed. Return to directly
-    if (size <= sortBufSize) {
+    if (size <= sortBufSize && pHandle->pBuf == NULL) {
       pHandle->cmpParam.numOfSources = 1;
       pHandle->inMemSort = true;
 
@@ -106,7 +106,7 @@ bool irateFuncSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResInfo);
 int32_t irateFunction(SqlFunctionCtx *pCtx);
 int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
 
-int32_t cacheLastRowFunction(SqlFunctionCtx* pCtx);
+int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx);
 
 bool getFirstLastFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
 int32_t firstFunction(SqlFunctionCtx *pCtx);

@@ -44,10 +44,9 @@ extern "C" {
 #define FUNC_MGT_FORBID_FILL_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(15)
 #define FUNC_MGT_INTERVAL_INTERPO_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(16)
 #define FUNC_MGT_FORBID_STREAM_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(17)
-#define FUNC_MGT_FORBID_WINDOW_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(18)
-#define FUNC_MGT_FORBID_GROUP_BY_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(19)
-#define FUNC_MGT_SYSTEM_INFO_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(20)
-#define FUNC_MGT_CLIENT_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(21)
+#define FUNC_MGT_SYSTEM_INFO_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(18)
+#define FUNC_MGT_CLIENT_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(19)
+#define FUNC_MGT_MULTI_ROWS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(20)
 
 #define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
 
@@ -2034,6 +2034,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getPercentileFuncEnv,
     .initFunc = percentileFunctionSetup,
     .processFunc = percentileFunction,
+    .sprocessFunc = percentileScalarFunction,
     .finalizeFunc = percentileFinalize,
     .invertFunc = NULL,
     .combineFunc = NULL,

@@ -2046,6 +2047,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getApercentileFuncEnv,
     .initFunc = apercentileFunctionSetup,
     .processFunc = apercentileFunction,
+    .sprocessFunc = apercentileScalarFunction,
     .finalizeFunc = apercentileFinalize,
     .invertFunc = NULL,
     .combineFunc = apercentileCombine,

@@ -2079,7 +2081,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "top",
     .type = FUNCTION_TYPE_TOP,
-    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateTopBot,
     .getEnvFunc = getTopBotFuncEnv,
     .initFunc = topBotFunctionSetup,

@@ -2093,7 +2095,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "bottom",
     .type = FUNCTION_TYPE_BOTTOM,
-    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateTopBot,
     .getEnvFunc = getTopBotFuncEnv,
     .initFunc = topBotFunctionSetup,

@@ -2113,6 +2115,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getSpreadFuncEnv,
     .initFunc = spreadFunctionSetup,
     .processFunc = spreadFunction,
+    .sprocessFunc = spreadScalarFunction,
     .finalizeFunc = spreadFinalize,
     .invertFunc = NULL,
     .combineFunc = spreadCombine,

@@ -2204,6 +2207,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getDerivativeFuncEnv,
     .initFunc = derivativeFuncSetup,
     .processFunc = derivativeFunction,
+    .sprocessFunc = derivativeScalarFunction,
     .finalizeFunc = functionFinalize
   },
   {

@@ -2214,6 +2218,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getIrateFuncEnv,
     .initFunc = irateFuncSetup,
     .processFunc = irateFunction,
+    .sprocessFunc = irateScalarFunction,
     .finalizeFunc = irateFinalize
   },
   {

@@ -2233,7 +2238,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .translateFunc = translateFirstLast,
     .getEnvFunc = getFirstLastFuncEnv,
     .initFunc = functionSetup,
-    .processFunc = cacheLastRowFunction,
+    .processFunc = cachedLastRowFunction,
     .finalizeFunc = firstLastFinalize
   },
   {

@@ -2315,12 +2320,13 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getTwaFuncEnv,
     .initFunc = twaFunctionSetup,
     .processFunc = twaFunction,
+    .sprocessFunc = twaScalarFunction,
     .finalizeFunc = twaFinalize
   },
   {
     .name = "histogram",
     .type = FUNCTION_TYPE_HISTOGRAM,
-    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
     .translateFunc = translateHistogram,
     .getEnvFunc = getHistogramFuncEnv,
     .initFunc = histogramFunctionSetup,

@@ -2363,6 +2369,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .getEnvFunc = getHLLFuncEnv,
     .initFunc = functionSetup,
     .processFunc = hllFunction,
+    .sprocessFunc = hllScalarFunction,
     .finalizeFunc = hllFinalize,
     .invertFunc = NULL,
     .combineFunc = hllCombine,

@@ -2396,17 +2403,18 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "diff",
     .type = FUNCTION_TYPE_DIFF,
-    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
+    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateDiff,
     .getEnvFunc = getDiffFuncEnv,
     .initFunc = diffFunctionSetup,
     .processFunc = diffFunction,
+    .sprocessFunc = diffScalarFunction,
     .finalizeFunc = functionFinalize
   },
   {
     .name = "statecount",
     .type = FUNCTION_TYPE_STATE_COUNT,
-    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
+    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateStateCount,
     .getEnvFunc = getStateFuncEnv,
     .initFunc = functionSetup,

@@ -2416,7 +2424,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "stateduration",
     .type = FUNCTION_TYPE_STATE_DURATION,
-    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
+    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateStateDuration,
     .getEnvFunc = getStateFuncEnv,
     .initFunc = functionSetup,

@@ -2426,27 +2434,29 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "csum",
     .type = FUNCTION_TYPE_CSUM,
-    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
+    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateCsum,
     .getEnvFunc = getCsumFuncEnv,
     .initFunc = functionSetup,
     .processFunc = csumFunction,
+    .sprocessFunc = csumScalarFunction,
     .finalizeFunc = NULL
   },
   {
     .name = "mavg",
     .type = FUNCTION_TYPE_MAVG,
-    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
+    .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateMavg,
     .getEnvFunc = getMavgFuncEnv,
     .initFunc = mavgFunctionSetup,
     .processFunc = mavgFunction,
+    .sprocessFunc = mavgScalarFunction,
     .finalizeFunc = NULL
   },
   {
     .name = "sample",
     .type = FUNCTION_TYPE_SAMPLE,
-    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
+    .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
     .translateFunc = translateSample,
     .getEnvFunc = getSampleFuncEnv,
     .initFunc = sampleFunctionSetup,

@@ -2456,8 +2466,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "tail",
     .type = FUNCTION_TYPE_TAIL,
     .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC |
-                      FUNC_MGT_FORBID_WINDOW_FUNC | FUNC_MGT_FORBID_GROUP_BY_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
+                      FUNC_MGT_IMPLICIT_TS_FUNC,
     .translateFunc = translateTail,
     .getEnvFunc = getTailFuncEnv,
     .initFunc = tailFunctionSetup,

@@ -2467,8 +2477,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
   {
     .name = "unique",
     .type = FUNCTION_TYPE_UNIQUE,
     .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC |
-                      FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC | FUNC_MGT_FORBID_GROUP_BY_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
+                      FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
     .translateFunc = translateUnique,
     .getEnvFunc = getUniqueFuncEnv,
     .initFunc = uniqueFunctionSetup,

@@ -80,13 +80,14 @@ typedef struct STopBotRes {
 } STopBotRes;
 
 typedef struct SFirstLastRes {
   bool hasResult;
   // used for last_row function only, isNullRes in SResultRowEntry can not be passed to downstream.So,
   // this attribute is required
   bool isNull;
   int32_t bytes;
   int64_t ts;
+  STuplePos pos;
   char buf[];
 } SFirstLastRes;
 
 typedef struct SStddevRes {

@@ -1141,8 +1142,8 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
   return true;
 }
 
-static void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
-static void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
 
 static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
   // the data is loaded, not only the block SMA value

@@ -1195,7 +1196,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
       pBuf->v = *(int64_t*)tval;
       if (pCtx->subsidiaries.num > 0) {
         index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
-        saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+        doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
       }
     } else {
       if (IS_SIGNED_NUMERIC_TYPE(type)) {

@@ -1207,7 +1208,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        pBuf->v = val;
        if (pCtx->subsidiaries.num > 0) {
          index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
-         saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+         doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
        }
      }
 
@@ -1220,7 +1221,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        pBuf->v = val;
        if (pCtx->subsidiaries.num > 0) {
          index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
-         saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+         doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
        }
      }
    } else if (type == TSDB_DATA_TYPE_DOUBLE) {

@@ -1232,7 +1233,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        pBuf->v = val;
        if (pCtx->subsidiaries.num > 0) {
          index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
-         saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+         doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
        }
      }
    } else if (type == TSDB_DATA_TYPE_FLOAT) {

@@ -1246,7 +1247,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
 
        if (pCtx->subsidiaries.num > 0) {
          index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
-         saveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+         doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
        }
      }
    }

@@ -1271,7 +1272,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1283,7 +1284,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1302,7 +1303,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1314,7 +1315,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1333,7 +1334,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1345,7 +1346,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1364,7 +1365,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1376,7 +1377,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1397,7 +1398,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1409,7 +1410,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1428,7 +1429,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1440,7 +1441,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1459,7 +1460,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1471,7 +1472,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1490,7 +1491,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1502,7 +1503,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1522,7 +1523,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1534,7 +1535,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -1553,7 +1554,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
        if (!pBuf->assign) {
          *val = pData[i];
          if (pCtx->subsidiaries.num > 0) {
-           saveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+           doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
          }
          pBuf->assign = true;
        } else {

@@ -1565,7 +1566,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
          if ((*val < pData[i]) ^ isMinFunc) {
            *val = pData[i];
            if (pCtx->subsidiaries.num > 0) {
-             copyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+             doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
            }
          }
        }

@@ -2640,7 +2641,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
 }
 
 int32_t getFirstLastInfoSize(int32_t resBytes) {
-  return sizeof(SFirstLastRes) + resBytes + sizeof(int64_t) + sizeof(STuplePos);
+  return sizeof(SFirstLastRes) + resBytes;
 }
 
 bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {

@@ -2669,6 +2670,33 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
   return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
 }
 
+static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
+  if (pCtx->subsidiaries.num <= 0) {
+    return;
+  }
+
+  if (!pInfo->hasResult) {
+    doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+  } else {
+    doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+  }
+}
+
+static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t currentTs, int32_t type, char* pData) {
+  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
+  SFirstLastRes* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+
+  if (IS_VAR_DATA_TYPE(type)) {
+    pInfo->bytes = varDataTLen(pData);
+  }
+
+  memcpy(pInfo->buf, pData, pInfo->bytes);
+  pInfo->ts = currentTs;
+  saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+
+  pInfo->hasResult = true;
+}
+
 // This ordinary first function does not care if current scan is ascending order or descending order scan
 // the OPTIMIZED version of first function will only handle the ascending order scan
 int32_t firstFunction(SqlFunctionCtx* pCtx) {

@@ -2680,9 +2708,7 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
   SInputColumnInfoData* pInput = &pCtx->input;
   SColumnInfoData* pInputCol = pInput->pData[0];
 
-  int32_t type = pInputCol->info.type;
-  int32_t bytes = pInputCol->info.bytes;
-  pInfo->bytes = bytes;
+  pInfo->bytes = pInputCol->info.bytes;
 
   // All null data column, return directly.
   if (pInput->colDataAggIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) {

@@ -2697,11 +2723,13 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
 
   int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
 
+  // please ref. to the comment in lastRowFunction for the reason why disabling the opt version of last/first function.
+  // we will use this opt implementation in an new version that is only available in scan subplan
+#if 0
   if (blockDataOrder == TSDB_ORDER_ASC) {
     // filter according to current result firstly
     if (pResInfo->numOfRes > 0) {
-      TSKEY ts = *(TSKEY*)(pInfo->buf + bytes);
-      if (ts < startKey) {
+      if (pInfo->ts < startKey) {
         return TSDB_CODE_SUCCESS;
       }
     }

@@ -2715,26 +2743,8 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
 
       char* data = colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
-      if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) > cts) {
-        if (IS_VAR_DATA_TYPE(type)) {
-          bytes = varDataTLen(data);
-          pInfo->bytes = bytes;
-        }
-        memcpy(pInfo->buf, data, bytes);
-        *(TSKEY*)(pInfo->buf + bytes) = cts;
-        // handle selectivity
-        if (pCtx->subsidiaries.num > 0) {
-          STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-          if (!pInfo->hasResult) {
-            saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          } else {
-            copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          }
-        }
-        pInfo->hasResult = true;
-        // DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-        pResInfo->numOfRes = 1;
+      if (pResInfo->numOfRes == 0 || pInfo->ts > cts) {
+        doSaveCurrentVal(pCtx, i, cts, pInputCol->info.type, data);
         break;
       }
     }

@@ -2742,8 +2752,7 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
     // in case of descending order time stamp serial, which usually happens as the results of the nest query,
     // all data needs to be check.
     if (pResInfo->numOfRes > 0) {
-      TSKEY ts = *(TSKEY*)(pInfo->buf + bytes);
-      if (ts < endKey) {
+      if (pInfo->ts < endKey) {
         return TSDB_CODE_SUCCESS;
       }
     }

@@ -2758,28 +2767,28 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
       char* data = colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
 
-      if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) > cts) {
-        if (IS_VAR_DATA_TYPE(type)) {
-          bytes = varDataTLen(data);
-          pInfo->bytes = bytes;
-        }
-        memcpy(pInfo->buf, data, bytes);
-        *(TSKEY*)(pInfo->buf + bytes) = cts;
-        // handle selectivity
-        if (pCtx->subsidiaries.num > 0) {
-          STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-          if (!pInfo->hasResult) {
-            saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          } else {
-            copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          }
-        }
-        pInfo->hasResult = true;
-        pResInfo->numOfRes = 1;
+      if (pResInfo->numOfRes == 0 || pInfo->ts > cts) {
+        doSaveCurrentVal(pCtx, i, cts, pInputCol->info.type, data);
         break;
       }
     }
   }
+#else
+  for (int32_t i = pInput->startRowIndex; i < pInput->startRowIndex + pInput->numOfRows; ++i) {
+    if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
+      continue;
+    }
+
+    numOfElems++;
+
+    char* data = colDataGetData(pInputCol, i);
+    TSKEY cts = getRowPTs(pInput->pPTS, i);
+    if (pResInfo->numOfRes == 0 || pInfo->ts > cts) {
+      doSaveCurrentVal(pCtx, i, cts, pInputCol->info.type, data);
+      pResInfo->numOfRes = 1;
+    }
+  }
+#endif
 
   SET_VAL(pResInfo, numOfElems, 1);
   return TSDB_CODE_SUCCESS;

@@ -2811,6 +2820,8 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
 
   int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
 
+  // please ref. to the comment in lastRowFunction for the reason why disabling the opt version of last/first function.
+#if 0
   if (blockDataOrder == TSDB_ORDER_ASC) {
     for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
       if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {

@@ -2821,26 +2832,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
 
       char* data = colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
-      if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) < cts) {
-        if (IS_VAR_DATA_TYPE(type)) {
-          bytes = varDataTLen(data);
-          pInfo->bytes = bytes;
-        }
-        memcpy(pInfo->buf, data, bytes);
-        *(TSKEY*)(pInfo->buf + bytes) = cts;
-        // handle selectivity
-        if (pCtx->subsidiaries.num > 0) {
-          STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-          if (!pInfo->hasResult) {
-            saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          } else {
-            copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          }
-        }
-        pInfo->hasResult = true;
-        // DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-        pResInfo->numOfRes = 1;
+      if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
+        doSaveCurrentVal(pCtx, i, cts, type, data);
       }
 
       break;
     }
   } else { // descending order

@@ -2853,28 +2848,28 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
 
       char* data = colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
-      if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf + bytes) < cts) {
-        if (IS_VAR_DATA_TYPE(type)) {
-          bytes = varDataTLen(data);
-          pInfo->bytes = bytes;
-        }
-        memcpy(pInfo->buf, data, bytes);
-        *(TSKEY*)(pInfo->buf + bytes) = cts;
-        // handle selectivity
-        if (pCtx->subsidiaries.num > 0) {
-          STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-          if (!pInfo->hasResult) {
-            saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          } else {
-            copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          }
-        }
-        pInfo->hasResult = true;
-        pResInfo->numOfRes = 1;
+      if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
+        doSaveCurrentVal(pCtx, i, cts, type, data);
       }
       break;
     }
   }
+#else
+  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
+    if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
+      continue;
+    }
+
+    numOfElems++;
+
+    char* data = colDataGetData(pInputCol, i);
+    TSKEY cts = getRowPTs(pInput->pPTS, i);
+    if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
+      doSaveCurrentVal(pCtx, i, cts, type, data);
+      pResInfo->numOfRes = 1;
+    }
+  }
+#endif
 
   SET_VAL(pResInfo, numOfElems, 1);
   return TSDB_CODE_SUCCESS;

@ -2885,8 +2880,9 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
|
||||||
int32_t start = pColInfo->startRowIndex;
|
int32_t start = pColInfo->startRowIndex;
|
||||||
|
|
||||||
pOutput->bytes = pInput->bytes;
|
pOutput->bytes = pInput->bytes;
|
||||||
TSKEY* tsIn = (TSKEY*)(pInput->buf + pInput->bytes);
|
TSKEY* tsIn = &pInput->ts;
|
||||||
TSKEY* tsOut = (TSKEY*)(pOutput->buf + pInput->bytes);
|
TSKEY* tsOut = &pOutput->ts;
|
||||||
|
|
||||||
if (pOutput->hasResult) {
|
if (pOutput->hasResult) {
|
||||||
if (isFirst) {
|
if (isFirst) {
|
||||||
if (*tsIn > *tsOut) {
|
if (*tsIn > *tsOut) {
|
||||||
|
@ -2898,20 +2894,12 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
*tsOut = *tsIn;
|
*tsOut = *tsIn;
|
||||||
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
|
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
|
||||||
// handle selectivity
|
saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
|
||||||
STuplePos* pTuplePos = (STuplePos*)(pOutput->buf + pOutput->bytes + sizeof(TSKEY));
|
|
||||||
if (pCtx->subsidiaries.num > 0) {
|
|
||||||
if (!pOutput->hasResult) {
|
|
||||||
saveTupleData(pCtx, start, pCtx->pSrcBlock, pTuplePos);
|
|
||||||
} else {
|
|
||||||
copyTupleData(pCtx, start, pCtx->pSrcBlock, pTuplePos);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pOutput->hasResult = true;
|
|
||||||
|
|
||||||
return;
|
pOutput->hasResult = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuery) {
|
static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuery) {
|
||||||
|
@ -2953,34 +2941,34 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
||||||
colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull || pResInfo->isNullRes);
|
colDataAppend(pCol, pBlock->info.rows, pRes->buf, pRes->isNull || pResInfo->isNullRes);
|
||||||
|
|
||||||
// handle selectivity
|
// handle selectivity
|
||||||
STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
|
setSelectivityValue(pCtx, pBlock, &pRes->pos, pBlock->info.rows);
|
||||||
setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);
|
|
||||||
|
|
||||||
return pResInfo->numOfRes;
|
return pResInfo->numOfRes;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
||||||
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
|
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
|
||||||
SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
|
SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pEntryInfo);
|
||||||
|
|
||||||
int32_t resultBytes = getFirstLastInfoSize(pRes->bytes);
|
int32_t resultBytes = getFirstLastInfoSize(pRes->bytes);
|
||||||
char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char));
|
|
||||||
|
|
||||||
|
// todo check for failure
|
||||||
|
char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char));
|
||||||
memcpy(varDataVal(res), pRes, resultBytes);
|
memcpy(varDataVal(res), pRes, resultBytes);
|
||||||
|
|
||||||
varDataSetLen(res, resultBytes);
|
varDataSetLen(res, resultBytes);
|
||||||
|
|
||||||
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
|
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
|
||||||
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
|
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
|
||||||
|
|
||||||
colDataAppend(pCol, pBlock->info.rows, res, false);
|
colDataAppend(pCol, pBlock->info.rows, res, false);
|
||||||
// handle selectivity
|
setSelectivityValue(pCtx, pBlock, &pRes->pos, pBlock->info.rows);
|
||||||
STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
|
|
||||||
setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);
|
|
||||||
|
|
||||||
taosMemoryFree(res);
|
taosMemoryFree(res);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//todo rewrite:
|
||||||
int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
||||||
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
||||||
char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
|
char* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
|
||||||
|
@@ -2998,6 +2986,28 @@ int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
   return TSDB_CODE_SUCCESS;
 }

+static void doSaveLastrow(SqlFunctionCtx *pCtx, char* pData, int32_t rowIndex, int64_t cts, SFirstLastRes* pInfo) {
+  SInputColumnInfoData* pInput = &pCtx->input;
+  SColumnInfoData* pInputCol = pInput->pData[0];
+
+  if (colDataIsNull_s(pInputCol, rowIndex)) {
+    pInfo->isNull = true;
+  } else {
+    pInfo->isNull = false;
+
+    if (IS_VAR_DATA_TYPE(pInputCol->info.type)) {
+      pInfo->bytes = varDataTLen(pData);
+    }
+
+    memcpy(pInfo->buf, pData, pInfo->bytes);
+  }
+
+  pInfo->ts = cts;
+  saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+
+  pInfo->hasResult = true;
+}
+
 int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
   int32_t numOfElems = 0;
@@ -3007,102 +3017,58 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
   SInputColumnInfoData* pInput = &pCtx->input;
   SColumnInfoData* pInputCol = pInput->pData[0];

-  int32_t type = pInputCol->info.type;
   int32_t bytes = pInputCol->info.bytes;
   pInfo->bytes = bytes;

-  SColumnDataAgg* pColAgg = (pInput->colDataAggIsSet) ? pInput->pColumnDataAgg[0] : NULL;
-
   TSKEY startKey = getRowPTs(pInput->pPTS, 0);
   TSKEY endKey = getRowPTs(pInput->pPTS, pInput->totalRows - 1);

   int32_t blockDataOrder = (startKey <= endKey) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;

+#if 0
+  // the optimized version only function if all tuples in one block are monotonious increasing or descreasing.
+  // this is NOT always works if project operator exists in downstream.
   if (blockDataOrder == TSDB_ORDER_ASC) {
     for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
       char* data = colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
-      if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf) < cts) {
-        if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
-          pInfo->isNull = true;
-        } else {
-          pInfo->isNull = false;
-          if (IS_VAR_DATA_TYPE(type)) {
-            bytes = varDataTLen(data);
-            pInfo->bytes = bytes;
-          }
-          memcpy(pInfo->buf + sizeof(TSKEY), data, bytes);
-        }
-        *(TSKEY*)(pInfo->buf) = cts;
-        numOfElems++;
-        // handle selectivity
-        if (pCtx->subsidiaries.num > 0) {
-          STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-          if (!pInfo->hasResult) {
-            saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          } else {
-            copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          }
-        }
-        pInfo->hasResult = true;
-        // DO_UPDATE_TAG_COLUMNS(pCtx, ts);
-        pResInfo->numOfRes = 1;
+      numOfElems++;
+      if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
+        doSaveLastrow(pCtx, data, i, cts, pInfo);
       }
       break;
     }
   } else { // descending order
     for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
       char* data = colDataGetData(pInputCol, i);
       TSKEY cts = getRowPTs(pInput->pPTS, i);
-      if (pResInfo->numOfRes == 0 || *(TSKEY*)(pInfo->buf) < cts) {
-        if (pInputCol->hasNull && colDataIsNull(pInputCol, pInput->totalRows, i, pColAgg)) {
-          pInfo->isNull = true;
-        } else {
-          pInfo->isNull = false;
-          if (IS_VAR_DATA_TYPE(type)) {
-            bytes = varDataTLen(data);
-            pInfo->bytes = bytes;
-          }
-          memcpy(pInfo->buf + sizeof(TSKEY), data, bytes);
-        }
-        *(TSKEY*)(pInfo->buf) = cts;
-        numOfElems++;
-        // handle selectivity
-        if (pCtx->subsidiaries.num > 0) {
-          STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-          if (!pInfo->hasResult) {
-            saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          } else {
-            copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-          }
-        }
-        pInfo->hasResult = true;
-        pResInfo->numOfRes = 1;
-        // DO_UPDATE_TAG_COLUMNS(pCtx, ts);
+      numOfElems++;
+      if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
+        doSaveLastrow(pCtx, data, i, cts, pInfo);
       }
       break;
     }
   }
+#else
+  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
+    char* data = colDataGetData(pInputCol, i);
+    TSKEY cts = getRowPTs(pInput->pPTS, i);
+    numOfElems++;
+
+    if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
+      doSaveLastrow(pCtx, data, i, cts, pInfo);
+      pResInfo->numOfRes = 1;
+    }
+  }
+
+#endif
   SET_VAL(pResInfo, numOfElems, 1);
   return TSDB_CODE_SUCCESS;
 }

-int32_t lastRowFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
-  int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
-  SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
-
-  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
-
-  SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo);
-  colDataAppend(pCol, pBlock->info.rows, pRes->buf + sizeof(TSKEY), pRes->isNull);
-  // handle selectivity
-  STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
-  setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);
-
-  return pResInfo->numOfRes;
-}
-
 bool getDiffFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
   pEnv->calcMemSize = sizeof(SDiffInfo);
   return true;
@@ -3425,7 +3391,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData

   // save the data of this tuple
   if (pCtx->subsidiaries.num > 0) {
-    saveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+    doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
   }
 #ifdef BUF_PAGE_DEBUG
   qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@@ -3449,7 +3415,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData

   // save the data of this tuple by over writing the old data
   if (pCtx->subsidiaries.num > 0) {
-    copyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+    doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
   }
 #ifdef BUF_PAGE_DEBUG
   qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@@ -3466,7 +3432,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
  * |(n columns, one bit for each column)| src column #1| src column #2|
  * +------------------------------------+--------------+--------------+
  */
-void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
   SFilePage* pPage = NULL;

   // todo refactor: move away
@@ -3527,7 +3493,7 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
 #endif
 }

-void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
   SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);

   int32_t numOfCols = pCtx->subsidiaries.num;
|
||||||
if (pInfo->numSampled < pInfo->samples) {
|
if (pInfo->numSampled < pInfo->samples) {
|
||||||
sampleAssignResult(pInfo, data, pInfo->numSampled);
|
sampleAssignResult(pInfo, data, pInfo->numSampled);
|
||||||
if (pCtx->subsidiaries.num > 0) {
|
if (pCtx->subsidiaries.num > 0) {
|
||||||
saveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
|
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
|
||||||
}
|
}
|
||||||
pInfo->numSampled++;
|
pInfo->numSampled++;
|
||||||
} else {
|
} else {
|
||||||
|
@@ -4851,7 +4817,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
     if (j < pInfo->samples) {
       sampleAssignResult(pInfo, data, j);
       if (pCtx->subsidiaries.num > 0) {
-        copyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
+        doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
       }
     }
   }
@@ -5993,7 +5959,7 @@ int32_t interpFunction(SqlFunctionCtx* pCtx) {
   return TSDB_CODE_SUCCESS;
 }

-int32_t cacheLastRowFunction(SqlFunctionCtx* pCtx) {
+int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx) {
   int32_t numOfElems = 0;

   SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
@@ -6002,9 +5968,7 @@ int32_t cacheLastRowFunction(SqlFunctionCtx* pCtx) {
   SInputColumnInfoData* pInput = &pCtx->input;
   SColumnInfoData* pInputCol = pInput->pData[0];

-  int32_t type = pInputCol->info.type;
   int32_t bytes = pInputCol->info.bytes;
-
   pInfo->bytes = bytes;

   // last_row function does not ignore the null value
@@ -6014,28 +5978,8 @@ int32_t cacheLastRowFunction(SqlFunctionCtx* pCtx) {
     char* data = colDataGetData(pInputCol, i);
     TSKEY cts = getRowPTs(pInput->pPTS, i);
     if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
-      if (colDataIsNull_s(pInputCol, i)) {
-        pInfo->isNull = true;
-      } else {
-        if (IS_VAR_DATA_TYPE(type)) {
-          bytes = varDataTLen(data);
-          pInfo->bytes = bytes;
-        }
-
-        memcpy(pInfo->buf, data, bytes);
-      }
-
-      pInfo->ts = cts;
-      if (pCtx->subsidiaries.num > 0) {
-        STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
-        if (!pInfo->hasResult) {
-          saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-        } else {
-          copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
-        }
-      }
-
-      pInfo->hasResult = true;
+      doSaveLastrow(pCtx, data, i, cts, pInfo);
+      pResInfo->numOfRes = 1;
     }
   }
@@ -175,16 +175,14 @@ bool fmIsIntervalInterpoFunc(int32_t funcId) { return isSpecificClassifyFunc(fun

 bool fmIsForbidStreamFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_STREAM_FUNC); }

-bool fmIsForbidWindowFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_WINDOW_FUNC); }
-
-bool fmIsForbidGroupByFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_FORBID_GROUP_BY_FUNC); }
-
 bool fmIsSystemInfoFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SYSTEM_INFO_FUNC); }

 bool fmIsImplicitTsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_IMPLICIT_TS_FUNC); }

 bool fmIsClientPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CLIENT_PC_FUNC); }

+bool fmIsMultiRowsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_MULTI_ROWS_FUNC); }
+
 bool fmIsInterpFunc(int32_t funcId) {
   if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
     return false;
@@ -382,6 +382,15 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
   if (msgInfo->rpcType == UDFD_RPC_MNODE_CONNECT) {
     SConnectRsp connectRsp = {0};
     tDeserializeSConnectRsp(pMsg->pCont, pMsg->contLen, &connectRsp);
+
+    int32_t now = taosGetTimestampSec();
+    int32_t delta = abs(now - connectRsp.svrTimestamp);
+    if (delta > 900) {
+      msgInfo->code = TSDB_CODE_TIME_UNSYNCED;
+      goto _return;
+    }
+
     if (connectRsp.epSet.numOfEps == 0) {
       msgInfo->code = TSDB_CODE_MND_APP_ERROR;
       goto _return;
@@ -516,13 +516,14 @@ static void idxCacheMakeRoomForWrite(IndexCache* cache) {
     idxCacheRef(cache);
     cache->imm = cache->mem;
     cache->mem = idxInternalCacheCreate(cache->type);

     cache->mem->pCache = cache;
     cache->occupiedMem = 0;
     if (quit == false) {
       atomic_store_32(&cache->merging, 1);
     }
-    // sched to merge
-    // unref cache in bgwork
+    // 1. sched to merge
+    // 2. unref cache in bgwork
     idxCacheSchedToMerge(cache, quit);
   }
 }
@@ -3663,7 +3663,7 @@ static int32_t jsonToDownstreamSourceNode(const SJson* pJson, void* pObj) {
 }

 static const char* jkDatabaseOptionsBuffer = "Buffer";
-static const char* jkDatabaseOptionsCachelast = "Cachelast";
+static const char* jkDatabaseOptionsCacheModel = "CacheModel";
 static const char* jkDatabaseOptionsCompressionLevel = "CompressionLevel";
 static const char* jkDatabaseOptionsDaysPerFileNode = "DaysPerFileNode";
 static const char* jkDatabaseOptionsDaysPerFile = "DaysPerFile";
@@ -3687,7 +3687,7 @@ static int32_t databaseOptionsToJson(const void* pObj, SJson* pJson) {

   int32_t code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsBuffer, pNode->buffer);
   if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCachelast, pNode->cacheLast);
+    code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCacheModel, pNode->cacheModel);
   }
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonAddIntegerToObject(pJson, jkDatabaseOptionsCompressionLevel, pNode->compressionLevel);
@@ -3749,7 +3749,7 @@ static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) {

   int32_t code = tjsonGetIntValue(pJson, jkDatabaseOptionsBuffer, &pNode->buffer);
   if (TSDB_CODE_SUCCESS == code) {
-    code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCachelast, &pNode->cacheLast);
+    code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCacheModel, &pNode->cacheModel);
   }
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonGetTinyIntValue(pJson, jkDatabaseOptionsCompressionLevel, &pNode->compressionLevel);
@@ -1543,7 +1543,7 @@ typedef struct SCollectFuncsCxt {
   int32_t errCode;
   FFuncClassifier classifier;
   SNodeList* pFuncs;
-  SHashObj* pAliasName;
+  SHashObj* pFuncsSet;
 } SCollectFuncsCxt;

 static EDealRes collectFuncs(SNode* pNode, void* pContext) {
@@ -1551,28 +1551,40 @@ static EDealRes collectFuncs(SNode* pNode, void* pContext) {
   if (QUERY_NODE_FUNCTION == nodeType(pNode) && pCxt->classifier(((SFunctionNode*)pNode)->funcId) &&
       !(((SExprNode*)pNode)->orderAlias)) {
     SExprNode* pExpr = (SExprNode*)pNode;
-    if (NULL == taosHashGet(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName))) {
+    if (NULL == taosHashGet(pCxt->pFuncsSet, &pExpr, POINTER_BYTES)) {
       pCxt->errCode = nodesListStrictAppend(pCxt->pFuncs, nodesCloneNode(pNode));
-      taosHashPut(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName), &pExpr, POINTER_BYTES);
+      taosHashPut(pCxt->pFuncsSet, &pExpr, POINTER_BYTES, &pExpr, POINTER_BYTES);
     }
     return (TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
   }
   return DEAL_RES_CONTINUE;
 }

+static uint32_t funcNodeHash(const char* pKey, uint32_t len) {
+  SExprNode* pExpr = *(SExprNode**)pKey;
+  return MurmurHash3_32(pExpr->aliasName, strlen(pExpr->aliasName));
+}
+
+static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len) {
+  if (0 != strcmp((*(const SExprNode**)pLeft)->aliasName, (*(const SExprNode**)pRight)->aliasName)) {
+    return 1;
+  }
+  return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 0 : 1;
+}
+
 int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs) {
   if (NULL == pSelect || NULL == pFuncs) {
     return TSDB_CODE_FAILED;
   }

-  SCollectFuncsCxt cxt = {
-      .errCode = TSDB_CODE_SUCCESS,
-      .classifier = classifier,
-      .pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs),
-      .pAliasName = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, false)};
-  if (NULL == cxt.pFuncs) {
+  SCollectFuncsCxt cxt = {.errCode = TSDB_CODE_SUCCESS,
+                          .classifier = classifier,
+                          .pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs),
+                          .pFuncsSet = taosHashInit(4, funcNodeHash, false, false)};
+  if (NULL == cxt.pFuncs || NULL == cxt.pFuncsSet) {
     return TSDB_CODE_OUT_OF_MEMORY;
   }
+  taosHashSetEqualFp(cxt.pFuncsSet, funcNodeEqual);

   *pFuncs = NULL;
   nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt);
   if (TSDB_CODE_SUCCESS == cxt.errCode) {

@@ -1584,7 +1596,7 @@ int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifi
   } else {
     nodesDestroyList(cxt.pFuncs);
   }
-  taosHashCleanup(cxt.pAliasName);
+  taosHashCleanup(cxt.pFuncsSet);

   return cxt.errCode;
 }
Some files were not shown because too many files have changed in this diff.