Merge branch '3.0' into fix/TD-25010-3
|
@ -175,7 +175,7 @@ cd TDengine
|
||||||
```bash
|
```bash
|
||||||
mkdir debug
|
mkdir debug
|
||||||
cd debug
|
cd debug
|
||||||
cmake .. -DBUILD_TOOLS=true
|
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
|
||||||
make
|
make
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -183,7 +183,7 @@ It equals to execute following commands:
|
||||||
```bash
|
```bash
|
||||||
mkdir debug
|
mkdir debug
|
||||||
cd debug
|
cd debug
|
||||||
cmake .. -DBUILD_TOOLS=true
|
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
|
||||||
make
|
make
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
2
build.sh
|
@ -4,5 +4,5 @@ if [ ! -d debug ]; then
|
||||||
mkdir debug || echo -e "failed to make directory for build"
|
mkdir debug || echo -e "failed to make directory for build"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd debug && cmake .. -DBUILD_TOOLS=true && make
|
cd debug && cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true && make
|
||||||
|
|
||||||
|
|
|
@ -32,6 +32,20 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043
|
||||||
|
|
||||||
Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
|
Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
|
||||||
|
|
||||||
|
If you need to persist data to a specific directory on your local machine, please run the following command:
|
||||||
|
```shell
|
||||||
|
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
|
||||||
|
-v ~/data/taos/dnode/log:/var/log/taos \
|
||||||
|
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||||
|
```
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- /var/lib/taos: TDengine's default data file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/data to any empty local data directory of your choice
|
||||||
|
- /var/log/taos: TDengine's default log file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/log to any empty local log directory of your choice
|
||||||
|
|
||||||
|
:::
|
||||||
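If the container was started with the bind mounts shown above, a quick way to confirm that data and logs are really being persisted is to list the mapped host directories shortly after startup. This is only a sketch and assumes the example paths from this section:

```shell
# Assumes the container was started with the example bind mounts above.
# TDengine is expected to populate these directories shortly after startup.
ls -l ~/data/taos/dnode/data
ls -l ~/data/taos/dnode/log
```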
|
|
||||||
|
|
||||||
Run the following command to ensure that your container is running:
|
Run the following command to ensure that your container is running:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
|
@ -113,4 +127,4 @@ In the query above you are selecting the first timestamp (ts) in the interval, a
|
||||||
|
|
||||||
## Additional Information
|
## Additional Information
|
||||||
|
|
||||||
For more information about deploying TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker).
|
For more information about deploying TDengine in a Docker environment, see [Deploying TDengine with Docker](../../deployment/docker).
|
||||||
|
|
|
@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.2.1</version>
|
<version>3.2.4</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
---
|
---
|
||||||
title: Deploying TDengine with Docker
|
title: Deploying TDengine with Docker
|
||||||
|
sidebar_label: Docker
|
||||||
description: This chapter describes how to start and access TDengine in a Docker container.
|
description: This chapter describes how to start and access TDengine in a Docker container.
|
||||||
---
|
---
|
||||||
|
|
||||||
|
@ -10,8 +11,17 @@ This chapter describes how to start the TDengine service in a container and acce
|
||||||
The TDengine image starts with the HTTP service activated by default, using the following command:
|
The TDengine image starts with the HTTP service activated by default, using the following command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
|
docker run -d --name tdengine \
|
||||||
|
-v ~/data/taos/dnode/data:/var/lib/taos \
|
||||||
|
-v ~/data/taos/dnode/log:/var/log/taos \
|
||||||
|
-p 6041:6041 tdengine/tdengine
|
||||||
```
|
```
|
||||||
|
:::note
|
||||||
|
|
||||||
|
* /var/lib/taos: TDengine's default data file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/data to any other empty local data directory
|
||||||
|
* /var/log/taos: TDengine's default log file directory. The location can be changed via the [configuration file]. You can also change ~/data/taos/dnode/log to any other empty local log directory
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
|
The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command.
|
||||||
|
|
||||||
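As a sketch of such a check (assuming the default root/taosdata credentials and the REST endpoint served by taosAdapter on the mapped port 6041), you can issue a simple query through the REST API:

```shell
# Query the REST API exposed by taosAdapter; credentials and port are the defaults.
curl -u root:taosdata -d "show databases;" http://localhost:6041/rest/sql
```

A JSON response listing the databases indicates that the HTTP service inside the container is reachable.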
|
@ -283,39 +293,38 @@ services:
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-1"
|
TAOS_FQDN: "td-1"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
|
ports:
|
||||||
|
- 6041:6041
|
||||||
|
- 6030:6030
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td1:/var/lib/taos/
|
# /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode1/data to your own data directory
|
||||||
- taoslog-td1:/var/log/taos/
|
- ~/data/taos/dnode1/data:/var/lib/taos
|
||||||
|
# /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode1/log to your own log directory
|
||||||
|
- ~/data/taos/dnode1/log:/var/log/taos
|
||||||
td-2:
|
td-2:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-2"
|
TAOS_FQDN: "td-2"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td2:/var/lib/taos/
|
- ~/data/taos/dnode2/data:/var/lib/taos
|
||||||
- taoslog-td2:/var/log/taos/
|
- ~/data/taos/dnode2/log:/var/log/taos
|
||||||
td-3:
|
td-3:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-3"
|
TAOS_FQDN: "td-3"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td3:/var/lib/taos/
|
- ~/data/taos/dnode3/data:/var/lib/taos
|
||||||
- taoslog-td3:/var/log/taos/
|
- ~/data/taos/dnode3/log:/var/log/taos
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
taosdata-td3:
|
|
||||||
taoslog-td3:
|
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- The `VERSION` environment variable is used to set the tdengine image tag
|
- The `VERSION` environment variable is used to set the tdengine image tag
|
||||||
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
|
- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time
|
||||||
:::
|
|
||||||
|
:::
|
||||||
|
|
||||||
2. Start the cluster
|
2. Start the cluster
|
||||||
|
|
||||||
|
@ -382,24 +391,22 @@ networks:
|
||||||
services:
|
services:
|
||||||
td-1:
|
td-1:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
networks:
|
|
||||||
- inter
|
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-1"
|
TAOS_FQDN: "td-1"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td1:/var/lib/taos/
|
# /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode1/data to your own data directory
|
||||||
- taoslog-td1:/var/log/taos/
|
- ~/data/taos/dnode1/data:/var/lib/taos
|
||||||
|
# /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode1/log to your own log directory
|
||||||
|
- ~/data/taos/dnode1/log:/var/log/taos
|
||||||
td-2:
|
td-2:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
networks:
|
|
||||||
- inter
|
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-2"
|
TAOS_FQDN: "td-2"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td2:/var/lib/taos/
|
- ~/data/taos/dnode2/data:/var/lib/taos
|
||||||
- taoslog-td2:/var/log/taos/
|
- ~/data/taos/dnode2/log:/var/log/taos
|
||||||
adapter:
|
adapter:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
entrypoint: "taosadapter"
|
entrypoint: "taosadapter"
|
||||||
|
@ -431,11 +438,6 @@ services:
|
||||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||||
nginx -g 'daemon off;'",
|
nginx -g 'daemon off;'",
|
||||||
]
|
]
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deploy with docker swarm
|
## Deploy with docker swarm
|
|
@ -5,7 +5,7 @@ description: This document describes how to deploy a TDengine cluster on a serve
|
||||||
|
|
||||||
TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
|
TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
|
||||||
|
|
||||||
This document describes how to manually deploy a cluster on a host as well as how to deploy on Kubernetes and by using Helm.
|
This document describes how to manually deploy a cluster on a host, as well as how to deploy a cluster with Docker, Kubernetes, or Helm.
|
||||||
|
|
||||||
```mdx-code-block
|
```mdx-code-block
|
||||||
import DocCardList from '@theme/DocCardList';
|
import DocCardList from '@theme/DocCardList';
|
||||||
|
|
|
@ -42,7 +42,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
|
||||||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
||||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||||
| 16 | GEOMETRY | User-defined | Geometry |
|
| 17 | GEOMETRY | User-defined | Geometry |
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
|
- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
|
||||||
|
|
|
@ -1274,3 +1274,161 @@ SELECT SERVER_STATUS();
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The server status.
|
**Description**: The server status.
|
||||||
|
|
||||||
|
|
||||||
|
## Geometry Functions
|
||||||
|
|
||||||
|
### Geometry Input Functions
|
||||||
|
|
||||||
|
Geometry input functions create geometry data from WKT (Well-Known Text).
|
||||||
|
|
||||||
|
#### ST_GeomFromText
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_GeomFromText(VARCHAR WKT expr)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Return a specified GEOMETRY value from Well-Known Text representation (WKT).
|
||||||
|
|
||||||
|
**Return value type**: GEOMETRY
|
||||||
|
|
||||||
|
**Applicable data types**: VARCHAR
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- The input can be one of the WKT strings, such as POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, or GEOMETRYCOLLECTION.
|
||||||
|
- The output is a GEOMETRY data type, internally stored as a binary string (a usage sketch follows this list).
|
||||||
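As a usage sketch (the database name, table name, and GEOMETRY column length below are assumptions for illustration, not part of this document), a geometry value can be created from WKT and stored like this:

```shell
# Hypothetical example: create a table with a GEOMETRY column and insert a point from WKT.
taos -s "CREATE DATABASE IF NOT EXISTS geo_demo;"
taos -s "CREATE TABLE IF NOT EXISTS geo_demo.points (ts TIMESTAMP, location GEOMETRY(64));"
taos -s "INSERT INTO geo_demo.points VALUES (NOW, ST_GeomFromText('POINT(121.47 31.23)'));"
```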
|
|
||||||
|
### Geometry Output Functions
|
||||||
|
|
||||||
|
Geometry output functions convert geometry data into WKT.
|
||||||
|
|
||||||
|
#### ST_AsText
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_AsText(GEOMETRY geom)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Return a specified Well-Known Text representation (WKT) value from GEOMETRY data.
|
||||||
|
|
||||||
|
**Return value type**: VARCHAR
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- The output can be one of the WKT strings, such as POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, or GEOMETRYCOLLECTION (a query sketch follows this list).
|
||||||
|
|
||||||
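Continuing the hypothetical table from the sketch above, the stored geometry can be read back in WKT form:

```shell
# Hypothetical example: return the stored geometry as WKT text.
taos -s "SELECT ts, ST_AsText(location) FROM geo_demo.points;"
```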
|
### Geometry Relationships Functions
|
||||||
|
|
||||||
|
Geometry relationships functions determine spatial relationships between geometries.
|
||||||
|
|
||||||
|
#### ST_Intersects
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Intersects(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Compares two geometries and returns true if they intersect.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- Geometries intersect if they have any point in common (a filter sketch follows this list).
|
||||||
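For illustration, a filter on the hypothetical table from the earlier sketch might keep only the rows whose geometry intersects a given polygon (the polygon literal here is just an example):

```shell
# Hypothetical example: select points that intersect an example polygon.
taos -s "SELECT ts, ST_AsText(location) FROM geo_demo.points WHERE ST_Intersects(location, ST_GeomFromText('POLYGON((120 30, 122 30, 122 32, 120 32, 120 30))'));"
```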
|
|
||||||
|
|
||||||
|
#### ST_Equals
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Equals(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if the given geometries are "spatially equal".
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- 'Spatially equal' means ST_Contains(A,B) = true and ST_Contains(B,A) = true; the ordering of points can differ while still representing the same geometry structure (a query sketch follows this list).
|
||||||
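Similarly, a hypothetical equality check against a literal point could look like this:

```shell
# Hypothetical example: rows whose geometry is spatially equal to a literal point.
taos -s "SELECT ts FROM geo_demo.points WHERE ST_Equals(location, ST_GeomFromText('POINT(121.47 31.23)'));"
```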
|
|
||||||
|
|
||||||
|
#### ST_Touches
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Touches(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if A and B intersect, but their interiors do not intersect.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- A and B have at least one point in common, and the common points lie in at least one boundary.
|
||||||
|
- For Point/Point inputs the relationship is always FALSE, since points do not have a boundary.
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_Covers
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Covers(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if every point in Geometry B lies inside (intersects the interior or boundary of) Geometry A.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- A covers B means no point of B lies outside (in the exterior of) A.
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_Contains
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Contains(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if geometry A contains geometry B.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- A contains B if and only if all points of B lie inside (i.e. in the interior or boundary of) A (or equivalently, no points of B lie in the exterior of A), and the interiors of A and B have at least one point in common.
|
||||||
|
|
||||||
|
|
||||||
|
#### ST_ContainsProperly
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: Returns TRUE if every point of B lies inside A.
|
||||||
|
|
||||||
|
**Return value type**: BOOL
|
||||||
|
|
||||||
|
**Applicable data types**: GEOMETRY, GEOMETRY
|
||||||
|
|
||||||
|
**Applicable table types**: standard tables and supertables
|
||||||
|
|
||||||
|
**Explanations**:
|
||||||
|
- There is no point of B that lies on the boundary of A or in the exterior of A.
|
||||||
|
|
|
@ -54,7 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as
|
||||||
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
|
MATCH and NMATCH are used together with regular expressions to match strings. Their usage is described as follows:
|
||||||
|
|
||||||
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
|
- Use POSIX regular expression syntax. For more information, see Regular Expressions.
|
||||||
- Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns.
|
- Regular expressions can be used only against table names (i.e. `tbname`) and tags/columns of binary/nchar types.
|
||||||
- The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client.
|
- The maximum length of a regular expression string is 128 bytes. The client-side configuration parameter `maxRegexStringLen` can be used to adjust this limit; it takes effect after the client is restarted. (A usage sketch follows this list.)
|
||||||
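As a usage sketch (the table and tag names below are assumptions, not from this document), MATCH and NMATCH are typically applied in a WHERE clause against `tbname` or a binary/nchar tag:

```shell
# Hypothetical examples of MATCH / NMATCH filters; "meters" and "location" are illustrative names.
taos -s "SELECT COUNT(*) FROM meters WHERE tbname MATCH '^d10[0-9]$';"
taos -s "SELECT COUNT(*) FROM meters WHERE location NMATCH '^California';"
```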
|
|
||||||
## Logical Operators
|
## Logical Operators
|
||||||
|
|
|
@ -178,6 +178,7 @@ The following list shows all reserved keywords:
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
- MAX_DELAY
|
- MAX_DELAY
|
||||||
|
- MAX_SPEED
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
- MERGE
|
- MERGE
|
||||||
- META
|
- META
|
||||||
|
|
|
@ -373,7 +373,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat
|
||||||
<TabItem value="websocket" label="WebSocket connection">
|
<TabItem value="websocket" label="WebSocket connection">
|
||||||
|
|
||||||
```python
|
```python
|
||||||
conn = taosws.connect(url="ws://localhost:6041")
|
conn = taosws.connect("taosws://localhost:6041")
|
||||||
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
|
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
|
||||||
conn.execute("DROP DATABASE IF EXISTS test")
|
conn.execute("DROP DATABASE IF EXISTS test")
|
||||||
conn.execute("CREATE DATABASE test")
|
conn.execute("CREATE DATABASE test")
|
||||||
|
|
|
@ -0,0 +1,87 @@
|
||||||
|
---
|
||||||
|
toc_max_heading_level: 4
|
||||||
|
sidebar_label: R
|
||||||
|
title: R Language Connector
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from '@theme/Tabs';
|
||||||
|
import TabItem from '@theme/TabItem';
|
||||||
|
|
||||||
|
import Rdemo from "../../07-develop/01-connect/_connect_r.mdx"
|
||||||
|
|
||||||
|
By using the RJDBC library in R, you can enable R programs to access TDengine data. Here are the installation process, configuration steps, and an example code in R.
|
||||||
|
|
||||||
|
## Installation Process
|
||||||
|
|
||||||
|
Before getting started, make sure you have installed the R language environment. Then, follow these steps to install and configure the RJDBC library:
|
||||||
|
|
||||||
|
1. Install Java Development Kit (JDK): RJDBC library requires Java environment. Download the appropriate JDK for your operating system from the official Oracle website and follow the installation guide.
|
||||||
|
|
||||||
|
2. Install the RJDBC library: Execute the following command in the R console to install the RJDBC library.
|
||||||
|
|
||||||
|
```r
|
||||||
|
install.packages("RJDBC", repos='http://cran.us.r-project.org')
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
1. The R 4.2 package shipped with Ubuntu by default may cause an unresponsiveness bug when the RJDBC library is used. Please install the latest version of R from the [official website](https://www.r-project.org/).
|
||||||
|
2. On Linux systems, installing the RJDBC package may require installing the necessary components for compilation. For example, on Ubuntu, you can execute the command ``apt install -y libbz2-dev libpcre2-dev libicu-dev`` to install the required components.
|
||||||
|
3. On Windows systems, you need to set the **JAVA_HOME** environment variable.
|
||||||
|
:::
|
||||||
|
|
||||||
|
3. Download the TDengine JDBC driver: Visit the Maven website and download the TDengine JDBC driver (taos-jdbcdriver-X.X.X-dist.jar) to your local machine.
|
||||||
|
|
||||||
|
## Configuration Process
|
||||||
|
|
||||||
|
Once you have completed the installation steps, you need to do some configuration to enable the RJDBC library to connect and access the TDengine time-series database.
|
||||||
|
|
||||||
|
1. Load the RJDBC library and other necessary libraries in your R script:
|
||||||
|
|
||||||
|
```r
|
||||||
|
library(DBI)
|
||||||
|
library(rJava)
|
||||||
|
library(RJDBC)
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Set the JDBC driver and JDBC URL:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# Set the JDBC driver path (specify the location on your local machine)
|
||||||
|
driverPath <- "/path/to/taos-jdbcdriver-X.X.X-dist.jar"
|
||||||
|
|
||||||
|
# Set the JDBC URL (specify the FQDN and credentials of your TDengine cluster)
|
||||||
|
url <- "jdbc:TAOS://localhost:6030/?user=root&password=taosdata"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Load the JDBC driver:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# Load the JDBC driver
|
||||||
|
drv <- JDBC("com.taosdata.jdbc.TSDBDriver", driverPath)
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Create a TDengine database connection:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# Create a database connection
|
||||||
|
conn <- dbConnect(drv, url)
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Once the connection is established, you can use the ``conn`` object for various database operations such as querying data and inserting data.
|
||||||
|
|
||||||
|
6. Finally, don't forget to close the database connection after you are done:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# Close the database connection
|
||||||
|
dbDisconnect(conn)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example Code Using RJDBC in R
|
||||||
|
|
||||||
|
Here's an example code that uses the RJDBC library to connect to a TDengine time-series database and perform a query operation:
|
||||||
|
|
||||||
|
<Rdemo/>
|
||||||
|
|
||||||
|
Please modify the JDBC driver, JDBC URL, username, password, and SQL query statement according to your specific TDengine time-series database environment and requirements.
|
||||||
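Since the embedded demo script reads the driver path from the command line, one way to run it (assuming the script is saved locally as, say, connect.r, and the jar path is adjusted to your machine) is:

```shell
# Hypothetical invocation: the script file name and driver jar path are placeholders.
Rscript connect.r /path/to/taos-jdbcdriver-X.X.X-dist.jar
```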
|
|
||||||
|
By following the steps and using the provided example code, you can use the RJDBC library in the R language to access the TDengine time-series database and perform tasks such as data querying and analysis.
|
|
@ -7,10 +7,10 @@ description: This document describes the supported platforms for the TDengine se
|
||||||
|
|
||||||
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 or later** | **macOS** |
|
| | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 or later** | **macOS** |
|
||||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- |
|
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- |
|
||||||
| X64 | ● | ● | ● | ● | ● |
|
| X64 | ●/E | ●/E | ● | ● | ● |
|
||||||
| ARM64 | | | ● | | ● |
|
| ARM64 | | | ● | | ● |
|
||||||
|
|
||||||
Note: ● means officially tested and verified, ○ means unofficially tested and verified.
|
Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, and E means supported only by the enterprise edition. 2) The community edition supports only recent versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. For other operating systems and versions, please contact enterprise edition support.
|
||||||
|
|
||||||
## List of supported platforms for TDengine clients and connectors
|
## List of supported platforms for TDengine clients and connectors
|
||||||
|
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
label: TDengine Docker images
|
|
|
@ -0,0 +1,40 @@
|
||||||
|
---
|
||||||
|
sidebar_label: qStudio
|
||||||
|
title: qStudio
|
||||||
|
description: Step-by-Step Guide to Accessing TDengine Data with qStudio
|
||||||
|
---
|
||||||
|
|
||||||
|
qStudio is a free cross-platform SQL data analysis tool that allows easy browsing of tables, variables, functions, and configuration settings in a database. The latest version of qStudio includes built-in support for TDengine.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
To connect TDengine using qStudio, you need to complete the following preparations:
|
||||||
|
|
||||||
|
- Install qStudio: qStudio supports major operating systems, including Windows, macOS, and Linux. Please ensure you download the correct installation package for your platform from the [download page](https://www.timestored.com/qstudio/download/).
|
||||||
|
- Set up TDengine instance: Make sure TDengine is installed and running correctly, and the taosAdapter is installed and running. For detailed information, refer to the taosAdapter User Manual.
|
||||||
|
|
||||||
|
## Connecting to TDengine with qStudio
|
||||||
|
|
||||||
|
1. Launch the qStudio application and select "Server" and then "Add Server..." from the menu. Choose TDengine from the Server Type dropdown.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
2. Configure the TDengine connection by entering the host address, port number, username, and password. If TDengine is deployed on the local machine, you can fill in the username and password only. The default username is "root," and the default password is "taosdata." Click "Test" to test the connection's availability. If the TDengine Java connector is not installed on the local machine, qStudio will prompt you to download and install it.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
3. Once connected successfully, the screen will display as shown below. If the connection fails, check that the TDengine service and taosAdapter are running correctly, and ensure that the host address, port number, username, and password are correct.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
4. Use qStudio to select databases and tables to browse data from the TDengine server.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
5. You can also perform operations on TDengine data by executing SQL commands.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
6. qStudio supports charting functions based on the data. For more information, please refer to the [qStudio documentation](https://www.timestored.com/qstudio/help).
|
||||||
|
|
||||||
|

|
After Width: | Height: | Size: 94 KiB |
After Width: | Height: | Size: 148 KiB |
After Width: | Height: | Size: 34 KiB |
After Width: | Height: | Size: 93 KiB |
After Width: | Height: | Size: 39 KiB |
After Width: | Height: | Size: 78 KiB |
|
@ -12,6 +12,11 @@ import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
## 3.1.0.0
|
## 3.1.0.0
|
||||||
|
|
||||||
|
:::note IMPORTANT
|
||||||
|
- Once you upgrade to TDengine 3.1.0.0, you cannot roll back to any previous version of TDengine. Upgrading to 3.1.0.0 will alter your data such that it cannot be read by previous versions.
|
||||||
|
- You must remove all streams before upgrading to TDengine 3.1.0.0. If you upgrade a deployment that contains streams, the upgrade will fail and your deployment will become nonoperational. (A cleanup sketch follows this note.)
|
||||||
|
:::
|
||||||
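As a sketch of that preparation (the stream name below is a placeholder), you can list the existing streams and drop each of them before starting the upgrade:

```shell
# Hypothetical pre-upgrade cleanup: list streams, then drop each one by name.
taos -s "SHOW STREAMS;"
taos -s "DROP STREAM IF EXISTS my_stream;"   # repeat for every stream returned above
```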
|
|
||||||
<Release type="tdengine" version="3.1.0.0" />
|
<Release type="tdengine" version="3.1.0.0" />
|
||||||
|
|
||||||
## 3.0.7.1
|
## 3.0.7.1
|
||||||
|
|
|
@ -8,9 +8,13 @@ library("rJava")
|
||||||
library("RJDBC")
|
library("RJDBC")
|
||||||
|
|
||||||
args<- commandArgs(trailingOnly = TRUE)
|
args<- commandArgs(trailingOnly = TRUE)
|
||||||
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.0.0-dist.jar"
|
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar"
|
||||||
driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path)
|
driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path)
|
||||||
conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")
|
conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata")
|
||||||
dbGetQuery(conn, "SELECT server_version()")
|
dbGetQuery(conn, "SELECT server_version()")
|
||||||
|
dbSendUpdate(conn, "create database if not exists rtest")
|
||||||
|
dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))")
|
||||||
|
dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')")
|
||||||
|
dbGetQuery(conn, "select * from rtest.test")
|
||||||
dbDisconnect(conn)
|
dbDisconnect(conn)
|
||||||
# ANCHOR_END: demo
|
# ANCHOR_END: demo
|
||||||
|
|
|
@ -2,11 +2,19 @@ if (! "RJDBC" %in% installed.packages()[, "Package"]) {
|
||||||
install.packages('RJDBC', repos='http://cran.us.r-project.org')
|
install.packages('RJDBC', repos='http://cran.us.r-project.org')
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# ANCHOR: demo
|
||||||
library("DBI")
|
library("DBI")
|
||||||
library("rJava")
|
library("rJava")
|
||||||
library("RJDBC")
|
library("RJDBC")
|
||||||
driver_path = "/home/debug/build/lib/taos-jdbcdriver-2.0.38-dist.jar"
|
|
||||||
|
args<- commandArgs(trailingOnly = TRUE)
|
||||||
|
driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar"
|
||||||
driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path)
|
driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path)
|
||||||
conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata")
|
conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata")
|
||||||
dbGetQuery(conn, "SELECT server_version()")
|
dbGetQuery(conn, "SELECT server_version()")
|
||||||
dbDisconnect(conn)
|
dbSendUpdate(conn, "create database if not exists rtest")
|
||||||
|
dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))")
|
||||||
|
dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')")
|
||||||
|
dbGetQuery(conn, "select * from rtest.test")
|
||||||
|
dbDisconnect(conn)
|
||||||
|
# ANCHOR_END: demo
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
apt install -y libbz2-dev libpcre2-dev libicu-dev
|
|
@ -22,7 +22,7 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.2.1</version>
|
<version>3.2.4</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- ANCHOR_END: dep-->
|
<!-- ANCHOR_END: dep-->
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@ -33,4 +33,4 @@
|
||||||
</dependency>
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
</project>
|
</project>
|
||||||
|
|
|
@ -28,6 +28,21 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043
|
||||||
|
|
||||||
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
|
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
|
||||||
|
|
||||||
|
如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
|
||||||
|
-v ~/data/taos/dnode/log:/var/log/taos \
|
||||||
|
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录
|
||||||
|
- /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
确定该容器已经启动并且在正常运行。
|
确定该容器已经启动并且在正常运行。
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
|
@ -108,4 +123,4 @@ SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(
|
||||||
|
|
||||||
## 其它
|
## 其它
|
||||||
|
|
||||||
更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)。
|
更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [用 Docker 部署 TDengine](../../deployment/docker)。
|
||||||
|
|
|
@ -82,7 +82,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.2.1</version>
|
<version>3.2.4</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -446,7 +446,7 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据
|
||||||
**注意**:
|
**注意**:
|
||||||
|
|
||||||
- JDBC REST 连接目前不支持参数绑定
|
- JDBC REST 连接目前不支持参数绑定
|
||||||
- 以下示例代码基于 taos-jdbcdriver-3.2.1
|
- 以下示例代码基于 taos-jdbcdriver-3.2.4
|
||||||
- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法
|
- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法
|
||||||
- 预处理语句中指定数据库与子表名称不要使用 `db.?`,应直接使用 `?`,然后在 setTableName 中指定数据库,如:`prepareStatement.setTableName("db.t1")`。
|
- 预处理语句中指定数据库与子表名称不要使用 `db.?`,应直接使用 `?`,然后在 setTableName 中指定数据库,如:`prepareStatement.setTableName("db.t1")`。
|
||||||
|
|
||||||
|
|
|
@ -375,7 +375,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat
|
||||||
<TabItem value="websocket" label="WebSocket 连接">
|
<TabItem value="websocket" label="WebSocket 连接">
|
||||||
|
|
||||||
```python
|
```python
|
||||||
conn = taosws.connect(url="ws://localhost:6041")
|
conn = taosws.connect("taosws://localhost:6041")
|
||||||
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
|
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
|
||||||
conn.execute("DROP DATABASE IF EXISTS test")
|
conn.execute("DROP DATABASE IF EXISTS test")
|
||||||
conn.execute("CREATE DATABASE test")
|
conn.execute("CREATE DATABASE test")
|
||||||
|
|
|
@ -0,0 +1,89 @@
|
||||||
|
---
|
||||||
|
toc_max_heading_level: 4
|
||||||
|
sidebar_label: R
|
||||||
|
title: R Language Connector
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from '@theme/Tabs';
|
||||||
|
import TabItem from '@theme/TabItem';
|
||||||
|
|
||||||
|
import Rdemo from "../07-develop/01-connect/_connect_r.mdx"
|
||||||
|
|
||||||
|
通过 R 语言中的 RJDBC 库可以使 R 语言程序支持访问 TDengine 数据。以下是安装过程、配置过程以及 R 语言示例代码。
|
||||||
|
|
||||||
|
## 安装过程
|
||||||
|
|
||||||
|
在开始之前,请确保已经安装了R语言环境。然后按照以下步骤安装和配置RJDBC库:
|
||||||
|
|
||||||
|
1. 安装Java Development Kit (JDK):RJDBC库需要依赖Java环境。请从Oracle官方网站下载适合您操作系统的JDK,并按照安装指南进行安装。
|
||||||
|
|
||||||
|
2. 安装RJDBC库:在R控制台中执行以下命令来安装RJDBC库。
|
||||||
|
|
||||||
|
```r
|
||||||
|
install.packages("RJDBC", repos='http://cran.us.r-project.org')
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
1. Ubuntu 系统自带的 R 语言软件版本 4.2 在调用 RJDBC 库时会产生无响应 bug,请安装 R 语言[官网](https://www.r-project.org/)的安装包。
|
||||||
|
2. 在 Linux 上安装 RJDBC 包可能需要安装编译需要的组件,以 Ubuntu 为例执行 `apt install -y libbz2-dev libpcre2-dev libicu-dev` 命令安装。
|
||||||
|
3. 在 Windows 系统上需要设置 JAVA_HOME 环境变量。
|
||||||
|
:::
|
||||||
|
|
||||||
|
3. 下载 TDengine JDBC 驱动程序:访问 maven.org 网站,下载 TDengine JDBC 驱动程序(taos-jdbcdriver-X.X.X-dist.jar)。
|
||||||
|
|
||||||
|
4. 将 TDengine JDBC 驱动程序放置在适当的位置:在您的计算机上选择一个合适的位置,将 TDengine JDBC 驱动程序文件(taos-jdbcdriver-X.X.X-dist.jar)保存在此处。
|
||||||
|
|
||||||
|
## 配置过程
|
||||||
|
|
||||||
|
完成了安装步骤后,您需要进行一些配置,以便RJDBC库能够正确连接和访问TDengine时序数据库。
|
||||||
|
|
||||||
|
1. 在 R 脚本中加载 RJDBC 和其他必要的库:
|
||||||
|
|
||||||
|
```r
|
||||||
|
library(DBI)
|
||||||
|
library(rJava)
|
||||||
|
library(RJDBC)
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 设置 JDBC 驱动程序和 JDBC URL:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# 设置JDBC驱动程序路径(根据您实际保存的位置进行修改)
|
||||||
|
driverPath <- "/path/to/taos-jdbcdriver-X.X.X-dist.jar"
|
||||||
|
|
||||||
|
# 设置JDBC URL(根据您的具体环境进行修改)
|
||||||
|
url <- "jdbc:TAOS://localhost:6030/?user=root&password=taosdata"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. 加载 JDBC 驱动程序:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# 加载JDBC驱动程序
|
||||||
|
drv <- JDBC("com.taosdata.jdbc.TSDBDriver", driverPath)
|
||||||
|
```
|
||||||
|
|
||||||
|
4. 创建 TDengine 数据库连接:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# 创建数据库连接
|
||||||
|
conn <- dbConnect(drv, url)
|
||||||
|
```
|
||||||
|
|
||||||
|
5. 连接成功后,您可以使用 conn 对象进行各种数据库操作,如查询数据、插入数据等。
|
||||||
|
|
||||||
|
6. 最后,不要忘记在使用完成后关闭数据库连接:
|
||||||
|
|
||||||
|
```r
|
||||||
|
# 关闭数据库连接
|
||||||
|
dbDisconnect(conn)
|
||||||
|
```
|
||||||
|
|
||||||
|
## 使用 RJDBC 的 R 语言示例代码
|
||||||
|
|
||||||
|
以下是一个使用 RJDBC 库连接 TDengine 时序数据库并执行查询操作的示例代码:
|
||||||
|
|
||||||
|
<Rdemo/>
|
||||||
|
|
||||||
|
请根据您的实际情况修改JDBC驱动程序、JDBC URL、用户名、密码以及SQL查询语句,以适配您的 TDengine 时序数据库环境和要求。
|
||||||
|
|
||||||
|
通过以上步骤和示例代码,您可以在 R 语言环境中使用 RJDBC 库访问 TDengine 时序数据库,进行数据查询和分析等操作。
|
|
@ -1,5 +1,6 @@
|
||||||
---
|
---
|
||||||
title: 用 Docker 部署 TDengine
|
title: 用 Docker 部署 TDengine
|
||||||
|
sidebar_label: Docker
|
||||||
description: '本章主要介绍如何在容器中启动 TDengine 服务并访问它'
|
description: '本章主要介绍如何在容器中启动 TDengine 服务并访问它'
|
||||||
---
|
---
|
||||||
|
|
||||||
|
@ -10,8 +11,17 @@ description: '本章主要介绍如何在容器中启动 TDengine 服务并访
|
||||||
TDengine 镜像启动时默认激活 HTTP 服务,使用下列命令
|
TDengine 镜像启动时默认激活 HTTP 服务,使用下列命令
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker run -d --name tdengine -p 6041:6041 tdengine/tdengine
|
docker run -d --name tdengine \
|
||||||
|
-v ~/data/taos/dnode/data:/var/lib/taos \
|
||||||
|
-v ~/data/taos/dnode/log:/var/log/taos \
|
||||||
|
-p 6041:6041 tdengine/tdengine
|
||||||
```
|
```
|
||||||
|
:::note
|
||||||
|
|
||||||
|
- /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录
|
||||||
|
- /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
以上命令启动了一个名为“tdengine”的容器,并把其中的 HTTP 服务的端口 6041 映射到了主机端口 6041。使用如下命令可以验证该容器中提供的 HTTP 服务是否可用:
|
以上命令启动了一个名为“tdengine”的容器,并把其中的 HTTP 服务的端口 6041 映射到了主机端口 6041。使用如下命令可以验证该容器中提供的 HTTP 服务是否可用:
|
||||||
|
|
||||||
|
@ -291,38 +301,37 @@ services:
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-1"
|
TAOS_FQDN: "td-1"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
|
ports:
|
||||||
|
- 6041:6041
|
||||||
|
- 6030:6030
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td1:/var/lib/taos/
|
# /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/data为你自己的数据目录
|
||||||
- taoslog-td1:/var/log/taos/
|
- ~/data/taos/dnode1/data:/var/lib/taos
|
||||||
|
# /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/log为你自己的日志目录
|
||||||
|
- ~/data/taos/dnode1/log:/var/log/taos
|
||||||
td-2:
|
td-2:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-2"
|
TAOS_FQDN: "td-2"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td2:/var/lib/taos/
|
- ~/data/taos/dnode2/data:/var/lib/taos
|
||||||
- taoslog-td2:/var/log/taos/
|
- ~/data/taos/dnode2/log:/var/log/taos
|
||||||
td-3:
|
td-3:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-3"
|
TAOS_FQDN: "td-3"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td3:/var/lib/taos/
|
- ~/data/taos/dnode3/data:/var/lib/taos
|
||||||
- taoslog-td3:/var/log/taos/
|
- ~/data/taos/dnode3/log:/var/log/taos
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
taosdata-td3:
|
|
||||||
taoslog-td3:
|
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
* `VERSION` 环境变量被用来设置 tdengine image tag
|
* `VERSION` 环境变量被用来设置 tdengine image tag
|
||||||
* 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP`
|
* 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP`
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
2. 启动集群
|
2. 启动集群
|
||||||
|
@ -397,24 +406,22 @@ networks:
|
||||||
services:
|
services:
|
||||||
td-1:
|
td-1:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
networks:
|
|
||||||
- inter
|
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-1"
|
TAOS_FQDN: "td-1"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td1:/var/lib/taos/
|
# /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/data为你自己的数据目录
|
||||||
- taoslog-td1:/var/log/taos/
|
- ~/data/taos/dnode1/data:/var/lib/taos
|
||||||
|
# /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/log为你自己的日志目录
|
||||||
|
- ~/data/taos/dnode1/log:/var/log/taos
|
||||||
td-2:
|
td-2:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
networks:
|
|
||||||
- inter
|
|
||||||
environment:
|
environment:
|
||||||
TAOS_FQDN: "td-2"
|
TAOS_FQDN: "td-2"
|
||||||
TAOS_FIRST_EP: "td-1"
|
TAOS_FIRST_EP: "td-1"
|
||||||
volumes:
|
volumes:
|
||||||
- taosdata-td2:/var/lib/taos/
|
- ~/data/taos/dnode2/data:/var/lib/taos
|
||||||
- taoslog-td2:/var/log/taos/
|
- ~/data/taos/dnode2/log:/var/log/taos
|
||||||
adapter:
|
adapter:
|
||||||
image: tdengine/tdengine:$VERSION
|
image: tdengine/tdengine:$VERSION
|
||||||
entrypoint: "taosadapter"
|
entrypoint: "taosadapter"
|
||||||
|
@ -446,11 +453,6 @@ services:
|
||||||
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
|
||||||
nginx -g 'daemon off;'",
|
nginx -g 'daemon off;'",
|
||||||
]
|
]
|
||||||
volumes:
|
|
||||||
taosdata-td1:
|
|
||||||
taoslog-td1:
|
|
||||||
taosdata-td2:
|
|
||||||
taoslog-td2:
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## 使用 docker swarm 部署
|
## 使用 docker swarm 部署
|
|
@ -6,7 +6,7 @@ description: 部署 TDengine 集群的多种方式
|
||||||
|
|
||||||
TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine 可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证系统的高可用。TDengine 的集群功能完全开源。
|
TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine 可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证系统的高可用。TDengine 的集群功能完全开源。
|
||||||
|
|
||||||
本章节主要介绍如何在主机上人工部署集群,以及如何使用 Kubernetes 和 Helm部署集群。
|
本章节主要介绍如何在主机上人工部署集群,以及如何使用 Docker、Kubernetes 和 Helm 部署集群。
|
||||||
|
|
||||||
```mdx-code-block
|
```mdx-code-block
|
||||||
import DocCardList from '@theme/DocCardList';
|
import DocCardList from '@theme/DocCardList';
|
||||||
|
|
|
@ -82,7 +82,7 @@ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27,
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33)
|
||||||
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31);
|
||||||
```
|
```
|
||||||
|
|
||||||
## 插入记录时自动建表
|
## 插入记录时自动建表
|
||||||
|
|
|
@ -315,7 +315,7 @@ WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_
|
||||||
|
|
||||||
### 使用限制
|
### 使用限制
|
||||||
|
|
||||||
只能针对表名(即 tbname 筛选)、binary/nchar 类型标签值进行正则表达式过滤,不支持普通列的过滤。
|
只能针对表名(即 tbname 筛选)、binary/nchar 类型值进行正则表达式过滤。
|
||||||
|
|
||||||
正则匹配字符串长度不能超过 128 字节。可以通过参数 _maxRegexStringLen_ 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启才能生效。
|
正则匹配字符串长度不能超过 128 字节。可以通过参数 _maxRegexStringLen_ 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启才能生效。
|
||||||
|
|
||||||
|
|
|
@ -1265,3 +1265,140 @@ SELECT SERVER_STATUS();
|
||||||
```
|
```
|
||||||
|
|
||||||
**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。
|
**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。
|
||||||
|
|
||||||
|
|
||||||
|
## Geometry 函数
|
||||||
|
|
||||||
|
### Geometry 输入函数:
|
||||||
|
|
||||||
|
#### ST_GeomFromText
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_GeomFromText(VARCHAR WKT expr)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:根据 Well-Known Text (WKT) 表示从指定的几何值创建几何数据。
|
||||||
|
|
||||||
|
**返回值类型**:GEOMETRY
|
||||||
|
|
||||||
|
**适用数据类型**:VARCHAR
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:输入可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。输出是以二进制字符串形式定义的 GEOMETRY 数据类型。
|
||||||
|
|
||||||
|
### Geometry 输出函数:
|
||||||
|
|
||||||
|
#### ST_AsText
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_AsText(GEOMETRY geom)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:从几何数据中返回指定的 Well-Known Text (WKT) 表示。
|
||||||
|
|
||||||
|
**返回值类型**:VARCHAR
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:输出可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。
|
||||||
|
|
||||||
|
### Geometry 关系函数:
|
||||||
|
|
||||||
|
#### ST_Intersects
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Intersects(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:比较两个几何对象,并在它们相交时返回 true。
|
||||||
|
|
||||||
|
**返回值类型**:BOOL
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY,GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:如果两个几何对象有任何一个共享点,则它们相交。
|
||||||
|
|
||||||
|
#### ST_Equals
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Equals(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:如果给定的几何对象是"空间相等"的,则返回 TRUE。
|
||||||
|
|
||||||
|
**返回值类型**:BOOL
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY,GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:"空间相等"意味着 ST_Contains(A,B) = true 和 ST_Contains(B,A) = true,并且点的顺序可能不同,但表示相同的几何结构。
|
||||||
|
|
||||||
|
#### ST_Touches
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Touches(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:如果 A 和 B 相交,但它们的内部不相交,则返回 TRUE。
|
||||||
|
|
||||||
|
**返回值类型**:BOOL
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY,GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:A 和 B 至少有一个公共点,并且这些公共点位于至少一个边界中。对于点/点输入,关系始终为 FALSE,因为点没有边界。
|
||||||
|
|
||||||
|
#### ST_Covers
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Covers(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:如果 B 中的每个点都位于几何形状 A 内部(与内部或边界相交),则返回 TRUE。
|
||||||
|
|
||||||
|
**返回值类型**:BOOL
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY,GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:A 覆盖 B 意味着 B 中没有任何点位于 A 的外部。
|
||||||
|
|
||||||
|
#### ST_Contains
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_Contains(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:如果几何形状 A 包含几何形状 B,则返回 TRUE。
|
||||||
|
|
||||||
|
**返回值类型**:BOOL
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY,GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:A 包含 B 当且仅当 B 的所有点位于 A 的内部(即位于内部或边界上),或等效地,B 中没有点位于 A 的外部,并且 A 和 B 的内部至少有一个公共点。
|
||||||
|
|
||||||
|
#### ST_ContainsProperly
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB)
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能说明**:如果 B 的每个点都位于 A 内部,则返回 TRUE。
|
||||||
|
|
||||||
|
**返回值类型**:BOOL
|
||||||
|
|
||||||
|
**适用数据类型**:GEOMETRY,GEOMETRY
|
||||||
|
|
||||||
|
**适用表类型**:标准表和超表
|
||||||
|
|
||||||
|
**使用说明**:B 中没有任何点位于 A 的边界上或外部。
|
||||||
|
|
|
@ -178,6 +178,7 @@ description: TDengine 保留关键字的详细列表
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
- MAX_DELAY
|
- MAX_DELAY
|
||||||
|
- MAX_SPEED
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
- MERGE
|
- MERGE
|
||||||
- META
|
- META
|
||||||
|
|
|
@ -7,12 +7,13 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
|
||||||
|
|
||||||
| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 以上** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** |
|
| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 以上** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** |
|
||||||
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | ------------ | ----------------- | ---------------- | --------- |
|
| ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | ------------ | ----------------- | ---------------- | --------- |
|
||||||
| X64 | ● | ● | ● | ● | ● | ● | ● | ● |
|
| X64 | ●/E | ●/E | ● | ● | ●/E | ●/E | ●/E | ● |
|
||||||
| 树莓派 ARM64 | | | ● | | | | | |
|
| 树莓派 ARM64 | | | ● | | | | | |
|
||||||
| 华为云 ARM64 | | | | ● | | | | |
|
| 华为云 ARM64 | | | | ● | | | | |
|
||||||
| M1 | | | | | | | | ● |
|
| M1 | | | | | | | | ● |
|
||||||
|
|
||||||
注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。
|
注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。
|
||||||
|
2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。
|
||||||
|
|
||||||
## TDengine 客户端和连接器支持的平台列表
|
## TDengine 客户端和连接器支持的平台列表
|
||||||
|
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
label: TDengine Docker 镜像
|
|
|
@ -95,30 +95,11 @@ taos -C
|
||||||
### maxShellConns
|
### maxShellConns
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| --------| ----------------------- |
|
| -------- | ----------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 一个 dnode 容许的连接数 |
|
| 含义 | 一个 dnode 容许的连接数 |
|
||||||
| 取值范围 | 10-50000000 |
|
| 取值范围 | 10-50000000 |
|
||||||
| 缺省值 | 5000 |
|
| 缺省值 | 5000 |
|
||||||
|
|
||||||
### numOfRpcSessions
|
|
||||||
|
|
||||||
| 属性 | 说明 |
|
|
||||||
| --------| ---------------------- |
|
|
||||||
| 适用范围 | 客户端和服务端都适用 |
|
|
||||||
| 含义 | 一个客户端能创建的最大连接数|
|
|
||||||
| 取值范围 | 100-100000 |
|
|
||||||
| 缺省值 | 10000 |
|
|
||||||
|
|
||||||
### timeToGetAvailableConn
|
|
||||||
|
|
||||||
| 属性 | 说明 |
|
|
||||||
| -------- | --------------------|
|
|
||||||
| 适用范围 | 客户端和服务端都适用 |
|
|
||||||
| 含义 |获得可用连接的最长等待时间|
|
|
||||||
| 取值范围 | 10-50000000(单位为毫秒)|
|
|
||||||
| 缺省值 | 500000 |
|
|
||||||
|
|
||||||
|
|
||||||
### numOfRpcSessions
|
### numOfRpcSessions
|
||||||
|
|
||||||
|
@ -127,7 +108,7 @@ taos -C
|
||||||
| 适用范围 | 客户端和服务端都适用 |
|
| 适用范围 | 客户端和服务端都适用 |
|
||||||
| 含义 | 一个客户端能创建的最大连接数 |
|
| 含义 | 一个客户端能创建的最大连接数 |
|
||||||
| 取值范围 | 100-100000 |
|
| 取值范围 | 100-100000 |
|
||||||
| 缺省值 | 10000 |
|
| 缺省值 | 30000 |
|
||||||
|
|
||||||
### timeToGetAvailableConn
|
### timeToGetAvailableConn
|
||||||
|
|
||||||
|
@ -392,12 +373,12 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### metaCacheMaxSize
|
### metaCacheMaxSize
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ---------------------------------------------- |
|
| -------- | ------------------------------------ |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | 指定单个客户端元数据缓存大小的最大值 |
|
| 含义 | 指定单个客户端元数据缓存大小的最大值 |
|
||||||
| 单位 | MB |
|
| 单位 | MB |
|
||||||
| 缺省值 | -1 (无限制) |
|
| 缺省值 | -1 (无限制) |
|
||||||
|
|
||||||
## 集群相关
|
## 集群相关
|
||||||
|
|
||||||
|
@ -479,13 +460,13 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### slowLogScope
|
### slowLogScope
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | --------------------------------------------------------------|
|
| -------- | ---------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | 指定启动记录哪些类型的慢查询 |
|
| 含义 | 指定启动记录哪些类型的慢查询 |
|
||||||
| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE |
|
| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE |
|
||||||
| 缺省值 | ALL |
|
| 缺省值 | ALL |
|
||||||
| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 |
|
| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 |
|
||||||
|
|
||||||
### debugFlag
|
### debugFlag
|
||||||
|
|
||||||
|
@ -685,16 +666,16 @@ charset 的有效值是 UTF-8。
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
|
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
|
||||||
| 值域 | 0:不一致;1: 一致 |
|
| 值域 | 0:不一致;1: 一致 |
|
||||||
| 缺省值 | 0
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
### smlTsDefaultName
|
### smlTsDefaultName
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------------------- |
|
| -------- | -------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | schemaless自动建表的时间列名字通过该配置设置 |
|
| 含义 | schemaless自动建表的时间列名字通过该配置设置 |
|
||||||
| 类型 | 字符串 |
|
| 类型 | 字符串 |
|
||||||
| 缺省值 | _ts |
|
| 缺省值 | _ts |
|
||||||
|
|
||||||
## 其他
|
## 其他
|
||||||
|
|
||||||
|
@ -728,31 +709,31 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### ttlChangeOnWrite
|
### ttlChangeOnWrite
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------ |
|
| -------- | ------------------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | ttl 到期时间是否伴随表的修改操作改变 |
|
| 含义 | ttl 到期时间是否伴随表的修改操作改变 |
|
||||||
| 取值范围 | 0: 不改变;1:改变 |
|
| 取值范围 | 0: 不改变;1:改变 |
|
||||||
| 缺省值 | 0 |
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
### keepTimeOffset
|
### keepTimeOffset
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------ |
|
| -------- | -------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 迁移操作的延时 |
|
| 含义 | 迁移操作的延时 |
|
||||||
| 单位 | 小时 |
|
| 单位 | 小时 |
|
||||||
| 取值范围 | 0-23 |
|
| 取值范围 | 0-23 |
|
||||||
| 缺省值 | 0 |
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
### tmqMaxTopicNum
|
### tmqMaxTopicNum
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------ |
|
| -------- | --------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 订阅最多可建立的 topic 数量 |
|
| 含义 | 订阅最多可建立的 topic 数量 |
|
||||||
| 取值范围 | 1-10000|
|
| 取值范围 | 1-10000 |
|
||||||
| 缺省值 | 20 |
|
| 缺省值 | 20 |
|
||||||
|
|
||||||
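For illustration only, the three server-side options above might be set in taos.cfg as follows; the values repeat the documented defaults:

```
ttlChangeOnWrite 0
keepTimeOffset   0
tmqMaxTopicNum   20
```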
## 压缩参数
|
## 压缩参数
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,41 @@
|
||||||
|
---
sidebar_label: qStudio
title: qStudio
description: A detailed guide to accessing TDengine data with qStudio
---

qStudio is a free, cross-platform SQL data analysis tool that makes it easy to browse the tables, variables, functions, and configuration settings of a database. The latest version of qStudio has built-in support for TDengine.

## Prerequisites

Connecting qStudio to TDengine requires the following preparation.

- Install qStudio. qStudio supports the major operating systems, including Windows, macOS, and Linux. Be sure to [download](https://www.timestored.com/qstudio/download/) the package for the correct platform.
- Install a TDengine instance. Confirm that TDengine is running normally and that taosAdapter is installed and running; for details, see the [taosAdapter user manual](/reference/taosadapter).

## Connecting to TDengine with qStudio

1. Start the qStudio application, choose "Server" and then "Add Server..." from the menu, and select TDengine in the Server Type drop-down list.

![Connecting to TDengine with qStudio](./qstudio/qstudio-connect-tdengine.webp)

2. Configure the TDengine connection by entering the host address, port, username, and password. If TDengine is deployed on the local machine, you can enter only the username and password; the default username is root and the default password is taosdata. Click "Test" to verify that the connection works. If the TDengine Java connector is not installed on the local machine, qStudio will prompt you to download and install it.

![Downloading the Java connector](./qstudio/qstudio-jdbc-connector-download.webp)

3. A successful connection looks like the figure below. If the connection fails, check that the TDengine service and taosAdapter are running correctly, and that the host address, port, username, and password are correct.

![Successful connection](./qstudio/qstudio-connect-tdengine-test.webp)

4. Select databases and tables in qStudio to browse the data on the TDengine server.

![Browsing TDengine data with qStudio](./qstudio/qstudio-browse-data.webp)

5. You can also work with TDengine data by executing SQL statements; a hypothetical example query is shown after this guide.

![Executing SQL in qStudio](./qstudio/qstudio-sql-execution.webp)

6. qStudio can also chart your data and provides other features; see the [qStudio help documentation](https://www.timestored.com/qstudio/help).

![qStudio chart feature](./qstudio/qstudio-chart.webp)
|
After Width: | Height: | Size: 94 KiB |
After Width: | Height: | Size: 148 KiB |
After Width: | Height: | Size: 34 KiB |
After Width: | Height: | Size: 93 KiB |
After Width: | Height: | Size: 39 KiB |
After Width: | Height: | Size: 78 KiB |
|
@ -20,18 +20,12 @@ mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcDemo" -De
|
||||||
```
|
```
|
||||||
|
|
||||||
## Compile the Demo Code and Run It
|
## Compile the Demo Code and Run It
|
||||||
To compile taos-jdbcdriver, go to the source directory ``TDengine/src/connector/jdbc`` and execute
|
|
||||||
```
|
```
|
||||||
mvn clean package -Dmaven.test.skip=true
|
mvn clean package -Dmaven.test.skip=true
|
||||||
```
|
```
|
||||||
|
|
||||||
To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute
|
To run JDBCDemo.jar, execute
|
||||||
```
|
```
|
||||||
mvn clean package assembly:single
|
java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host [HOSTNAME]
|
||||||
```
|
|
||||||
|
|
||||||
To run JDBCDemo.jar, go to ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute
|
|
||||||
```
|
|
||||||
java -Djava.ext.dirs=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host [HOSTNAME]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -16,8 +16,6 @@ public class JdbcRestfulDemo {
|
||||||
|
|
||||||
Properties properties = new Properties();
|
Properties properties = new Properties();
|
||||||
properties.setProperty("charset", "UTF-8");
|
properties.setProperty("charset", "UTF-8");
|
||||||
properties.setProperty("locale", "en_US.UTF-8");
|
|
||||||
properties.setProperty("timezone", "UTC-8");
|
|
||||||
|
|
||||||
Connection conn = DriverManager.getConnection(url, properties);
|
Connection conn = DriverManager.getConnection(url, properties);
|
||||||
Statement stmt = conn.createStatement();
|
Statement stmt = conn.createStatement();
|
||||||
|
|
|
@ -58,6 +58,7 @@ extern int32_t tsTagFilterResCacheSize;
|
||||||
extern int32_t tsNumOfRpcThreads;
|
extern int32_t tsNumOfRpcThreads;
|
||||||
extern int32_t tsNumOfRpcSessions;
|
extern int32_t tsNumOfRpcSessions;
|
||||||
extern int32_t tsTimeToGetAvailableConn;
|
extern int32_t tsTimeToGetAvailableConn;
|
||||||
|
extern int32_t tsKeepAliveIdle;
|
||||||
extern int32_t tsNumOfCommitThreads;
|
extern int32_t tsNumOfCommitThreads;
|
||||||
extern int32_t tsNumOfTaskQueueThreads;
|
extern int32_t tsNumOfTaskQueueThreads;
|
||||||
extern int32_t tsNumOfMnodeQueryThreads;
|
extern int32_t tsNumOfMnodeQueryThreads;
|
||||||
|
|
|
@ -2767,6 +2767,7 @@ typedef struct {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
int64_t leftForVer;
|
int64_t leftForVer;
|
||||||
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
} SVDropStreamTaskReq;
|
} SVDropStreamTaskReq;
|
||||||
|
|
||||||
|
@ -2958,6 +2959,7 @@ int32_t tDecodeMqVgOffset(SDecoder* pDecoder, SMqVgOffset* pOffset);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
|
int64_t streamId;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
} SVPauseStreamTaskReq;
|
} SVPauseStreamTaskReq;
|
||||||
|
|
||||||
|
@ -2976,6 +2978,7 @@ int32_t tDeserializeSMPauseStreamReq(void* buf, int32_t bufLen, SMPauseStreamReq
|
||||||
typedef struct {
|
typedef struct {
|
||||||
SMsgHead head;
|
SMsgHead head;
|
||||||
int32_t taskId;
|
int32_t taskId;
|
||||||
|
int64_t streamId;
|
||||||
int8_t igUntreated;
|
int8_t igUntreated;
|
||||||
} SVResumeStreamTaskReq;
|
} SVResumeStreamTaskReq;
|
||||||
|
|
||||||
|
|
|
@ -74,7 +74,7 @@ typedef enum {
|
||||||
* @param vgId
|
* @param vgId
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId);
|
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId, int32_t taskId);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create the exec task for queue mode
|
* Create the exec task for queue mode
|
||||||
|
@ -95,8 +95,6 @@ int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList
|
||||||
*/
|
*/
|
||||||
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
|
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
|
||||||
|
|
||||||
//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code);
|
|
||||||
|
|
||||||
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
||||||
|
|
||||||
// todo refactor
|
// todo refactor
|
||||||
|
|
|
@ -30,6 +30,7 @@ extern "C" {
|
||||||
|
|
||||||
typedef struct SStreamTask SStreamTask;
|
typedef struct SStreamTask SStreamTask;
|
||||||
|
|
||||||
|
#define SSTREAM_TASK_VER 1
|
||||||
enum {
|
enum {
|
||||||
STREAM_STATUS__NORMAL = 0,
|
STREAM_STATUS__NORMAL = 0,
|
||||||
STREAM_STATUS__STOP,
|
STREAM_STATUS__STOP,
|
||||||
|
@ -266,13 +267,13 @@ typedef struct SCheckpointInfo {
|
||||||
} SCheckpointInfo;
|
} SCheckpointInfo;
|
||||||
|
|
||||||
typedef struct SStreamStatus {
|
typedef struct SStreamStatus {
|
||||||
int8_t taskStatus;
|
int8_t taskStatus;
|
||||||
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
|
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
|
||||||
int8_t schedStatus;
|
int8_t schedStatus;
|
||||||
int8_t keepTaskStatus;
|
int8_t keepTaskStatus;
|
||||||
bool transferState;
|
bool transferState;
|
||||||
int8_t timerActive; // timer is active
|
int8_t timerActive; // timer is active
|
||||||
int8_t pauseAllowed; // allowed task status to be set to be paused
|
int8_t pauseAllowed; // allowed task status to be set to be paused
|
||||||
} SStreamStatus;
|
} SStreamStatus;
|
||||||
|
|
||||||
typedef struct SHistDataRange {
|
typedef struct SHistDataRange {
|
||||||
|
@ -309,6 +310,7 @@ typedef struct {
|
||||||
} STaskTimestamp;
|
} STaskTimestamp;
|
||||||
|
|
||||||
struct SStreamTask {
|
struct SStreamTask {
|
||||||
|
int64_t ver;
|
||||||
SStreamId id;
|
SStreamId id;
|
||||||
SSTaskBasicInfo info;
|
SSTaskBasicInfo info;
|
||||||
STaskOutputInfo outputInfo;
|
STaskOutputInfo outputInfo;
|
||||||
|
@ -589,10 +591,10 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus);
|
||||||
bool streamTaskIsIdle(const SStreamTask* pTask);
|
bool streamTaskIsIdle(const SStreamTask* pTask);
|
||||||
int32_t streamTaskEndScanWAL(SStreamTask* pTask);
|
int32_t streamTaskEndScanWAL(SStreamTask* pTask);
|
||||||
|
|
||||||
SStreamChildEpInfo * streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
|
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
|
||||||
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
|
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
|
||||||
|
|
||||||
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
|
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
|
||||||
|
|
||||||
// recover and fill history
|
// recover and fill history
|
||||||
void streamTaskCheckDownstreamTasks(SStreamTask* pTask);
|
void streamTaskCheckDownstreamTasks(SStreamTask* pTask);
|
||||||
|
@ -628,7 +630,8 @@ int32_t streamDispatchTransferStateMsg(SStreamTask* pTask);
|
||||||
|
|
||||||
// agg level
|
// agg level
|
||||||
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
|
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
|
||||||
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq *pReq, SRpcHandleInfo* pRpcInfo);
|
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq,
|
||||||
|
SRpcHandleInfo* pRpcInfo);
|
||||||
int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);
|
int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);
|
||||||
|
|
||||||
// stream task meta
|
// stream task meta
|
||||||
|
@ -641,9 +644,9 @@ void streamMetaClose(SStreamMeta* streamMeta);
|
||||||
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
||||||
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
|
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
|
||||||
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
|
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
|
||||||
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId);
|
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
|
||||||
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it
|
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it
|
||||||
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
|
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
|
||||||
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
||||||
|
|
||||||
int32_t streamMetaBegin(SStreamMeta* pMeta);
|
int32_t streamMetaBegin(SStreamMeta* pMeta);
|
||||||
|
@ -659,7 +662,6 @@ int32_t streamTaskReleaseState(SStreamTask* pTask);
|
||||||
int32_t streamTaskReloadState(SStreamTask* pTask);
|
int32_t streamTaskReloadState(SStreamTask* pTask);
|
||||||
int32_t streamAlignTransferState(SStreamTask* pTask);
|
int32_t streamAlignTransferState(SStreamTask* pTask);
|
||||||
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -300,6 +300,25 @@ void tfsClosedir(STfsDir *pDir);
|
||||||
*/
|
*/
|
||||||
int32_t tfsGetMonitorInfo(STfs *pTfs, SMonDiskInfo *pInfo);
|
int32_t tfsGetMonitorInfo(STfs *pTfs, SMonDiskInfo *pInfo);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Check whether disk space is available at the given level
|
||||||
|
*
|
||||||
|
* @param pTfs The fs object.
|
||||||
|
* @param level the level to check
|
||||||
|
* @return bool
|
||||||
|
*/
|
||||||
|
bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Check whether disk space is sufficient on the given disk at the given level
|
||||||
|
*
|
||||||
|
* @param pTfs The fs object.
|
||||||
|
* @param level the level
|
||||||
|
* @param disk the disk
|
||||||
|
* @return bool
|
||||||
|
*/
|
||||||
|
bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
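The two helpers declared in tfs.h above can be combined into a write-admission check. The sketch below mirrors the vmDataSpaceSufficient() change that appears later in this diff; the fallback to osDataSpaceSufficient() when no STfs instance is attached is an assumption, not part of the header:

```c
#include "tfs.h"  // tfsDiskSpaceSufficient()
#include "os.h"   // assumed to declare osDataSpaceSufficient()

// Sketch: accept a write only if the caller's primary disk on tier 0 has
// sufficient space; fall back to the process-wide check when pTfs is NULL.
static bool canAcceptWrite(STfs *pTfs, int32_t diskPrimary) {
  if (pTfs == NULL) {
    return osDataSpaceSufficient();
  }
  return tfsDiskSpaceSufficient(pTfs, 0, diskPrimary);
}
```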
|
@ -89,7 +89,7 @@ typedef struct SRpcInit {
|
||||||
int32_t retryMinInterval; // retry init interval
|
int32_t retryMinInterval; // retry init interval
|
||||||
int32_t retryStepFactor; // retry interval factor
|
int32_t retryStepFactor; // retry interval factor
|
||||||
int32_t retryMaxInterval; // retry max interval
|
int32_t retryMaxInterval; // retry max interval
|
||||||
int64_t retryMaxTimouet;
|
int64_t retryMaxTimeout;
|
||||||
|
|
||||||
int32_t failFastThreshold;
|
int32_t failFastThreshold;
|
||||||
int32_t failFastInterval;
|
int32_t failFastInterval;
|
||||||
|
|
|
@ -95,6 +95,8 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf);
|
||||||
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
|
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
|
||||||
time_t taosTime(time_t *t);
|
time_t taosTime(time_t *t);
|
||||||
time_t taosMktime(struct tm *timep);
|
time_t taosMktime(struct tm *timep);
|
||||||
|
int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour,
|
||||||
|
const uint32_t min, const uint32_t sec, int64_t time_zone);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -613,6 +613,11 @@ function install_examples() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function install_web() {
|
||||||
|
if [ -d "${script_dir}/share" ]; then
|
||||||
|
${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share > /dev/null 2>&1 ||:
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
function clean_service_on_sysvinit() {
|
function clean_service_on_sysvinit() {
|
||||||
if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then
|
if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then
|
||||||
|
@ -888,6 +893,7 @@ function updateProduct() {
|
||||||
fi
|
fi
|
||||||
|
|
||||||
install_examples
|
install_examples
|
||||||
|
install_web
|
||||||
if [ -z $1 ]; then
|
if [ -z $1 ]; then
|
||||||
install_bin
|
install_bin
|
||||||
install_service
|
install_service
|
||||||
|
@ -898,29 +904,29 @@ function updateProduct() {
|
||||||
openresty_work=false
|
openresty_work=false
|
||||||
|
|
||||||
echo
|
echo
|
||||||
echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile2}"
|
echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t: edit ${cfg_install_dir}/${configFile2}"
|
||||||
[ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}: edit ${configDir}/${clientName2}adapter.toml"
|
echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml"
|
||||||
if ((${service_mod} == 0)); then
|
if ((${service_mod} == 0)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName2}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}systemctl start ${serverName2}${NC}"
|
||||||
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}systemctl start ${clientName2}adapter ${NC}"
|
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}systemctl start ${clientName2}adapter ${NC}"
|
||||||
elif ((${service_mod} == 1)); then
|
elif ((${service_mod} == 1)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName2} start${NC}"
|
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}service ${serverName2} start${NC}"
|
||||||
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}service ${clientName2}adapter start${NC}"
|
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}service ${clientName2}adapter start${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ./${serverName2}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ./${serverName2}${NC}"
|
||||||
[ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter &${NC}"
|
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${clientName2}adapter ${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper &${NC}"
|
echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t: sudo systemctl enable ${clientName2}keeper ${NC}"
|
||||||
|
|
||||||
if [ ${openresty_work} = 'true' ]; then
|
if [ ${openresty_work} = 'true' ]; then
|
||||||
echo -e "${GREEN_DARK}To access ${productName2} ${NC}: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}"
|
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To access ${productName2} ${NC}: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell${NC}"
|
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ((${prompt_force} == 1)); then
|
if ((${prompt_force} == 1)); then
|
||||||
|
@ -968,7 +974,7 @@ function installProduct() {
|
||||||
install_connector
|
install_connector
|
||||||
fi
|
fi
|
||||||
install_examples
|
install_examples
|
||||||
|
install_web
|
||||||
if [ -z $1 ]; then # install service and client
|
if [ -z $1 ]; then # install service and client
|
||||||
# For installing new
|
# For installing new
|
||||||
install_bin
|
install_bin
|
||||||
|
@ -982,24 +988,24 @@ function installProduct() {
|
||||||
|
|
||||||
# Ask if to start the service
|
# Ask if to start the service
|
||||||
echo
|
echo
|
||||||
echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile2}"
|
echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t: edit ${cfg_install_dir}/${configFile2}"
|
||||||
[ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}: edit ${configDir}/${clientName2}adapter.toml"
|
echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml"
|
||||||
if ((${service_mod} == 0)); then
|
if ((${service_mod} == 0)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName2}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}systemctl start ${serverName2}${NC}"
|
||||||
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}systemctl start ${clientName2}adapter ${NC}"
|
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}systemctl start ${clientName2}adapter ${NC}"
|
||||||
elif ((${service_mod} == 1)); then
|
elif ((${service_mod} == 1)); then
|
||||||
echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName2} start${NC}"
|
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}service ${serverName2} start${NC}"
|
||||||
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}service ${clientName2}adapter start${NC}"
|
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}service ${clientName2}adapter start${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${serverName2}${NC}"
|
echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${serverName2}${NC}"
|
||||||
[ -f ${installDir}/bin/${clientName2}adapter ] && \
|
[ -f ${installDir}/bin/${clientName2}adapter ] && \
|
||||||
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter &${NC}"
|
echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${clientName2}adapter ${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper &${NC}"
|
echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t: sudo systemctl enable ${clientName2}keeper ${NC}"
|
||||||
|
|
||||||
if [ ! -z "$firstEp" ]; then
|
if [ ! -z "$firstEp" ]; then
|
||||||
tmpFqdn=${firstEp%%:*}
|
tmpFqdn=${firstEp%%:*}
|
||||||
|
@ -1010,14 +1016,14 @@ function installProduct() {
|
||||||
tmpPort=""
|
tmpPort=""
|
||||||
fi
|
fi
|
||||||
if [[ "$tmpPort" != "" ]]; then
|
if [[ "$tmpPort" != "" ]]; then
|
||||||
echo -e "${GREEN_DARK}To access ${productName2} ${NC}: ${clientName2} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
|
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}"
|
||||||
else
|
else
|
||||||
echo -e "${GREEN_DARK}To access ${productName2} ${NC}: ${clientName2} -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
|
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}"
|
||||||
fi
|
fi
|
||||||
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
|
echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}"
|
||||||
echo
|
echo
|
||||||
elif [ ! -z "$serverFqdn" ]; then
|
elif [ ! -z "$serverFqdn" ]; then
|
||||||
echo -e "${GREEN_DARK}To access ${productName2} ${NC}: ${clientName2} -h $serverFqdn${GREEN_DARK} to login into ${productName2} server${NC}"
|
echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $serverFqdn${GREEN_DARK} to login into ${productName2} server${NC}"
|
||||||
echo
|
echo
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
|
@ -319,6 +319,11 @@ if [[ $dbName == "taos" ]]; then
|
||||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
|
||||||
|
mkdir -p ${install_dir}/share/
|
||||||
|
cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
|
||||||
|
fi
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Copy driver
|
# Copy driver
|
||||||
|
|
|
@ -123,8 +123,8 @@ function clean_bin() {
|
||||||
${csudo}rm -f ${bin_link_dir}/set_core || :
|
${csudo}rm -f ${bin_link_dir}/set_core || :
|
||||||
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
|
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
|
||||||
${csudo}rm -f ${bin_link_dir}/${keeperName2} || :
|
${csudo}rm -f ${bin_link_dir}/${keeperName2} || :
|
||||||
${csudo}rm -f ${bin_link_dir}/${xName2} || :
|
# ${csudo}rm -f ${bin_link_dir}/${xName2} || :
|
||||||
${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
|
# ${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
|
||||||
|
|
||||||
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
|
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
|
||||||
${csudo}rm -f ${bin_link_dir}/${clientName2} || :
|
${csudo}rm -f ${bin_link_dir}/${clientName2} || :
|
||||||
|
@ -194,26 +194,26 @@ function clean_service_on_systemd() {
|
||||||
fi
|
fi
|
||||||
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
|
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
|
||||||
|
|
||||||
x_service_config="${service_config_dir}/${xName2}.service"
|
# x_service_config="${service_config_dir}/${xName2}.service"
|
||||||
if [ -e "$x_service_config" ]; then
|
# if [ -e "$x_service_config" ]; then
|
||||||
if systemctl is-active --quiet ${xName2}; then
|
# if systemctl is-active --quiet ${xName2}; then
|
||||||
echo "${productName2} ${xName2} is running, stopping it..."
|
# echo "${productName2} ${xName2} is running, stopping it..."
|
||||||
${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
|
# ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
|
||||||
fi
|
# fi
|
||||||
${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
|
# ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
|
||||||
${csudo}rm -f ${x_service_config}
|
# ${csudo}rm -f ${x_service_config}
|
||||||
fi
|
# fi
|
||||||
|
|
||||||
explorer_service_config="${service_config_dir}/${explorerName2}.service"
|
# explorer_service_config="${service_config_dir}/${explorerName2}.service"
|
||||||
if [ -e "$explorer_service_config" ]; then
|
# if [ -e "$explorer_service_config" ]; then
|
||||||
if systemctl is-active --quiet ${explorerName2}; then
|
# if systemctl is-active --quiet ${explorerName2}; then
|
||||||
echo "${productName2} ${explorerName2} is running, stopping it..."
|
# echo "${productName2} ${explorerName2} is running, stopping it..."
|
||||||
${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
|
# ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
|
||||||
fi
|
# fi
|
||||||
${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
|
# ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
|
||||||
${csudo}rm -f ${explorer_service_config}
|
# ${csudo}rm -f ${explorer_service_config}
|
||||||
${csudo}rm -f /etc/${clientName2}/explorer.toml
|
# ${csudo}rm -f /etc/${clientName2}/explorer.toml
|
||||||
fi
|
# fi
|
||||||
}
|
}
|
||||||
|
|
||||||
function clean_service_on_sysvinit() {
|
function clean_service_on_sysvinit() {
|
||||||
|
|
|
@ -169,7 +169,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
|
||||||
rpcInit.retryMinInterval = tsRedirectPeriod;
|
rpcInit.retryMinInterval = tsRedirectPeriod;
|
||||||
rpcInit.retryStepFactor = tsRedirectFactor;
|
rpcInit.retryStepFactor = tsRedirectFactor;
|
||||||
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
|
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
|
||||||
rpcInit.retryMaxTimouet = tsMaxRetryWaitTime;
|
rpcInit.retryMaxTimeout = tsMaxRetryWaitTime;
|
||||||
|
|
||||||
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
|
int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
|
||||||
connLimitNum = TMAX(connLimitNum, 10);
|
connLimitNum = TMAX(connLimitNum, 10);
|
||||||
|
|
|
@ -1442,4 +1442,178 @@ TEST(clientCase, sub_tb_mt_test) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST(clientCase, ts_3756) {
|
||||||
|
// taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
|
||||||
|
|
||||||
|
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||||
|
ASSERT_NE(pConn, nullptr);
|
||||||
|
|
||||||
|
tmq_conf_t* conf = tmq_conf_new();
|
||||||
|
|
||||||
|
tmq_conf_set(conf, "enable.auto.commit", "false");
|
||||||
|
tmq_conf_set(conf, "auto.commit.interval.ms", "2000");
|
||||||
|
tmq_conf_set(conf, "group.id", "group_id_2");
|
||||||
|
tmq_conf_set(conf, "td.connect.user", "root");
|
||||||
|
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||||
|
tmq_conf_set(conf, "auto.offset.reset", "latest");
|
||||||
|
tmq_conf_set(conf, "msg.with.table.name", "false");
|
||||||
|
|
||||||
|
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
|
||||||
|
tmq_conf_destroy(conf);
|
||||||
|
|
||||||
|
// create the list of topics to subscribe to
|
||||||
|
tmq_list_t* topicList = tmq_list_new();
|
||||||
|
tmq_list_append(topicList, "tp");
|
||||||
|
|
||||||
|
// start the subscription
|
||||||
|
tmq_subscribe(tmq, topicList);
|
||||||
|
tmq_list_destroy(topicList);
|
||||||
|
|
||||||
|
TAOS_FIELD* fields = NULL;
|
||||||
|
int32_t numOfFields = 0;
|
||||||
|
int32_t precision = 0;
|
||||||
|
int32_t totalRows = 0;
|
||||||
|
int32_t msgCnt = 0;
|
||||||
|
int32_t timeout = 200;
|
||||||
|
|
||||||
|
int32_t count = 0;
|
||||||
|
|
||||||
|
tmq_topic_assignment* pAssign = NULL;
|
||||||
|
int32_t numOfAssign = 0;
|
||||||
|
|
||||||
|
int32_t code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
|
||||||
|
if (code != 0) {
|
||||||
|
printf("error occurs:%s\n", tmq_err2str(code));
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
tmq_consumer_close(tmq);
|
||||||
|
taos_close(pConn);
|
||||||
|
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for(int i = 0; i < numOfAssign; i++){
|
||||||
|
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
|
||||||
|
}
|
||||||
|
|
||||||
|
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, 4);
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
|
||||||
|
code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
|
||||||
|
if (code != 0) {
|
||||||
|
printf("error occurs:%s\n", tmq_err2str(code));
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
tmq_consumer_close(tmq);
|
||||||
|
taos_close(pConn);
|
||||||
|
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for(int i = 0; i < numOfAssign; i++){
|
||||||
|
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
|
||||||
|
}
|
||||||
|
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
|
||||||
|
code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
|
||||||
|
if (code != 0) {
|
||||||
|
printf("error occurs:%s\n", tmq_err2str(code));
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
tmq_consumer_close(tmq);
|
||||||
|
taos_close(pConn);
|
||||||
|
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for(int i = 0; i < numOfAssign; i++){
|
||||||
|
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
|
||||||
|
}
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
printf("start to poll\n");
|
||||||
|
TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout);
|
||||||
|
if (pRes) {
|
||||||
|
char buf[128];
|
||||||
|
|
||||||
|
const char* topicName = tmq_get_topic_name(pRes);
|
||||||
|
// const char* dbName = tmq_get_db_name(pRes);
|
||||||
|
// int32_t vgroupId = tmq_get_vgroup_id(pRes);
|
||||||
|
//
|
||||||
|
// printf("topic: %s\n", topicName);
|
||||||
|
// printf("db: %s\n", dbName);
|
||||||
|
// printf("vgroup id: %d\n", vgroupId);
|
||||||
|
|
||||||
|
printSubResults(pRes, &totalRows);
|
||||||
|
|
||||||
|
tmq_topic_assignment* pAssignTmp = NULL;
|
||||||
|
int32_t numOfAssignTmp = 0;
|
||||||
|
|
||||||
|
code = tmq_get_topic_assignment(tmq, "tp", &pAssignTmp, &numOfAssignTmp);
|
||||||
|
if (code != 0) {
|
||||||
|
printf("error occurs:%s\n", tmq_err2str(code));
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
tmq_consumer_close(tmq);
|
||||||
|
taos_close(pConn);
|
||||||
|
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for(int i = 0; i < numOfAssign; i++){
|
||||||
|
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssignTmp[i].vgId, pAssignTmp[i].currentOffset, pAssignTmp[i].begin, pAssignTmp[i].end);
|
||||||
|
}
|
||||||
|
if(numOfAssign != 0){
|
||||||
|
int i = 0;
|
||||||
|
for(; i < numOfAssign; i++){
|
||||||
|
if(pAssign[i].currentOffset != pAssignTmp[i].currentOffset){
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if(i == numOfAssign){
|
||||||
|
printf("all position is same\n");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
}
|
||||||
|
numOfAssign = numOfAssignTmp;
|
||||||
|
pAssign = pAssignTmp;
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].currentOffset);
|
||||||
|
// tmq_offset_seek(tmq, "tp", pAssign[1].vgId, pAssign[1].currentOffset);
|
||||||
|
// tmq_commit_sync(tmq, pRes);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// tmq_commit_sync(tmq, pRes);
|
||||||
|
if (pRes != NULL) {
|
||||||
|
taos_free_result(pRes);
|
||||||
|
// if ((++count) > 1) {
|
||||||
|
// break;
|
||||||
|
// }
|
||||||
|
} else {
|
||||||
|
// break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].begin);
|
||||||
|
}
|
||||||
|
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
|
||||||
|
code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
|
||||||
|
if (code != 0) {
|
||||||
|
printf("error occurs:%s\n", tmq_err2str(code));
|
||||||
|
tmq_free_assignment(pAssign);
|
||||||
|
tmq_consumer_close(tmq);
|
||||||
|
taos_close(pConn);
|
||||||
|
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for(int i = 0; i < numOfAssign; i++){
|
||||||
|
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
|
||||||
|
}
|
||||||
|
|
||||||
|
tmq_consumer_close(tmq);
|
||||||
|
taos_close(pConn);
|
||||||
|
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||||
|
}
|
||||||
#pragma GCC diagnostic pop
|
#pragma GCC diagnostic pop
|
||||||
|
|
|
@ -549,6 +549,7 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3
|
||||||
pDst->info = pBlock->info;
|
pDst->info = pBlock->info;
|
||||||
pDst->info.rows = 0;
|
pDst->info.rows = 0;
|
||||||
pDst->info.capacity = 0;
|
pDst->info.capacity = 0;
|
||||||
|
pDst->info.rowSize = 0;
|
||||||
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||||
SColumnInfoData colInfo = {0};
|
SColumnInfoData colInfo = {0};
|
||||||
|
|
|
@ -47,8 +47,10 @@ bool tsPrintAuth = false;
|
||||||
|
|
||||||
// queue & threads
|
// queue & threads
|
||||||
int32_t tsNumOfRpcThreads = 1;
|
int32_t tsNumOfRpcThreads = 1;
|
||||||
int32_t tsNumOfRpcSessions = 10000;
|
int32_t tsNumOfRpcSessions = 30000;
|
||||||
int32_t tsTimeToGetAvailableConn = 500000;
|
int32_t tsTimeToGetAvailableConn = 500000;
|
||||||
|
int32_t tsKeepAliveIdle = 60;
|
||||||
|
|
||||||
int32_t tsNumOfCommitThreads = 2;
|
int32_t tsNumOfCommitThreads = 2;
|
||||||
int32_t tsNumOfTaskQueueThreads = 4;
|
int32_t tsNumOfTaskQueueThreads = 4;
|
||||||
int32_t tsNumOfMnodeQueryThreads = 4;
|
int32_t tsNumOfMnodeQueryThreads = 4;
|
||||||
|
@ -436,6 +438,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
||||||
if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0)
|
if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
|
tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 72000);
|
||||||
|
if (cfgAddInt32(pCfg, "keepAliveIdle", tsKeepAliveIdle, 1, 7200000, CFG_SCOPE_BOTH) != 0) return -1;
|
||||||
|
|
||||||
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
|
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
|
||||||
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
|
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
|
||||||
if (tsNumOfTaskQueueThreads >= 10) {
|
if (tsNumOfTaskQueueThreads >= 10) {
|
||||||
|
@ -512,6 +517,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 1000000);
|
tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 1000000);
|
||||||
if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsNumOfRpcSessions, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1;
|
if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsNumOfRpcSessions, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1;
|
||||||
|
|
||||||
|
tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 72000);
|
||||||
|
if (cfgAddInt32(pCfg, "keepAliveIdle", tsKeepAliveIdle, 1, 7200000, CFG_SCOPE_BOTH) != 0) return -1;
|
||||||
|
|
||||||
tsNumOfCommitThreads = tsNumOfCores / 2;
|
tsNumOfCommitThreads = tsNumOfCores / 2;
|
||||||
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
|
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
|
||||||
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
@ -667,6 +675,13 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
|
||||||
pItem->stype = stype;
|
pItem->stype = stype;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pItem = cfgGetItem(tsCfg, "keepAliveIdle");
|
||||||
|
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||||
|
tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 720000);
|
||||||
|
pItem->i32 = tsKeepAliveIdle;
|
||||||
|
pItem->stype = stype;
|
||||||
|
}
|
||||||
|
|
||||||
pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
|
pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
|
||||||
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
|
||||||
tsNumOfCommitThreads = numOfCores / 2;
|
tsNumOfCommitThreads = numOfCores / 2;
|
||||||
|
@ -898,6 +913,8 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
|
||||||
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
|
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
|
||||||
|
|
||||||
tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32;
|
tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32;
|
||||||
|
|
||||||
|
tsKeepAliveIdle = cfgGetItem(pCfg, "keepAliveIdle")->i32;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -937,6 +954,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
|
tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32;
|
||||||
tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32;
|
tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32;
|
||||||
|
|
||||||
|
tsKeepAliveIdle = cfgGetItem(pCfg, "keepAliveIdle")->i32;
|
||||||
|
|
||||||
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
|
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
|
||||||
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
|
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
|
||||||
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
|
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
|
||||||
|
|
|
@ -25,46 +25,6 @@
|
||||||
|
|
||||||
#include "tlog.h"
|
#include "tlog.h"
|
||||||
|
|
||||||
/*
|
|
||||||
* mktime64 - Converts date to seconds.
|
|
||||||
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
|
|
||||||
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
|
|
||||||
* => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
|
|
||||||
*
|
|
||||||
* [For the Julian calendar (which was used in Russia before 1917,
|
|
||||||
* Britain & colonies before 1752, anywhere else before 1582,
|
|
||||||
* and is still in use by some communities) leave out the
|
|
||||||
* -year/100+year/400 terms, and add 10.]
|
|
||||||
*
|
|
||||||
* This algorithm was first published by Gauss (I think).
|
|
||||||
*
|
|
||||||
* A leap second can be indicated by calling this function with sec as
|
|
||||||
* 60 (allowable under ISO 8601). The leap second is treated the same
|
|
||||||
* as the following second since they don't exist in UNIX time.
|
|
||||||
*
|
|
||||||
* An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
|
|
||||||
* tomorrow - (allowable under ISO 8601) is supported.
|
|
||||||
*/
|
|
||||||
static int64_t user_mktime64(const uint32_t year0, const uint32_t mon0, const uint32_t day, const uint32_t hour,
|
|
||||||
const uint32_t min, const uint32_t sec, int64_t time_zone) {
|
|
||||||
uint32_t mon = mon0, year = year0;
|
|
||||||
|
|
||||||
/* 1..12 -> 11,12,1..10 */
|
|
||||||
if (0 >= (int32_t)(mon -= 2)) {
|
|
||||||
mon += 12; /* Puts Feb last since it has leap day */
|
|
||||||
year -= 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) +
|
|
||||||
// year*365 - 719499)*24 + hour)*60 + min)*60 + sec);
|
|
||||||
int64_t res;
|
|
||||||
res = 367 * ((int64_t)mon) / 12;
|
|
||||||
res += year / 4 - year / 100 + year / 400 + day + ((int64_t)year) * 365 - 719499;
|
|
||||||
res = res * 24;
|
|
||||||
res = ((res + hour) * 60 + min) * 60 + sec;
|
|
||||||
|
|
||||||
return (res + time_zone);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ==== mktime() kernel code =================//
|
// ==== mktime() kernel code =================//
|
||||||
static int64_t m_deltaUtc = 0;
|
static int64_t m_deltaUtc = 0;
|
||||||
|
|
|
@ -460,7 +460,6 @@ static void vmCleanup(SVnodeMgmt *pMgmt) {
|
||||||
vmCloseVnodes(pMgmt);
|
vmCloseVnodes(pMgmt);
|
||||||
vmStopWorker(pMgmt);
|
vmStopWorker(pMgmt);
|
||||||
vnodeCleanup();
|
vnodeCleanup();
|
||||||
tfsClose(pMgmt->pTfs);
|
|
||||||
taosThreadRwlockDestroy(&pMgmt->lock);
|
taosThreadRwlockDestroy(&pMgmt->lock);
|
||||||
taosMemoryFree(pMgmt);
|
taosMemoryFree(pMgmt);
|
||||||
}
|
}
|
||||||
|
@ -535,20 +534,9 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
|
||||||
pMgmt->msgCb.mgmt = pMgmt;
|
pMgmt->msgCb.mgmt = pMgmt;
|
||||||
taosThreadRwlockInit(&pMgmt->lock, NULL);
|
taosThreadRwlockInit(&pMgmt->lock, NULL);
|
||||||
|
|
||||||
SDiskCfg dCfg = {0};
|
pMgmt->pTfs = pInput->pTfs;
|
||||||
tstrncpy(dCfg.dir, tsDataDir, TSDB_FILENAME_LEN);
|
|
||||||
dCfg.level = 0;
|
|
||||||
dCfg.primary = 1;
|
|
||||||
SDiskCfg *pDisks = tsDiskCfg;
|
|
||||||
int32_t numOfDisks = tsDiskCfgNum;
|
|
||||||
if (numOfDisks <= 0 || pDisks == NULL) {
|
|
||||||
pDisks = &dCfg;
|
|
||||||
numOfDisks = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
pMgmt->pTfs = tfsOpen(pDisks, numOfDisks);
|
|
||||||
if (pMgmt->pTfs == NULL) {
|
if (pMgmt->pTfs == NULL) {
|
||||||
dError("failed to init tfs since %s", terrstr());
|
dError("tfs is null.");
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
tmsgReportStartup("vnode-tfs", "initialized");
|
tmsgReportStartup("vnode-tfs", "initialized");
|
||||||
|
|
|
@ -15,6 +15,7 @@
|
||||||
|
|
||||||
#define _DEFAULT_SOURCE
|
#define _DEFAULT_SOURCE
|
||||||
#include "vmInt.h"
|
#include "vmInt.h"
|
||||||
|
#include "vnodeInt.h"
|
||||||
|
|
||||||
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
|
static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) {
|
||||||
if (pMsg->info.handle == NULL) return;
|
if (pMsg->info.handle == NULL) return;
|
||||||
|
@ -114,9 +115,10 @@ static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO
|
||||||
const STraceId *trace = &pMsg->info.traceId;
|
const STraceId *trace = &pMsg->info.traceId;
|
||||||
dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);
|
dGTrace("vgId:%d, msg:%p get from vnode-fetch queue", pVnode->vgId, pMsg);
|
||||||
|
|
||||||
|
terrno = 0;
|
||||||
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
|
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
|
||||||
if (code != 0) {
|
if (code != 0) {
|
||||||
if (terrno != 0) {
|
if (code == -1 && terrno != 0) {
|
||||||
code = terrno;
|
code = terrno;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -158,6 +160,15 @@ static void vmSendResponse(SRpcMsg *pMsg) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool vmDataSpaceSufficient(SVnodeObj *pVnode) {
|
||||||
|
STfs *pTfs = pVnode->pImpl->pTfs;
|
||||||
|
if (pTfs) {
|
||||||
|
return tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary);
|
||||||
|
} else {
|
||||||
|
return osDataSpaceSufficient();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
|
static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
|
||||||
const STraceId *trace = &pMsg->info.traceId;
|
const STraceId *trace = &pMsg->info.traceId;
|
||||||
if (pMsg->contLen < sizeof(SMsgHead)) {
|
if (pMsg->contLen < sizeof(SMsgHead)) {
|
||||||
|
@ -203,7 +214,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
||||||
taosWriteQitem(pVnode->pFetchQ, pMsg);
|
taosWriteQitem(pVnode->pFetchQ, pMsg);
|
||||||
break;
|
break;
|
||||||
case WRITE_QUEUE:
|
case WRITE_QUEUE:
|
||||||
if (!osDataSpaceSufficient()) {
|
if (!vmDataSpaceSufficient(pVnode)) {
|
||||||
terrno = TSDB_CODE_NO_ENOUGH_DISKSPACE;
|
terrno = TSDB_CODE_NO_ENOUGH_DISKSPACE;
|
||||||
code = terrno;
|
code = terrno;
|
||||||
dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
|
dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
|
||||||
|
|
|
@ -20,6 +20,7 @@
|
||||||
#include "uv.h"
|
#include "uv.h"
|
||||||
|
|
||||||
#include "dmInt.h"
|
#include "dmInt.h"
|
||||||
|
#include "tfs.h"
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
|
@ -79,6 +80,7 @@ typedef struct SDnode {
|
||||||
TdThreadMutex mutex;
|
TdThreadMutex mutex;
|
||||||
TdFilePtr lockfile;
|
TdFilePtr lockfile;
|
||||||
SDnodeData data;
|
SDnodeData data;
|
||||||
|
STfs *pTfs;
|
||||||
SMgmtWrapper wrappers[NODE_END];
|
SMgmtWrapper wrappers[NODE_END];
|
||||||
} SDnode;
|
} SDnode;
|
||||||
|
|
||||||
|
@ -124,4 +126,4 @@ void dmGetQnodeLoads(SQnodeLoad *pInfo);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /*_TD_DND_MGMT_H_*/
|
#endif /*_TD_DND_MGMT_H_*/
|
||||||
|
|
|
@ -96,28 +96,23 @@ _exit:
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool dmCheckDiskSpace() {
|
static bool dmDataSpaceAvailable() {
|
||||||
osUpdate();
|
SDnode *pDnode = dmInstance();
|
||||||
// sufficiency
|
if (pDnode->pTfs) {
|
||||||
if (!osDataSpaceSufficient()) {
|
return tfsDiskSpaceAvailable(pDnode->pTfs, 0);
|
||||||
dWarn("free data disk size: %f GB, not sufficient, expected %f GB at least",
|
|
||||||
(double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0,
|
|
||||||
(double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0);
|
|
||||||
}
|
}
|
||||||
if (!osLogSpaceSufficient()) {
|
|
||||||
dWarn("free log disk size: %f GB, not sufficient, expected %f GB at least",
|
|
||||||
(double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0,
|
|
||||||
(double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0);
|
|
||||||
}
|
|
||||||
if (!osTempSpaceSufficient()) {
|
|
||||||
dWarn("free temp disk size: %f GB, not sufficient, expected %f GB at least",
|
|
||||||
(double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0,
|
|
||||||
(double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0);
|
|
||||||
}
|
|
||||||
// availability
|
|
||||||
bool ret = true;
|
|
||||||
if (!osDataSpaceAvailable()) {
|
if (!osDataSpaceAvailable()) {
|
||||||
dError("data disk space unavailable, i.e. %s", tsDataDir);
|
dError("data disk space unavailable, i.e. %s", tsDataDir);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool dmCheckDiskSpace() {
|
||||||
|
osUpdate();
|
||||||
|
// availability
|
||||||
|
bool ret = true;
|
||||||
|
if (!dmDataSpaceAvailable()) {
|
||||||
terrno = TSDB_CODE_NO_DISKSPACE;
|
terrno = TSDB_CODE_NO_DISKSPACE;
|
||||||
ret = false;
|
ret = false;
|
||||||
}
|
}
|
||||||
|
@ -134,6 +129,34 @@ static bool dmCheckDiskSpace() {
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t dmDiskInit() {
|
||||||
|
SDnode *pDnode = dmInstance();
|
||||||
|
SDiskCfg dCfg = {0};
|
||||||
|
tstrncpy(dCfg.dir, tsDataDir, TSDB_FILENAME_LEN);
|
||||||
|
dCfg.level = 0;
|
||||||
|
dCfg.primary = 1;
|
||||||
|
SDiskCfg *pDisks = tsDiskCfg;
|
||||||
|
int32_t numOfDisks = tsDiskCfgNum;
|
||||||
|
if (numOfDisks <= 0 || pDisks == NULL) {
|
||||||
|
pDisks = &dCfg;
|
||||||
|
numOfDisks = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
pDnode->pTfs = tfsOpen(pDisks, numOfDisks);
|
||||||
|
if (pDnode->pTfs == NULL) {
|
||||||
|
dError("failed to init tfs since %s", terrstr());
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t dmDiskClose() {
|
||||||
|
SDnode *pDnode = dmInstance();
|
||||||
|
tfsClose(pDnode->pTfs);
|
||||||
|
pDnode->pTfs = NULL;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static bool dmCheckDataDirVersion() {
|
static bool dmCheckDataDirVersion() {
|
||||||
char checkDataDirJsonFileName[PATH_MAX] = {0};
|
char checkDataDirJsonFileName[PATH_MAX] = {0};
|
||||||
snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir);
|
snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir);
|
||||||
|
@ -147,6 +170,7 @@ static bool dmCheckDataDirVersion() {
|
||||||
|
|
||||||
int32_t dmInit() {
|
int32_t dmInit() {
|
||||||
dInfo("start to init dnode env");
|
dInfo("start to init dnode env");
|
||||||
|
if (dmDiskInit() != 0) return -1;
|
||||||
if (!dmCheckDataDirVersion()) return -1;
|
if (!dmCheckDataDirVersion()) return -1;
|
||||||
if (!dmCheckDiskSpace()) return -1;
|
if (!dmCheckDiskSpace()) return -1;
|
||||||
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
|
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
|
||||||
|
@ -177,6 +201,7 @@ void dmCleanup() {
|
||||||
udfcClose();
|
udfcClose();
|
||||||
udfStopUdfd();
|
udfStopUdfd();
|
||||||
taosStopCacheRefreshWorker();
|
taosStopCacheRefreshWorker();
|
||||||
|
dmDiskClose();
|
||||||
dInfo("dnode env is cleaned up");
|
dInfo("dnode env is cleaned up");
|
||||||
|
|
||||||
taosCleanupCfg();
|
taosCleanupCfg();
|
||||||
|
@ -367,6 +392,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
|
||||||
SMgmtInputOpt opt = {
|
SMgmtInputOpt opt = {
|
||||||
.path = pWrapper->path,
|
.path = pWrapper->path,
|
||||||
.name = pWrapper->name,
|
.name = pWrapper->name,
|
||||||
|
.pTfs = pWrapper->pDnode->pTfs,
|
||||||
.pData = &pWrapper->pDnode->data,
|
.pData = &pWrapper->pDnode->data,
|
||||||
.processCreateNodeFp = dmProcessCreateNodeReq,
|
.processCreateNodeFp = dmProcessCreateNodeReq,
|
||||||
.processAlterNodeTypeFp = dmProcessAlterNodeTypeReq,
|
.processAlterNodeTypeFp = dmProcessAlterNodeTypeReq,
|
||||||
|
|
|
@ -299,7 +299,7 @@ int32_t dmInitClient(SDnode *pDnode) {
|
||||||
rpcInit.retryMinInterval = tsRedirectPeriod;
|
rpcInit.retryMinInterval = tsRedirectPeriod;
|
||||||
rpcInit.retryStepFactor = tsRedirectFactor;
|
rpcInit.retryStepFactor = tsRedirectFactor;
|
||||||
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
|
rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
|
||||||
rpcInit.retryMaxTimouet = tsMaxRetryWaitTime;
|
rpcInit.retryMaxTimeout = tsMaxRetryWaitTime;
|
||||||
|
|
||||||
rpcInit.failFastInterval = 5000; // interval threshold(ms)
|
rpcInit.failFastInterval = 5000; // interval threshold(ms)
|
||||||
rpcInit.failFastThreshold = 3; // failed threshold
|
rpcInit.failFastThreshold = 3; // failed threshold
|
||||||
|
|
|
@ -37,6 +37,7 @@
|
||||||
#include "monitor.h"
|
#include "monitor.h"
|
||||||
#include "qnode.h"
|
#include "qnode.h"
|
||||||
#include "sync.h"
|
#include "sync.h"
|
||||||
|
#include "tfs.h"
|
||||||
#include "wal.h"
|
#include "wal.h"
|
||||||
|
|
||||||
#include "libs/function/tudf.h"
|
#include "libs/function/tudf.h"
|
||||||
|
@ -111,6 +112,7 @@ typedef struct {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
const char *path;
|
const char *path;
|
||||||
const char *name;
|
const char *name;
|
||||||
|
STfs *pTfs;
|
||||||
SDnodeData *pData;
|
SDnodeData *pData;
|
||||||
SMsgCb msgCb;
|
SMsgCb msgCb;
|
||||||
ProcessCreateNodeFp processCreateNodeFp;
|
ProcessCreateNodeFp processCreateNodeFp;
|
||||||
|
|
|
@ -70,6 +70,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
|
||||||
if (tEncodeI32(pEncoder, innerSz) < 0) return -1;
|
if (tEncodeI32(pEncoder, innerSz) < 0) return -1;
|
||||||
for (int32_t j = 0; j < innerSz; j++) {
|
for (int32_t j = 0; j < innerSz; j++) {
|
||||||
SStreamTask *pTask = taosArrayGetP(pArray, j);
|
SStreamTask *pTask = taosArrayGetP(pArray, j);
|
||||||
|
pTask->ver = SSTREAM_TASK_VER;
|
||||||
if (tEncodeStreamTask(pEncoder, pTask) < 0) return -1;
|
if (tEncodeStreamTask(pEncoder, pTask) < 0) return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -154,7 +155,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void* freeStreamTasks(SArray* pTaskLevel) {
|
static void *freeStreamTasks(SArray *pTaskLevel) {
|
||||||
int32_t numOfLevel = taosArrayGetSize(pTaskLevel);
|
int32_t numOfLevel = taosArrayGetSize(pTaskLevel);
|
||||||
for (int32_t i = 0; i < numOfLevel; i++) {
|
for (int32_t i = 0; i < numOfLevel; i++) {
|
||||||
SArray *pLevel = taosArrayGetP(pTaskLevel, i);
|
SArray *pLevel = taosArrayGetP(pTaskLevel, i);
|
||||||
|
@ -192,14 +193,14 @@ SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
|
||||||
SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
|
SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
|
||||||
if (pVgEpNew == NULL) return NULL;
|
if (pVgEpNew == NULL) return NULL;
|
||||||
pVgEpNew->vgId = pVgEp->vgId;
|
pVgEpNew->vgId = pVgEp->vgId;
|
||||||
// pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
|
// pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
|
||||||
pVgEpNew->epSet = pVgEp->epSet;
|
pVgEpNew->epSet = pVgEp->epSet;
|
||||||
return pVgEpNew;
|
return pVgEpNew;
|
||||||
}
|
}
|
||||||
|
|
||||||
void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
|
void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
|
||||||
if (pVgEp) {
|
if (pVgEp) {
|
||||||
// taosMemoryFreeClear(pVgEp->qmsg);
|
// taosMemoryFreeClear(pVgEp->qmsg);
|
||||||
taosMemoryFree(pVgEp);
|
taosMemoryFree(pVgEp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -207,14 +208,14 @@ void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
|
||||||
int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
|
int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
|
||||||
int32_t tlen = 0;
|
int32_t tlen = 0;
|
||||||
tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
|
tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
|
||||||
// tlen += taosEncodeString(buf, pVgEp->qmsg);
|
// tlen += taosEncodeString(buf, pVgEp->qmsg);
|
||||||
tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
|
tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
|
||||||
return tlen;
|
return tlen;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) {
|
void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) {
|
||||||
buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
|
buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
|
||||||
if(sver == 1){
|
if (sver == 1) {
|
||||||
uint64_t size = 0;
|
uint64_t size = 0;
|
||||||
buf = taosDecodeVariantU64(buf, &size);
|
buf = taosDecodeVariantU64(buf, &size);
|
||||||
buf = POINTER_SHIFT(buf, size);
|
buf = POINTER_SHIFT(buf, size);
|
||||||
|
@ -223,7 +224,7 @@ void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) {
|
||||||
return (void *)buf;
|
return (void *)buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) {
|
SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char *cgroup) {
|
||||||
SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj));
|
SMqConsumerObj *pConsumer = taosMemoryCalloc(1, sizeof(SMqConsumerObj));
|
||||||
if (pConsumer == NULL) {
|
if (pConsumer == NULL) {
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
@ -260,12 +261,12 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) {
|
void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) {
|
||||||
if(pConsumer == NULL) return;
|
if (pConsumer == NULL) return;
|
||||||
taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree);
|
taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree);
|
||||||
taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree);
|
taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree);
|
||||||
taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree);
|
taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree);
|
||||||
taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree);
|
taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree);
|
||||||
if(delete){
|
if (delete) {
|
||||||
taosMemoryFree(pConsumer);
|
taosMemoryFree(pConsumer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -392,7 +393,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s
|
||||||
taosArrayPush(pConsumer->assignedTopics, &topic);
|
taosArrayPush(pConsumer->assignedTopics, &topic);
|
||||||
}
|
}
|
||||||
|
|
||||||
if(sver > 1){
|
if (sver > 1) {
|
||||||
buf = taosDecodeFixedI8(buf, &pConsumer->withTbName);
|
buf = taosDecodeFixedI8(buf, &pConsumer->withTbName);
|
||||||
buf = taosDecodeFixedI8(buf, &pConsumer->autoCommit);
|
buf = taosDecodeFixedI8(buf, &pConsumer->autoCommit);
|
||||||
buf = taosDecodeFixedI32(buf, &pConsumer->autoCommitInterval);
|
buf = taosDecodeFixedI32(buf, &pConsumer->autoCommitInterval);
|
||||||
|
@ -401,18 +402,18 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s
|
||||||
return (void *)buf;
|
return (void *)buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
//SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
|
// SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
|
||||||
// SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
|
// SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
|
||||||
// if (pConsumerEpNew == NULL) return NULL;
|
// if (pConsumerEpNew == NULL) return NULL;
|
||||||
// pConsumerEpNew->consumerId = pConsumerEpOld->consumerId;
|
// pConsumerEpNew->consumerId = pConsumerEpOld->consumerId;
|
||||||
// pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, NULL);
|
// pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, NULL);
|
||||||
// return pConsumerEpNew;
|
// return pConsumerEpNew;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//void tDeleteSMqConsumerEp(void *data) {
|
// void tDeleteSMqConsumerEp(void *data) {
|
||||||
// SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data;
|
// SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data;
|
||||||
// taosArrayDestroy(pConsumerEp->vgs);
|
// taosArrayDestroy(pConsumerEp->vgs);
|
||||||
//}
|
// }
|
||||||
|
|
||||||
int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
|
int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
|
||||||
int32_t tlen = 0;
|
int32_t tlen = 0;
|
||||||
|
@ -420,7 +421,7 @@ int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
|
||||||
tlen += taosEncodeArray(buf, pConsumerEp->vgs, (FEncode)tEncodeSMqVgEp);
|
tlen += taosEncodeArray(buf, pConsumerEp->vgs, (FEncode)tEncodeSMqVgEp);
|
||||||
int32_t szVgs = taosArrayGetSize(pConsumerEp->offsetRows);
|
int32_t szVgs = taosArrayGetSize(pConsumerEp->offsetRows);
|
||||||
tlen += taosEncodeFixedI32(buf, szVgs);
|
tlen += taosEncodeFixedI32(buf, szVgs);
|
||||||
for (int32_t j= 0; j < szVgs; ++j) {
|
for (int32_t j = 0; j < szVgs; ++j) {
|
||||||
OffsetRows *offRows = taosArrayGet(pConsumerEp->offsetRows, j);
|
OffsetRows *offRows = taosArrayGet(pConsumerEp->offsetRows, j);
|
||||||
tlen += taosEncodeFixedI32(buf, offRows->vgId);
|
tlen += taosEncodeFixedI32(buf, offRows->vgId);
|
||||||
tlen += taosEncodeFixedI64(buf, offRows->rows);
|
tlen += taosEncodeFixedI64(buf, offRows->rows);
|
||||||
|
@ -434,28 +435,28 @@ int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
|
||||||
// do nothing
|
// do nothing
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
//#if 0
|
// #if 0
|
||||||
// int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
|
// int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
|
||||||
// tlen += taosEncodeFixedI32(buf, sz);
|
// tlen += taosEncodeFixedI32(buf, sz);
|
||||||
// for (int32_t i = 0; i < sz; i++) {
|
// for (int32_t i = 0; i < sz; i++) {
|
||||||
// SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
|
// SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
|
||||||
// tlen += tEncodeSMqVgEp(buf, pVgEp);
|
// tlen += tEncodeSMqVgEp(buf, pVgEp);
|
||||||
// }
|
// }
|
||||||
//#endif
|
// #endif
|
||||||
return tlen;
|
return tlen;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t sver) {
|
void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t sver) {
|
||||||
buf = taosDecodeFixedI64(buf, &pConsumerEp->consumerId);
|
buf = taosDecodeFixedI64(buf, &pConsumerEp->consumerId);
|
||||||
buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
|
buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
|
||||||
if (sver > 1){
|
if (sver > 1) {
|
||||||
int32_t szVgs = 0;
|
int32_t szVgs = 0;
|
||||||
buf = taosDecodeFixedI32(buf, &szVgs);
|
buf = taosDecodeFixedI32(buf, &szVgs);
|
||||||
if(szVgs > 0){
|
if (szVgs > 0) {
|
||||||
pConsumerEp->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
|
pConsumerEp->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
|
||||||
if (NULL == pConsumerEp->offsetRows) return NULL;
|
if (NULL == pConsumerEp->offsetRows) return NULL;
|
||||||
for (int32_t j= 0; j < szVgs; ++j) {
|
for (int32_t j = 0; j < szVgs; ++j) {
|
||||||
OffsetRows* offRows = taosArrayReserve(pConsumerEp->offsetRows, 1);
|
OffsetRows *offRows = taosArrayReserve(pConsumerEp->offsetRows, 1);
|
||||||
buf = taosDecodeFixedI32(buf, &offRows->vgId);
|
buf = taosDecodeFixedI32(buf, &offRows->vgId);
|
||||||
buf = taosDecodeFixedI64(buf, &offRows->rows);
|
buf = taosDecodeFixedI64(buf, &offRows->rows);
|
||||||
buf = taosDecodeFixedI8(buf, &offRows->offset.type);
|
buf = taosDecodeFixedI8(buf, &offRows->offset.type);
|
||||||
|
@ -470,21 +471,21 @@ void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t s
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
//#if 0
|
// #if 0
|
||||||
// int32_t sz;
|
// int32_t sz;
|
||||||
// buf = taosDecodeFixedI32(buf, &sz);
|
// buf = taosDecodeFixedI32(buf, &sz);
|
||||||
// pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *));
|
// pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *));
|
||||||
// for (int32_t i = 0; i < sz; i++) {
|
// for (int32_t i = 0; i < sz; i++) {
|
||||||
// SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
|
// SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
|
||||||
// buf = tDecodeSMqVgEp(buf, pVgEp);
|
// buf = tDecodeSMqVgEp(buf, pVgEp);
|
||||||
// taosArrayPush(pConsumerEp->vgs, &pVgEp);
|
// taosArrayPush(pConsumerEp->vgs, &pVgEp);
|
||||||
// }
|
// }
|
||||||
//#endif
|
// #endif
|
||||||
|
|
||||||
return (void *)buf;
|
return (void *)buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
SMqSubscribeObj *tNewSubscribeObj(const char* key) {
|
SMqSubscribeObj *tNewSubscribeObj(const char *key) {
|
||||||
SMqSubscribeObj *pSubObj = taosMemoryCalloc(1, sizeof(SMqSubscribeObj));
|
SMqSubscribeObj *pSubObj = taosMemoryCalloc(1, sizeof(SMqSubscribeObj));
|
||||||
if (pSubObj == NULL) {
|
if (pSubObj == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -577,7 +578,7 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
|
||||||
|
|
||||||
int32_t szVgs = taosArrayGetSize(pSub->offsetRows);
|
int32_t szVgs = taosArrayGetSize(pSub->offsetRows);
|
||||||
tlen += taosEncodeFixedI32(buf, szVgs);
|
tlen += taosEncodeFixedI32(buf, szVgs);
|
||||||
for (int32_t j= 0; j < szVgs; ++j) {
|
for (int32_t j = 0; j < szVgs; ++j) {
|
||||||
OffsetRows *offRows = taosArrayGet(pSub->offsetRows, j);
|
OffsetRows *offRows = taosArrayGet(pSub->offsetRows, j);
|
||||||
tlen += taosEncodeFixedI32(buf, offRows->vgId);
|
tlen += taosEncodeFixedI32(buf, offRows->vgId);
|
||||||
tlen += taosEncodeFixedI64(buf, offRows->rows);
|
tlen += taosEncodeFixedI64(buf, offRows->rows);
|
||||||
|
@ -617,14 +618,14 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) {
|
||||||
buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
|
buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
|
||||||
buf = taosDecodeStringTo(buf, pSub->dbName);
|
buf = taosDecodeStringTo(buf, pSub->dbName);
|
||||||
|
|
||||||
if (sver > 1){
|
if (sver > 1) {
|
||||||
int32_t szVgs = 0;
|
int32_t szVgs = 0;
|
||||||
buf = taosDecodeFixedI32(buf, &szVgs);
|
buf = taosDecodeFixedI32(buf, &szVgs);
|
||||||
if(szVgs > 0){
|
if (szVgs > 0) {
|
||||||
pSub->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
|
pSub->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
|
||||||
if (NULL == pSub->offsetRows) return NULL;
|
if (NULL == pSub->offsetRows) return NULL;
|
||||||
for (int32_t j= 0; j < szVgs; ++j) {
|
for (int32_t j = 0; j < szVgs; ++j) {
|
||||||
OffsetRows* offRows = taosArrayReserve(pSub->offsetRows, 1);
|
OffsetRows *offRows = taosArrayReserve(pSub->offsetRows, 1);
|
||||||
buf = taosDecodeFixedI32(buf, &offRows->vgId);
|
buf = taosDecodeFixedI32(buf, &offRows->vgId);
|
||||||
buf = taosDecodeFixedI64(buf, &offRows->rows);
|
buf = taosDecodeFixedI64(buf, &offRows->rows);
|
||||||
buf = taosDecodeFixedI8(buf, &offRows->offset.type);
|
buf = taosDecodeFixedI8(buf, &offRows->offset.type);
|
||||||
|
@ -639,71 +640,71 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
buf = taosDecodeString(buf, &pSub->qmsg);
|
buf = taosDecodeString(buf, &pSub->qmsg);
|
||||||
}else{
|
} else {
|
||||||
pSub->qmsg = taosStrdup("");
|
pSub->qmsg = taosStrdup("");
|
||||||
}
|
}
|
||||||
return (void *)buf;
|
return (void *)buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
//SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
|
// SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
|
||||||
// SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
|
// SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
|
||||||
// if (pEntryNew == NULL) return NULL;
|
// if (pEntryNew == NULL) return NULL;
|
||||||
// pEntryNew->epoch = pEntry->epoch;
|
// pEntryNew->epoch = pEntry->epoch;
|
||||||
// pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
|
// pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
|
||||||
// return pEntryNew;
|
// return pEntryNew;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
|
// void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
|
||||||
// taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
|
// taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
|
||||||
//}
|
// }
|
||||||
|
|
||||||
//int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
|
// int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
|
||||||
// int32_t tlen = 0;
|
// int32_t tlen = 0;
|
||||||
// tlen += taosEncodeFixedI32(buf, pEntry->epoch);
|
// tlen += taosEncodeFixedI32(buf, pEntry->epoch);
|
||||||
// tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
|
// tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
|
||||||
// return tlen;
|
// return tlen;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
|
// void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
|
||||||
// buf = taosDecodeFixedI32(buf, &pEntry->epoch);
|
// buf = taosDecodeFixedI32(buf, &pEntry->epoch);
|
||||||
// buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
|
// buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
|
||||||
// return (void *)buf;
|
// return (void *)buf;
|
||||||
//}
|
// }
|
||||||
|
|
||||||
//SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
|
// SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
|
||||||
// SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
|
// SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
|
||||||
// if (pLogNew == NULL) return pLogNew;
|
// if (pLogNew == NULL) return pLogNew;
|
||||||
// memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
|
// memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
|
||||||
// pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
|
// pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
|
||||||
// return pLogNew;
|
// return pLogNew;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
|
// void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
|
||||||
// taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
|
// taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
|
||||||
//}
|
// }
|
||||||
|
|
||||||
//int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
|
// int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
|
||||||
// int32_t tlen = 0;
|
// int32_t tlen = 0;
|
||||||
// tlen += taosEncodeString(buf, pLog->key);
|
// tlen += taosEncodeString(buf, pLog->key);
|
||||||
// tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
|
// tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
|
||||||
// return tlen;
|
// return tlen;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
|
// void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
|
||||||
// buf = taosDecodeStringTo(buf, pLog->key);
|
// buf = taosDecodeStringTo(buf, pLog->key);
|
||||||
// buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
|
// buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
|
||||||
// return (void *)buf;
|
// return (void *)buf;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
|
// int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
|
||||||
// int32_t tlen = 0;
|
// int32_t tlen = 0;
|
||||||
// tlen += taosEncodeString(buf, pOffset->key);
|
// tlen += taosEncodeString(buf, pOffset->key);
|
||||||
// tlen += taosEncodeFixedI64(buf, pOffset->offset);
|
// tlen += taosEncodeFixedI64(buf, pOffset->offset);
|
||||||
// return tlen;
|
// return tlen;
|
||||||
//}
|
// }
|
||||||
//
|
//
|
||||||
//void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
|
// void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
|
||||||
// buf = taosDecodeStringTo(buf, pOffset->key);
|
// buf = taosDecodeStringTo(buf, pOffset->key);
|
||||||
// buf = taosDecodeFixedI64(buf, &pOffset->offset);
|
// buf = taosDecodeFixedI64(buf, &pOffset->offset);
|
||||||
// return buf;
|
// return buf;
|
||||||
//}
|
// }
|
||||||
|
|
|
@ -41,7 +41,7 @@ static const char *offlineReason[] = {
|
||||||
"timezone not match",
|
"timezone not match",
|
||||||
"locale not match",
|
"locale not match",
|
||||||
"charset not match",
|
"charset not match",
|
||||||
"ttl change on write not match"
|
"ttlChangeOnWrite not match",
|
||||||
"unknown",
|
"unknown",
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -515,7 +515,6 @@ int32_t mndRetrieveTagIdx(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, i
|
||||||
if (pDb == NULL) return 0;
|
if (pDb == NULL) return 0;
|
||||||
}
|
}
|
||||||
SSmaAndTagIter *pIter = pShow->pIter;
|
SSmaAndTagIter *pIter = pShow->pIter;
|
||||||
int invalid = -1;
|
|
||||||
while (numOfRows < rows) {
|
while (numOfRows < rows) {
|
||||||
pIter->pIdxIter = sdbFetch(pSdb, SDB_IDX, pIter->pIdxIter, (void **)&pIdx);
|
pIter->pIdxIter = sdbFetch(pSdb, SDB_IDX, pIter->pIdxIter, (void **)&pIdx);
|
||||||
if (pIter->pIdxIter == NULL) break;
|
if (pIter->pIdxIter == NULL) break;
|
||||||
|
@ -552,7 +551,7 @@ int32_t mndRetrieveTagIdx(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, i
|
||||||
|
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
|
|
||||||
colDataSetVal(pColInfo, numOfRows, (const char *)&invalid, false);
|
colDataSetVal(pColInfo, numOfRows, NULL, true);
|
||||||
|
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
colDataSetVal(pColInfo, numOfRows, (const char *)&pIdx->createdTime, false);
|
colDataSetVal(pColInfo, numOfRows, (const char *)&pIdx->createdTime, false);
|
||||||
|
|
|
@ -232,7 +232,8 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SArray* pTaskList, SStrea
|
||||||
|
|
||||||
int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup,
|
int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup,
|
||||||
int32_t fillHistory) {
|
int32_t fillHistory) {
|
||||||
SStreamTask* pTask = tNewStreamTask(pStream->uid, TASK_LEVEL__SINK, fillHistory, 0, pTaskList);
|
int64_t uid = (fillHistory == 0)? pStream->uid:pStream->hTaskUid;
|
||||||
|
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SINK, fillHistory, 0, pTaskList);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -335,8 +336,8 @@ static void setHTasksId(SArray* pTaskList, const SArray* pHTaskList) {
|
||||||
(*pHTask)->streamTaskId.taskId = (*pStreamTask)->id.taskId;
|
(*pHTask)->streamTaskId.taskId = (*pStreamTask)->id.taskId;
|
||||||
(*pHTask)->streamTaskId.streamId = (*pStreamTask)->id.streamId;
|
(*pHTask)->streamTaskId.streamId = (*pStreamTask)->id.streamId;
|
||||||
|
|
||||||
mDebug("s-task:0x%x related history task:0x%x, level:%d", (*pStreamTask)->id.taskId, (*pHTask)->id.taskId,
|
mDebug("s-task:0x%" PRIx64 "-0x%x related history task:0x%" PRIx64 "-0x%x, level:%d", (*pStreamTask)->id.streamId,
|
||||||
(*pHTask)->info.taskLevel);
|
(*pStreamTask)->id.taskId, (*pHTask)->id.streamId, (*pHTask)->id.taskId, (*pHTask)->info.taskLevel);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -28,7 +28,7 @@
|
||||||
#include "parser.h"
|
#include "parser.h"
|
||||||
#include "tname.h"
|
#include "tname.h"
|
||||||
|
|
||||||
#define MND_STREAM_VER_NUMBER 2
|
#define MND_STREAM_VER_NUMBER 3
|
||||||
#define MND_STREAM_RESERVE_SIZE 64
|
#define MND_STREAM_RESERVE_SIZE 64
|
||||||
|
|
||||||
#define MND_STREAM_MAX_NUM 60
|
#define MND_STREAM_MAX_NUM 60
|
||||||
|
@ -140,10 +140,12 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) {
|
||||||
void *buf = NULL;
|
void *buf = NULL;
|
||||||
|
|
||||||
int8_t sver = 0;
|
int8_t sver = 0;
|
||||||
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto STREAM_DECODE_OVER;
|
if (sdbGetRawSoftVer(pRaw, &sver) != 0) {
|
||||||
|
goto STREAM_DECODE_OVER;
|
||||||
|
}
|
||||||
|
|
||||||
if (sver != 1 && sver != 2) {
|
if (sver != MND_STREAM_VER_NUMBER) {
|
||||||
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
|
terrno = 0;
|
||||||
goto STREAM_DECODE_OVER;
|
goto STREAM_DECODE_OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -198,12 +200,13 @@ static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream) {
|
||||||
|
|
||||||
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStreamObj *pNewStream) {
|
static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStreamObj *pNewStream) {
|
||||||
mTrace("stream:%s, perform update action", pOldStream->name);
|
mTrace("stream:%s, perform update action", pOldStream->name);
|
||||||
atomic_exchange_64(&pOldStream->updateTime, pNewStream->updateTime);
|
|
||||||
atomic_exchange_32(&pOldStream->version, pNewStream->version);
|
atomic_exchange_32(&pOldStream->version, pNewStream->version);
|
||||||
|
|
||||||
taosWLockLatch(&pOldStream->lock);
|
taosWLockLatch(&pOldStream->lock);
|
||||||
|
|
||||||
pOldStream->status = pNewStream->status;
|
pOldStream->status = pNewStream->status;
|
||||||
|
pOldStream->updateTime = pNewStream->updateTime;
|
||||||
|
|
||||||
taosWUnLockLatch(&pOldStream->lock);
|
taosWUnLockLatch(&pOldStream->lock);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -429,9 +432,11 @@ FAIL:
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
|
int32_t mndPersistTaskDeployReq(STrans *pTrans, SStreamTask *pTask) {
|
||||||
SEncoder encoder;
|
SEncoder encoder;
|
||||||
tEncoderInit(&encoder, NULL, 0);
|
tEncoderInit(&encoder, NULL, 0);
|
||||||
|
|
||||||
|
pTask->ver = SSTREAM_TASK_VER;
|
||||||
tEncodeStreamTask(&encoder, pTask);
|
tEncodeStreamTask(&encoder, pTask);
|
||||||
|
|
||||||
int32_t size = encoder.pos;
|
int32_t size = encoder.pos;
|
||||||
|
@ -520,7 +525,6 @@ int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStr
|
||||||
SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream);
|
SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream);
|
||||||
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
||||||
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
||||||
mndTransDrop(pTrans);
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -537,7 +541,6 @@ static int32_t mndSetStreamRecover(SMnode *pMnode, STrans *pTrans, const SStream
|
||||||
if (pCommitRaw == NULL) return -1;
|
if (pCommitRaw == NULL) return -1;
|
||||||
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
||||||
mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
||||||
mndTransDrop(pTrans);
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
||||||
|
@ -646,6 +649,8 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
|
||||||
|
|
||||||
pReq->head.vgId = htonl(pTask->info.nodeId);
|
pReq->head.vgId = htonl(pTask->info.nodeId);
|
||||||
pReq->taskId = pTask->id.taskId;
|
pReq->taskId = pTask->id.taskId;
|
||||||
|
pReq->streamId = pTask->id.streamId;
|
||||||
|
|
||||||
STransAction action = {0};
|
STransAction action = {0};
|
||||||
memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet));
|
memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet));
|
||||||
action.pCont = pReq;
|
action.pCont = pReq;
|
||||||
|
@ -1264,7 +1269,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
||||||
// task id
|
// task id
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
|
|
||||||
char idstr[128] = {0};
|
char idstr[128] = {0};
|
||||||
int32_t len = tintToHex(pTask->id.taskId, &idstr[4]);
|
int32_t len = tintToHex(pTask->id.taskId, &idstr[4]);
|
||||||
idstr[2] = '0';
|
idstr[2] = '0';
|
||||||
idstr[3] = 'x';
|
idstr[3] = 'x';
|
||||||
|
@ -1304,7 +1309,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
||||||
colDataSetVal(pColInfo, numOfRows, (const char *)&level, false);
|
colDataSetVal(pColInfo, numOfRows, (const char *)&level, false);
|
||||||
|
|
||||||
// status
|
// status
|
||||||
char status[20 + VARSTR_HEADER_SIZE] = {0};
|
char status[20 + VARSTR_HEADER_SIZE] = {0};
|
||||||
int8_t taskStatus = atomic_load_8(&pTask->status.taskStatus);
|
int8_t taskStatus = atomic_load_8(&pTask->status.taskStatus);
|
||||||
if (taskStatus == TASK_STATUS__NORMAL) {
|
if (taskStatus == TASK_STATUS__NORMAL) {
|
||||||
memcpy(varDataVal(status), "normal", 6);
|
memcpy(varDataVal(status), "normal", 6);
|
||||||
|
@ -1358,6 +1363,8 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) {
|
||||||
}
|
}
|
||||||
pReq->head.vgId = htonl(pTask->info.nodeId);
|
pReq->head.vgId = htonl(pTask->info.nodeId);
|
||||||
pReq->taskId = pTask->id.taskId;
|
pReq->taskId = pTask->id.taskId;
|
||||||
|
pReq->streamId = pTask->id.streamId;
|
||||||
|
|
||||||
STransAction action = {0};
|
STransAction action = {0};
|
||||||
memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet));
|
memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet));
|
||||||
action.pCont = pReq;
|
action.pCont = pReq;
|
||||||
|
@ -1370,7 +1377,7 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray* tasks) {
|
int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray *tasks) {
|
||||||
int32_t size = taosArrayGetSize(tasks);
|
int32_t size = taosArrayGetSize(tasks);
|
||||||
for (int32_t i = 0; i < size; i++) {
|
for (int32_t i = 0; i < size; i++) {
|
||||||
SArray *pTasks = taosArrayGetP(tasks, i);
|
SArray *pTasks = taosArrayGetP(tasks, i);
|
||||||
|
@ -1409,7 +1416,6 @@ static int32_t mndPersistStreamLog(STrans *pTrans, const SStreamObj *pStream, in
|
||||||
if (pCommitRaw == NULL) return -1;
|
if (pCommitRaw == NULL) return -1;
|
||||||
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
||||||
mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
||||||
mndTransDrop(pTrans);
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
||||||
|
@ -1431,7 +1437,6 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
||||||
if (pStream == NULL) {
|
if (pStream == NULL) {
|
||||||
if (pauseReq.igNotExists) {
|
if (pauseReq.igNotExists) {
|
||||||
mInfo("stream:%s, not exist, if exist is set", pauseReq.name);
|
mInfo("stream:%s, not exist, if exist is set", pauseReq.name);
|
||||||
sdbRelease(pMnode->pSdb, pStream);
|
|
||||||
return 0;
|
return 0;
|
||||||
} else {
|
} else {
|
||||||
terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
|
terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
|
||||||
|
@ -1440,6 +1445,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pStream->status == STREAM_STATUS__PAUSE) {
|
if (pStream->status == STREAM_STATUS__PAUSE) {
|
||||||
|
sdbRelease(pMnode->pSdb, pStream);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1491,7 +1497,6 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
||||||
return TSDB_CODE_ACTION_IN_PROGRESS;
|
return TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t igUntreated) {
|
static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t igUntreated) {
|
||||||
SVResumeStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResumeStreamTaskReq));
|
SVResumeStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResumeStreamTaskReq));
|
||||||
if (pReq == NULL) {
|
if (pReq == NULL) {
|
||||||
|
@ -1500,7 +1505,9 @@ static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t ig
|
||||||
}
|
}
|
||||||
pReq->head.vgId = htonl(pTask->info.nodeId);
|
pReq->head.vgId = htonl(pTask->info.nodeId);
|
||||||
pReq->taskId = pTask->id.taskId;
|
pReq->taskId = pTask->id.taskId;
|
||||||
|
pReq->streamId = pTask->id.streamId;
|
||||||
pReq->igUntreated = igUntreated;
|
pReq->igUntreated = igUntreated;
|
||||||
|
|
||||||
STransAction action = {0};
|
STransAction action = {0};
|
||||||
memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet));
|
memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet));
|
||||||
action.pCont = pReq;
|
action.pCont = pReq;
|
||||||
|
|
|
@ -799,6 +799,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
|
||||||
mndTransDrop(pTrans);
|
mndTransDrop(pTrans);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
sdbRelease(pSdb, pVgroup);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -863,6 +863,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
|
||||||
mndReleaseDb(pMnode, pDb);
|
mndReleaseDb(pMnode, pDb);
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
mndReleaseDb(pMnode, pDb);
|
||||||
} else {
|
} else {
|
||||||
while (1) {
|
while (1) {
|
||||||
SDbObj *pDb = NULL;
|
SDbObj *pDb = NULL;
|
||||||
|
@ -887,6 +888,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
|
||||||
mndReleaseDb(pMnode, pDb);
|
mndReleaseDb(pMnode, pDb);
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
mndReleaseDb(pMnode, pDb);
|
||||||
} else {
|
} else {
|
||||||
while (1) {
|
while (1) {
|
||||||
SDbObj *pDb = NULL;
|
SDbObj *pDb = NULL;
|
||||||
|
@ -908,6 +910,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
taosHashRemove(newUser.readDbs, alterReq.objname, len);
|
taosHashRemove(newUser.readDbs, alterReq.objname, len);
|
||||||
|
mndReleaseDb(pMnode, pDb);
|
||||||
} else {
|
} else {
|
||||||
taosHashClear(newUser.readDbs);
|
taosHashClear(newUser.readDbs);
|
||||||
}
|
}
|
||||||
|
@ -922,6 +925,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
taosHashRemove(newUser.writeDbs, alterReq.objname, len);
|
taosHashRemove(newUser.writeDbs, alterReq.objname, len);
|
||||||
|
mndReleaseDb(pMnode, pDb);
|
||||||
} else {
|
} else {
|
||||||
taosHashClear(newUser.writeDbs);
|
taosHashClear(newUser.writeDbs);
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,9 +35,7 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
int32_t taskId = req.taskId;
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
|
||||||
|
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
|
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
SRpcMsg rsp = {
|
SRpcMsg rsp = {
|
||||||
.info = pMsg->info,
|
.info = pMsg->info,
|
||||||
|
@ -88,7 +86,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
|
||||||
SReadHandle handle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState, .fillHistory = pTask->info.fillHistory };
|
SReadHandle handle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState, .fillHistory = pTask->info.fillHistory };
|
||||||
initStreamStateAPI(&handle.api);
|
initStreamStateAPI(&handle.api);
|
||||||
|
|
||||||
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, 0);
|
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, 0, pTask->id.taskId);
|
||||||
ASSERT(pTask->exec.pExecutor);
|
ASSERT(pTask->exec.pExecutor);
|
||||||
|
|
||||||
taosThreadMutexInit(&pTask->lock, NULL);
|
taosThreadMutexInit(&pTask->lock, NULL);
|
||||||
|
@ -181,21 +179,21 @@ int32_t sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) {
|
||||||
SVDropStreamTaskReq *pReq = (SVDropStreamTaskReq *)msg;
|
SVDropStreamTaskReq *pReq = (SVDropStreamTaskReq *)msg;
|
||||||
qDebug("snode:%d receive msg to drop stream task:0x%x", pSnode->pMeta->vgId, pReq->taskId);
|
qDebug("snode:%d receive msg to drop stream task:0x%x", pSnode->pMeta->vgId, pReq->taskId);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
qError("vgId:%d failed to acquire s-task:0x%x when dropping it", pSnode->pMeta->vgId, pReq->taskId);
|
qError("vgId:%d failed to acquire s-task:0x%x when dropping it", pSnode->pMeta->vgId, pReq->taskId);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
streamMetaUnregisterTask(pSnode->pMeta, pReq->taskId);
|
streamMetaUnregisterTask(pSnode->pMeta, pReq->streamId, pReq->taskId);
|
||||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t sndProcessTaskRunReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
int32_t sndProcessTaskRunReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
SStreamTaskRunReq *pReq = pMsg->pCont;
|
SStreamTaskRunReq *pReq = pMsg->pCont;
|
||||||
int32_t taskId = pReq->taskId;
|
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId);
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
streamProcessRunReq(pTask);
|
streamProcessRunReq(pTask);
|
||||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||||
|
@ -213,9 +211,8 @@ int32_t sndProcessTaskDispatchReq(SSnode *pSnode, SRpcMsg *pMsg, bool exec) {
|
||||||
SDecoder decoder;
|
SDecoder decoder;
|
||||||
tDecoderInit(&decoder, (uint8_t *)msgBody, msgLen);
|
tDecoderInit(&decoder, (uint8_t *)msgBody, msgLen);
|
||||||
tDecodeStreamDispatchReq(&decoder, &req);
|
tDecodeStreamDispatchReq(&decoder, &req);
|
||||||
int32_t taskId = req.taskId;
|
|
||||||
|
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
|
SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
|
||||||
streamProcessDispatchMsg(pTask, &req, &rsp, exec);
|
streamProcessDispatchMsg(pTask, &req, &rsp, exec);
|
||||||
|
@ -235,8 +232,7 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
tDecoderInit(&decoder, msgBody, msgLen);
|
tDecoderInit(&decoder, msgBody, msgLen);
|
||||||
tDecodeStreamRetrieveReq(&decoder, &req);
|
tDecodeStreamRetrieveReq(&decoder, &req);
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
int32_t taskId = req.dstTaskId;
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.dstTaskId);
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
|
|
||||||
|
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
SRpcMsg rsp = { .info = pMsg->info, .code = 0};
|
SRpcMsg rsp = { .info = pMsg->info, .code = 0};
|
||||||
|
@ -251,8 +247,11 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
|
|
||||||
int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||||
int32_t taskId = ntohl(pRsp->upstreamTaskId);
|
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
|
int32_t taskId = htonl(pRsp->upstreamTaskId);
|
||||||
|
int64_t streamId = htobe64(pRsp->streamId);
|
||||||
|
|
||||||
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, streamId, taskId);
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
|
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
|
||||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||||
|
@ -260,7 +259,6 @@ int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
} else {
|
} else {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t sndProcessTaskRetrieveRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
int32_t sndProcessTaskRetrieveRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
|
@ -297,7 +295,7 @@ int32_t sndProcessStreamTaskScanHistoryFinishReq(SSnode *pSnode, SRpcMsg *pMsg)
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
// find task
|
// find task
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.downstreamTaskId);
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.downstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -340,7 +338,7 @@ int32_t sndProcessStreamTaskCheckReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
.upstreamTaskId = req.upstreamTaskId,
|
.upstreamTaskId = req.upstreamTaskId,
|
||||||
};
|
};
|
||||||
|
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId);
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, taskId);
|
||||||
|
|
||||||
if (pTask != NULL) {
|
if (pTask != NULL) {
|
||||||
rsp.status = streamTaskCheckStatus(pTask);
|
rsp.status = streamTaskCheckStatus(pTask);
|
||||||
|
@ -400,7 +398,7 @@ int32_t sndProcessStreamTaskCheckRsp(SSnode* pSnode, SRpcMsg* pMsg) {
|
||||||
qDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
|
qDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
|
||||||
rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);
|
rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, rsp.upstreamTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, rsp.streamId, rsp.upstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
qError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId,
|
qError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId,
|
||||||
pSnode->pMeta->vgId);
|
pSnode->pMeta->vgId);
|
||||||
|
|
|
@ -267,7 +267,7 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
|
||||||
SReadHandle handle = {.vnode = pVnode, .initTqReader = 1, .pStateBackend = pStreamState};
|
SReadHandle handle = {.vnode = pVnode, .initTqReader = 1, .pStateBackend = pStreamState};
|
||||||
initStorageAPI(&handle.api);
|
initStorageAPI(&handle.api);
|
||||||
|
|
||||||
pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle, TD_VID(pVnode));
|
pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle, TD_VID(pVnode), 0);
|
||||||
if (!pRSmaInfo->taskInfo[idx]) {
|
if (!pRSmaInfo->taskInfo[idx]) {
|
||||||
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
|
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
|
||||||
return TSDB_CODE_FAILED;
|
return TSDB_CODE_FAILED;
|
||||||
|
|
|
@ -183,7 +183,7 @@ static bool hasStreamTaskInTimer(SStreamMeta* pMeta) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamTask* pTask = *(SStreamTask**)pIter;
|
SStreamTask* pTask = *(SStreamTask**)pIter;
|
||||||
if (pTask->status.timerActive == 1) {
|
if (pTask->status.timerActive >= 1) {
|
||||||
inTimer = true;
|
inTimer = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -646,7 +646,8 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
SDecoder decoder;
|
SDecoder decoder;
|
||||||
tDecoderInit(&decoder, (uint8_t*)data, len);
|
tDecoderInit(&decoder, (uint8_t*)data, len);
|
||||||
if (tDecodeMqVgOffset(&decoder, &vgOffset) < 0) {
|
if (tDecodeMqVgOffset(&decoder, &vgOffset) < 0) {
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
return terrno;
|
||||||
}
|
}
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -654,19 +655,22 @@ int32_t tqProcessVgCommittedInfoReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
STqOffset* pOffset = &vgOffset.offset;
|
STqOffset* pOffset = &vgOffset.offset;
|
||||||
STqOffset* pSavedOffset = tqOffsetRead(pTq->pOffsetStore, pOffset->subKey);
|
STqOffset* pSavedOffset = tqOffsetRead(pTq->pOffsetStore, pOffset->subKey);
|
||||||
if (pSavedOffset == NULL) {
|
if (pSavedOffset == NULL) {
|
||||||
return TSDB_CODE_TMQ_NO_COMMITTED;
|
terrno = TSDB_CODE_TMQ_NO_COMMITTED;
|
||||||
|
return terrno;
|
||||||
}
|
}
|
||||||
vgOffset.offset = *pSavedOffset;
|
vgOffset.offset = *pSavedOffset;
|
||||||
|
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
tEncodeSize(tEncodeMqVgOffset, &vgOffset, len, code);
|
tEncodeSize(tEncodeMqVgOffset, &vgOffset, len, code);
|
||||||
if (code < 0) {
|
if (code < 0) {
|
||||||
return TSDB_CODE_INVALID_PARA;
|
terrno = TSDB_CODE_INVALID_PARA;
|
||||||
|
return terrno;
|
||||||
}
|
}
|
||||||
|
|
||||||
void* buf = rpcMallocCont(len);
|
void* buf = rpcMallocCont(len);
|
||||||
if (buf == NULL) {
|
if (buf == NULL) {
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
return terrno;
|
||||||
}
|
}
|
||||||
SEncoder encoder;
|
SEncoder encoder;
|
||||||
tEncoderInit(&encoder, buf, len);
|
tEncoderInit(&encoder, buf, len);
|
||||||
|
@ -956,7 +960,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
.winRange = pTask->dataRange.window};
|
.winRange = pTask->dataRange.window};
|
||||||
initStorageAPI(&handle.api);
|
initStorageAPI(&handle.api);
|
||||||
|
|
||||||
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
|
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId, pTask->id.taskId);
|
||||||
if (pTask->exec.pExecutor == NULL) {
|
if (pTask->exec.pExecutor == NULL) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -983,7 +987,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
|
||||||
.winRange = pTask->dataRange.window};
|
.winRange = pTask->dataRange.window};
|
||||||
initStorageAPI(&handle.api);
|
initStorageAPI(&handle.api);
|
||||||
|
|
||||||
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
|
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId, pTask->id.taskId);
|
||||||
if (pTask->exec.pExecutor == NULL) {
|
if (pTask->exec.pExecutor == NULL) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -1062,7 +1066,7 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
.upstreamTaskId = req.upstreamTaskId,
|
.upstreamTaskId = req.upstreamTaskId,
|
||||||
};
|
};
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId);
|
||||||
if (pTask != NULL) {
|
if (pTask != NULL) {
|
||||||
rsp.status = streamTaskCheckStatus(pTask);
|
rsp.status = streamTaskCheckStatus(pTask);
|
||||||
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
||||||
|
@ -1072,8 +1076,9 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||||
} else {
|
} else {
|
||||||
rsp.status = 0;
|
rsp.status = 0;
|
||||||
tqDebug("tq recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp status %d",
|
tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64
|
||||||
taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
") from task:0x%x (vgId:%d), rsp status %d",
|
||||||
|
req.streamId, taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||||
}
|
}
|
||||||
|
|
||||||
return streamSendCheckRsp(pTq->pStreamMeta, &req, &rsp, &pMsg->info, taskId);
|
return streamSendCheckRsp(pTq->pStreamMeta, &req, &rsp, &pMsg->info, taskId);
|
||||||
|
@ -1099,7 +1104,7 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, SRpcMsg* pMsg) {
|
||||||
tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
|
tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
|
||||||
rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);
|
rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.upstreamTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.streamId, rsp.upstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId,
|
tqError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId,
|
||||||
pTq->pStreamMeta->vgId);
|
pTq->pStreamMeta->vgId);
|
||||||
|
@ -1149,32 +1154,27 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
|
||||||
taosWLockLatch(&pStreamMeta->lock);
|
taosWLockLatch(&pStreamMeta->lock);
|
||||||
code = streamMetaRegisterTask(pStreamMeta, sversion, pTask, &added);
|
code = streamMetaRegisterTask(pStreamMeta, sversion, pTask, &added);
|
||||||
int32_t numOfTasks = streamMetaGetNumOfTasks(pStreamMeta);
|
int32_t numOfTasks = streamMetaGetNumOfTasks(pStreamMeta);
|
||||||
|
taosWUnLockLatch(&pStreamMeta->lock);
|
||||||
|
|
||||||
if (code < 0) {
|
if (code < 0) {
|
||||||
tqError("vgId:%d failed to add s-task:0x%x, total:%d", vgId, pTask->id.taskId, numOfTasks);
|
tqError("vgId:%d failed to add s-task:0x%x, total:%d", vgId, pTask->id.taskId, numOfTasks);
|
||||||
tFreeStreamTask(pTask);
|
tFreeStreamTask(pTask);
|
||||||
taosWUnLockLatch(&pStreamMeta->lock);
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// not added into meta store
|
// not added into meta store
|
||||||
if (!added) {
|
if (added) {
|
||||||
|
tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks);
|
||||||
|
SStreamTask* p = streamMetaAcquireTask(pStreamMeta, pTask->id.streamId, taskId);
|
||||||
|
if (p != NULL) { // reset the downstreamReady flag.
|
||||||
|
streamTaskCheckDownstreamTasks(p);
|
||||||
|
}
|
||||||
|
streamMetaReleaseTask(pStreamMeta, p);
|
||||||
|
} else {
|
||||||
tqWarn("vgId:%d failed to add s-task:0x%x, already exists in meta store", vgId, taskId);
|
tqWarn("vgId:%d failed to add s-task:0x%x, already exists in meta store", vgId, taskId);
|
||||||
tFreeStreamTask(pTask);
|
tFreeStreamTask(pTask);
|
||||||
pTask = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
taosWUnLockLatch(&pStreamMeta->lock);
|
|
||||||
|
|
||||||
tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks);
|
|
||||||
|
|
||||||
// 3. It's an fill history task, do nothing. wait for the main task to start it
|
|
||||||
SStreamTask* p = streamMetaAcquireTask(pStreamMeta, taskId);
|
|
||||||
if (p != NULL) { // reset the downstreamReady flag.
|
|
||||||
streamTaskCheckDownstreamTasks(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
streamMetaReleaseTask(pStreamMeta, p);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1183,7 +1183,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||||
|
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d failed to acquire stream task:0x%x during stream recover, task may have been destroyed",
|
tqError("vgId:%d failed to acquire stream task:0x%x during stream recover, task may have been destroyed",
|
||||||
pMeta->vgId, pReq->taskId);
|
pMeta->vgId, pReq->taskId);
|
||||||
|
@ -1239,7 +1239,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
bool done = false;
|
bool done = false;
|
||||||
|
|
||||||
// 1. get the related stream task
|
// 1. get the related stream task
|
||||||
pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId);
|
pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
|
||||||
if (pStreamTask == NULL) {
|
if (pStreamTask == NULL) {
|
||||||
// todo delete this task, if the related stream task is dropped
|
// todo delete this task, if the related stream task is dropped
|
||||||
qError("failed to find s-task:0x%x, it may have been destroyed, drop fill-history task:%s",
|
qError("failed to find s-task:0x%x, it may have been destroyed, drop fill-history task:%s",
|
||||||
|
@ -1247,7 +1247,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
|
|
||||||
tqDebug("s-task:%s fill-history task set status to be dropping", id);
|
tqDebug("s-task:%s fill-history task set status to be dropping", id);
|
||||||
|
|
||||||
streamMetaUnregisterTask(pMeta, pTask->id.taskId);
|
streamMetaUnregisterTask(pMeta, pTask->id.streamId, pTask->id.taskId);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -1277,7 +1277,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
if (done) {
|
if (done) {
|
||||||
pTask->tsInfo.step2Start = taosGetTimestampMs();
|
pTask->tsInfo.step2Start = taosGetTimestampMs();
|
||||||
streamTaskEndScanWAL(pTask);
|
streamTaskEndScanWAL(pTask);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
|
||||||
} else {
|
} else {
|
||||||
STimeWindow* pWindow = &pTask->dataRange.window;
|
STimeWindow* pWindow = &pTask->dataRange.window;
|
||||||
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64
|
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64
|
||||||
|
@ -1303,13 +1302,11 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
streamSetStatusNormal(pTask);
|
streamSetStatusNormal(pTask);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 4. 1) transfer the ownership of executor state, 2) update the scan data range for source task.
|
|
||||||
// 5. resume the related stream task.
|
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
|
||||||
streamMetaReleaseTask(pMeta, pStreamTask);
|
|
||||||
|
|
||||||
tqStartStreamTasks(pTq);
|
tqStartStreamTasks(pTq);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
|
streamMetaReleaseTask(pMeta, pStreamTask);
|
||||||
} else {
|
} else {
|
||||||
// todo update the chkInfo version for current task.
|
// todo update the chkInfo version for current task.
|
||||||
// this task has an associated history stream task, so we need to scan wal from the end version of
|
// this task has an associated history stream task, so we need to scan wal from the end version of
|
||||||
|
@ -1358,7 +1355,7 @@ int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
|
|
||||||
tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId);
|
tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.downstreamTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed", req.downstreamTaskId);
|
tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed", req.downstreamTaskId);
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -1394,7 +1391,7 @@ int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
tDecodeStreamScanHistoryFinishReq(&decoder, &req);
|
tDecodeStreamScanHistoryFinishReq(&decoder, &req);
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.downstreamTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d process scan history finish msg, failed to find task:0x%x, it may be destroyed",
|
tqError("vgId:%d process scan history finish msg, failed to find task:0x%x, it may be destroyed",
|
||||||
pTq->pStreamMeta->vgId, req.downstreamTaskId);
|
pTq->pStreamMeta->vgId, req.downstreamTaskId);
|
||||||
|
@@ -1420,7 +1417,7 @@ int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg) {
   tDecodeCompleteHistoryDataMsg(&decoder, &req);
   tDecoderClear(&decoder);
 
-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.upstreamTaskId);
+  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.upstreamTaskId);
   if (pTask == NULL) {
     tqError("vgId:%d process scan history finish rsp, failed to find task:0x%x, it may be destroyed",
             pTq->pStreamMeta->vgId, req.upstreamTaskId);
@@ -1511,11 +1508,11 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
     return 0;
   }
 
-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
+  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, taskId);
   if (pTask != NULL) {
     // even in halt status, the data in inputQ must be processed
     int8_t st = pTask->status.taskStatus;
-    if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY/* || st == TASK_STATUS__SCAN_HISTORY_WAL*/) {
+    if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY) {
       tqDebug("vgId:%d s-task:%s start to process block from inputQ, last chk point:%" PRId64, vgId, pTask->id.idStr,
               pTask->chkInfo.version);
       streamProcessRunReq(pTask);
@@ -1528,8 +1525,9 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     tqStartStreamTasks(pTq);
     return 0;
-  } else {
-    tqError("vgId:%d failed to found s-task, taskId:%d", vgId, taskId);
+  } else { // NOTE: pTask->status.schedStatus is not updated since it is not be handled by the run exec.
+    // todo add one function to handle this
+    tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, taskId);
     return -1;
   }
 }
@@ -1545,7 +1543,7 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
   tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
   tDecodeStreamDispatchReq(&decoder, &req);
 
-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId);
+  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId);
   if (pTask) {
     SRpcMsg rsp = {.info = pMsg->info, .code = 0};
     streamProcessDispatchMsg(pTask, &req, &rsp, exec);
@@ -1559,10 +1557,12 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
 
 int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
   SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
-  int32_t taskId = ntohl(pRsp->upstreamTaskId);
-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
 
   int32_t vgId = pTq->pStreamMeta->vgId;
+  int32_t taskId = htonl(pRsp->upstreamTaskId);
+  int64_t streamId = htobe64(pRsp->streamId);
+  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, streamId, taskId);
 
   if (pTask) {
     streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
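The dispatch-response hunk decodes both identifiers from the wire before the lookup; the patch uses htonl()/htobe64(), whose byte-swapping is symmetric with the decode direction. Below is a self-contained sketch of the same decode step written with the conventional receive-side functions; the struct layout is an assumption for illustration, and be64toh() is the glibc/Linux spelling.

```c
#include <arpa/inet.h>  /* ntohl */
#include <endian.h>     /* be64toh (glibc/Linux) */
#include <stdint.h>

/* Illustrative wire header: the sender writes these fields in network byte
 * order, so the receiver must convert before using them as lookup keys. */
typedef struct {
  int64_t streamId;        /* big-endian on the wire */
  int32_t upstreamTaskId;  /* big-endian on the wire */
} SDispatchRspWire;

static void decodeDispatchRsp(const SDispatchRspWire *wire, int64_t *streamId, int32_t *taskId) {
  /* byte-swap on little-endian hosts, no-op on big-endian ones */
  *streamId = (int64_t)be64toh((uint64_t)wire->streamId);
  *taskId   = (int32_t)ntohl((uint32_t)wire->upstreamTaskId);
}
```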
@ -1576,13 +1576,13 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
|
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
|
||||||
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
|
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
|
||||||
tqDebug("vgId:%d receive msg to drop stream task:0x%x", TD_VID(pTq->pVnode), pReq->taskId);
|
tqDebug("vgId:%d receive msg to drop stream task:0x%x", TD_VID(pTq->pVnode), pReq->taskId);
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d failed to acquire s-task:0x%x when dropping it", pTq->pStreamMeta->vgId, pReq->taskId);
|
tqError("vgId:%d failed to acquire s-task:0x%x when dropping it", pTq->pStreamMeta->vgId, pReq->taskId);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
streamMetaUnregisterTask(pTq->pStreamMeta, pReq->taskId);
|
streamMetaUnregisterTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId);
|
||||||
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1591,7 +1591,7 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
|
||||||
SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)msg;
|
SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)msg;
|
||||||
|
|
||||||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
|
tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
|
||||||
pReq->taskId);
|
pReq->taskId);
|
||||||
|
@ -1604,7 +1604,7 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
|
||||||
|
|
||||||
SStreamTask* pHistoryTask = NULL;
|
SStreamTask* pHistoryTask = NULL;
|
||||||
if (pTask->historyTaskId.taskId != 0) {
|
if (pTask->historyTaskId.taskId != 0) {
|
||||||
pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.taskId);
|
pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
|
||||||
if (pHistoryTask == NULL) {
|
if (pHistoryTask == NULL) {
|
||||||
tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%x, it may have been dropped already",
|
tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%x, it may have been dropped already",
|
||||||
pMeta->vgId, pTask->historyTaskId.taskId);
|
pMeta->vgId, pTask->historyTaskId.taskId);
|
||||||
|
@ -1663,13 +1663,13 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
|
||||||
|
|
||||||
int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
|
int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
|
||||||
SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;
|
SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId);
|
||||||
int32_t code = tqProcessTaskResumeImpl(pTq, pTask, sversion, pReq->igUntreated);
|
int32_t code = tqProcessTaskResumeImpl(pTq, pTask, sversion, pReq->igUntreated);
|
||||||
if (code != 0) {
|
if (code != 0) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.taskId);
|
SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
|
||||||
if (pHistoryTask) {
|
if (pHistoryTask) {
|
||||||
code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);
|
code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);
|
||||||
}
|
}
|
||||||
|
@ -1688,8 +1688,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||||
tDecodeStreamRetrieveReq(&decoder, &req);
|
tDecodeStreamRetrieveReq(&decoder, &req);
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
int32_t taskId = req.dstTaskId;
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.dstTaskId);
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
|
|
||||||
|
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
||||||
|
@ -1727,7 +1726,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
|
||||||
int32_t taskId = req.taskId;
|
int32_t taskId = req.taskId;
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId);
|
||||||
if (pTask != NULL) {
|
if (pTask != NULL) {
|
||||||
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
||||||
streamProcessDispatchMsg(pTask, &req, &rsp, false);
|
streamProcessDispatchMsg(pTask, &req, &rsp, false);
|
||||||
|
|
|
@ -72,8 +72,8 @@ int32_t tqStreamTasksStatusCheck(STQ* pTq) {
|
||||||
taosWUnLockLatch(&pMeta->lock);
|
taosWUnLockLatch(&pMeta->lock);
|
||||||
|
|
||||||
for (int32_t i = 0; i < numOfTasks; ++i) {
|
for (int32_t i = 0; i < numOfTasks; ++i) {
|
||||||
int32_t* pTaskId = taosArrayGet(pTaskList, i);
|
SStreamId* pTaskId = taosArrayGet(pTaskList, i);
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, *pTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -242,8 +242,8 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
|
||||||
numOfTasks = taosArrayGetSize(pTaskList);
|
numOfTasks = taosArrayGetSize(pTaskList);
|
||||||
|
|
||||||
for (int32_t i = 0; i < numOfTasks; ++i) {
|
for (int32_t i = 0; i < numOfTasks; ++i) {
|
||||||
int32_t* pTaskId = taosArrayGet(pTaskList, i);
|
SStreamId* pTaskId = taosArrayGet(pTaskList, i);
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pStreamMeta, *pTaskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pStreamMeta, pTaskId->streamId, pTaskId->taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
|
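The two hunks above switch the per-vnode task list element from a bare int32_t task id to an SStreamId entry carrying both identifiers, and the loop re-acquires each task from that snapshot. A rough sketch of that snapshot-then-visit pattern is shown below; the element struct and the acquire callback are stand-ins, not the real stream meta API.

```c
#include <stdint.h>

/* Hypothetical element type mirroring SStreamId: the task list now stores
 * both identifiers so each entry can be re-acquired unambiguously. */
typedef struct {
  int64_t streamId;
  int32_t taskId;
} STaskId;

/* The lookup callback stands in for streamMetaAcquireTask(meta, streamId, taskId). */
typedef void *(*acquire_fn)(void *meta, int64_t streamId, int32_t taskId);

/* Walk a snapshot of the task list taken while the meta lock was held. */
static void visitTasks(void *meta, const STaskId *list, int32_t n, acquire_fn acquire) {
  for (int32_t i = 0; i < n; ++i) {
    void *task = acquire(meta, list[i].streamId, list[i].taskId);
    if (task == NULL) {
      continue;  /* the task may have been dropped between snapshot and visit */
    }
    /* ... process and release the task here ... */
  }
}
```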
@ -49,7 +49,7 @@ typedef struct {
|
||||||
} ctx[1];
|
} ctx[1];
|
||||||
|
|
||||||
// reader
|
// reader
|
||||||
SSttFileReader *sttReader;
|
TSttFileReaderArray sttReaderArray[1];
|
||||||
|
|
||||||
// iter
|
// iter
|
||||||
TTsdbIterArray dataIterArray[1];
|
TTsdbIterArray dataIterArray[1];
|
||||||
|
@ -226,7 +226,7 @@ static int32_t tsdbCommitOpenReader(SCommitter2 *committer) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
int32_t lino = 0;
|
int32_t lino = 0;
|
||||||
|
|
||||||
ASSERT(committer->sttReader == NULL);
|
ASSERT(TARRAY2_SIZE(committer->sttReaderArray) == 0);
|
||||||
|
|
||||||
if (committer->ctx->fset == NULL //
|
if (committer->ctx->fset == NULL //
|
||||||
|| committer->sttTrigger > 1 //
|
|| committer->sttTrigger > 1 //
|
||||||
|
@ -241,31 +241,32 @@ static int32_t tsdbCommitOpenReader(SCommitter2 *committer) {
|
||||||
|
|
||||||
ASSERT(lvl->level == 0);
|
ASSERT(lvl->level == 0);
|
||||||
|
|
||||||
if (TARRAY2_SIZE(lvl->fobjArr) == 0) {
|
STFileObj *fobj = NULL;
|
||||||
return 0;
|
TARRAY2_FOREACH(lvl->fobjArr, fobj) {
|
||||||
|
SSttFileReader *sttReader;
|
||||||
|
|
||||||
|
SSttFileReaderConfig config = {
|
||||||
|
.tsdb = committer->tsdb,
|
||||||
|
.szPage = committer->szPage,
|
||||||
|
.file = fobj->f[0],
|
||||||
|
};
|
||||||
|
|
||||||
|
code = tsdbSttFileReaderOpen(fobj->fname, &config, &sttReader);
|
||||||
|
TSDB_CHECK_CODE(code, lino, _exit);
|
||||||
|
|
||||||
|
code = TARRAY2_APPEND(committer->sttReaderArray, sttReader);
|
||||||
|
TSDB_CHECK_CODE(code, lino, _exit);
|
||||||
|
|
||||||
|
STFileOp op = {
|
||||||
|
.optype = TSDB_FOP_REMOVE,
|
||||||
|
.fid = fobj->f->fid,
|
||||||
|
.of = fobj->f[0],
|
||||||
|
};
|
||||||
|
|
||||||
|
code = TARRAY2_APPEND(committer->fopArray, op);
|
||||||
|
TSDB_CHECK_CODE(code, lino, _exit);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(TARRAY2_SIZE(lvl->fobjArr) == 1);
|
|
||||||
|
|
||||||
STFileObj *fobj = TARRAY2_FIRST(lvl->fobjArr);
|
|
||||||
|
|
||||||
SSttFileReaderConfig config = {
|
|
||||||
.tsdb = committer->tsdb,
|
|
||||||
.szPage = committer->szPage,
|
|
||||||
.file = fobj->f[0],
|
|
||||||
};
|
|
||||||
code = tsdbSttFileReaderOpen(fobj->fname, &config, &committer->sttReader);
|
|
||||||
TSDB_CHECK_CODE(code, lino, _exit);
|
|
||||||
|
|
||||||
STFileOp op = {
|
|
||||||
.optype = TSDB_FOP_REMOVE,
|
|
||||||
.fid = fobj->f->fid,
|
|
||||||
.of = fobj->f[0],
|
|
||||||
};
|
|
||||||
|
|
||||||
code = TARRAY2_APPEND(committer->fopArray, op);
|
|
||||||
TSDB_CHECK_CODE(code, lino, _exit);
|
|
||||||
|
|
||||||
_exit:
|
_exit:
|
||||||
if (code) {
|
if (code) {
|
||||||
TSDB_ERROR_LOG(TD_VID(committer->tsdb->pVnode), lino, code);
|
TSDB_ERROR_LOG(TD_VID(committer->tsdb->pVnode), lino, code);
|
||||||
|
@ -273,7 +274,10 @@ _exit:
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t tsdbCommitCloseReader(SCommitter2 *committer) { return tsdbSttFileReaderClose(&committer->sttReader); }
|
static int32_t tsdbCommitCloseReader(SCommitter2 *committer) {
|
||||||
|
TARRAY2_CLEAR(committer->sttReaderArray, tsdbSttFileReaderClose);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int32_t tsdbCommitOpenIter(SCommitter2 *committer) {
|
static int32_t tsdbCommitOpenIter(SCommitter2 *committer) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
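tsdbCommitCloseReader() now drains the whole reader array rather than closing a single reader. The fragment below is a generic sketch of the clear-with-destructor pattern that the TARRAY2_CLEAR(committer->sttReaderArray, tsdbSttFileReaderClose) call expresses; the array type here is a plain-C stand-in, not the real TARRAY2 macro family.

```c
#include <stdint.h>

/* Minimal dynamic-array stand-in for the stt reader array (hypothetical). */
typedef struct {
  void  **items;
  int32_t size;
} PtrArray;

typedef int32_t (*close_fn)(void **item);

/* Close every element, then reset the size so the array can be reused for the
 * next file set. */
static void arrayClear(PtrArray *arr, close_fn closeFn) {
  for (int32_t i = 0; i < arr->size; ++i) {
    closeFn(&arr->items[i]);
  }
  arr->size = 0;
}
```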
@ -310,10 +314,11 @@ static int32_t tsdbCommitOpenIter(SCommitter2 *committer) {
|
||||||
TSDB_CHECK_CODE(code, lino, _exit);
|
TSDB_CHECK_CODE(code, lino, _exit);
|
||||||
|
|
||||||
// STT
|
// STT
|
||||||
if (committer->sttReader) {
|
SSttFileReader *sttReader;
|
||||||
|
TARRAY2_FOREACH(committer->sttReaderArray, sttReader) {
|
||||||
// data iter
|
// data iter
|
||||||
config.type = TSDB_ITER_TYPE_STT;
|
config.type = TSDB_ITER_TYPE_STT;
|
||||||
config.sttReader = committer->sttReader;
|
config.sttReader = sttReader;
|
||||||
|
|
||||||
code = tsdbIterOpen(&config, &iter);
|
code = tsdbIterOpen(&config, &iter);
|
||||||
TSDB_CHECK_CODE(code, lino, _exit);
|
TSDB_CHECK_CODE(code, lino, _exit);
|
||||||
|
@ -323,7 +328,7 @@ static int32_t tsdbCommitOpenIter(SCommitter2 *committer) {
|
||||||
|
|
||||||
// tomb iter
|
// tomb iter
|
||||||
config.type = TSDB_ITER_TYPE_STT_TOMB;
|
config.type = TSDB_ITER_TYPE_STT_TOMB;
|
||||||
config.sttReader = committer->sttReader;
|
config.sttReader = sttReader;
|
||||||
|
|
||||||
code = tsdbIterOpen(&config, &iter);
|
code = tsdbIterOpen(&config, &iter);
|
||||||
TSDB_CHECK_CODE(code, lino, _exit);
|
TSDB_CHECK_CODE(code, lino, _exit);
|
||||||
|
|
|
@ -780,19 +780,20 @@ static int32_t tsdbFSRunBgTask(void *arg) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void (*free)(void *),
|
static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *),
|
||||||
void *arg, int64_t *taskid) {
|
void (*destroy)(void *), void *arg, int64_t *taskid) {
|
||||||
if (fs->stop) {
|
if (fs->stop) {
|
||||||
|
if (destroy) {
|
||||||
|
destroy(arg);
|
||||||
|
}
|
||||||
return 0; // TODO: use a better error code
|
return 0; // TODO: use a better error code
|
||||||
}
|
}
|
||||||
|
|
||||||
// check if same task is on
|
|
||||||
// if (fs->bgTaskRunning && fs->bgTaskRunning->type == type) {
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
|
|
||||||
for (STFSBgTask *task = fs->bgTaskQueue->next; task != fs->bgTaskQueue; task = task->next) {
|
for (STFSBgTask *task = fs->bgTaskQueue->next; task != fs->bgTaskQueue; task = task->next) {
|
||||||
if (task->type == type) {
|
if (task->type == type) {
|
||||||
|
if (destroy) {
|
||||||
|
destroy(arg);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -804,7 +805,7 @@ static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32
|
||||||
|
|
||||||
task->type = type;
|
task->type = type;
|
||||||
task->run = run;
|
task->run = run;
|
||||||
task->free = free;
|
task->free = destroy;
|
||||||
task->arg = arg;
|
task->arg = arg;
|
||||||
task->scheduleTime = taosGetTimestampMs();
|
task->scheduleTime = taosGetTimestampMs();
|
||||||
task->taskid = ++fs->taskid;
|
task->taskid = ++fs->taskid;
|
||||||
|
|
|
@ -15,6 +15,7 @@
|
||||||
|
|
||||||
#include "tsdb.h"
|
#include "tsdb.h"
|
||||||
#include "tsdbFSet2.h"
|
#include "tsdbFSet2.h"
|
||||||
|
#include "tsdbMerge.h"
|
||||||
#include "tsdbReadUtil.h"
|
#include "tsdbReadUtil.h"
|
||||||
#include "tsdbSttFileRW.h"
|
#include "tsdbSttFileRW.h"
|
||||||
|
|
||||||
|
@@ -352,10 +353,14 @@ static int32_t extractSttBlockInfo(SLDataIter *pIter, const TSttBlkArray *pArray
   return TSDB_CODE_SUCCESS;
 }
 
-static int32_t uidComparFn(const void *p1, const void *p2) {
-  const uint64_t *uid1 = p1;
+static int32_t suidComparFn(const void *target, const void *p2) {
+  const uint64_t *targetUid = target;
   const uint64_t *uid2 = p2;
-  return (*uid1) - (*uid2);
+  if (*uid2 == (*targetUid)) {
+    return 0;
+  } else {
+    return (*targetUid) < (*uid2) ? -1 : 1;
+  }
 }
 
 static bool existsFromSttBlkStatis(const TStatisBlkArray *pStatisBlkArray, uint64_t suid, uint64_t uid,
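The rewritten comparator treats its first argument as the search target and returns a strict -1/0/1 ordering, which is what a binary search over uint64_t keys needs; the old subtraction could overflow or change sign once truncated to int32_t. A standalone example of the same comparator shape with the standard bsearch() follows.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Three-way comparison of the search target against one array element.
 * Explicit -1/0/1 avoids the truncation problem of returning a raw
 * uint64_t difference as an int. */
static int suidCompar(const void *target, const void *elem) {
  const uint64_t *t = target, *e = elem;
  if (*t == *e) return 0;
  return (*t < *e) ? -1 : 1;
}

int main(void) {
  uint64_t suids[] = {3, 7, 7, 12, 20};  /* sorted, duplicates allowed */
  uint64_t key = 12;
  uint64_t *hit = bsearch(&key, suids, sizeof(suids) / sizeof(suids[0]),
                          sizeof(suids[0]), suidCompar);
  printf("found: %s\n", hit ? "yes" : "no");
  return 0;
}
```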
@ -372,29 +377,55 @@ static bool existsFromSttBlkStatis(const TStatisBlkArray *pStatisBlkArray, uint6
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// for (; i < TARRAY2_SIZE(pStatisBlkArray); ++i) {
|
|
||||||
// SStatisBlk *p = &pStatisBlkArray->data[i];
|
|
||||||
// if (p->minTbid.uid <= uid && p->maxTbid.uid >= uid) {
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (p->maxTbid.uid < uid) {
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
if (i >= TARRAY2_SIZE(pStatisBlkArray)) {
|
if (i >= TARRAY2_SIZE(pStatisBlkArray)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
SStatisBlk *p = &pStatisBlkArray->data[i];
|
while (i < TARRAY2_SIZE(pStatisBlkArray)) {
|
||||||
STbStatisBlock block = {0};
|
SStatisBlk *p = &pStatisBlkArray->data[i];
|
||||||
tsdbSttFileReadStatisBlock(pReader, p, &block);
|
if (p->minTbid.suid > suid) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t index = tarray2SearchIdx(block.uid, &uid, sizeof(int64_t), uidComparFn, TD_EQ);
|
STbStatisBlock block = {0};
|
||||||
tStatisBlockDestroy(&block);
|
tsdbSttFileReadStatisBlock(pReader, p, &block);
|
||||||
|
|
||||||
return (index != -1);
|
int32_t index = tarray2SearchIdx(block.suid, &suid, sizeof(int64_t), suidComparFn, TD_EQ);
|
||||||
|
if (index == -1) {
|
||||||
|
tStatisBlockDestroy(&block);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t j = index;
|
||||||
|
if (block.uid->data[j] == uid) {
|
||||||
|
tStatisBlockDestroy(&block);
|
||||||
|
return true;
|
||||||
|
} else if (block.uid->data[j] > uid) {
|
||||||
|
while (j >= 0 && block.suid->data[j] == suid) {
|
||||||
|
if (block.uid->data[j] == uid) {
|
||||||
|
tStatisBlockDestroy(&block);
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
j -= 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
j = index + 1;
|
||||||
|
while (j < block.suid->size && block.suid->data[j] == suid) {
|
||||||
|
if (block.uid->data[j] == uid) {
|
||||||
|
tStatisBlockDestroy(&block);
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
j += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tStatisBlockDestroy(&block);
|
||||||
|
i += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t tLDataIterOpen2(struct SLDataIter *pIter, SSttFileReader *pSttFileReader, int32_t iStt, int8_t backward,
|
int32_t tLDataIterOpen2(struct SLDataIter *pIter, SSttFileReader *pSttFileReader, int32_t iStt, int8_t backward,
|
||||||
|
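The reworked existsFromSttBlkStatis() above first locates any row whose suid matches, then walks left and right inside that equal-suid run looking for the uid. A simplified, self-contained sketch of the same pattern over two parallel sorted arrays is given below (the parallel-array layout is assumed for illustration).

```c
#include <stdbool.h>
#include <stdint.h>

/* Given parallel arrays sorted by (suid, uid) and an index `hit` pointing at
 * any element whose suid matches, scan the equal-suid run in both directions
 * for the requested uid. */
static bool uidExistsInRun(const uint64_t *suid, const uint64_t *uid, int32_t n,
                           int32_t hit, uint64_t targetSuid, uint64_t targetUid) {
  for (int32_t j = hit; j >= 0 && suid[j] == targetSuid; --j) {
    if (uid[j] == targetUid) return true;
  }
  for (int32_t j = hit + 1; j < n && suid[j] == targetSuid; ++j) {
    if (uid[j] == targetUid) return true;
  }
  return false;
}
```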
@ -452,12 +483,12 @@ int32_t tLDataIterOpen2(struct SLDataIter *pIter, SSttFileReader *pSttFileReader
|
||||||
tsdbDebug("load the stt file info completed, elapsed time:%.2fms, %s", el, idStr);
|
tsdbDebug("load the stt file info completed, elapsed time:%.2fms, %s", el, idStr);
|
||||||
}
|
}
|
||||||
|
|
||||||
// bool exists = existsFromSttBlkStatis(pBlockLoadInfo->pSttStatisBlkArray, suid, uid, pIter->pReader);
|
// bool exists = existsFromSttBlkStatis(pBlockLoadInfo->pSttStatisBlkArray, suid, uid, pIter->pReader);
|
||||||
// if (!exists) {
|
// if (!exists) {
|
||||||
// pIter->iSttBlk = -1;
|
// pIter->iSttBlk = -1;
|
||||||
// pIter->pSttBlk = NULL;
|
// pIter->pSttBlk = NULL;
|
||||||
// return TSDB_CODE_SUCCESS;
|
// return TSDB_CODE_SUCCESS;
|
||||||
// }
|
// }
|
||||||
|
|
||||||
// find the start block, actually we could load the position to avoid repeatly searching for the start position when
|
// find the start block, actually we could load the position to avoid repeatly searching for the start position when
|
||||||
// the skey is updated.
|
// the skey is updated.
|
||||||
|
|
|
@ -439,7 +439,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void
|
||||||
return code;
|
return code;
|
||||||
|
|
||||||
_end:
|
_end:
|
||||||
tsdbReaderClose(pReader);
|
tsdbReaderClose2(pReader);
|
||||||
*ppReader = NULL;
|
*ppReader = NULL;
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
@ -1121,6 +1121,8 @@ static bool getNeighborBlockOfSameTable(SFileDataBlockInfo* pBlockInfo, STableBl
|
||||||
SBrinRecord* p = taosArrayGet(pTableBlockScanInfo->pBlockList, pBlockInfo->tbBlockIdx + step);
|
SBrinRecord* p = taosArrayGet(pTableBlockScanInfo->pBlockList, pBlockInfo->tbBlockIdx + step);
|
||||||
memcpy(pRecord, p, sizeof(SBrinRecord));
|
memcpy(pRecord, p, sizeof(SBrinRecord));
|
||||||
|
|
||||||
|
*nextIndex = pBlockInfo->tbBlockIdx + step;
|
||||||
|
|
||||||
// tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, pIndex->ordinalIndex, pBlock, tGetDataBlk);
|
// tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, pIndex->ordinalIndex, pBlock, tGetDataBlk);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -1729,41 +1731,45 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
|
||||||
|
|
||||||
// row in last file block
|
// row in last file block
|
||||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||||
int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
|
int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
|
||||||
|
|
||||||
if (ASCENDING_TRAVERSE(pReader->info.order)) {
|
if (ASCENDING_TRAVERSE(pReader->info.order)) {
|
||||||
if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
|
if (key < tsLast) {
|
||||||
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||||
} else if (key == ts) {
|
} else if (key > tsLast) {
|
||||||
SRow* pTSRow = NULL;
|
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
||||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
}
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
} else {
|
||||||
return code;
|
if (key > tsLast) {
|
||||||
}
|
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||||
|
} else if (key < tsLast) {
|
||||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
|
|
||||||
|
|
||||||
TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
|
||||||
tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
|
||||||
|
|
||||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, pMerger, &pReader->info.verRange, pReader->idStr);
|
|
||||||
|
|
||||||
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
|
||||||
return code;
|
|
||||||
}
|
|
||||||
|
|
||||||
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
|
|
||||||
|
|
||||||
taosMemoryFree(pTSRow);
|
|
||||||
tsdbRowMergerClear(pMerger);
|
|
||||||
return code;
|
|
||||||
} else { // key > ts
|
|
||||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
||||||
}
|
}
|
||||||
} else { // desc order
|
|
||||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true);
|
|
||||||
}
|
}
|
||||||
|
// the following for key == tsLast
|
||||||
|
SRow* pTSRow = NULL;
|
||||||
|
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
|
||||||
|
|
||||||
|
TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||||
|
tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||||
|
|
||||||
|
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr);
|
||||||
|
|
||||||
|
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
|
||||||
|
|
||||||
|
taosMemoryFree(pTSRow);
|
||||||
|
tsdbRowMergerClear(pMerger);
|
||||||
|
return code;
|
||||||
|
|
||||||
} else { // only last block exists
|
} else { // only last block exists
|
||||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
||||||
}
|
}
|
||||||
|
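The restructured mergeFileBlockAndLastBlock() hunk above boils down to a three-way decision on the data-block key versus the last/stt-block key, with the winning side depending on scan direction and the equal-key case falling through to the row merger. A compact sketch of just that decision is below; the enum names are illustrative, not taken from the source.

```c
#include <stdbool.h>
#include <stdint.h>

typedef enum { SRC_DATA_BLOCK, SRC_LAST_BLOCK, SRC_MERGE_BOTH } ENextSource;

/* Decide which source supplies the next row: in ascending scans the smaller
 * key wins, in descending scans the larger key wins, and equal keys must be
 * merged so only one output row is produced for that timestamp. */
static ENextSource pickNextSource(int64_t key, int64_t tsLast, bool ascending) {
  if (key == tsLast) return SRC_MERGE_BOTH;
  if (ascending) return (key < tsLast) ? SRC_DATA_BLOCK : SRC_LAST_BLOCK;
  return (key > tsLast) ? SRC_DATA_BLOCK : SRC_LAST_BLOCK;
}
```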
@ -2190,7 +2196,8 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
|
||||||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||||
|
|
||||||
TSDBROW *pRow = NULL, *piRow = NULL;
|
TSDBROW *pRow = NULL, *piRow = NULL;
|
||||||
int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN;
|
int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] :
|
||||||
|
(ASCENDING_TRAVERSE(pReader->info.order) ? INT64_MAX : INT64_MIN);
|
||||||
if (pBlockScanInfo->iter.hasVal) {
|
if (pBlockScanInfo->iter.hasVal) {
|
||||||
pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
|
pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
|
||||||
}
|
}
|
||||||
|
@ -2564,9 +2571,8 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
|
||||||
|
|
||||||
// load the last data block of current table
|
// load the last data block of current table
|
||||||
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
|
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
|
||||||
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) {
|
if (pScanInfo == NULL) {
|
||||||
// reset the index in last block when handing a new file
|
tsdbError("table Iter is null, invalid pScanInfo, try next table %s", pReader->idStr);
|
||||||
// doCleanupTableScanInfo(pScanInfo);
|
|
||||||
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
||||||
if (!hasNexTable) {
|
if (!hasNexTable) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
@ -2575,8 +2581,15 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
// reset the index in last block when handing a new file
|
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) {
|
||||||
// doCleanupTableScanInfo(pScanInfo);
|
// reset the index in last block when handing a new file
|
||||||
|
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
||||||
|
if (!hasNexTable) {
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
bool hasDataInLastFile = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
|
bool hasDataInLastFile = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
|
||||||
if (!hasDataInLastFile) {
|
if (!hasDataInLastFile) {
|
||||||
|
@ -2667,16 +2680,32 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
||||||
(ASCENDING_TRAVERSE(pReader->info.order)) ? pBlockInfo->record.firstKey : pBlockInfo->record.lastKey;
|
(ASCENDING_TRAVERSE(pReader->info.order)) ? pBlockInfo->record.firstKey : pBlockInfo->record.lastKey;
|
||||||
code = buildDataBlockFromBuf(pReader, pScanInfo, endKey);
|
code = buildDataBlockFromBuf(pReader, pScanInfo, endKey);
|
||||||
} else {
|
} else {
|
||||||
if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->info.order)) {
|
bool bHasDataInLastBlock = hasDataInLastBlock(pLastBlockReader);
|
||||||
// only return the rows in last block
|
int64_t tsLast = bHasDataInLastBlock ? getCurrentKeyInLastBlock(pLastBlockReader) : INT64_MIN;
|
||||||
int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
|
if (!bHasDataInLastBlock || ((ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.lastKey < tsLast) ||
|
||||||
ASSERT(tsLast >= pBlockInfo->record.lastKey);
|
(!ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.firstKey > tsLast))) {
|
||||||
|
// whole block is required, return it directly
|
||||||
|
SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
|
||||||
|
pInfo->rows = pBlockInfo->record.numRow;
|
||||||
|
pInfo->id.uid = pScanInfo->uid;
|
||||||
|
pInfo->dataLoad = 0;
|
||||||
|
pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey};
|
||||||
|
setComposedBlockFlag(pReader, false);
|
||||||
|
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order);
|
||||||
|
|
||||||
|
// update the last key for the corresponding table
|
||||||
|
pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey;
|
||||||
|
tsdbDebug("%p uid:%" PRIu64
|
||||||
|
" clean file block retrieved from file, global index:%d, "
|
||||||
|
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
|
||||||
|
pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow,
|
||||||
|
pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr);
|
||||||
|
} else {
|
||||||
SBlockData* pBData = &pReader->status.fileBlockData;
|
SBlockData* pBData = &pReader->status.fileBlockData;
|
||||||
tBlockDataReset(pBData);
|
tBlockDataReset(pBData);
|
||||||
|
|
||||||
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
|
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
|
||||||
tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
|
tsdbDebug("load data in last block firstly %s", pReader->idStr);
|
||||||
|
|
||||||
int64_t st = taosGetTimestampUs();
|
int64_t st = taosGetTimestampUs();
|
||||||
|
|
||||||
|
@ -2707,23 +2736,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
||||||
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
|
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
|
||||||
pResBlock->info.rows, el, pReader->idStr);
|
pResBlock->info.rows, el, pReader->idStr);
|
||||||
}
|
}
|
||||||
} else { // whole block is required, return it directly
|
|
||||||
SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
|
|
||||||
pInfo->rows = pBlockInfo->record.numRow;
|
|
||||||
pInfo->id.uid = pScanInfo->uid;
|
|
||||||
pInfo->dataLoad = 0;
|
|
||||||
pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey};
|
|
||||||
setComposedBlockFlag(pReader, false);
|
|
||||||
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order);
|
|
||||||
|
|
||||||
// update the last key for the corresponding table
|
|
||||||
pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey;
|
|
||||||
tsdbDebug("%p uid:%" PRIu64
|
|
||||||
" clean file block retrieved from file, global index:%d, "
|
|
||||||
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
|
|
||||||
pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow,
|
|
||||||
pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code;
|
return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code;
|
||||||
|
@ -4096,12 +4110,10 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
|
||||||
}
|
}
|
||||||
|
|
||||||
tsdbDataFileReaderClose(&pReader->pFileReader);
|
tsdbDataFileReaderClose(&pReader->pFileReader);
|
||||||
|
|
||||||
int64_t loadBlocks = 0;
|
int64_t loadBlocks = 0;
|
||||||
double elapse = 0;
|
double elapse = 0;
|
||||||
pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse);
|
pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse);
|
||||||
pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
|
pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
|
||||||
|
|
||||||
// resetDataBlockScanInfo excluding lastKey
|
// resetDataBlockScanInfo excluding lastKey
|
||||||
STableBlockScanInfo** p = NULL;
|
STableBlockScanInfo** p = NULL;
|
||||||
int32_t iter = 0;
|
int32_t iter = 0;
|
||||||
|
|
|
@@ -342,18 +342,18 @@ static int32_t tsdbSnapCmprTombData(STsdbSnapReader* reader, uint8_t** data) {
   int32_t code = 0;
   int32_t lino = 0;
 
-  int64_t size = sizeof(SSnapDataHdr);
+  int64_t size = 0;
   for (int32_t i = 0; i < ARRAY_SIZE(reader->tombBlock->dataArr); i++) {
     size += TARRAY2_DATA_LEN(reader->tombBlock->dataArr + i);
   }
 
-  data[0] = taosMemoryMalloc(size);
+  data[0] = taosMemoryMalloc(size + sizeof(SSnapDataHdr));
   if (data[0] == NULL) {
     code = TSDB_CODE_OUT_OF_MEMORY;
     TSDB_CHECK_CODE(code, lino, _exit);
   }
 
-  SSnapDataHdr* hdr = (SSnapDataHdr*)data[0];
+  SSnapDataHdr* hdr = (SSnapDataHdr*)(data[0]);
   hdr->type = SNAP_DATA_DEL;
   hdr->size = size;
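Both tomb-data hunks move to the convention that hdr->size counts only the payload, with the header size added back at allocation time on the write path and used as-is on the read path. Below is a minimal sketch of that writer/reader agreement; the header struct shown is an assumed stand-in, not the real SSnapDataHdr layout.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  int8_t  type;
  int64_t size;   /* payload bytes only, header excluded */
  uint8_t data[]; /* payload follows the header */
} SnapHdr;        /* illustrative stand-in for SSnapDataHdr */

/* Writer: allocate header + payload, but record only the payload length. */
static SnapHdr *packSnap(int8_t type, const void *payload, int64_t len) {
  SnapHdr *hdr = malloc(sizeof(*hdr) + len);
  if (hdr == NULL) return NULL;
  hdr->type = type;
  hdr->size = len;                 /* no sizeof(*hdr) folded in any more */
  memcpy(hdr->data, payload, len);
  return hdr;
}

/* Reader: the payload length is hdr->size as-is, no subtraction needed. */
static int64_t snapPayloadLen(const SnapHdr *hdr) { return hdr->size; }
```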
@ -938,7 +938,7 @@ static int32_t tsdbSnapWriteDecmprTombBlock(SSnapDataHdr* hdr, STombBlock* tombB
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
int32_t lino = 0;
|
int32_t lino = 0;
|
||||||
|
|
||||||
int64_t size = hdr->size - sizeof(*hdr);
|
int64_t size = hdr->size;
|
||||||
ASSERT(size % TOMB_RECORD_ELEM_NUM == 0);
|
ASSERT(size % TOMB_RECORD_ELEM_NUM == 0);
|
||||||
size = size / TOMB_RECORD_ELEM_NUM;
|
size = size / TOMB_RECORD_ELEM_NUM;
|
||||||
ASSERT(size % sizeof(int64_t) == 0);
|
ASSERT(size % sizeof(int64_t) == 0);
|
||||||
|
|
|
@ -628,7 +628,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
|
||||||
return tqProcessVgCommittedInfoReq(pVnode->pTq, pMsg);
|
return tqProcessVgCommittedInfoReq(pVnode->pTq, pMsg);
|
||||||
case TDMT_VND_TMQ_SEEK:
|
case TDMT_VND_TMQ_SEEK:
|
||||||
return tqProcessSeekReq(pVnode->pTq, pMsg);
|
return tqProcessSeekReq(pVnode->pTq, pMsg);
|
||||||
|
|
||||||
default:
|
default:
|
||||||
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
|
vError("unknown msg type:%d in fetch queue", pMsg->msgType);
|
||||||
return TSDB_CODE_APP_ERROR;
|
return TSDB_CODE_APP_ERROR;
|
||||||
|
|
|
@ -1009,7 +1009,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
||||||
SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode;
|
SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode;
|
||||||
|
|
||||||
EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT,
|
EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT,
|
||||||
nodesGetNameFromColumnNode(((STargetNode *)pStateNode->pStateKey)->pExpr));
|
nodesGetNameFromColumnNode(pStateNode->pStateKey));
|
||||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||||
if (pResNode->pExecInfo) {
|
if (pResNode->pExecInfo) {
|
||||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||||
|
|
|
@ -25,6 +25,7 @@ extern "C" {
|
||||||
#include "tsort.h"
|
#include "tsort.h"
|
||||||
#include "ttszip.h"
|
#include "ttszip.h"
|
||||||
#include "tvariant.h"
|
#include "tvariant.h"
|
||||||
|
#include "theap.h"
|
||||||
|
|
||||||
#include "dataSinkMgt.h"
|
#include "dataSinkMgt.h"
|
||||||
#include "executil.h"
|
#include "executil.h"
|
||||||
|
@@ -417,6 +418,14 @@ typedef struct SIntervalAggOperatorInfo {
   EOPTR_EXEC_MODEL   execModel;    // operator execution model [batch model|stream model]
   STimeWindowAggSupp twAggSup;
   SArray*            pPrevValues;  // SArray<SGroupKeys> used to keep the previous not null value for interpolation.
+  // for limit optimization
+  bool          limited;
+  int64_t       limit;
+  bool          slimited;
+  int64_t       slimit;
+  uint64_t      curGroupId;  // initialize to UINT64_MAX
+  uint64_t      handledGroupNum;
+  BoundedQueue* pBQ;
 } SIntervalAggOperatorInfo;
 
 typedef struct SMergeAlignedIntervalAggOperatorInfo {
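The new fields wire a BoundedQueue into the interval operator so that only the first `limit` (plus offset) window start keys survive per group. As a rough illustration of what such a bounded structure does, here is a tiny fixed-capacity "keep the k smallest keys" helper; it is linear rather than heap-based, purely for exposition, and is not the actual BoundedQueue implementation.

```c
#include <stdbool.h>
#include <stdint.h>

typedef int64_t TSKEY;

typedef struct {
  TSKEY  *keys;  /* storage for at most cap keys */
  int32_t cap;   /* derived from limit + offset in the operator */
  int32_t size;
} KeepSmallest;

/* Try to admit a key. Returns false ("filter this window out") when the
 * structure is full and the key is not smaller than the current worst key. */
static bool keepSmallestOffer(KeepSmallest *q, TSKEY key) {
  if (q->size < q->cap) {
    q->keys[q->size++] = key;
    return true;
  }
  int32_t worst = 0;
  for (int32_t i = 1; i < q->size; ++i) {
    if (q->keys[i] > q->keys[worst]) worst = i;
  }
  if (key >= q->keys[worst]) return false;
  q->keys[worst] = key;  /* evict the largest kept key */
  return true;
}
```

For descending output order the comparison flips, which is exactly what the tsKeyCompFn added later in this diff does by consulting binfo.outputTsOrder.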
@ -191,6 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
|
||||||
bool tsortIsClosed(SSortHandle* pHandle);
|
bool tsortIsClosed(SSortHandle* pHandle);
|
||||||
void tsortSetClosed(SSortHandle* pHandle);
|
void tsortSetClosed(SSortHandle* pHandle);
|
||||||
|
|
||||||
|
void setSingleTableMerge(SSortHandle* pHandle);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -2118,8 +2118,9 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList);
|
||||||
|
|
||||||
if (groupSort) {
|
if (groupSort || pScanNode->groupOrderScan) {
|
||||||
code = sortTableGroup(pTableListInfo);
|
code = sortTableGroup(pTableListInfo);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -304,7 +304,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3
|
||||||
return pTaskInfo;
|
return pTaskInfo;
|
||||||
}
|
}
|
||||||
|
|
||||||
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId) {
|
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId, int32_t taskId) {
|
||||||
if (msg == NULL) {
|
if (msg == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -317,7 +317,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
|
||||||
}
|
}
|
||||||
|
|
||||||
qTaskInfo_t pTaskInfo = NULL;
|
qTaskInfo_t pTaskInfo = NULL;
|
||||||
code = qCreateExecTask(readers, vgId, 0, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM);
|
code = qCreateExecTask(readers, vgId, taskId, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
nodesDestroyNode((SNode*)pPlan);
|
nodesDestroyNode((SNode*)pPlan);
|
||||||
qDestroyTask(pTaskInfo);
|
qDestroyTask(pTaskInfo);
|
||||||
|
@ -1046,7 +1046,7 @@ int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo) {
|
||||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||||
STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow;
|
STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow;
|
||||||
|
|
||||||
qDebug("%s set remove scan-history filter window:%" PRId64 "-%" PRId64 ", new window:%" PRId64 "-%" PRId64,
|
qDebug("%s remove scan-history filter window:%" PRId64 "-%" PRId64 ", set new window:%" PRId64 "-%" PRId64,
|
||||||
GET_TASKID(pTaskInfo), pWindow->skey, pWindow->ekey, INT64_MIN, INT64_MAX);
|
GET_TASKID(pTaskInfo), pWindow->skey, pWindow->ekey, INT64_MIN, INT64_MAX);
|
||||||
|
|
||||||
pWindow->skey = INT64_MIN;
|
pWindow->skey = INT64_MIN;
|
||||||
|
|
|
@ -275,7 +275,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR
|
||||||
SNode* pTagIndexCond, const char* pUser, const char* dbname) {
|
SNode* pTagIndexCond, const char* pUser, const char* dbname) {
|
||||||
int32_t type = nodeType(pPhyNode);
|
int32_t type = nodeType(pPhyNode);
|
||||||
const char* idstr = GET_TASKID(pTaskInfo);
|
const char* idstr = GET_TASKID(pTaskInfo);
|
||||||
|
|
||||||
if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
|
if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
|
||||||
SOperatorInfo* pOperator = NULL;
|
SOperatorInfo* pOperator = NULL;
|
||||||
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
|
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) {
|
||||||
|
|
|
@ -848,30 +848,29 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
|
while (1) {
|
||||||
setOperatorCompleted(pOperator);
|
if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
|
||||||
return NULL;
|
setOperatorCompleted(pOperator);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset value for the next group data output
|
||||||
|
pOperator->status = OP_OPENED;
|
||||||
|
resetLimitInfoForNextGroup(&pInfo->base.limitInfo);
|
||||||
|
|
||||||
|
int32_t num = 0;
|
||||||
|
STableKeyInfo* pList = NULL;
|
||||||
|
tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num);
|
||||||
|
|
||||||
|
pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num);
|
||||||
|
pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
|
||||||
|
pInfo->scanTimes = 0;
|
||||||
|
|
||||||
|
result = doGroupedTableScan(pOperator);
|
||||||
|
if (result != NULL) {
|
||||||
|
return result;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// reset value for the next group data output
|
|
||||||
pOperator->status = OP_OPENED;
|
|
||||||
resetLimitInfoForNextGroup(&pInfo->base.limitInfo);
|
|
||||||
|
|
||||||
int32_t num = 0;
|
|
||||||
STableKeyInfo* pList = NULL;
|
|
||||||
tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num);
|
|
||||||
|
|
||||||
pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num);
|
|
||||||
pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
|
|
||||||
pInfo->scanTimes = 0;
|
|
||||||
|
|
||||||
result = doGroupedTableScan(pOperator);
|
|
||||||
if (result != NULL) {
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
setOperatorCompleted(pOperator);
|
|
||||||
return NULL;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2938,17 +2937,22 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
   // one table has one data block
   int32_t numOfTable = tableEndIdx - tableStartIdx + 1;
 
-  STableMergeScanSortSourceParam param = {0};
-  param.pOperator = pOperator;
+  STableMergeScanSortSourceParam *param = taosMemoryCalloc(1, sizeof(STableMergeScanSortSourceParam));
+  param->pOperator = pOperator;
   STableKeyInfo* startKeyInfo = tableListGetInfo(pInfo->base.pTableListInfo, tableStartIdx);
   pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, &pInfo->base.cond, startKeyInfo, numOfTable, pInfo->pReaderBlock, (void**)&pInfo->base.dataReader, GET_TASKID(pTaskInfo), false, NULL);
 
   SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
-  ps->param = &param;
-  ps->onlyRef = true;
+  ps->param = param;
+  ps->onlyRef = false;
   tsortAddSource(pInfo->pSortHandle, ps);
 
-  int32_t code = tsortOpen(pInfo->pSortHandle);
+  int32_t code = TSDB_CODE_SUCCESS;
+  if (numOfTable == 1) {
+    setSingleTableMerge(pInfo->pSortHandle);
+  } else {
+    code = tsortOpen(pInfo->pSortHandle);
+  }
 
   if (code != TSDB_CODE_SUCCESS) {
     T_LONG_JMP(pTaskInfo->env, terrno);
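The merge-scan hunk above switches the sort-source parameter from a stack variable referenced via onlyRef = true to a heap allocation handed over to the sort handle (onlyRef = false). The sketch below shows the general ownership shape this avoids, a parameter that must outlive the frame that created it; the names are illustrative, and the rationale is inferred from the flag flip rather than stated in the patch.

```c
#include <stdlib.h>

typedef struct {
  void *pOperator;
} SortParam;

typedef struct {
  SortParam *param;
  int        ownsParam;  /* counterpart of ps->onlyRef == false */
} SortSource;

/* Heap-allocate the parameter and hand ownership to the consumer, so it stays
 * valid after this function returns and can be freed by whoever drains the
 * sort source later. */
static SortSource *makeSortSource(void *op) {
  SortParam  *p   = calloc(1, sizeof(*p));
  SortSource *src = calloc(1, sizeof(*src));
  if (p == NULL || src == NULL) {
    free(p);
    free(src);
    return NULL;
  }
  p->pOperator   = op;
  src->param     = p;
  src->ownsParam = 1;
  return src;
}
```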
@ -3587,4 +3591,4 @@ static void destoryTableCountScanOperator(void* param) {
|
||||||
|
|
||||||
taosArrayDestroy(pTableCountScanInfo->stbUidList);
|
taosArrayDestroy(pTableCountScanInfo->stbUidList);
|
||||||
taosMemoryFreeClear(param);
|
taosMemoryFreeClear(param);
|
||||||
}
|
}
|
||||||
|
|
|
@ -876,7 +876,67 @@ bool needDeleteWindowBuf(STimeWindow* pWin, STimeWindowAggSupp* pTwSup) {
|
||||||
return pTwSup->maxTs != INT64_MIN && pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark;
|
return pTwSup->maxTs != INT64_MIN && pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
|
static bool tsKeyCompFn(void* l, void* r, void* param) {
|
||||||
|
TSKEY* lTS = (TSKEY*)l;
|
||||||
|
TSKEY* rTS = (TSKEY*)r;
|
||||||
|
SIntervalAggOperatorInfo* pInfo = param;
|
||||||
|
return pInfo->binfo.outputTsOrder == ORDER_ASC ? *lTS < *rTS : *lTS > *rTS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool isCalculatedWin(SIntervalAggOperatorInfo* pInfo, const STimeWindow* win, uint64_t tableGroupId) {
|
||||||
|
char keyBuf[sizeof(TSKEY) + sizeof(uint64_t)] = {0};
|
||||||
|
SET_RES_WINDOW_KEY(keyBuf, (char*)&win->skey, sizeof(TSKEY), tableGroupId);
|
||||||
|
return tSimpleHashGet(pInfo->aggSup.pResultRowHashTable, keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))) != NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief check if cur window should be filtered out by limit info
|
||||||
|
* @retval true if should be filtered out
|
||||||
|
* @retval false if not filtering out
|
||||||
|
* @note If no limit info, we skip filtering.
|
||||||
|
* If input/output ts order mismatch, we skip filtering too.
|
||||||
|
* eg. input ts order: desc, and output ts order: asc, limit: 10
|
||||||
|
* IntervalOperator should output the first 10 windows, however, we can't find the first 10 windows until we scan
|
||||||
|
* every tuple in every block.
|
||||||
|
* And the boundedQueue keeps refreshing all records with smaller ts key.
|
||||||
|
*/
|
||||||
|
static bool filterWindowWithLimit(SIntervalAggOperatorInfo* pOperatorInfo, STimeWindow* win, uint64_t groupId) {
|
||||||
|
if (!pOperatorInfo->limited // if no limit info, no filter will be applied
|
||||||
|
|| pOperatorInfo->binfo.inputTsOrder !=
|
||||||
|
pOperatorInfo->binfo.outputTsOrder // if input/output ts order mismatch, no filter
|
||||||
|
) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (pOperatorInfo->limit == 0) return true;
|
||||||
|
|
||||||
|
if (pOperatorInfo->pBQ == NULL) {
|
||||||
|
pOperatorInfo->pBQ = createBoundedQueue(pOperatorInfo->limit - 1, tsKeyCompFn, taosMemoryFree, pOperatorInfo);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool shouldFilter = false;
|
||||||
|
// if BQ has been full, compare it with top of BQ
|
||||||
|
if (taosBQSize(pOperatorInfo->pBQ) == taosBQMaxSize(pOperatorInfo->pBQ) + 1) {
|
||||||
|
PriorityQueueNode* top = taosBQTop(pOperatorInfo->pBQ);
|
||||||
|
shouldFilter = tsKeyCompFn(top->data, &win->skey, pOperatorInfo);
|
||||||
|
}
|
||||||
|
if (shouldFilter) {
|
||||||
|
return true;
|
||||||
|
} else if (isCalculatedWin(pOperatorInfo, win, groupId)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// cur win not been filtered out and not been pushed into BQ yet, push it into BQ
|
||||||
|
PriorityQueueNode node = {.data = taosMemoryMalloc(sizeof(TSKEY))};
|
||||||
|
*((TSKEY*)node.data) = win->skey;
|
||||||
|
|
||||||
|
if (NULL == taosBQPush(pOperatorInfo->pBQ, &node)) {
|
||||||
|
taosMemoryFree(node.data);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
|
||||||
int32_t scanFlag) {
|
int32_t scanFlag) {
|
||||||
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
|
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
|
||||||
|
|
||||||
|
@ -891,8 +951,21 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
|
||||||
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
|
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
|
||||||
SResultRow* pResult = NULL;
|
SResultRow* pResult = NULL;
|
||||||
|
|
||||||
|
if (tableGroupId != pInfo->curGroupId) {
|
||||||
|
pInfo->handledGroupNum += 1;
|
||||||
|
if (pInfo->slimited && pInfo->handledGroupNum > pInfo->slimit) {
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
pInfo->curGroupId = tableGroupId;
|
||||||
|
destroyBoundedQueue(pInfo->pBQ);
|
||||||
|
pInfo->pBQ = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
STimeWindow win =
|
STimeWindow win =
|
||||||
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->binfo.inputTsOrder);
|
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->binfo.inputTsOrder);
|
||||||
|
if (filterWindowWithLimit(pInfo, &win, tableGroupId)) return false;
|
||||||
|
|
||||||
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
|
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
|
||||||
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
|
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
|
||||||
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
|
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||||
|
@ -929,7 +1002,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
|
||||||
while (1) {
|
while (1) {
|
||||||
int32_t prevEndPos = forwardRows - 1 + startPos;
|
int32_t prevEndPos = forwardRows - 1 + startPos;
|
||||||
startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->binfo.inputTsOrder);
|
startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->binfo.inputTsOrder);
|
||||||
if (startPos < 0) {
|
if (startPos < 0 || filterWindowWithLimit(pInfo, &nextWin, tableGroupId)) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// null data, failed to allocate more memory buffer
|
// null data, failed to allocate more memory buffer
|
||||||
|
@ -963,6 +1036,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
|
||||||
if (pInfo->timeWindowInterpo) {
|
if (pInfo->timeWindowInterpo) {
|
||||||
saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols);
|
saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols);
|
||||||
}
|
}
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) {
|
void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) {
|
||||||
|
@ -1043,7 +1117,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
|
||||||
|
|
||||||
// the pDataBlock are always the same one, no need to call this again
|
// the pDataBlock are always the same one, no need to call this again
|
||||||
setInputDataBlock(pSup, pBlock, pInfo->binfo.inputTsOrder, scanFlag, true);
|
setInputDataBlock(pSup, pBlock, pInfo->binfo.inputTsOrder, scanFlag, true);
|
||||||
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag);
|
if (hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag)) break;
|
||||||
}
|
}
|
||||||
|
|
||||||
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder);
|
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder);
|
||||||
|
@ -1495,6 +1569,7 @@ void destroyIntervalOperatorInfo(void* param) {
|
||||||
|
|
||||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||||
|
destroyBoundedQueue(pInfo->pBQ);
|
||||||
taosMemoryFreeClear(param);
|
taosMemoryFreeClear(param);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1658,6 +1733,17 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh
|
||||||
pInfo->interval = interval;
|
pInfo->interval = interval;
|
||||||
pInfo->twAggSup = as;
|
pInfo->twAggSup = as;
|
||||||
pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock;
|
pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock;
|
||||||
|
if (pPhyNode->window.node.pLimit) {
|
||||||
|
SLimitNode* pLimit = (SLimitNode*)pPhyNode->window.node.pLimit;
|
||||||
|
pInfo->limited = true;
|
||||||
|
pInfo->limit = pLimit->limit + pLimit->offset;
|
||||||
|
}
|
||||||
|
if (pPhyNode->window.node.pSlimit) {
|
||||||
|
SLimitNode* pLimit = (SLimitNode*)pPhyNode->window.node.pSlimit;
|
||||||
|
pInfo->slimited = true;
|
||||||
|
pInfo->slimit = pLimit->limit + pLimit->offset;
|
||||||
|
pInfo->curGroupId = UINT64_MAX;
|
||||||
|
}
|
||||||
|
|
||||||
if (pPhyNode->window.pExprs != NULL) {
|
if (pPhyNode->window.pExprs != NULL) {
|
||||||
int32_t numOfScalar = 0;
|
int32_t numOfScalar = 0;
|
||||||
|
@ -1858,7 +1944,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi
  }

  int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
- SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
+ SColumnNode* pColNode = (SColumnNode*)(pStateNode->pStateKey);

  if (pStateNode->window.pExprs != NULL) {
    int32_t numOfScalarExpr = 0;
@ -3574,6 +3660,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
  SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
  SOptrBasicInfo* pBInfo = &pInfo->binfo;
  SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
+ qDebug("===stream=== stream session agg");
  if (pOperator->status == OP_EXEC_DONE) {
    return NULL;
  } else if (pOperator->status == OP_RES_TO_RETURN) {
@ -3736,6 +3823,7 @@ void streamSessionReloadState(SOperatorInfo* pOperator) {
    setSessionOutputBuf(pAggSup, pSeKeyBuf[i].win.skey, pSeKeyBuf[i].win.ekey, pSeKeyBuf[i].groupId, &winInfo);
    int32_t winNum = compactSessionWindow(pOperator, &winInfo, pInfo->pStUpdated, pInfo->pStDeleted, true);
    if (winNum > 0) {
+     qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, winInfo.sessionWin.win.skey, winInfo.sessionWin.groupId);
      if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
        saveResult(winInfo, pInfo->pStUpdated);
      } else if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
@ -3754,7 +3842,7 @@ void streamSessionReloadState(SOperatorInfo* pOperator) {
  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (downstream->fpSet.reloadStreamStateFn) {
    downstream->fpSet.reloadStreamStateFn(downstream);
  }
}

SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
@ -3863,6 +3951,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
  SExprSupp* pSup = &pOperator->exprSupp;
  SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;

+ qDebug("===stream=== stream session semi agg");
  if (pOperator->status == OP_EXEC_DONE) {
    return NULL;
  }
@ -4373,6 +4462,7 @@ static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCur
  initSessionOutputBuf(pCurWin, &pCurResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset);
  SResultRow* pWinResult = NULL;
  initSessionOutputBuf(pNextWin, &pWinResult, pAggSup->pDummyCtx, numOfOutput, pSup->rowEntryInfoOffset);
+ pCurWin->sessionWin.win.ekey = TMAX(pCurWin->sessionWin.win.ekey, pNextWin->sessionWin.win.ekey);

  updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->sessionWin.win, 1);
  compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
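When two windows are compacted into one, the surviving window has to cover the window being folded into it, which is what the added `TMAX` line does for the end key. A tiny illustration with simplified, illustrative types:

```c
// Minimal sketch of extending the surviving window's end key during
// compaction; names are illustrative, not TDengine's.
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } TimeWindow;

static void mergeInto(TimeWindow *cur, const TimeWindow *next) {
  if (next->ekey > cur->ekey) cur->ekey = next->ekey;  // same effect as TMAX(cur->ekey, next->ekey)
}

int main(void) {
  TimeWindow cur  = {.skey = 1000, .ekey = 1500};
  TimeWindow next = {.skey = 1400, .ekey = 2000};
  mergeInto(&cur, &next);
  printf("merged window: [%lld, %lld]\n", (long long)cur.skey, (long long)cur.ekey);
  return 0;
}
```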
@ -4449,7 +4539,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
                                                SExecTaskInfo* pTaskInfo, SReadHandle* pHandle) {
  SStreamStateWinodwPhysiNode* pStateNode = (SStreamStateWinodwPhysiNode*)pPhyNode;
  int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
- SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
+ SColumnNode* pColNode = (SColumnNode*)(pStateNode->pStateKey);
  int32_t code = TSDB_CODE_SUCCESS;

  SStreamStateAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamStateAggOperatorInfo));
@ -69,8 +69,14 @@ struct SSortHandle {
  _sort_fetch_block_fn_t fetchfp;
  _sort_merge_compar_fn_t comparFn;
  SMultiwayMergeTreeInfo* pMergeTree;

+ bool singleTableMerge;
};

+ void setSingleTableMerge(SSortHandle* pHandle) {
+   pHandle->singleTableMerge = true;
+ }

static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param);

// | offset[0] | offset[1] |....| nullbitmap | data |...|
@ -995,7 +1001,12 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
  SArray* aExtSrc = taosArrayInit(nSrc, POINTER_BYTES);

  size_t maxBufSize = pHandle->numOfPages * pHandle->pageSize;
- createPageBuf(pHandle);
+ int32_t code = createPageBuf(pHandle);
+ if (code != TSDB_CODE_SUCCESS) {
+   taosArrayDestroy(aExtSrc);
+   return code;
+ }

  SSortSource* pSrc = taosArrayGetP(pHandle->pOrderedSource, 0);
  int32_t szSort = 0;
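The hunk above turns a previously ignored `createPageBuf` result into a checked one: on failure the temporary `aExtSrc` array is released before the error code is propagated. The same cleanup-on-error pattern in a stand-alone form, using simplified allocation helpers rather than TDengine APIs:

```c
// Illustrative sketch: check the return code of a buffer-creation step and
// free what was already allocated before returning the error upward.
#include <stdio.h>
#include <stdlib.h>

#define OK            0
#define ENOMEM_CODE  (-1)

static int createBuf(void **buf, size_t size) {
  *buf = malloc(size);
  return (*buf == NULL) ? ENOMEM_CODE : OK;
}

static int prepareSources(void) {
  void *aExtSrc = malloc(64);          // temporary array built first
  if (aExtSrc == NULL) return ENOMEM_CODE;

  void *pageBuf = NULL;
  int code = createBuf(&pageBuf, 4096);
  if (code != OK) {
    free(aExtSrc);                     // release what was already allocated
    return code;                       // then propagate the failure
  }

  /* ... use aExtSrc and pageBuf ... */
  free(pageBuf);
  free(aExtSrc);
  return OK;
}

int main(void) {
  printf("prepareSources -> %d\n", prepareSources());
  return 0;
}
```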
@ -1070,7 +1081,7 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
  taosArrayDestroy(aExtSrc);

  pHandle->type = SORT_SINGLESOURCE_SORT;
- return 0;
+ return TSDB_CODE_SUCCESS;
}

static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
@ -1453,6 +1464,26 @@ static STupleHandle* tsortPQSortNextTuple(SSortHandle* pHandle) {
  return &pHandle->tupleHandle;
}

+ static STupleHandle* tsortSingleTableMergeNextTuple(SSortHandle* pHandle) {
+   if (1 == pHandle->numOfCompletedSources) return NULL;
+   if (pHandle->tupleHandle.pBlock && pHandle->tupleHandle.rowIndex + 1 < pHandle->tupleHandle.pBlock->info.rows) {
+     pHandle->tupleHandle.rowIndex++;
+   } else {
+     if (pHandle->tupleHandle.rowIndex == -1) return NULL;
+     SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
+     SSortSource*  source = *pSource;
+     SSDataBlock*  pBlock = pHandle->fetchfp(source->param);
+     if (!pBlock || pBlock->info.rows == 0) {
+       setCurrentSourceDone(source, pHandle);
+       pHandle->tupleHandle.pBlock = NULL;
+       return NULL;
+     }
+     pHandle->tupleHandle.pBlock = pBlock;
+     pHandle->tupleHandle.rowIndex = 0;
+   }
+   return &pHandle->tupleHandle;
+ }

int32_t tsortOpen(SSortHandle* pHandle) {
  if (pHandle->opened) {
    return 0;
@ -1470,7 +1501,9 @@ int32_t tsortOpen(SSortHandle* pHandle) {
}

STupleHandle* tsortNextTuple(SSortHandle* pHandle) {
- if (pHandle->pBoundedQueue)
+ if (pHandle->singleTableMerge)
+   return tsortSingleTableMergeNextTuple(pHandle);
+ else if (pHandle->pBoundedQueue)
    return tsortPQSortNextTuple(pHandle);
  else
    return tsortBufMergeSortNextTuple(pHandle);
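Taken together, the tsort changes add a fast path for a handle with exactly one ordered source: `setSingleTableMerge` flips a flag, and `tsortNextTuple` then appears to walk the rows of that single source directly instead of going through the page buffer and multiway merge tree. A simplified sketch of that single-source iteration, with assumed minimal types rather than the real `SSortHandle`:

```c
// Illustrative sketch: advance a row cursor within the current block, and
// fetch the next block from the lone source when the block is used up.
#include <stddef.h>
#include <stdio.h>

typedef struct { int rows; int *data; } Block;

typedef struct {
  Block *(*fetch)(void *param);  // source callback that yields the next block
  void  *param;
  Block *cur;                    // block currently being scanned
  int    rowIndex;               // cursor inside `cur`
  int    done;
} SingleSourceIter;

// Returns a pointer to the next row value, or NULL when the source is drained.
static int *nextRow(SingleSourceIter *it) {
  if (it->done) return NULL;
  if (it->cur && it->rowIndex + 1 < it->cur->rows) {
    it->rowIndex++;                      // still rows left in the current block
  } else {
    Block *b = it->fetch(it->param);     // pull the next block from the source
    if (b == NULL || b->rows == 0) {     // source exhausted
      it->done = 1;
      it->cur = NULL;
      return NULL;
    }
    it->cur = b;
    it->rowIndex = 0;
  }
  return &it->cur->data[it->rowIndex];
}

// Tiny demo source: two blocks of already-sorted data, then NULL.
static int d1[] = {1, 2, 3}, d2[] = {4, 5};
static Block blocks[] = {{3, d1}, {2, d2}};
static Block *demoFetch(void *param) {
  int *i = param;
  return (*i < 2) ? &blocks[(*i)++] : NULL;
}

int main(void) {
  int cursor = 0;
  SingleSourceIter it = {.fetch = demoFetch, .param = &cursor};
  for (int *v; (v = nextRow(&it)) != NULL;) printf("%d ", *v);
  printf("\n");
  return 0;
}
```

The `rowIndex == -1` guard in the real function covers a handle that was never positioned; the sketch simply treats an exhausted source as done.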
@ -468,7 +468,8 @@ static int32_t translateStddevMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t

static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
  // pseudo column do not need to check parameters
- pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT,
+                                   .precision = pFunc->node.resType.precision};
  return TSDB_CODE_SUCCESS;
}

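This hunk and the next keep the timestamp precision when the pseudo-column result type is rebuilt. In C, a compound literal with designated initializers zero-initializes every member that is not named, so omitting `.precision` would silently reset it; copying `pFunc->node.resType.precision` carries the already-resolved precision through. A small stand-alone illustration, with an illustrative struct and values:

```c
// Sketch of why the extra `.precision = ...` matters: members left out of a
// designated-initializer compound literal are zeroed, so rebuilding the type
// descriptor without it would drop the previously resolved precision.
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t bytes; int8_t type; uint8_t precision; } SDataType;

int main(void) {
  SDataType res = {.bytes = 8, .type = 9 /* e.g. TIMESTAMP */, .precision = 2 /* e.g. ns */};

  SDataType dropped = (SDataType){.bytes = 8, .type = 9};                    // precision becomes 0
  SDataType kept    = (SDataType){.bytes = 8, .type = 9,
                                  .precision = res.precision};               // precision preserved

  printf("dropped.precision = %u, kept.precision = %u\n",
         (unsigned)dropped.precision, (unsigned)kept.precision);
  return 0;
}
```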
@ -491,7 +492,8 @@ static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, in
  // pseudo column do not need to check parameters

  pFunc->node.resType =
-     (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP};
+     (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP,
+                 .precision = pFunc->node.resType.precision};
  return TSDB_CODE_SUCCESS;
}