Merge remote-tracking branch 'origin/3.0' into feat/TD-24499
commit 73d3c21e21
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.0.4.3")
+  SET(TD_VER_NUMBER "3.0.5.0")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -274,7 +274,7 @@ if(${BUILD_WITH_ROCKSDB})
 option(WITH_TOOLS "" OFF)
 option(WITH_LIBURING "" OFF)
 IF (TD_LINUX)
-  option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
+  option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
 ELSE()
   option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
 ENDIF()
@@ -105,6 +105,12 @@ class Consumer:
     def poll(self, timeout: float = 1.0):
         pass
 
+    def assignment(self):
+        pass
+
+    def poll(self, timeout: float = 1.0):
+        pass
+
     def close(self):
         pass
 
@@ -55,7 +55,7 @@ window_clause: {
   | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
 
 interp_clause:
-  RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
+  RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
 
 partition_by_clause:
   PARTITION BY expr [, expr] ...
@@ -889,9 +889,10 @@ ignore_null_values: {
 - `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
 - The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
 - `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
 - The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
 - Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
+- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
 - `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
 - Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
 - Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
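Editor's note: the single-timestamp `RANGE` form introduced above is easiest to see in a runnable query. A minimal hedged sketch using the Python connector (assumes `taospy` is installed and a `test.meters` table with a `current` column, like the one used elsewhere in these docs, already exists):

```python
# Sketch: point-in-time interpolation using the new single-timestamp RANGE form.
import taos

conn = taos.connect()  # defaults: localhost:6030, user root
# One timestamp in RANGE -> one interpolated row; EVERY can be omitted.
result = conn.query(
    "SELECT _irowts, INTERP(current) FROM test.meters "
    "RANGE('2023-01-01 00:00:00') FILL(LINEAR)"
)
for row in result.fetch_all():
    print(row)
conn.close()
```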
@@ -902,7 +903,7 @@ ignore_null_values: {
 - We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
 
 ```sql
 SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
 ```
 
 ### LAST
@@ -32,25 +32,22 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela
 Native connections are supported on the same platforms as the TDengine client driver.
 REST connection supports all platforms that can run Java.
 
-## Version support
-
-Please refer to [version support list](/reference/connector#version-support)
-
 ## Recent update logs
 
-| taos-jdbcdriver version | major changes |
-| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
-| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
-| 3.2.0 | This version has been deprecated |
-| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
-| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
-| 3.0.0 | Support for TDengine 3.0 |
-| 2.0.42 | fix wasNull interface return value in WebSocket connection |
-| 2.0.41 | fix decode method of username and password in REST connection |
-| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
-| 2.0.38 | JDBC REST connections add bulk pull function |
-| 2.0.37 | Support json tags |
-| 2.0.36 | Support schemaless writing |
+| taos-jdbcdriver version | major changes | TDengine version |
+| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
+| 3.2.1 | subscription add seek function | 3.0.5.0 or later |
+| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
+| 3.2.0 | This version has been deprecated | - |
+| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
+| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment | - |
+| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
+| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
+| 2.0.41 | fix decode method of username and password in REST connection | - |
+| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
+| 2.0.38 | JDBC REST connections add bulk pull function | - |
+| 2.0.37 | Support json tags | - |
+| 2.0.36 | Support schemaless writing | - |
 
 **Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
 
@@ -102,6 +99,8 @@ For specific error codes, please refer to.
 | 0x2319 | user is required | The user name information is missing when creating the connection |
 | 0x231a | password is required | Password information is missing when creating a connection |
 | 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
+| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
+| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
 | 0x2350 | unknown error | Unknown exception, please report to the developer on github. |
 | 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
 | 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
@@ -117,8 +116,8 @@ For specific error codes, please refer to.
 | 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
 | 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
 | 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
-| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
-| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
+| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter |
+| 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism |
 
 - [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
 <!-- - [TDengine_ERROR_CODE](../error-code) -->
@@ -169,7 +168,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
 <dependency>
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>3.2.1</version>
+    <version>3.2.2</version>
 </dependency>
 ```
 
@@ -913,14 +912,15 @@ public class SchemalessWsTest {
 
     public static void main(String[] args) throws SQLException {
         final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
-        Connection connection = DriverManager.getConnection(url);
+        try(Connection connection = DriverManager.getConnection(url)){
         init(connection);
 
-        SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
+        try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
         writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
         writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
         writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
-        System.exit(0);
+        }
+        }
     }
 
     private static void init(Connection connection) throws SQLException {
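Editor's note: the rewrite above switches the Java sample to try-with-resources, so the `Connection` and `SchemalessWriter` are closed even when a write throws. For comparison, a hedged sketch of the same line-protocol write in the Python connector, where try/finally plays the same role (assumes `taospy`; the database name and sample line are illustrative):

```python
# Sketch: schemaless line-protocol write with explicit cleanup (taospy assumed).
import taos
from taos import SmlProtocol, SmlPrecision

lines = ["meters,location=California.LosAngeles,groupid=2 "
         "current=11.8,voltage=221,phase=0.28 1648432611249"]

conn = taos.connect()
try:
    conn.execute("CREATE DATABASE IF NOT EXISTS test_schemaless")
    conn.select_db("test_schemaless")
    # protocol/precision enums mirror SchemalessProtocolType/SchemalessTimestampType in Java
    conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.MILLI_SECONDS)
finally:
    conn.close()  # the same guarantee the Java try-with-resources provides
```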
@@ -991,6 +991,17 @@ while(true) {
 
 `poll` obtains one message each time it is run.
 
+#### Assignment subscription Offset
+
+```
+long position(TopicPartition partition) throws SQLException;
+Map<TopicPartition, Long> position(String topic) throws SQLException;
+Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
+Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+```
+
 #### Close subscriptions
 
 ```java
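Editor's note: the new JDBC `position`/`beginningOffsets`/`seek` methods above are the Java face of the offset workflow this merge adds across connectors. As a cross-reference, the same rewind-to-start flow with the Python consumer documented later in this diff (hedged sketch; the topic name is illustrative and must already exist):

```python
# Sketch: fetch assignments, consume, then seek each partition back to its start.
from taos.tmq import Consumer

consumer = Consumer({"group.id": "demo"})
consumer.subscribe(["topic1"])

assignments = consumer.assignment()  # TopicPartition objects with the current offsets
consumer.poll(1)                     # consuming moves the offsets forward

for tp in assignments:
    consumer.seek(tp)                # rewind to the offsets captured before polling

consumer.unsubscribe()
consumer.close()
```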
@@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.
 
 ## Version support
 
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](https://github.com/taosdata/driver-go#remind)
 
 ## Supported features
 
@@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
 
 Commit information.
 
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
 * `func (c *Consumer) Unsubscribe() error`
 
 Unsubscribe.
@@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
 
 Commit information.
 
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
 * `func (c *Consumer) Unsubscribe() error`
 
 Unsubscribe.
@@ -476,7 +494,7 @@ Unsubscribe.
 
 Close consumer.
 
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
 
 ### parameter binding via WebSocket
 
@@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv
 
 Closes the parameter binding.
 
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
 
 ## API Reference
 
@@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
 Native connections are supported on the same platforms as the TDengine client driver.
 Websocket connections are supported on all platforms that can run Go.
 
-## Version support
+## Version history
 
-Please refer to [version support list](/reference/connector#version-support)
+| connector-rust version | TDengine version | major features |
+| :----------------: | :--------------: | :--------------------------------------------------: |
+| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
+| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
+| v0.7.6 | 3.0.3.0 | Support req_id in query. |
+| v0.6.0 | 3.0.0.0 | Base features. |
 
 The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
 
@@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in
 }
 ```
 
+Get assignments:
+
+Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+
+Seek offset:
+
+Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
+
 Unsubscribe:
 
 ```rust
@@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
 - `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
 - `auto.commit.interval.ms`: Interval for automatic commits.
 
-For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
 
 For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
 
@@ -453,6 +453,170 @@ As the way to connect introduced above but add `req_id` argument.
 </TabItem>
 </Tabs>
 
+### Subscription
+
+Connector support data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
+
+<Tabs defaultValue="native">
+<TabItem value="native" label="native connection">
+
+The `consumer` in the connector contains the subscription api.
+
+#### Create Consumer
+
+The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
+
+```python
+from taos.tmq import Consumer
+
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
+#### Subscribe topics
+
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume
+
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+
+```python
+while True:
+    res = consumer.poll(1)
+    if not res:
+        continue
+    err = res.error()
+    if err is not None:
+        raise err
+    val = res.value()
+
+    for block in val:
+        print(block.fetchall())
+```
+
+#### assignment
+
+The `assignment` function is used to get the assignment of the topic.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Seek
+
+The `seek` function is used to reset the assignment of the topic.
+
+```python
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
+```
+
+#### After consuming data
+
+You should unsubscribe from the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### Tmq subscription example
+
+```python
+{{#include docs/examples/python/tmq_example.py}}
+```
+
+#### assignment and seek example
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+```
+
+</TabItem>
+
+<TabItem value="websocket" label="WebSocket connection">
+
+In addition to native connections, the connector also supports subscriptions via websockets.
+
+#### Create Consumer
+
+The syntax for creating a consumer is "consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
+
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+```
+
+#### subscribe topics
+
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume
+
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+
+```python
+while True:
+    res = consumer.poll(timeout=1.0)
+    if not res:
+        continue
+    err = res.error()
+    if err is not None:
+        raise err
+    for block in res:
+        for row in block:
+            print(row)
+```
+
+#### assignment
+
+The `assignment` function is used to get the assignment of the topic.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Seek
+
+The `seek` function is used to reset the assignment of the topic.
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
+```
+
+#### After consuming data
+
+You should unsubscribe from the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### Subscription example
+
+```python
+{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+
+#### Assignment and seek example
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+```
+
+</TabItem>
+</Tabs>
+
 ### Schemaless Insert
 
 Connector support schemaless insert.
|
||||||
|
|
||||||
| Example program links | Example program content |
|
| Example program links | Example program content |
|
||||||
| ------------------------------------------------------------------------------------------------------------- | ------------------- ---- |
|
| ------------------------------------------------------------------------------------------------------------- | ------------------- ---- |
|
||||||
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
|
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding,
|
||||||
|
bind multiple rows at once |
|
||||||
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | bind_row.py
|
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | bind_row.py
|
||||||
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
|
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
|
||||||
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
|
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
|
||||||
|
|
|
@@ -5,7 +5,7 @@ description: This document describes the configuration parameters for the TDengi
 
 ## Configuration File on Server Side
 
-On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
+On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos` on Linux system, it's located under `C:\TDengine` on Windows system. The location of configuration file can be specified by using `-c` parameter on the CLI of `taosd`. For example, on Linux system the configuration file can be put under `/home/user` and used like below
 
 ```
 taosd -c /home/user
@@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
 
 
 
-## What is Confluent?
-
-[Confluent](https://www.confluent.io/) adds many extensions to Kafka. include:
-
-1. Schema Registry
-2. REST Proxy
-3. Non-Java Clients
-4. Many packaged Kafka Connect plugins
-5. GUI for managing and monitoring Kafka - Confluent Control Center
-
-Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-
-
-Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
-
 ## Prerequisites
 
 1. Linux operating system
 2. Java 8 and Maven installed
-3. Git is installed
+3. Git/curl/vi is installed
 4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
 
-## Install Confluent
+## Install Kafka
 
-Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
-
 Execute in any directory:
 
-````
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
-````
+```shell
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
+```
 
-Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
+Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
 
 ```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
 ```
 
 Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
 
-After the installation is complete, you can enter `confluent version` for simple verification:
-
-```
-# confluent version
-confluent - Confluent CLI
-
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```
-
 ## Install TDengine Connector plugin
 
 ### Install from source code
 
-```
+```shell
 git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
 cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
 ```
 
-The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path.
+The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.
 
-### Install with confluent-hub
+### Add configuration file
 
-[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
-**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
+Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
 
-## Start Confluent
-
-```
-confluent local services start
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
 ```
 
-:::note
-Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
-:::
+## Start Kafka Services
 
-:::tip
-If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
+Use the command below to start all services:
 
-```title="Console output log" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
 
-To clear data, execute `rm -rf /tmp/confluent.106668`.
-:::
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
 
-### Check Confluent Services Status
-
-Use command bellow to check the status of all service:
-
-```
-confluent local services status
-```
-
-The expected output is:
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
 ```
 
 ### Check Successfully Loaded Plugin
 
 After Kafka Connect was completely started, you can use the command below to check if our plugins are installed successfully:
 
-```
-confluent local services connect plugin list
+```shell
+curl http://localhost:8083/connectors
 ```
 
-The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow:
+The output is as below:
 
+```txt
+[]
 ```
-Available Connect Plugins:
-[
-  {
-    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
-    "type": "sink",
-    "version": "1.0.0"
-  },
-  {
-    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
-    "type": "source",
-    "version": "1.0.0"
-  },
-......
-```
-
-If not, please check the log file of Kafka Connect. To view the log file path, please execute:
-
-```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
-
-Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
 
 ## The use of TDengine Sink Connector
 
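Editor's note: `curl http://localhost:8083/connectors` only lists connector instances, so an empty `[]` is still a healthy response right after startup. A hedged Python sketch that additionally queries the standard Kafka Connect `/connector-plugins` endpoint to confirm the TDengine plugin classes were actually discovered (stdlib only; host and port match the default `connect-distributed.properties`):

```python
# Sketch: verify Kafka Connect is up and the TDengine plugin classes are visible.
import json
import urllib.request

BASE = "http://localhost:8083"

def get(path):
    with urllib.request.urlopen(BASE + path) as resp:
        return json.load(resp)

print("active connectors:", get("/connectors"))   # [] on a fresh install
for plugin in get("/connector-plugins"):
    if "taosdata" in plugin["class"]:
        print("found TDengine plugin:", plugin["class"])
```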
@@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
 
 The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
 
-### Add configuration file
+### Add Sink Connector configuration file
 
-```
+```shell
 mkdir ~/test
 cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
 ```
 
-sink-demo.properties' content is following:
+sink-demo.json's content is as follows:
 
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="sink-demo.json"
+{
+  "name": "TDengineSinkConnector",
+  "config": {
+    "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "tasks.max": "1",
+    "topics": "meters",
+    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+    "connection.user": "root",
+    "connection.password": "taosdata",
+    "connection.database": "power",
+    "db.schemaless": "line",
+    "data.precision": "ns",
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "errors.tolerance": "all",
+    "errors.deadletterqueue.topic.name": "dead_letter_topic",
+    "errors.deadletterqueue.topic.replication.factor": 1
+  }
+}
 ```
 
 Key configuration instructions:
 
-1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
-2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
+1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
+2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
 
-### Create Connector instance
+### Create Sink Connector instance
 
-````
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+````shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
 ````
 
 If the above command is executed successfully, the output is as follows:
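Editor's note: the `curl -X POST` above can equally be scripted. A hedged stdlib sketch posting `sink-demo.json` (path assumed relative to the working directory created in the previous step):

```python
# Sketch: create the sink connector by POSTing the JSON config to Kafka Connect.
import urllib.request

with open("sink-demo.json", "rb") as f:
    body = f.read()

req = urllib.request.Request(
    "http://localhost:8083/connectors",
    data=body,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode())  # 201 plus the created config on success
```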
@@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
 "tasks.max": "1",
 "topics": "meters",
 "value.converter": "org.apache.kafka.connect.storage.StringConverter",
-"name": "TDengineSinkConnector"
+"name": "TDengineSinkConnector",
+"errors.tolerance": "all",
+"errors.deadletterqueue.topic.name": "dead_letter_topic",
+"errors.deadletterqueue.topic.replication.factor": "1",
 },
 "tasks": [],
 "type": "sink"
@@ -257,8 +181,8 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
 
 Use kafka-console-producer to write test data to the topic `meters`.
 
-```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+```shell
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
 ```
 
 :::note
|
||||||
|
|
||||||
Use the TDengine CLI to verify that the sync was successful.
|
Use the TDengine CLI to verify that the sync was successful.
|
||||||
|
|
||||||
```
|
```sql
|
||||||
taos> use power;
|
taos> use power;
|
||||||
Database changed.
|
Database changed.
|
||||||
|
|
||||||
taos> select * from meters;
|
taos> select * from meters;
|
||||||
ts | current | voltage | phase | groupid | location |
|
_ts | current | voltage | phase | groupid | location |
|
||||||
===============================================================================================================================================================
|
===============================================================================================================================================================
|
||||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||||
|
@ -291,31 +215,39 @@ The role of the TDengine Source Connector is to push all the data of a specific
|
||||||
|
|
||||||
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
|
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
|
||||||
|
|
||||||
The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
|
The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.
|
||||||
|
|
||||||
### Add configuration file
|
### Add Source Connector configuration file
|
||||||
|
|
||||||
```
|
```shell
|
||||||
vi source-demo.properties
|
vi source-demo.json
|
||||||
```
|
```
|
||||||
|
|
||||||
Input following content:
|
Input following content:
|
||||||
|
|
||||||
```ini title="source-demo.properties"
|
```json title="source-demo.json"
|
||||||
name=TDengineSourceConnector
|
{
|
||||||
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
|
"name":"TDengineSourceConnector",
|
||||||
tasks.max=1
|
"config":{
|
||||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
"connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||||
connection.username=root
|
"tasks.max": 1,
|
||||||
connection.password=taosdata
|
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||||
connection.database=test
|
"connection.username": "root",
|
||||||
connection.attempts=3
|
"connection.password": "taosdata",
|
||||||
connection.backoff.ms=5000
|
"connection.database": "test",
|
||||||
topic.prefix=tdengine-source-
|
"connection.attempts": 3,
|
||||||
poll.interval.ms=1000
|
"connection.backoff.ms": 5000,
|
||||||
fetch.max.rows=100
|
"topic.prefix": "tdengine",
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
"topic.delimiter": "-",
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
"poll.interval.ms": 1000,
|
||||||
|
"fetch.max.rows": 100,
|
||||||
|
"topic.per.stable": true,
|
||||||
|
"topic.ignore.db": false,
|
||||||
|
"out.format": "line",
|
||||||
|
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||||
|
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
|
||||||
|
}
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Prepare test data
|
### Prepare test data
|
||||||
|
@ -340,40 +272,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
|
||||||
|
|
||||||
Use TDengine CLI to execute SQL script
|
Use TDengine CLI to execute SQL script
|
||||||
|
|
||||||
```
|
```shell
|
||||||
taos -f prepare-source-data.sql
|
taos -f prepare-source-data.sql
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create Connector instance
|
### Create Connector instance
|
||||||
|
|
||||||
````
|
```shell
|
||||||
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
|
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||||
````
|
```
|
||||||
|
|
||||||
### View topic data
|
### View topic data
|
||||||
|
|
||||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
|
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-test-meters. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
|
||||||
|
|
||||||
````
|
````shell
|
||||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
|
||||||
````
|
````
|
||||||
|
|
||||||
output:
|
output:
|
||||||
|
|
||||||
````
|
```txt
|
||||||
......
|
......
|
||||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||||
......
|
......
|
||||||
````
|
```
|
||||||
|
|
||||||
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
|
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
|
||||||
|
|
||||||
````
|
```sql
|
||||||
USE test;
|
USE test;
|
||||||
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
||||||
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
||||||
````
|
```
|
||||||
|
|
||||||
Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
|
Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
|
||||||
|
|
||||||
|
@ -383,16 +315,16 @@ After testing, use the unload command to stop the loaded connector.
|
||||||
|
|
||||||
View currently active connectors:
|
View currently active connectors:
|
||||||
|
|
||||||
````
|
```shell
|
||||||
confluent local services connect connector status
|
curl http://localhost:8083/connectors
|
||||||
````
|
```
|
||||||
|
|
||||||
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
|
||||||
|
|
||||||
````
|
```shell
|
||||||
confluent local services connect connector unload TDengineSinkConnector
|
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
|
||||||
confluent local services connect connector unload TDengineSourceConnector
|
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
|
||||||
````
|
```
|
||||||
|
|
||||||
## Configuration reference
|
## Configuration reference
|
||||||
|
|
||||||
|
@ -427,22 +359,20 @@ The following configuration items apply to TDengine Sink Connector and TDengine
|
||||||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
|
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
|
||||||
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
|
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
|
||||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
|
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
|
||||||
6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
|
6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 0, this means to get all the data to the latest time.
|
||||||
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
|
7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is json. The default is line.
|
||||||
|
8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database>`.
|
||||||
|
9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix><topic.delimiter><stable.name>`, false indicates that the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`, and the default is false. Does not take effect when `topic.per.stable` is set to false.
|
||||||
|
10. `topic.delimiter`: topic name delimiter,default is `-`。
|
||||||
|
|
||||||
## Other notes
|
## Other notes
|
||||||
|
|
||||||
1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
|
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
|
||||||
2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
|
|
||||||
|
|
||||||
## Feedback
|
## Feedback
|
||||||
|
|
||||||
https://github.com/taosdata/kafka-connect-tdengine/issues
|
<https://github.com/taosdata/kafka-connect-tdengine/issues>
|
||||||
|
|
||||||
## Reference
|
## Reference
|
||||||
|
|
||||||
1. https://www.confluent.io/what-is-apache-kafka
|
1. For more information, see <https://kafka.apache.org/documentation/>
|
||||||
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
|
|
||||||
3. https://docs.confluent.io/platform/current/platform.html
|
|
||||||
|
|
|
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
 
 import Release from "/components/ReleaseV3";
 
+## 3.0.5.0
+
+<Release type="tdengine" version="3.0.5.0" />
+
 ## 3.0.4.2
 
 <Release type="tdengine" version="3.0.4.2" />
 
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
 
 import Release from "/components/ReleaseV3";
 
+## 2.5.1
+
+<Release type="tools" version="2.5.1" />
+
 ## 2.5.0
 
 <Release type="tools" version="2.5.0" />
 
@@ -0,0 +1,58 @@
+import taos
+from taos.tmq import Consumer
+
+
+def prepare():
+    conn = taos.connect()
+    conn.execute("drop topic if exists tmq_assignment_demo_topic")
+    conn.execute("drop database if exists tmq_assignment_demo_db")
+    conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+    conn.select_db("tmq_assignment_demo_db")
+    conn.execute(
+        "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+    conn.execute(
+        "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+
+def taos_get_assignment_and_seek_demo():
+    prepare()
+    consumer = Consumer(
+        {
+            "group.id": "0",
+            # snapshot must be disabled,
+            # otherwise it will cause an invalid params error
+            "experimental.snapshot.enable": "false",
+        }
+    )
+    consumer.subscribe(["tmq_assignment_demo_topic"])
+
+    # get the topic assignment
+    assignments = consumer.assignment()
+    for assignment in assignments:
+        print(assignment)
+
+    # poll
+    consumer.poll(1)
+    consumer.poll(1)
+
+    # get the topic assignment again
+    after_poll_assignments = consumer.assignment()
+    for assignment in after_poll_assignments:
+        print(assignment)
+
+    # seek back to the beginning
+    for assignment in assignments:
+        consumer.seek(assignment)
+
+    # now the assignment should be the same as before the poll
+    assignments = consumer.assignment()
+    for assignment in assignments:
+        print(assignment)
+
+
+if __name__ == '__main__':
+    taos_get_assignment_and_seek_demo()

@@ -0,0 +1,57 @@
+import taos
+import taosws
+
+
+def prepare():
+    conn = taos.connect()
+    conn.execute("drop topic if exists tmq_assignment_demo_topic")
+    conn.execute("drop database if exists tmq_assignment_demo_db")
+    conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+    conn.select_db("tmq_assignment_demo_db")
+    conn.execute(
+        "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+    conn.execute(
+        "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+    conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+
+def taosws_get_assignment_and_seek_demo():
+    prepare()
+    consumer = taosws.Consumer(conf={
+        "td.connect.websocket.scheme": "ws",
+        # snapshot must be disabled,
+        # otherwise it will cause an invalid params error
+        "experimental.snapshot.enable": "false",
+        "group.id": "0",
+    })
+    consumer.subscribe(["tmq_assignment_demo_topic"])
+
+    # get the topic assignment
+    assignments = consumer.assignment()
+    for assignment in assignments:
+        print(assignment.to_string())
+
+    # poll
+    consumer.poll(1)
+    consumer.poll(1)
+
+    # get the topic assignment again
+    after_poll_assignments = consumer.assignment()
+    for assignment in after_poll_assignments:
+        print(assignment.to_string())
+
+    # seek back to the beginning
+    for assignment in assignments:
+        for a in assignment.assignments():
+            consumer.seek(assignment.topic(), a.vg_id(), a.offset())
+
+    # now the assignment should be the same as before the poll
+    assignments = consumer.assignment()
+    for assignment in assignments:
+        print(assignment.to_string())
+
+
+if __name__ == '__main__':
+    taosws_get_assignment_and_seek_demo()

@@ -105,6 +105,12 @@ class Consumer:
     def poll(self, timeout: float = 1.0):
         pass
 
+    def assignment(self):
+        pass
+
+    def seek(self, partition):
+        pass
+
     def close(self):
         pass
 
@@ -32,25 +32,22 @@ The TDengine JDBC driver implementation is kept as consistent as possible with relational database drivers
 
 Native connections support the same platforms as the TDengine client driver.
 REST connections support all platforms that can run Java.
 
-## Version support
+## Version history
 
-Please refer to the [version support list](../#版本支持)
-
-## Recent updates
-
-| taos-jdbcdriver version | Major changes |
-| :---: | :---: |
-| 3.2.1 | New features: WebSocket connections support schemaless and prepareStatement writes. Change: consumer poll now returns a ConsumerRecord, whose value() returns the data. |
-| 3.2.0 | Has connection issues; not recommended |
-| 3.1.0 | WebSocket connections support subscription |
-| 3.0.1 - 3.0.4 | Fixed result-set parsing errors in some cases. 3.0.1 was built with JDK 11; on JDK 8, use another version |
-| 3.0.0 | Supports TDengine 3.0 |
-| 2.0.42 | Fixed the return value of the wasNull interface in WebSocket connections |
-| 2.0.41 | Fixed the username/password encoding in REST connections |
-| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings |
-| 2.0.38 | Added batch fetching to JDBC REST connections |
-| 2.0.37 | Added support for json tags |
-| 2.0.36 | Added support for schemaless writing |
+| taos-jdbcdriver version | Major changes | TDengine version |
+| :---: | :---: | :---: |
+| 3.2.2 | New feature: data subscription supports seek. | 3.0.5.0 and later |
+| 3.2.1 | New features: WebSocket connections support schemaless and prepareStatement writes. Change: consumer poll now returns a ConsumerRecord, whose value() returns the data. | 3.0.3.0 and later |
+| 3.2.0 | Has connection issues; not recommended | - |
+| 3.1.0 | WebSocket connections support subscription | - |
+| 3.0.1 - 3.0.4 | Fixed result-set parsing errors in some cases. 3.0.1 was built with JDK 11; on JDK 8, use another version | - |
+| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and later |
+| 2.0.42 | Fixed the return value of the wasNull interface in WebSocket connections | - |
+| 2.0.41 | Fixed the username/password encoding in REST connections | - |
+| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings | - |
+| 2.0.38 | Added batch fetching to JDBC REST connections | - |
+| 2.0.37 | Added support for json tags | - |
+| 2.0.36 | Added support for schemaless writing | - |
 
 **Note**: Adding the `batchfetch` parameter to a REST connection and setting it to true enables the WebSocket connection.
 
@@ -80,45 +77,47 @@ There are four kinds of error codes that the JDBC connector may report:
 
 For the specific error codes, please refer to:
 
 | Error Code | Description | Suggested Actions |
 | ---------- | --------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
 | 0x2301 | connection already closed | The connection is closed; check the connection status, or recreate the connection before executing the command. |
 | 0x2302 | this operation is NOT supported currently! | The current interface is not supported; consider switching to another connection method. |
 | 0x2303 | invalid variables | Invalid parameters; check the corresponding interface specification and adjust the parameter types and sizes. |
 | 0x2304 | statement is closed | The statement is already closed; check whether it was closed before being used again, or whether the connection is healthy. |
 | 0x2305 | resultSet is closed | The resultSet has been released; check whether it was released before being used again. |
 | 0x2306 | Batch is empty! | Add parameters with prepareStatement before calling executeBatch. |
 | 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
 | 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for queries, not executeUpdate(). |
 | 0x230d | parameter index out of range | Parameter out of bounds; check the valid range of the parameter. |
 | 0x230e | connection already closed | The connection is closed; check whether the Connection was closed before being used again, or whether the connection is healthy. |
 | 0x230f | unknown sql type in tdengine | Check the Data Types supported by TDengine. |
 | 0x2310 | can't register JDBC-JNI driver | Cannot register the JNI driver; check whether the url is correct. |
 | 0x2312 | url is not set | Check whether the REST connection url is correct. |
 | 0x2314 | numeric value out of range | Check whether the correct interface was used for the numeric types in the result set. |
 | 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type was specified when converting between TDengine data types and JDBC data types. |
 | 0x2317 | | The wrong request type was used in the REST connection. |
 | 0x2318 | | A data transfer error occurred in the REST connection; check the network and retry. |
 | 0x2319 | user is required | Username information is missing when creating the connection |
 | 0x231a | password is required | Password information is missing when creating the connection |
 | 0x231c | httpEntity is null, sql: | An exception occurred during REST connection execution |
+| 0x231d | can't create connection with server within | Increase the connection time via the httpConnectTimeout parameter, or check the connection to taosAdapter. |
+| 0x231e | failed to complete the task within the specified time | Increase the execution time via the messageWaitTimeout parameter, or check the connection to taosAdapter. |
 | 0x2350 | unknown error | Unknown exception; please report it to the developers on github. |
 | 0x2352 | Unsupported encoding | An unsupported character encoding was specified under the native connection |
 | 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement over the native connection; check the taos log to locate the problem. |
 | 0x2354 | JNI connection is NULL | The Connection was already closed when executing a command over the native connection; check the connection to TDengine. |
 | 0x2355 | JNI result set is NULL | The native connection returned an abnormal result set; check the connection and retry. |
 | 0x2356 | invalid num of fields | The meta information of the result set obtained over the native connection does not match. |
 | 0x2357 | empty sql string | Provide a valid SQL statement to execute. |
 | 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation failed for the native connection; check the taos log to locate the problem. |
 | 0x2371 | consumer properties must not be null! | Parameters were empty when creating the subscription; provide valid parameters. |
 | 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains an empty value; provide valid parameters. |
 | 0x2373 | failed to set consumer property, | The parameter value contains an empty value; provide valid parameters. |
 | 0x2375 | topic reference has been destroyed | The topic reference was released while creating the data subscription; check the connection to TDengine. |
 | 0x2376 | failed to set consumer topic, topic name is empty | The topic name was empty while creating the data subscription; check whether the specified topic name is correct. |
 | 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed; check the connection to TDengine. |
 | 0x2378 | consumer create error | Failed to create the data subscription; check the taos log based on the error message to locate the problem. |
-| - | can't create connection with server within | Increase the connection time via the httpConnectTimeout parameter, or check the connection to taosAdapter. |
-| - | failed to complete the task within the specified time | Increase the execution time via the messageWaitTimeout parameter, or check the connection to taosAdapter. |
+| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative; use valid parameters |
+| 0x237a | vGroup not found in result set | The VGroup is not assigned to the current consumer; due to the rebalance mechanism, consumers and VGroups are not bound to each other |
 
 - [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
 <!-- - [TDengine_ERROR_CODE](../error-code) -->
 
@@ -169,7 +168,7 @@ In a Maven project, add the following dependency to pom.xml:
 <dependency>
     <groupId>com.taosdata.jdbc</groupId>
     <artifactId>taos-jdbcdriver</artifactId>
-    <version>3.2.1</version>
+    <version>3.2.2</version>
 </dependency>
 ```
 
@@ -916,14 +915,15 @@ public class SchemalessWsTest {
 
     public static void main(String[] args) throws SQLException {
         final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
-        Connection connection = DriverManager.getConnection(url);
+        try(Connection connection = DriverManager.getConnection(url)){
         init(connection);
 
-        SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
+        try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
         writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
         writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
         writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
-        System.exit(0);
+        }
+        }
     }
 
     private static void init(Connection connection) throws SQLException {

@@ -994,6 +994,17 @@ while(true) {
 
 Each call to `poll` fetches one message.
 
+#### Specifying the subscription offset
+
+```java
+long position(TopicPartition partition) throws SQLException;
+Map<TopicPartition, Long> position(String topic) throws SQLException;
+Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
+Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+```
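
A hedged usage sketch of the offset methods above; the consumer setup is omitted and the topic name `topic_meters` is an illustrative assumption:

```java
// Sketch: rewind an already-subscribed consumer to the beginning of each partition.
Map<TopicPartition, Long> begin = consumer.beginningOffsets("topic_meters");
for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
    consumer.seek(entry.getKey(), entry.getValue()); // back to the earliest offset
}
// position() reports where the consumer will read from next.
for (TopicPartition tp : begin.keySet()) {
    System.out.println(tp + " -> " + consumer.position(tp));
}
```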
+
 #### Closing the subscription
 
 ```java

@@ -30,7 +30,7 @@ REST connections support all platforms that can run Go.
 
 ## Version support
 
-Please refer to the [version support list](../#版本支持)
+Please refer to the [version support list](https://github.com/taosdata/driver-go#remind)
 
 ## Supported features
 
@@ -383,6 +383,15 @@ func main() {
 
 Commit the message.
 
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+  Get the consumption progress. (Requires TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+  Note: the `ignoredTimeoutMs` parameter is reserved for compatibility and is currently unused
+
+  Consume from the specified progress; a usage sketch follows below. (Requires TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
 * `func (c *Consumer) Close() error`
 
 Close the connection.
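
A minimal sketch of `Assignment` plus `Seek` for an already-subscribed consumer; the assumption that offset 0 replays a partition from its start is illustrative, so check the driver-go documentation before relying on it:

```go
// Sketch: fetch the current assignment and seek every partition back to the start.
partitions, err := consumer.Assignment()
if err != nil {
    panic(err)
}
for i := range partitions {
    partitions[i].Offset = 0 // assumed: 0 means the earliest offset
    if err := consumer.Seek(partitions[i], 0); err != nil { // second arg is ignored
        panic(err)
    }
}
```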
@@ -468,11 +477,20 @@ func main() {
 
 Commit the message.
 
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+  Get the consumption progress. (Requires TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+  Note: the `ignoredTimeoutMs` parameter is reserved for compatibility and is currently unused
+
+  Consume from the specified progress. (Requires TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
 * `func (c *Consumer) Close() error`
 
 Close the connection.
 
-For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
 
 ### Parameter binding via WebSocket
 

@@ -520,7 +538,7 @@ func main() {
 
 End the parameter binding.
 
-For a complete parameter binding example, see the [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+For a complete parameter binding example, see the [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
 
 ## API reference
 
@@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
 
 Native connections support the same platforms as the TDengine client driver.
 Websocket connections support all platforms that can run Rust.
 
-## Version support
+## Version history
 
-Please refer to the [version support list](../#版本支持)
+| Rust connector version | TDengine version | Major features |
+| :---: | :---: | :---: |
+| v0.8.10 | 3.0.5.0 or later | Message subscription: get the consumption progress and start consuming from a specified progress. |
+| v0.8.0 | 3.0.4.0 | Supports schemaless writing. |
+| v0.7.6 | 3.0.3.0 | Supports using req_id in requests. |
+| v0.6.0 | 3.0.0.0 | Basic features. |
 
 The Rust connector is still under rapid development; backward compatibility is not guaranteed before 1.0. Using TDengine 3.0 or later is recommended to avoid known issues.
 
@@ -502,6 +507,22 @@ The TMQ message queue is a [futures::Stream](https://docs.rs/futures/latest/futur
 }
 ```
 
+Get the consumption progress:
+
+Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+
+Consume from the specified progress:
+
+Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
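
Combining the two calls, a hedged sketch follows; the shape of the value returned by `assignments()` and the field names on its entries are assumptions — consult <https://docs.rs/taos> for the actual types:

```rust
// Sketch: seek every vgroup of every subscribed topic back to its earliest offset.
if let Some(topics) = consumer.assignments().await {
    for (topic, assignments) in topics {
        for a in assignments {
            // vgroup_id/begin are assumed field names on the assignment entry
            consumer.offset_seek(&topic, a.vgroup_id, a.begin).await?;
        }
    }
}
```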
 
 Stop the subscription:
 
 ```rust

@@ -516,7 +537,7 @@ consumer.unsubscribe().await;
 
 - `enable.auto.commit`: when set to `true`, automatic commit mode is enabled; you can use it when you are not sensitive to data consistency.
 - `auto.commit.interval.ms`: the time interval between automatic commits.
 
-For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
 
 For usage instructions of the other related struct APIs, see the Rust documentation hosted at <https://docs.rs/taos>.
 
@@ -456,27 +456,169 @@ The TaosCursor class uses the native connection for writes and queries; do not share it across threads in a multithreaded client
 
 ### Data subscription
 
-The connector supports data subscription. For details, see [Data Subscription](../../develop/tmq/).
+The connector supports data subscription. For details, see the [data subscription documentation](../../develop/tmq/).
 
 <Tabs defaultValue="native">
 <TabItem value="native" label="Native connection">
 
-`Consumer` provides the Python connector API for subscribing to TMQ data. For the related API definitions, see the [data subscription documentation](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api).
+`Consumer` provides the Python connector API for subscribing to TMQ data.
+
+#### Create a Consumer
+
+The syntax for creating a Consumer is `consumer = Consumer(configs)`. For the parameter definitions, see the [data subscription documentation](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer).
+
+```python
+from taos.tmq import Consumer
+
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
+#### Subscribe to topics
+
+The `subscribe` method of the Consumer API subscribes to topics; a consumer can subscribe to multiple topics at once.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume data
+
+The `poll` method of the Consumer API consumes data. `poll` takes a float timeout in seconds; it returns one Message before the timeout, or `None` on timeout. The caller must check the returned data for errors with the Message's `error()` method.
+
+```python
+while True:
+    res = consumer.poll(1)
+    if not res:
+        continue
+    err = res.error()
+    if err is not None:
+        raise err
+    val = res.value()
+
+    for block in val:
+        print(block.fetchall())
+```
+
+#### Get the consumption progress
+
+The `assignment` method of the Consumer API returns the consumption progress of all topics the Consumer subscribes to, as a list of TopicPartition.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Reset the consumption progress
+
+The `seek` method of the Consumer API resets the Consumer's consumption progress to a specified position; its parameter is a TopicPartition.
+
+```python
+from taos.tmq import TopicPartition
+
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
+```
+
+#### Finish consuming
+
+When consumption is finished, unsubscribe and close the Consumer.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### tmq subscription sample code
+
 ```python
 {{#include docs/examples/python/tmq_example.py}}
 ```
+
+#### Sample code for getting and resetting the consumption progress
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+```
 
 </TabItem>
 
 <TabItem value="websocket" label="WebSocket connection">
 
-In addition to the native connection, the Python connector also supports subscribing to TMQ data via websocket.
+In addition to the native connection, the Python connector also supports subscribing to TMQ data via websocket; subscribing to TMQ data over websocket requires installing `taos-ws-py`.
+
+The taosws `Consumer` API provides a websocket-based API for subscribing to TMQ data.
+
+#### Create a Consumer
+
+The syntax for creating a Consumer is `consumer = Consumer(conf=configs)`; the `td.connect.websocket.scheme` parameter must be set to "ws". For the parameter definitions, see the [data subscription documentation](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer).
+
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+```
+
+#### Subscribe to topics
+
+The `subscribe` method of the Consumer API subscribes to topics; a consumer can subscribe to multiple topics at once.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume data
+
+The `poll` method of the Consumer API consumes data. `poll` takes a float timeout in seconds; it returns one Message before the timeout, or `None` on timeout. The caller must check the returned data for errors with the Message's `error()` method.
+
+```python
+while True:
+    res = consumer.poll(timeout=1.0)
+    if not res:
+        continue
+    err = res.error()
+    if err is not None:
+        raise err
+    for block in res:
+        for row in block:
+            print(row)
+```
+
+#### Get the consumption progress
+
+The `assignment` method of the Consumer API returns the consumption progress of all topics the Consumer subscribes to, as a list of TopicPartition.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Reset the consumption progress
+
+The `seek` method of the Consumer API resets the Consumer's consumption progress to a specified position.
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
+```
+
+#### Finish consuming
+
+When consumption is finished, unsubscribe and close the Consumer.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### tmq subscription sample code
+
 ```python
 {{#include docs/examples/python/tmq_websocket_example.py}}
 ```
+
+The connector provides an `assignment` interface for obtaining the topic assignment, which can be used to query the consumption progress of subscribed topics, and a `seek` interface for resetting the consumption progress of a topic.
+
+#### Sample code for getting and resetting the consumption progress
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+```
 
 </TabItem>
 </Tabs>
 
@@ -55,7 +55,7 @@ window_clause: {
 | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
 
 interp_clause:
-    RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
+    RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
 
 partition_by_clause:
     PARTITION BY expr [, expr] ...
 
@@ -890,9 +890,10 @@ ignore_null_values: {
 
 - INTERP is used to get the value of a specified column at a specified time cross-section. If no row data meets the condition at that cross-section, a value is interpolated according to the FILL parameter setting.
 - The input of INTERP is the data of the specified column; a conditional statement (where clause) can be used to filter the original column data. If no filter condition is specified, the input is all the data.
 - INTERP must be used together with the RANGE, EVERY, and FILL keywords.
-- The output time range of INTERP is specified by the RANGE(timestamp1,timestamp2) field and must satisfy timestamp1 <= timestamp2, where timestamp1 (required) is the start of the output time range (if timestamp1 meets the interpolation condition, timestamp1 is the first output record) and timestamp2 (required) is the end of the output time range (the timestamp of the last output record cannot be greater than timestamp2).
+- The output time range of INTERP is specified by the RANGE(timestamp1, timestamp2) field and must satisfy timestamp1 <= timestamp2, where timestamp1 is the start of the output time range (if timestamp1 meets the interpolation condition, timestamp1 is the first output record) and timestamp2 is the end of the output time range (the timestamp of the last output record cannot be greater than timestamp2). See the SQL sketch after this list.
 - INTERP determines the number of results in the output time range by the EVERY(time_unit) field, interpolating at fixed intervals of time_unit starting from timestamp1. time_unit can take the time units 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), 1w (week). For example, EVERY(500a) interpolates the specified data at 500-millisecond intervals.
 - INTERP decides how to interpolate at each qualifying output time point according to the FILL field. For usage of the FILL clause, see the [FILL clause](../distinguished/#fill-子句)
+- INTERP can interpolate at a single time point by specifying only a single timestamp in the RANGE field, in which case the EVERY field can be omitted. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
 - When INTERP is applied to a supertable, it interpolates over the data of all of the supertable's child tables sorted by the primary key column; it can also be used with PARTITION BY tbname to force the results onto a single timeline.
 - INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported since 3.0.2.0).
 - INTERP can be used with the pseudocolumn _isfilled to show whether the returned result is an original record or data produced by the interpolation algorithm (supported since 3.0.3.0).
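
To make the RANGE/EVERY/FILL interplay concrete, a hedged SQL sketch follows; the table `tb` and column `col` are illustrative:

```sql
-- Interpolate col every 500 ms over a closed time range, filling gaps linearly,
-- and report the interpolated timestamp and whether each row was filled.
SELECT _irowts, _isfilled, INTERP(col)
FROM tb
RANGE('2023-01-01 00:00:00', '2023-01-01 00:00:05')
EVERY(500a)
FILL(linear);
```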

@@ -5,7 +5,7 @@ description: "TDengine client and server configuration list"
 
 ## Specifying the configuration file for the server
 
-The TDengine backend service is provided by taosd; configuration parameters can be modified in the file taos.cfg to meet the requirements of different scenarios. The default location of the configuration file is the /etc/taos directory, and the directory can be specified with the taosd command-line argument -c. For example, to specify a configuration file located in the `/home/user` directory:
+The TDengine backend service is provided by taosd; configuration parameters can be modified in the file taos.cfg to meet the requirements of different scenarios. On Linux the default location of the configuration file is the `/etc/taos` directory; on Windows it is `C:\TDengine`. The directory containing the configuration file can be specified with the taosd command-line argument -c. For example, on Linux, to specify a configuration file located in the `/home/user` directory:
 
 ```
 taosd -c /home/user
 
@@ -16,169 +16,78 @@ The TDengine Source Connector is used to read data out of TDengine in real time and send it to Kafka
 
 
 
-## What is Confluent?
-
-[Confluent](https://www.confluent.io/) adds many extension features on top of Kafka, including:
-
-1. Schema Registry
-2. REST Proxy
-3. Non-Java clients
-4. Many packaged Kafka Connect plugins
-5. A GUI for managing and monitoring Kafka — the Confluent Control Center
-
-Some of these extensions are included in the community edition of Confluent; some are enterprise-only.
-
-
-The Confluent enterprise edition provides the `confluent` command-line tool for managing the components.
-
 ## Prerequisites
 
 Prerequisites for running the examples in this tutorial.
 
 1. A Linux operating system
 2. Java 8 and Maven installed
-3. Git installed
+3. Git, curl, and vi installed
 4. TDengine installed and started. If not, see [Install and Uninstall](/operation/pkg-install)
 
-## Install Confluent
-
-Confluent can be installed via Docker or a binary package. This article covers only the binary package.
+## Install Kafka
 
 Run the following in any directory:
 
-```
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
+```shell
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
 ```
 
-Then add the `$CONFLUENT_HOME/bin` directory to the PATH.
+Then add the `$KAFKA_HOME/bin` directory to the PATH.
 
 ```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
 ```
 
 The script above can be appended to the current user's profile file (~/.profile or ~/.bash_profile)
 
-After installation, you can run `confluent version` for a simple verification:
-
-```
-# confluent version
-confluent - Confluent CLI
-
-Version:     v2.6.1
-Git Ref:     6d920590
-Build Date:  2022-02-18T06:14:21Z
-Go Version:  go1.17.6 (linux/amd64)
-Development: false
-```
-
 ## Install the TDengine Connector plugin
 
-### Install from source
+### Build the plugin
 
-```
+```shell
 git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
 cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
 ```
 
-The script above clones the project source and then builds and packages it with Maven. After packaging, a zip of the plugin is generated in the `target/components/packages/` directory. Unzip this zip to the plugin installation path. The example above uses the built-in plugin installation path: `$CONFLUENT_HOME/share/java/`.
+The script above clones the project source and then builds and packages it with Maven. After packaging, a zip of the plugin is generated in the `target/components/packages/` directory. Unzip this zip to the plugin installation path. The example above uses the built-in plugin installation path: `$KAFKA_HOME/components/`.
 
-### Install with confluent-hub
+### Configure the plugin
 
-[Confluent Hub](https://www.confluent.io/hub) provides a service for downloading Kafka Connect plugins. Once the TDengine Kafka Connector is published to Confluent Hub, it can be installed with the `confluent-hub` command-line tool.
-**The TDengine Kafka Connector has not been officially released yet and cannot be installed this way.**
+Add the kafka-connect-tdengine plugin to the plugin.path of the `$KAFKA_HOME/config/connect-distributed.properties` configuration file
 
-## Start Confluent
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
+```
 
-```
-confluent local services start
-```
+## Start Kafka
 
-:::note
-Be sure to install the plugin before starting Confluent; otherwise loading the plugin will fail.
-:::
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
 
-:::tip
-If a component fails to start, try clearing its data and restarting. The data directory is printed to the console at startup, for example:
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
 
-```title="console log" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
+```
 
-To clear the data, run `rm -rf /tmp/confluent.106668`.
-:::
+### Verify that Kafka Connect started successfully
 
-### Verify that each component started successfully
-
 Enter the command:
 
-```
-confluent local services status
+```shell
+curl http://localhost:8083/connectors
 ```
 
 If everything started successfully, you will see the following output:
 
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
-```
+```txt
+[]
+```
 
-### Verify that the plugin installed successfully
-
-After the Kafka Connect component is fully started, list the successfully loaded plugins with:
-
-```
-confluent local services connect plugin list
-```
-
-If the installation succeeded, the output looks like:
-
-```txt {4,9}
-Available Connect Plugins:
-[
-  {
-    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
-    "type": "sink",
-    "version": "1.0.0"
-  },
-  {
-    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
-    "type": "source",
-    "version": "1.0.0"
-  },
-......
-```
-
-If the plugin installation failed, check the Kafka Connect startup log for errors; print the log path with:
-```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-The output of this command looks like: `/tmp/confluent.104086/connect/connect.stdout`.
-
-In the same directory as the log file `connect.stdout` there is another file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, a comma-separated list of paths. If plugin installation fails, it is probably because the actual installation path is not included in `plugin.path`.
-
 ## Using the TDengine Sink Connector
 
@@ -188,40 +97,47 @@ Internally, the TDengine Sink Connector uses the TDengine [schemaless write interface](../../conn
 
 The following example synchronizes data from the topic meters to the target database power. The data format is the InfluxDB line protocol format.
 
-### Add a configuration file
+### Add the Sink Connector configuration file
 
-```
+```shell
 mkdir ~/test
 cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
 ```
 
-The content of sink-demo.properties is as follows:
+The content of sink-demo.json is as follows:
 
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="sink-demo.json"
+{
+  "name": "TDengineSinkConnector",
+  "config": {
+    "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+    "tasks.max": "1",
+    "topics": "meters",
+    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+    "connection.user": "root",
+    "connection.password": "taosdata",
+    "connection.database": "power",
+    "db.schemaless": "line",
+    "data.precision": "ns",
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "errors.tolerance": "all",
+    "errors.deadletterqueue.topic.name": "dead_letter_topic",
+    "errors.deadletterqueue.topic.replication.factor": 1
+  }
+}
 ```
 
 Key configuration notes:
 
-1. `topics=meters` and `connection.database=power` mean: subscribe to the data of topic meters and write it to database power.
-2. `db.schemaless=line` means using data in the InfluxDB line protocol format.
+1. `"topics": "meters"` and `"connection.database": "power"` mean: subscribe to the data of topic meters and write it to database power.
+2. `"db.schemaless": "line"` means using data in the InfluxDB line protocol format.
 
-### Create the Connector instance
+### Create the Sink Connector instance
 
-```
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+```shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
 ```
 
 If the command above succeeds, the output is as follows:

@@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
 "tasks.max": "1",
 "topics": "meters",
 "value.converter": "org.apache.kafka.connect.storage.StringConverter",
-"name": "TDengineSinkConnector"
+"name": "TDengineSinkConnector",
+"errors.tolerance": "all",
+"errors.deadletterqueue.topic.name": "dead_letter_topic",
+"errors.deadletterqueue.topic.replication.factor": "1",
 },
 "tasks": [],
 "type": "sink"

@@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
 
 Use kafka-console-producer to add test data to the topic meters.
 
-```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+```shell
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
 ```
 
 :::note

@@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
 
 Use the TDengine CLI to verify that the synchronization succeeded.
 
-```
+```sql
 taos> use power;
 Database changed.
 
 taos> select * from meters;
-          ts           |   current    |   voltage    |    phase     | groupid |       location        |
+          _ts          |   current    |   voltage    |    phase     | groupid |       location        |
 ===============================================================================================================================================================
 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |

@@ -295,31 +214,39 @@ The role of the TDengine Source Connector is to push all the data of a TDengine database after a given moment
 
 The TDengine Source Connector converts the data in TDengine tables into the [InfluxDB line protocol format](/develop/insert-data/influxdb-line/) or the [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json) and then writes it to Kafka.
 
-The example program below synchronizes the data in database test to the topic tdengine-source-test.
+The example program below synchronizes the data in database test to the topic tdengine-test-meters.
 
-### Add a configuration file
+### Add the Source Connector configuration file
 
-```
-vi source-demo.properties
+```shell
+vi source-demo.json
 ```
 
 Enter the following content:
 
-```ini title="source-demo.properties"
-name=TDengineSourceConnector
-connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
-tasks.max=1
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.username=root
-connection.password=taosdata
-connection.database=test
-connection.attempts=3
-connection.backoff.ms=5000
-topic.prefix=tdengine-source-
-poll.interval.ms=1000
-fetch.max.rows=100
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="source-demo.json"
+{
+  "name":"TDengineSourceConnector",
+  "config":{
+    "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+    "tasks.max": 1,
+    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+    "connection.username": "root",
+    "connection.password": "taosdata",
+    "connection.database": "test",
+    "connection.attempts": 3,
+    "connection.backoff.ms": 5000,
+    "topic.prefix": "tdengine",
+    "topic.delimiter": "-",
+    "poll.interval.ms": 1000,
+    "fetch.max.rows": 100,
+    "topic.per.stable": true,
+    "topic.ignore.db": false,
+    "out.format": "line",
+    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+    "value.converter": "org.apache.kafka.connect.storage.StringConverter"
+  }
+}
 ```
 
 ### Prepare the test data

@@ -344,27 +271,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
 
 Use the TDengine CLI to execute the SQL file.
 
-```
+```shell
 taos -f prepare-source-data.sql
 ```
 
-### Create the Connector instance
+### Create the Source Connector instance
 
-```
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
+```shell
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
 ```
 
 ### View the topic data
 
-Use the kafka-console-consumer command-line tool to monitor the data in the topic tdengine-source-test. At first, all historical data is output; after two new rows are inserted into TDengine, kafka-console-consumer immediately outputs the two new rows as well. The output is in the InfluxDB line protocol format.
+Use the kafka-console-consumer command-line tool to monitor the data in the topic tdengine-test-meters. At first, all historical data is output; after two new rows are inserted into TDengine, kafka-console-consumer immediately outputs the two new rows as well. The output is in the InfluxDB line protocol format.
 
-```
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+```shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
 ```
 
 Output:
 
-```
+```txt
 ......
 meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000

@@ -373,7 +300,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
 
 All historical data is displayed. Switch to the TDengine CLI and insert two new rows:
 
-```
+```sql
 USE test;
 INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
 INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);

@@ -387,15 +314,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
 
 View the currently active connectors:
 
-```
-confluent local services connect connector status
+```shell
+curl http://localhost:8083/connectors
 ```
 
 If you followed the steps above, there should now be two active connectors. Unload them with the following commands:
 
-```
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
 ```
 
 ## Configuration reference
 
@@ -437,20 +364,20 @@ confluent local services connect connector unload TDengineSourceConnector
 3. `timestamp.initial`: Data synchronization start time, in the format 'yyyy-MM-dd HH:mm:ss'. If not specified, synchronization starts from the earliest record in the specified DB (see the windowing sketch after this list).
 4. `poll.interval.ms`: The interval for checking for newly created or deleted tables, in ms. The default is 1000.
 5. `fetch.max.rows`: The maximum number of rows retrieved per database query. The default is 100.
-6. `query.interval.ms`: The time span of the data read from TDengine per query, in milliseconds. Configure it according to the characteristics of the data in the table, to avoid reading too much or too little data per query; finding a good value by testing in the concrete environment is recommended. The default is 1000.
+6. `query.interval.ms`: The time span of the data read from TDengine per query, in milliseconds. Configure it according to the characteristics of the data in the table, to avoid reading too much or too little data per query; finding a good value by testing in the concrete environment is recommended. The default is 0, i.e. fetch all data up to the current latest time.
-7. `topic.per.stable`: If set to true, one super table in TDengine corresponds to one Kafka topic, named by the rule `<topic.prefix>-<connection.database>-<stable.name>`; if set to false, all data in the specified DB goes into one Kafka topic, named by the rule `<topic.prefix>-<connection.database>`
+7. `out.format`: Result output format. `line` means the InfluxDB line protocol format; `json` means json. The default is line.
+8. `topic.per.stable`: If set to true, one super table in TDengine corresponds to one Kafka topic, named by the rule `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if set to false, all data in the specified DB goes into one Kafka topic, named by the rule `<topic.prefix><topic.delimiter><connection.database>`
+9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true means the rule is `<topic.prefix><topic.delimiter><stable.name>`, false means the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`. The default is false. Has no effect when `topic.per.stable` is set to false.
+10. `topic.delimiter`: Topic name delimiter; the default is `-`.
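
As a hedged illustration of how `timestamp.initial` and `query.interval.ms` are described to slice the source data into query windows, here is a sketch of the behavior, not connector code:

```python
from datetime import datetime, timedelta

# Hypothetical sketch: generate the [start, end) query windows the Source Connector
# is described to use. A query.interval.ms of 0 means one window up to "now".
def query_windows(timestamp_initial: str, query_interval_ms: int, now: datetime):
    start = datetime.strptime(timestamp_initial, "%Y-%m-%d %H:%M:%S")
    if query_interval_ms == 0:
        yield (start, now)  # default: everything up to the latest time
        return
    step = timedelta(milliseconds=query_interval_ms)
    while start < now:
        yield (start, min(start + step, now))
        start += step

# e.g. list(query_windows("2018-10-03 14:38:05", 1000, datetime(2018, 10, 3, 14, 38, 8)))
```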
 
 ## Other notes
 
-1. The plugin installation location can be customized; see the official documentation: https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
-2. The examples in this tutorial use the Confluent platform, but the TDengine Kafka Connector itself works the same with a standalone Kafka installation, with the same configuration. For how to use the Kafka Connect plugin in a standalone Kafka environment, see the official documentation: https://kafka.apache.org/documentation/#connect.
+1. For how to use the Kafka Connect plugin in a standalone Kafka environment, see the official documentation: <https://kafka.apache.org/documentation/#connect>.
 
 ## Feedback
 
-Whatever problems you run into, you are welcome to report them in this project's Github repository: https://github.com/taosdata/kafka-connect-tdengine/issues.
+Whatever problems you run into, you are welcome to report them in this project's Github repository: <https://github.com/taosdata/kafka-connect-tdengine/issues>.
 
 ## Reference
 
-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1. <https://kafka.apache.org/documentation/>
 
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
 
 import Release from "/components/ReleaseV3";
 
+## 3.0.5.0
+
+<Release type="tdengine" version="3.0.5.0" />
+
 ## 3.0.4.2
 
 <Release type="tdengine" version="3.0.4.2" />
 
@@ -10,6 +10,10 @@ The download links for the taosTools installation packages of each version are as follows:
 
 import Release from "/components/ReleaseV3";
 
+## 2.5.1
+
+<Release type="tools" version="2.5.1" />
+
 ## 2.5.0
 
 <Release type="tools" version="2.5.0" />
 
@@ -5,7 +5,7 @@
 #spring.datasource.password=taosdata
 # datasource config - JDBC-RESTful
 spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
-spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
 spring.datasource.username=root
 spring.datasource.password=taosdata
 spring.datasource.druid.initial-size=5

@@ -42,27 +42,27 @@ IF (TD_LINUX)
 )
 
 target_link_libraries(tmq
-    taos_static
+    taos
 )
 
 target_link_libraries(stream_demo
-    taos_static
+    taos
 )
 
 target_link_libraries(schemaless
-    taos_static
+    taos
 )
 
 target_link_libraries(prepare
-    taos_static
+    taos
 )
 
 target_link_libraries(demo
-    taos_static
+    taos
 )
 
 target_link_libraries(asyncdemo
-    taos_static
+    taos
 )
 
 SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)

@@ -248,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
                                     tb_uid_t suid);
 
 char*   buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
+int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
 
 static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
   return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);

@@ -150,7 +150,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
-  TD_DEF_MSG_TYPE(TDMT_MND_UNUSED2, "unused2", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_VG, "create-vg", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_TMQ_TIMER, "tmq-tmr", SMTimerReq, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
   TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
@@ -163,6 +163,7 @@ typedef struct {
   int64_t checkPointId;
   int32_t taskId;
   int64_t streamId;
+  int64_t streamBackendRid;
 } SStreamState;

 typedef struct SFunctionStateStore {

@@ -327,6 +327,7 @@ struct SStreamTask {
   int64_t             checkpointingId;
   int32_t             checkpointAlignCnt;
   struct SStreamMeta* pMeta;
+  SSHashObj*          pNameMap;
 };

 // meta

@@ -344,7 +345,6 @@ typedef struct SStreamMeta {
   SRWLatch  lock;
   int32_t   walScanCounter;
   void*     streamBackend;
-  int32_t   streamBackendId;
   int64_t   streamBackendRid;
   SHashObj* pTaskBackendUnique;
 } SStreamMeta;
@@ -154,14 +154,14 @@ typedef struct SSnapshotMeta {
 typedef struct SSyncFSM {
   void* data;

-  int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+  int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
   SyncIndex (*FpAppliedIndexCb)(const struct SSyncFSM* pFsm);
-  int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
-  void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+  int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
+  void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);

   void (*FpRestoreFinishCb)(const struct SSyncFSM* pFsm, const SyncIndex commitIdx);
-  void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SReConfigCbMeta* pMeta);
-  void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+  void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SReConfigCbMeta* pMeta);
+  void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
   bool (*FpApplyQueueEmptyCb)(const struct SSyncFSM* pFsm);
   int32_t (*FpApplyQueueItems)(const struct SSyncFSM* pFsm);
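The const qualifier is dropped from the SFsmCbMeta* parameters above because, later in this commit, mndProcessWriteMsg reports validation failures back to the sync layer by assigning pMeta->code. A minimal sketch of the new callback contract, assuming a hypothetical applyEntry helper (not part of the commit):

    // Sketch only: an FSM callback may now write its apply result back into
    // the callback meta before returning, which the const pointer used to forbid.
    static int32_t exampleCommitCb(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta) {
      int32_t code = applyEntry(pFsm->data, pMsg);  // hypothetical apply helper
      pMeta->code = code;                           // legal now that pMeta is non-const
      return code;
    }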
@@ -22,21 +22,20 @@ extern "C" {
 // If the error is in a third-party library, place this header file under the third-party library header file.
 // When you want to use this feature, you should find or add the same function in the following sectio
-// #if !defined(WINDOWS)
+#if !defined(WINDOWS)

-// #ifndef ALLOW_FORBID_FUNC
-// #define malloc MALLOC_FUNC_TAOS_FORBID
-// #define calloc CALLOC_FUNC_TAOS_FORBID
-// #define realloc REALLOC_FUNC_TAOS_FORBID
-// #define free FREE_FUNC_TAOS_FORBID
-// #ifdef strdup
-// #undef strdup
-// #define strdup STRDUP_FUNC_TAOS_FORBID
-// #endif
-// #endif  // ifndef ALLOW_FORBID_FUNC
-// #endif  // if !defined(WINDOWS)
+#ifndef ALLOW_FORBID_FUNC
+#define malloc MALLOC_FUNC_TAOS_FORBID
+#define calloc CALLOC_FUNC_TAOS_FORBID
+#define realloc REALLOC_FUNC_TAOS_FORBID
+#define free FREE_FUNC_TAOS_FORBID
+#ifdef strdup
+#undef strdup
+#define strdup STRDUP_FUNC_TAOS_FORBID
+#endif
+#endif  // ifndef ALLOW_FORBID_FUNC
+#endif  // if !defined(WINDOWS)

-// // #define taosMemoryFree malloc
 // #define taosMemoryMalloc malloc
 // #define taosMemoryCalloc calloc
 // #define taosMemoryRealloc realloc
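The hunk above activates the previously commented-out allocator poisoning on non-Windows builds: a direct malloc/calloc/realloc/free/strdup in engine code now expands to an undefined *_FUNC_TAOS_FORBID symbol and breaks the build, steering callers to the taosMemory* wrappers. A hedged illustration of the effect (demo() is not code from this commit):

    #include "osMemory.h"

    void demo(void) {
      void* p = malloc(32);            // expands to MALLOC_FUNC_TAOS_FORBID -> build error
      void* q = taosMemoryMalloc(32);  // intended replacement
      taosMemoryFree(q);
    }

A translation unit that genuinely needs the raw libc calls can define ALLOW_FORBID_FUNC before including the header to opt out, as the #ifndef guard above shows.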
@@ -345,7 +345,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_MND_TRANS_CLOG_IS_NULL      TAOS_DEF_ERROR_CODE(0, 0x03D4)
 #define TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL  TAOS_DEF_ERROR_CODE(0, 0x03D5)
 #define TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x03D6) //internal
-#define TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT      TAOS_DEF_ERROR_CODE(0, 0x03D7)
+#define TSDB_CODE_MND_TRANS_SYNC_TIMEOUT      TAOS_DEF_ERROR_CODE(0, 0x03D7)
 #define TSDB_CODE_MND_TRANS_UNKNOW_ERROR      TAOS_DEF_ERROR_CODE(0, 0x03DF)

 // mnode-mq
@@ -80,5 +80,4 @@ fi

 # there can not libtaos.so*, otherwise ln -s error
 ${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
-[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || :
 [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :

@@ -40,7 +40,6 @@ else
   ${csudo}rm -f ${inc_link_dir}/taosudf.h || :
   [ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
   ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
-  [ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
   [ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :

   ${csudo}rm -f ${log_link_dir} || :

@@ -31,7 +31,6 @@ cd ${pkg_dir}

 libfile="libtaos.so.${tdengine_ver}"
 wslibfile="libtaosws.so"
-rocksdblib="librocksdb.so.8"

 # create install dir
 install_home_path="/usr/local/taos"

@@ -95,7 +94,6 @@ fi

 cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
 cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
-[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||:
 [ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
 cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
 cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include

@@ -45,7 +45,6 @@ echo buildroot: %{buildroot}

 libfile="libtaos.so.%{_version}"
 wslibfile="libtaosws.so"
-rocksdblib="librocksdb.so.8"

 # create install path, and cp file
 mkdir -p %{buildroot}%{homepath}/bin

@@ -93,7 +92,6 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
 fi
 cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
 [ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
-[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||:
 cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
 cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
 cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include

@@ -176,7 +174,6 @@ fi

 # there can not libtaos.so*, otherwise ln -s error
 ${csudo}rm -f %{homepath}/driver/libtaos* || :
-${csudo}rm -f %{homepath}/driver/librocksdb* || :

 #Scripts executed after installation
 %post

@@ -222,7 +219,6 @@ if [ $1 -eq 0 ];then
   ${csudo}rm -f ${inc_link_dir}/taoserror.h || :
   ${csudo}rm -f ${inc_link_dir}/taosudf.h || :
   ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
-  ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :

   ${csudo}rm -f ${log_link_dir} || :
   ${csudo}rm -f ${data_link_dir} || :
@@ -250,30 +250,18 @@ function install_lib() {
   # Remove links
   ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
   ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
-  ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
-  ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
   #${csudo}rm -rf ${v15_java_app_dir} || :
   ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*

   ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
   ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so

-  ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
-  ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
-
-  ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
-  ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
-
-
   [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :

   if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
     ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
     ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
-
-    ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || :
-    ${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || :

     [ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
   fi
@@ -111,11 +111,9 @@ fi
 if [ "$osType" == "Darwin" ]; then
   lib_files="${build_dir}/lib/libtaos.${version}.dylib"
   wslib_files="${build_dir}/lib/libtaosws.dylib"
-  rocksdb_lib_files="${build_dir}/lib/librocksdb.dylib.8.1.1"
 else
   lib_files="${build_dir}/lib/libtaos.so.${version}"
   wslib_files="${build_dir}/lib/libtaosws.so"
-  rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1"
 fi
 header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"

@@ -338,7 +336,6 @@ fi
 # Copy driver
 mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
 [ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
-[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || :

 # Copy connector
 if [ "$verMode" == "cluster" ]; then
@@ -202,19 +202,10 @@ function install_lib() {
   log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
   ${csudo}rm -f ${lib_link_dir}/libtaos* || :
   ${csudo}rm -f ${lib64_link_dir}/libtaos* || :

-  #rocksdb
-  [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
-  [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
-
-  #rocksdb
-  [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
-  [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :

   [ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
   [ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :

-  ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
   ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
   ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1

@@ -223,7 +214,6 @@ function install_lib() {
   if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
     ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
     ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
-    ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1

     [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
   fi
@@ -142,11 +142,9 @@ function clean_local_bin() {
 function clean_lib() {
   # Remove link
   ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
-  ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
   [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :

   ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
-  ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
   [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
   #${csudo}rm -rf ${v15_java_app_dir} || :
@@ -2465,19 +2465,31 @@ _end:
 }

 char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
-  if (stbFullName[0] == 0) {
+  char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
+  if (!pBuf) {
     return NULL;
   }
+  int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf);
+  if (code != TSDB_CODE_SUCCESS) {
+    taosMemoryFree(pBuf);
+    return NULL;
+  }
+  return pBuf;
+}
+
+int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) {
+  if (stbFullName[0] == 0) {
+    return TSDB_CODE_FAILED;
+  }

   SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
   if (tags == NULL) {
-    return NULL;
+    return TSDB_CODE_FAILED;
   }

-  void* cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
   if (cname == NULL) {
     taosArrayDestroy(tags);
-    return NULL;
+    return TSDB_CODE_FAILED;
   }

   SSmlKv pTag = {.key = "group_id",

@@ -2499,9 +2511,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
   taosArrayDestroy(tags);

   if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {
-    return NULL;
+    return TSDB_CODE_FAILED;
   }
-  return rname.ctbShortName;
+  return TSDB_CODE_SUCCESS;
 }

 int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
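The two hunks above split name construction out of buildCtbNameByGroupId: the wrapper keeps the old allocating interface, while buildCtbNameByGroupIdImpl fills a caller-supplied buffer and returns an error code instead. A sketch of the two call styles, assuming stbFullName and groupId are already in scope:

    // Allocating wrapper: the caller frees the returned name.
    char* name = buildCtbNameByGroupId(stbFullName, groupId);
    if (name != NULL) {
      // ... use name ...
      taosMemoryFree(name);
    }

    // Buffer-filling variant: no extra heap allocation on the caller's path.
    char buf[TSDB_TABLE_NAME_LEN + 1] = {0};
    if (buildCtbNameByGroupIdImpl(stbFullName, groupId, buf) == TSDB_CODE_SUCCESS) {
      // ... use buf ...
    }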
@@ -108,7 +108,7 @@ typedef enum {
   TRN_STAGE_UNDO_ACTION = 3,
   TRN_STAGE_COMMIT = 4,
   TRN_STAGE_COMMIT_ACTION = 5,
-  TRN_STAGE_FINISHED = 6,
+  TRN_STAGE_FINISH = 6,
   TRN_STAGE_PRE_FINISH = 7
 } ETrnStage;

@@ -157,6 +157,7 @@ typedef struct {
   void*   rpcRsp;
   int32_t rpcRspLen;
   int32_t redoActionPos;
+  SArray* prepareActions;
   SArray* redoActions;
   SArray* undoActions;
   SArray* commitActions;
|
||||||
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
|
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
|
||||||
int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
|
int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
|
||||||
int32_t mndTransAppendNullLog(STrans *pTrans);
|
int32_t mndTransAppendNullLog(STrans *pTrans);
|
||||||
|
int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction);
|
||||||
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
|
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
|
||||||
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
|
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
|
||||||
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
|
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
|
||||||
|
@@ -78,15 +79,23 @@ void    mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbnam
 void    mndTransSetSerial(STrans *pTrans);
 void    mndTransSetParallel(STrans *pTrans);
 void    mndTransSetOper(STrans *pTrans, EOperType oper);
-int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans);
+int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans);
+static int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
+  return mndTransCheckConflict(pMnode, pTrans);
+}
 int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
 int32_t mndTransProcessRsp(SRpcMsg *pRsp);
 void    mndTransPullup(SMnode *pMnode);
 int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans);
-void    mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader);
+void    mndTransExecute(SMnode *pMnode, STrans *pTrans);
+void    mndTransRefresh(SMnode *pMnode, STrans *pTrans);
 int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname);

+SSdbRaw *mndTransEncode(STrans *pTrans);
+SSdbRow *mndTransDecode(SSdbRaw *pRaw);
+void     mndTransDropData(STrans *pTrans);
+
+bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
 #ifdef __cplusplus
 }
 #endif
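Note on the hunk above: the misspelled mndTrancCheckConflict is renamed to mndTransCheckConflict, with a static wrapper kept under the old name so existing call sites keep compiling; the remaining hunks in this commit migrate those call sites to the new spelling. mndTransExecute also loses its isLeader flag, and the new mndTransRefresh covers the non-leader/replay path instead (see the mndSync.c hunk further down).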
@@ -27,6 +27,7 @@ void mndCleanupVgroup(SMnode *pMnode);
 SVgObj  *mndAcquireVgroup(SMnode *pMnode, int32_t vgId);
 void     mndReleaseVgroup(SMnode *pMnode, SVgObj *pVgroup);
 SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup);
+SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw);
 SEpSet   mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup);
 int32_t  mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId);
 void     mndSortVnodeGid(SVgObj *pVgroup);

@@ -36,6 +37,7 @@ int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup);
 SArray  *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId);
 int32_t  mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup);
 int32_t  mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups);
+int32_t  mndAddPrepareNewVgAction(SMnode *, STrans *pTrans, SVgObj *pVg);
 int32_t  mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid);
 int32_t  mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
 int32_t  mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
|
||||||
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
|
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int32_t mndSetPrepareNewVgActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
|
||||||
|
for (int32_t v = 0; v < pDb->cfg.numOfVgroups; ++v) {
|
||||||
|
if (mndAddPrepareNewVgAction(pMnode, pTrans, (pVgroups + v)) != 0) return -1;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
|
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
|
||||||
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
|
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
|
||||||
if (pDbRaw == NULL) return -1;
|
if (pDbRaw == NULL) return -1;
|
||||||
|
@ -424,7 +431,7 @@ static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
|
||||||
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroups + v);
|
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroups + v);
|
||||||
if (pVgRaw == NULL) return -1;
|
if (pVgRaw == NULL) return -1;
|
||||||
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
|
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
|
||||||
if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
|
if (sdbSetRawStatus(pVgRaw, SDB_STATUS_UPDATE) != 0) return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
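The new mndSetPrepareNewVgActions registers each about-to-be-created vgroup as a prepare action on the transaction, which appears to be why the vgroup redo logs in the second hunk switch from SDB_STATUS_CREATING to SDB_STATUS_UPDATE: the prepare action introduces the row first, and mndTransValidatePrepareAction (added to mndSync.c later in this commit) can re-check the requested vgroup ids against sdbGetMaxId when the raw entry is validated.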
@@ -589,9 +596,10 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
   mInfo("trans:%d, used to create db:%s", pTrans->id, pCreate->db);

   mndTransSetDbName(pTrans, dbObj.name, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
+  if (mndSetPrepareNewVgActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups, pNewUserDuped) != 0) goto _OVER;

@@ -832,7 +840,7 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p

   int32_t code = -1;
   mndTransSetDbName(pTrans, pOld->name, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
   if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;

@@ -1129,7 +1137,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
   mInfo("trans:%d start to drop db:%s", pTrans->id, pDb->name);

   mndTransSetDbName(pTrans, pDb->name, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     goto _OVER;
   }
@@ -632,7 +632,7 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC
   pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq, "create-dnode");
   if (pTrans == NULL) goto _OVER;
   mInfo("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   pRaw = mndDnodeActionEncode(&dnodeObj);
   if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;

@@ -889,7 +889,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
   if (pTrans == NULL) goto _OVER;
   mndTransSetSerial(pTrans);
   mInfo("trans:%d, used to drop dnode:%d, force:%d", pTrans->id, pDnode->id, force);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   pRaw = mndDnodeActionEncode(pDnode);
   if (pRaw == NULL) goto _OVER;
@@ -645,7 +645,7 @@ int32_t mndAddIndexImpl(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pSt

   // mInfo("trans:%d, used to add index to stb:%s", pTrans->id, pStb->name);
   mndTransSetDbName(pTrans, pDb->name, pStb->name);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   mndTransSetSerial(pTrans);

@@ -721,7 +721,7 @@ static int32_t mndDropIdx(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SIdxObj *p

   mInfo("trans:%d, used to drop idx:%s", pTrans->id, pIdx->name);
   mndTransSetDbName(pTrans, pDb->name, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   mndTransSetSerial(pTrans);
   if (mndSetDropIdxRedoLogs(pMnode, pTrans, pIdx) != 0) goto _OVER;

@@ -860,4 +860,4 @@ int32_t mndDropIdxsByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
   }

   return 0;
 }
@@ -578,7 +578,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
   if (pTrans == NULL) goto _OVER;
   mndTransSetSerial(pTrans);
   mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   SMnodeObj mnodeObj = {0};
   mnodeObj.id = pDnode->id;

@@ -732,7 +732,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
   if (pTrans == NULL) goto _OVER;
   mndTransSetSerial(pTrans);
   mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj, false) != 0) goto _OVER;
   if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -388,7 +388,7 @@ static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVg
   SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
   if (pVgRaw == NULL) return -1;
   if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
-  if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
+  if (sdbSetRawStatus(pVgRaw, SDB_STATUS_UPDATE) != 0) return -1;
   return 0;
 }

@@ -622,11 +622,11 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-sma");
   if (pTrans == NULL) goto _OVER;
   mndTransSetDbName(pTrans, pDb->name, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   mndTransSetSerial(pTrans);
   mInfo("trans:%d, used to create sma:%s stream:%s", pTrans->id, pCreate->name, streamObj.name);
+  if (mndAddPrepareNewVgAction(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
   if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
   if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
   if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;

@@ -845,7 +845,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p

   mInfo("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
   mndTransSetDbName(pTrans, pDb->name, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   mndTransSetSerial(pTrans);
@@ -874,7 +874,7 @@ _OVER:

 int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
   mndTransSetDbName(pTrans, pDb->name, pStb->name);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) return -1;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) return -1;
   if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
   if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
   if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;

@@ -1968,7 +1968,7 @@ static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbOb

   mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
   mndTransSetDbName(pTrans, pDb->name, pStb->name);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   if (needRsp) {
     void *pCont = NULL;

@@ -1998,7 +1998,7 @@ static int32_t mndAlterStbAndUpdateTagIdxImp(SMnode *pMnode, SRpcMsg *pReq, SDbO
   mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
   mndTransSetDbName(pTrans, pDb->name, pStb->name);

-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   if (needRsp) {
     void *pCont = NULL;

@@ -2242,7 +2242,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p

   mInfo("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
   mndTransSetDbName(pTrans, pDb->name, pStb->name);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
   if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;

@@ -3298,7 +3298,7 @@ static int32_t mndCheckIndexReq(SCreateTagIndexReq *pReq) {

   mInfo("trans:%d, used to add index to stb:%s", pTrans->id, pStb->name);
   mndTransSetDbName(pTrans, pDb->name, pStb->name);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;

   if (mndSetAlterStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
   if (mndSetAlterStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
@@ -735,7 +735,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
   mInfo("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name);

   mndTransSetDbName(pTrans, createStreamReq.sourceDB, streamObj.targetDb);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     mndTransDrop(pTrans);
     goto _OVER;
   }

@@ -890,7 +890,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
   STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "stream-checkpoint");
   if (pTrans == NULL) return -1;
   mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     mndReleaseStream(pMnode, pStream);
     mndTransDrop(pTrans);
     return -1;

@@ -1001,7 +1001,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
   mInfo("trans:%d, used to drop stream:%s", pTrans->id, dropReq.name);

   mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     sdbRelease(pMnode->pSdb, pStream);
     mndTransDrop(pTrans);
     return -1;

@@ -1369,7 +1369,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
   mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);

   mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     sdbRelease(pMnode->pSdb, pStream);
     mndTransDrop(pTrans);
     return -1;

@@ -1477,7 +1477,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
   mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);

   mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     sdbRelease(pMnode->pSdb, pStream);
     mndTransDrop(pTrans);
     return -1;
@@ -489,7 +489,7 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
   }

   mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL);
-  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
     mndTransDrop(pTrans);
     return -1;
   }
@@ -17,6 +17,7 @@
 #include "mndSync.h"
 #include "mndCluster.h"
 #include "mndTrans.h"
+#include "mndVgroup.h"

 static int32_t mndSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
   if (pMsg == NULL || pMsg->pCont == NULL) {
@ -73,76 +74,200 @@ static int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
|
static int32_t mndTransValidatePrepareAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
|
||||||
SMnode *pMnode = pFsm->data;
|
SSdbRow *pRow = NULL;
|
||||||
|
int32_t code = -1;
|
||||||
|
|
||||||
|
if (pAction->msgType == TDMT_MND_CREATE_VG) {
|
||||||
|
pRow = mndVgroupActionDecode(pAction->pRaw);
|
||||||
|
if (pRow == NULL) goto _OUT;
|
||||||
|
|
||||||
|
SVgObj *pVgroup = sdbGetRowObj(pRow);
|
||||||
|
if (pVgroup == NULL) goto _OUT;
|
||||||
|
|
||||||
|
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
|
||||||
|
if (maxVgId > pVgroup->vgId) {
|
||||||
|
mError("trans:%d, failed to satisfy vgroup id %d of prepare action. maxVgId:%d", pTrans->id, pVgroup->vgId,
|
||||||
|
maxVgId);
|
||||||
|
goto _OUT;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code = 0;
|
||||||
|
_OUT:
|
||||||
|
taosMemoryFreeClear(pRow);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t mndTransValidatePrepareStage(SMnode *pMnode, STrans *pTrans) {
|
||||||
|
int32_t code = -1;
|
||||||
|
int32_t action = 0;
|
||||||
|
|
||||||
|
int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions);
|
||||||
|
if (numOfActions == 0) {
|
||||||
|
code = 0;
|
||||||
|
goto _OUT;
|
||||||
|
}
|
||||||
|
|
||||||
|
mInfo("trans:%d, validate %d prepare actions.", pTrans->id, numOfActions);
|
||||||
|
|
||||||
|
for (action = 0; action < numOfActions; ++action) {
|
||||||
|
STransAction *pAction = taosArrayGet(pTrans->prepareActions, action);
|
||||||
|
|
||||||
|
if (pAction->actionType != TRANS_ACTION_RAW) {
|
||||||
|
mError("trans:%d, prepare action:%d of unexpected type:%d", pTrans->id, action, pAction->actionType);
|
||||||
|
goto _OUT;
|
||||||
|
}
|
||||||
|
|
||||||
|
code = mndTransValidatePrepareAction(pMnode, pTrans, pAction);
|
||||||
|
if (code != 0) {
|
||||||
|
mError("trans:%d, failed to validate prepare action: %d, numOfActions:%d", pTrans->id, action, numOfActions);
|
||||||
|
goto _OUT;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
code = 0;
|
||||||
|
_OUT:
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t mndTransValidateImp(SMnode *pMnode, STrans *pTrans) {
|
||||||
|
if (pTrans->stage == TRN_STAGE_PREPARE) {
|
||||||
|
if (mndTransCheckConflict(pMnode, pTrans) < 0) {
|
||||||
|
mError("trans:%d, failed to validate trans conflicts.", pTrans->id);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return mndTransValidatePrepareStage(pMnode, pTrans);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int32_t mndTransValidate(SMnode *pMnode, SSdbRaw *pRaw) {
|
||||||
|
STrans *pTrans = NULL;
|
||||||
|
int32_t code = -1;
|
||||||
|
|
||||||
|
SSdbRow *pRow = mndTransDecode(pRaw);
|
||||||
|
if (pRow == NULL) goto _OUT;
|
||||||
|
|
||||||
|
pTrans = sdbGetRowObj(pRow);
|
||||||
|
if (pTrans == NULL) goto _OUT;
|
||||||
|
|
||||||
|
code = mndTransValidateImp(pMnode, pTrans);
|
||||||
|
|
||||||
|
_OUT:
|
||||||
|
if (pTrans) mndTransDropData(pTrans);
|
||||||
|
if (pRow) taosMemoryFreeClear(pRow);
|
||||||
|
if (code) terrno = (terrno ? terrno : TSDB_CODE_MND_TRANS_CONFLICT);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
|
||||||
|
terrno = TSDB_CODE_SUCCESS;
|
||||||
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
|
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
|
||||||
SSdbRaw *pRaw = pMsg->pCont;
|
SSdbRaw *pRaw = pMsg->pCont;
|
||||||
|
STrans *pTrans = NULL;
|
||||||
|
int32_t code = -1;
|
||||||
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
|
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
|
||||||
|
|
||||||
|
if (transId <= 0) {
|
||||||
|
mError("trans:%d, invalid commit msg, cache transId:%d seq:%" PRId64, transId, pMgmt->transId, pMgmt->transSeq);
|
||||||
|
terrno = TSDB_CODE_INVALID_MSG;
|
||||||
|
goto _OUT;
|
||||||
|
}
|
||||||
|
|
||||||
mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
|
mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
|
||||||
" role:%s raw:%p sec:%d seq:%" PRId64,
|
" role:%s raw:%p sec:%d seq:%" PRId64,
|
||||||
transId, pMgmt->transId, pMeta->code, pMeta->index, pMeta->term, pMeta->lastConfigIndex, syncStr(pMeta->state),
|
transId, pMgmt->transId, pMeta->code, pMeta->index, pMeta->term, pMeta->lastConfigIndex, syncStr(pMeta->state),
|
||||||
pRaw, pMgmt->transSec, pMgmt->transSeq);
|
pRaw, pMgmt->transSec, pMgmt->transSeq);
|
||||||
|
|
||||||
if (pMeta->code == 0) {
|
code = mndTransValidate(pMnode, pRaw);
|
||||||
int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
|
if (code != 0) {
|
||||||
if (code != 0) {
|
mError("trans:%d, failed to validate requested trans since %s", transId, terrstr());
|
||||||
mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
|
code = 0;
|
||||||
return 0;
|
pMeta->code = terrno;
|
||||||
}
|
goto _OUT;
|
||||||
sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
taosThreadMutexLock(&pMgmt->lock);
|
code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
|
||||||
pMgmt->errCode = pMeta->code;
|
if (code != 0) {
|
||||||
|
mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
|
||||||
if (transId <= 0) {
|
code = 0;
|
||||||
taosThreadMutexUnlock(&pMgmt->lock);
|
pMeta->code = terrno;
|
||||||
mError("trans:%d, invalid commit msg, cache transId:%d seq:%" PRId64, transId, pMgmt->transId, pMgmt->transSeq);
|
goto _OUT;
|
||||||
  } else if (transId == pMgmt->transId) {
    if (pMgmt->errCode != 0) {
      mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
    } else {
      mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, transId, pMgmt->transSeq);
    }
    pMgmt->transId = 0;
    pMgmt->transSec = 0;
    pMgmt->transSeq = 0;
    tsem_post(&pMgmt->syncSem);
    taosThreadMutexUnlock(&pMgmt->lock);
  } else {
    taosThreadMutexUnlock(&pMgmt->lock);
    STrans *pTrans = mndAcquireTrans(pMnode, transId);
    if (pTrans != NULL) {
      mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
            transId, pTrans->createdTime, pMgmt->transId);
      mndTransExecute(pMnode, pTrans, false);
      mndReleaseTrans(pMnode, pTrans);
    } else {
      mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
    }
  }
}
  pTrans = mndAcquireTrans(pMnode, transId);
  if (pTrans == NULL) {
    mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
    goto _OUT;
  }

  if (pTrans->stage == TRN_STAGE_PREPARE) {
    bool continueExec = mndTransPerformPrepareStage(pMnode, pTrans);
    if (!continueExec) goto _OUT;
  }

  if (pTrans->id != pMgmt->transId) {
    mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
          pTrans->id, pTrans->createdTime, pMgmt->transId);
    mndTransRefresh(pMnode, pTrans);
  }

  sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
  sdbWriteFile(pMnode->pSdb, tsMndSdbWriteDelta);
  code = 0;

_OUT:
  if (pTrans) mndReleaseTrans(pMnode, pTrans);
  return code;
}

static int32_t mndPostMgmtCode(SMnode *pMnode, int32_t code) {
  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
  taosThreadMutexLock(&pMgmt->lock);
  if (pMgmt->transId == 0) {
    goto _OUT;
  }

  pMgmt->transId = 0;
  pMgmt->transSec = 0;
  pMgmt->transSeq = 0;
  pMgmt->errCode = code;
  tsem_post(&pMgmt->syncSem);

  if (pMgmt->errCode != 0) {
    mError("trans:%d, failed to propose since %s, post sem", pMgmt->transId, tstrerror(pMgmt->errCode));
  } else {
    mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, pMgmt->transId, pMgmt->transSeq);
  }

_OUT:
  taosThreadMutexUnlock(&pMgmt->lock);
  return 0;
}

int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
  int32_t code = 0;
  pMsg->info.conn.applyIndex = pMeta->index;
  pMsg->info.conn.applyTerm = pMeta->term;
  if (pMsg->code == 0) {
    SMnode *pMnode = pFsm->data;
    atomic_store_64(&pMnode->applied, pMsg->info.conn.applyIndex);
  }
  if (!syncUtilUserCommit(pMsg->msgType)) {
    goto _out;
  }
  code = mndProcessWriteMsg(pFsm, pMsg, pMeta);
_out:
  rpcFreeCont(pMsg->pCont);
  pMsg->pCont = NULL;
  return code;
}
int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
  SMnode *pMnode = pFsm->data;
  int32_t code = pMsg->code;
  if (code != 0) {
    goto _OUT;
  }

  pMsg->info.conn.applyIndex = pMeta->index;
  pMsg->info.conn.applyTerm = pMeta->term;
  pMeta->code = 0;

  atomic_store_64(&pMnode->applied, pMsg->info.conn.applyIndex);

  if (!syncUtilUserCommit(pMsg->msgType)) {
    goto _OUT;
  }
  code = mndProcessWriteMsg(pMnode, pMsg, pMeta);

_OUT:
  mndPostMgmtCode(pMnode, code ? code : pMeta->code);
  rpcFreeCont(pMsg->pCont);
  pMsg->pCont = NULL;
  return code;
}
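The rewrite above routes every exit of the apply callback through mndPostMgmtCode, so the waiter blocked on syncSem is woken at most once, under the lock, for both the success and the failure path. A minimal sketch of that single-epilogue pattern with plain pthread primitives (ProposalMgmt and its fields are illustrative, not the real SSyncMgmt layout):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdint.h>

    /* Illustrative stand-in for SSyncMgmt: one in-flight proposal,
     * one waiter blocked on syncSem until the raft log is applied. */
    typedef struct {
      pthread_mutex_t lock;
      sem_t           syncSem;
      int32_t         transId;  /* 0 means nothing is in flight */
      int32_t         errCode;
    } ProposalMgmt;

    /* Every exit path of the commit callback funnels through here, so the
     * semaphore is posted at most once and always while holding the lock. */
    static int32_t postMgmtCode(ProposalMgmt *pMgmt, int32_t code) {
      pthread_mutex_lock(&pMgmt->lock);
      if (pMgmt->transId == 0) goto _out;  /* no waiter registered */
      pMgmt->transId = 0;
      pMgmt->errCode = code;
      sem_post(&pMgmt->syncSem);
    _out:
      pthread_mutex_unlock(&pMgmt->lock);
      return 0;
    }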
@ -753,7 +753,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
  }

  mndTransSetDbName(pTrans, pTopic->db, NULL);
  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
    mndReleaseTopic(pMnode, pTopic);
    mndTransDrop(pTrans);
    return -1;
@ -23,28 +23,25 @@
#include "mndSync.h"
#include "mndUser.h"

#define TRANS_VER_NUMBER   1
#define TRANS_VER1_NUMBER  1
#define TRANS_VER2_NUMBER  2
#define TRANS_ARRAY_SIZE   8
#define TRANS_RESERVE_SIZE 48

static SSdbRaw *mndTransActionEncode(STrans *pTrans);
static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw);
static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans);
static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *OldTrans, STrans *pOld);
static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc);
static int32_t mndTransDelete(SSdb *pSdb, STrans *pTrans, bool callFunc);

static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw);
static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction);
static void    mndTransDropLogs(SArray *pArray);
static void    mndTransDropActions(SArray *pArray);
static void    mndTransDropData(STrans *pTrans);
static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray);
static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans);
static bool    mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
static bool    mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans);
static bool    mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans);
static bool    mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans);
@ -52,7 +49,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans);
static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsLeader(pMnode); }

static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
@ -67,11 +64,11 @@ int32_t mndInitTrans(SMnode *pMnode) {
  SSdbTable table = {
      .sdbType = SDB_TRANS,
      .keyType = SDB_KEY_INT32,
      .encodeFp = (SdbEncodeFp)mndTransActionEncode,
      .encodeFp = (SdbEncodeFp)mndTransEncode,
      .decodeFp = (SdbDecodeFp)mndTransActionDecode,
      .decodeFp = (SdbDecodeFp)mndTransDecode,
      .insertFp = (SdbInsertFp)mndTransActionInsert,
      .updateFp = (SdbUpdateFp)mndTransActionUpdate,
      .deleteFp = (SdbDeleteFp)mndTransActionDelete,
      .deleteFp = (SdbDeleteFp)mndTransDelete,
  };

  mndSetMsgHandle(pMnode, TDMT_MND_TRANS_TIMER, mndProcessTransTimer);
@ -103,15 +100,55 @@ static int32_t mndTransGetActionsSize(SArray *pArray) {
  return rawDataLen;
}

static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
  terrno = TSDB_CODE_OUT_OF_MEMORY;
static int32_t mndTransEncodeAction(SSdbRaw *pRaw, int32_t *offset, SArray *pActions, int32_t actionsNum) {
  int32_t dataPos = *offset;
  int8_t  unused = 0;
  int32_t ret = -1;

  for (int32_t i = 0; i < actionsNum; ++i) {
    STransAction *pAction = taosArrayGet(pActions, i);
    SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
    if (pAction->actionType == TRANS_ACTION_RAW) {
      int32_t len = sdbGetRawTotalSize(pAction->pRaw);
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, len, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
    } else if (pAction->actionType == TRANS_ACTION_MSG) {
      SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
      SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
    } else {
      // nothing
    }
  }
  ret = 0;

_OVER:
  *offset = dataPos;
  return ret;
}

SSdbRaw *mndTransEncode(STrans *pTrans) {
  terrno = TSDB_CODE_INVALID_MSG;
  int8_t sver = taosArrayGetSize(pTrans->prepareActions) ? TRANS_VER2_NUMBER : TRANS_VER1_NUMBER;

  int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE + pTrans->paramLen;
  rawDataLen += mndTransGetActionsSize(pTrans->prepareActions);
  rawDataLen += mndTransGetActionsSize(pTrans->redoActions);
  rawDataLen += mndTransGetActionsSize(pTrans->undoActions);
  rawDataLen += mndTransGetActionsSize(pTrans->commitActions);

  SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, TRANS_VER_NUMBER, rawDataLen);
  SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, sver, rawDataLen);
  if (pRaw == NULL) {
    mError("trans:%d, failed to alloc raw since %s", pTrans->id, terrstr());
    return NULL;
@ -131,91 +168,22 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
  SDB_SET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
  SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER)

  int32_t prepareActionNum = taosArrayGetSize(pTrans->prepareActions);
  int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);
  int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions);
  int32_t commitActionNum = taosArrayGetSize(pTrans->commitActions);

  if (sver > TRANS_VER1_NUMBER) {
    SDB_SET_INT32(pRaw, dataPos, prepareActionNum, _OVER)
  }
  SDB_SET_INT32(pRaw, dataPos, redoActionNum, _OVER)
  SDB_SET_INT32(pRaw, dataPos, undoActionNum, _OVER)
  SDB_SET_INT32(pRaw, dataPos, commitActionNum, _OVER)

  int8_t unused = 0;
  for (int32_t i = 0; i < redoActionNum; ++i) {
    STransAction *pAction = taosArrayGet(pTrans->redoActions, i);
    SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
    if (pAction->actionType == TRANS_ACTION_RAW) {
      int32_t len = sdbGetRawTotalSize(pAction->pRaw);
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, len, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
    } else if (pAction->actionType == TRANS_ACTION_MSG) {
      SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
      SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
    } else {
      // nothing
    }
  }

  for (int32_t i = 0; i < undoActionNum; ++i) {
    STransAction *pAction = taosArrayGet(pTrans->undoActions, i);
    SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
    if (pAction->actionType == TRANS_ACTION_RAW) {
      int32_t len = sdbGetRawTotalSize(pAction->pRaw);
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, len, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
    } else if (pAction->actionType == TRANS_ACTION_MSG) {
      SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
      SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
    } else {
      // nothing
    }
  }

  for (int32_t i = 0; i < commitActionNum; ++i) {
    STransAction *pAction = taosArrayGet(pTrans->commitActions, i);
    SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
    SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
    SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
    if (pAction->actionType == TRANS_ACTION_RAW) {
      int32_t len = sdbGetRawTotalSize(pAction->pRaw);
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, len, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
    } else if (pAction->actionType == TRANS_ACTION_MSG) {
      SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
      SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
      SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
      SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
      SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
    } else {
      // nothing
    }
  }

  if (mndTransEncodeAction(pRaw, &dataPos, pTrans->prepareActions, prepareActionNum) < 0) goto _OVER;
  if (mndTransEncodeAction(pRaw, &dataPos, pTrans->redoActions, redoActionNum) < 0) goto _OVER;
  if (mndTransEncodeAction(pRaw, &dataPos, pTrans->undoActions, undoActionNum) < 0) goto _OVER;
  if (mndTransEncodeAction(pRaw, &dataPos, pTrans->commitActions, commitActionNum) < 0) goto _OVER;

  SDB_SET_INT32(pRaw, dataPos, pTrans->startFunc, _OVER)
  SDB_SET_INT32(pRaw, dataPos, pTrans->stopFunc, _OVER)
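The encode side now chooses the row version from the payload itself: rows with prepare actions are stamped TRANS_VER2_NUMBER, everything else stays at version 1, and the prepare-action count is written only for version-2 rows. A compact sketch of that version-gated layout, with put_i32 standing in for the SDB_SET_* macros (the buffer format here is illustrative):

    #include <stdint.h>
    #include <string.h>

    enum { VER1 = 1, VER2 = 2 };

    /* Stand-in for SDB_SET_INT32: append v at *pos and advance the cursor. */
    static void put_i32(uint8_t *buf, int32_t *pos, int32_t v) {
      memcpy(buf + *pos, &v, sizeof(v));
      *pos += (int32_t)sizeof(v);
    }

    /* Stamp the lowest version that can represent the record, so rows
     * without the new field remain readable by older binaries. */
    static int32_t encodeCounts(uint8_t *buf, int32_t prepareNum, int32_t redoNum) {
      int8_t  ver = (prepareNum > 0) ? VER2 : VER1;
      int32_t pos = 0;
      buf[pos++] = (uint8_t)ver;                      /* sver, as sdbAllocRaw records it */
      if (ver > VER1) put_i32(buf, &pos, prepareNum); /* version-2-only field */
      put_i32(buf, &pos, redoNum);
      return pos;  /* bytes written */
    }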
@ -242,23 +210,76 @@ _OVER:
  return pRaw;
}

static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
  terrno = TSDB_CODE_OUT_OF_MEMORY;
static int32_t mndTransDecodeAction(SSdbRaw *pRaw, int32_t *offset, SArray *pActions, int32_t actionNum) {
  STransAction action = {0};
  int32_t dataPos = *offset;
  int8_t  unused = 0;
  int8_t  stage = 0;
  int8_t  actionType = 0;
  int32_t dataLen = 0;
  int32_t ret = -1;

  for (int32_t i = 0; i < actionNum; ++i) {
    memset(&action, 0, sizeof(action));
    SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
    SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
    action.actionType = actionType;
    SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
    action.stage = stage;
    SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
    if (action.actionType == TRANS_ACTION_RAW) {
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
      action.pRaw = taosMemoryMalloc(dataLen);
      if (action.pRaw == NULL) goto _OVER;
      mTrace("raw:%p, is created", action.pRaw);
      SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
      if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
      action.pRaw = NULL;
    } else if (action.actionType == TRANS_ACTION_MSG) {
      SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
      tmsgUpdateDnodeEpSet(&action.epSet);
      SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
      action.pCont = taosMemoryMalloc(action.contLen);
      if (action.pCont == NULL) goto _OVER;
      SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
      if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
      action.pCont = NULL;
    } else {
      if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
    }
  }
  ret = 0;

_OVER:
  *offset = dataPos;
  taosMemoryFreeClear(action.pCont);
  return ret;
}

SSdbRow *mndTransDecode(SSdbRaw *pRaw) {
  terrno = TSDB_CODE_INVALID_MSG;

  SSdbRow *pRow = NULL;
  STrans *pTrans = NULL;
  char *pData = NULL;
  int32_t dataLen = 0;
  int8_t sver = 0;
  int32_t prepareActionNum = 0;
  int32_t redoActionNum = 0;
  int32_t undoActionNum = 0;
  int32_t commitActionNum = 0;
  int32_t dataPos = 0;
  STransAction action = {0};

  if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto _OVER;

  if (sver != TRANS_VER_NUMBER) {
  if (sver != TRANS_VER1_NUMBER && sver != TRANS_VER2_NUMBER) {
    terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
    goto _OVER;
  }
@ -294,127 +315,28 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
  SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_TABLE_FNAME_LEN, _OVER)
  SDB_GET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER)

  if (sver > TRANS_VER1_NUMBER) {
    SDB_GET_INT32(pRaw, dataPos, &prepareActionNum, _OVER)
  }
  SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &commitActionNum, _OVER)

  pTrans->prepareActions = taosArrayInit(prepareActionNum, sizeof(STransAction));
  pTrans->redoActions = taosArrayInit(redoActionNum, sizeof(STransAction));
  pTrans->undoActions = taosArrayInit(undoActionNum, sizeof(STransAction));
  pTrans->commitActions = taosArrayInit(commitActionNum, sizeof(STransAction));

  if (pTrans->prepareActions == NULL) goto _OVER;
  if (pTrans->redoActions == NULL) goto _OVER;
  if (pTrans->undoActions == NULL) goto _OVER;
  if (pTrans->commitActions == NULL) goto _OVER;

  int8_t unused = 0;
  for (int32_t i = 0; i < redoActionNum; ++i) {
    memset(&action, 0, sizeof(action));
    SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
    SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
    action.actionType = actionType;
    SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
    action.stage = stage;
    SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
    if (action.actionType == TRANS_ACTION_RAW) {
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
      action.pRaw = taosMemoryMalloc(dataLen);
      if (action.pRaw == NULL) goto _OVER;
      mTrace("raw:%p, is created", action.pRaw);
      SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
      if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
      action.pRaw = NULL;
    } else if (action.actionType == TRANS_ACTION_MSG) {
      SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
      tmsgUpdateDnodeEpSet(&action.epSet);
      SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
      action.pCont = taosMemoryMalloc(action.contLen);
      if (action.pCont == NULL) goto _OVER;
      SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
      if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
      action.pCont = NULL;
    } else {
      if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
    }
  }

  for (int32_t i = 0; i < undoActionNum; ++i) {
    memset(&action, 0, sizeof(action));
    SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
    SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
    action.actionType = actionType;
    SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
    action.stage = stage;
    SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
    if (action.actionType == TRANS_ACTION_RAW) {
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
      action.pRaw = taosMemoryMalloc(dataLen);
      if (action.pRaw == NULL) goto _OVER;
      mTrace("raw:%p, is created", action.pRaw);
      SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
      if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
      action.pRaw = NULL;
    } else if (action.actionType == TRANS_ACTION_MSG) {
      SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
      SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
      action.pCont = taosMemoryMalloc(action.contLen);
      if (action.pCont == NULL) goto _OVER;
      SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
      if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
      action.pCont = NULL;
    } else {
      if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
    }
  }

  for (int32_t i = 0; i < commitActionNum; ++i) {
    memset(&action, 0, sizeof(action));
    SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
    SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
    SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
    action.actionType = actionType;
    SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
    action.stage = stage;
    SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
    if (action.actionType) {
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
      action.pRaw = taosMemoryMalloc(dataLen);
      if (action.pRaw == NULL) goto _OVER;
      mTrace("raw:%p, is created", action.pRaw);
      SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
      if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
      action.pRaw = NULL;
    } else if (action.actionType == TRANS_ACTION_MSG) {
      SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
      SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
      SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
      SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
      action.pCont = taosMemoryMalloc(action.contLen);
      if (action.pCont == NULL) goto _OVER;
      SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
      if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
      action.pCont = NULL;
    } else {
      if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
    }
  }

  if (mndTransDecodeAction(pRaw, &dataPos, pTrans->prepareActions, prepareActionNum) < 0) goto _OVER;
  if (mndTransDecodeAction(pRaw, &dataPos, pTrans->redoActions, redoActionNum) < 0) goto _OVER;
  if (mndTransDecodeAction(pRaw, &dataPos, pTrans->undoActions, undoActionNum) < 0) goto _OVER;
  if (mndTransDecodeAction(pRaw, &dataPos, pTrans->commitActions, commitActionNum) < 0) goto _OVER;

  SDB_GET_INT32(pRaw, dataPos, &pTrans->startFunc, _OVER)
  SDB_GET_INT32(pRaw, dataPos, &pTrans->stopFunc, _OVER)
@ -434,7 +356,6 @@ _OVER:
    mError("trans:%d, failed to parse from raw:%p since %s", pTrans->id, pRaw, terrstr());
    mndTransDropData(pTrans);
    taosMemoryFreeClear(pRow);
    taosMemoryFreeClear(action.pCont);
    return NULL;
  }
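Decoding now mirrors encoding through one helper per action list. The helper takes the read cursor by pointer and writes it back even on the failure path, which keeps the caller's _OVER cleanup correct. A small sketch of that cursor-by-pointer contract (Action, read_i32 and the buffer layout are illustrative):

    #include <stdint.h>
    #include <string.h>

    typedef struct { int32_t id; int32_t errCode; } Action;

    /* Illustrative bounds-checked reader; returns -1 when the buffer is short. */
    static int32_t read_i32(const uint8_t *buf, int32_t len, int32_t *pos, int32_t *out) {
      if (*pos + (int32_t)sizeof(int32_t) > len) return -1;
      memcpy(out, buf + *pos, sizeof(int32_t));
      *pos += (int32_t)sizeof(int32_t);
      return 0;
    }

    /* Decodes n actions starting at *offset; *offset is updated even on
     * failure so the caller can see how far parsing got. */
    static int32_t decodeActions(const uint8_t *buf, int32_t len, int32_t *offset,
                                 Action *out, int32_t n) {
      int32_t pos = *offset, ret = -1;
      for (int32_t i = 0; i < n; ++i) {
        if (read_i32(buf, len, &pos, &out[i].id) < 0) goto _over;
        if (read_i32(buf, len, &pos, &out[i].errCode) < 0) goto _over;
      }
      ret = 0;
    _over:
      *offset = pos;
      return ret;
    }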
@ -458,7 +379,7 @@ static const char *mndTransStr(ETrnStage stage) {
      return "commit";
    case TRN_STAGE_COMMIT_ACTION:
      return "commitAction";
    case TRN_STAGE_FINISHED:
    case TRN_STAGE_FINISH:
      return "finished";
    case TRN_STAGE_PRE_FINISH:
      return "pre-finish";
@ -519,7 +440,11 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
  return 0;
}

static void mndTransDropData(STrans *pTrans) {
void mndTransDropData(STrans *pTrans) {
  if (pTrans->prepareActions != NULL) {
    mndTransDropActions(pTrans->prepareActions);
    pTrans->prepareActions = NULL;
  }
  if (pTrans->redoActions != NULL) {
    mndTransDropActions(pTrans->redoActions);
    pTrans->redoActions = NULL;
@ -549,7 +474,7 @@ static void mndTransDropData(STrans *pTrans) {
  (void)taosThreadMutexDestroy(&pTrans->mutex);
}

static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
static int32_t mndTransDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
  mInfo("trans:%d, perform delete action, row:%p stage:%s callfunc:%d, stopFunc:%d", pTrans->id, pTrans,
        mndTransStr(pTrans->stage), callFunc, pTrans->stopFunc);
@ -586,10 +511,11 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
          pOld->id, pOld, mndTransStr(pOld->stage), pOld->createdTime, pNew, mndTransStr(pNew->stage),
          pNew->createdTime);
    // only occured while sync timeout
    terrno = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
    terrno = TSDB_CODE_MND_TRANS_SYNC_TIMEOUT;
    return -1;
  }

  mndTransUpdateActions(pOld->prepareActions, pNew->prepareActions);
  mndTransUpdateActions(pOld->redoActions, pNew->redoActions);
  mndTransUpdateActions(pOld->undoActions, pNew->undoActions);
  mndTransUpdateActions(pOld->commitActions, pNew->commitActions);
@ -607,7 +533,7 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
  }

  if (pOld->stage == TRN_STAGE_PRE_FINISH) {
    pOld->stage = TRN_STAGE_FINISHED;
    pOld->stage = TRN_STAGE_FINISH;
    mTrace("trans:%d, stage from pre-finish to finished since perform update action", pNew->id);
  }
@ -646,6 +572,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
  pTrans->conflict = conflict;
  pTrans->exec = TRN_EXEC_PARALLEL;
  pTrans->createdTime = taosGetTimestampMs();
  pTrans->prepareActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
  pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
  pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
  pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
@ -728,6 +655,13 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) {
  return mndTransAppendAction(pTrans->commitActions, &action);
}

int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction) {
  pAction->stage = TRN_STAGE_PREPARE;
  pAction->actionType = TRANS_ACTION_RAW;
  pAction->mTraceId = pTrans->mTraceId;
  return mndTransAppendAction(pTrans->prepareActions, pAction);
}

int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) {
  pAction->stage = TRN_STAGE_REDO_ACTION;
  pAction->actionType = TRANS_ACTION_MSG;
@ -800,7 +734,7 @@ void mndTransSetParallel(STrans *pTrans) { pTrans->exec = TRN_EXEC_PARALLEL; }
void mndTransSetOper(STrans *pTrans, EOperType oper) { pTrans->oper = oper; }

static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
  SSdbRaw *pRaw = mndTransActionEncode(pTrans);
  SSdbRaw *pRaw = mndTransEncode(pTrans);
  if (pRaw == NULL) {
    mError("trans:%d, failed to encode while sync trans since %s", pTrans->id, terrstr());
    return -1;
@ -872,7 +806,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
  return conflict;
}

int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans) {
  if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
    if (strlen(pTrans->dbname) == 0 && strlen(pTrans->stbname) == 0) {
      terrno = TSDB_CODE_MND_TRANS_CONFLICT;
@ -891,7 +825,7 @@ int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
}

int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
    return -1;
  }
@ -922,7 +856,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
  pTrans->rpcRsp = NULL;
  pTrans->rpcRspLen = 0;

  mndTransExecute(pMnode, pNew, true);
  mndTransExecute(pMnode, pNew);
  mndReleaseTrans(pMnode, pNew);
  return 0;
}
@ -961,7 +895,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
  bool sendRsp = false;
  int32_t code = pTrans->code;

  if (pTrans->stage == TRN_STAGE_FINISHED) {
  if (pTrans->stage == TRN_STAGE_FINISH) {
    sendRsp = true;
  }
@ -1003,7 +937,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
      code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL;
    }
    if (code == TSDB_CODE_SYN_TIMEOUT) {
      code = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
      code = TSDB_CODE_MND_TRANS_SYNC_TIMEOUT;
    }

    if (i != 0 && code == 0) {
@ -1104,7 +1038,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
    mInfo("trans:%d, invalid action, index:%d, code:0x%x", transId, action, pRsp->code);
  }

  mndTransExecute(pMnode, pTrans, true);
  mndTransExecute(pMnode, pTrans);

_OVER:
  mndReleaseTrans(pMnode, pTrans);
@ -1392,8 +1326,25 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
  return code;
}

static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
  bool continueExec = true;
  int32_t code = 0;

  int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions);
  if (numOfActions == 0) goto _OVER;

  mInfo("trans:%d, execute %d prepare actions.", pTrans->id, numOfActions);

  for (int32_t action = 0; action < numOfActions; ++action) {
    STransAction *pAction = taosArrayGet(pTrans->prepareActions, action);
    code = mndTransExecSingleAction(pMnode, pTrans, pAction);
    if (code != 0) {
      mError("trans:%d, failed to execute prepare action:%d, numOfActions:%d", pTrans->id, action, numOfActions);
      return false;
    }
  }

_OVER:
  pTrans->stage = TRN_STAGE_REDO_ACTION;
  mInfo("trans:%d, stage from prepare to redoAction", pTrans->id);
  return continueExec;
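mndTransPerformPrepareStage drains the new prepareActions list and advances to redoAction only when every action succeeds; any failure aborts the stage transition so the transaction can be retried. A bare-bones sketch of that drain-then-advance shape (execOne stands in for mndTransExecSingleAction):

    #include <stdbool.h>
    #include <stdint.h>

    typedef int32_t (*ExecFp)(int32_t actionId);

    enum { STAGE_PREPARE = 0, STAGE_REDO_ACTION = 1 };

    /* Runs each prepare action in order; the stage only advances when all
     * of them succeed. A sketch, not the real mnode control flow. */
    static bool performPrepareStage(ExecFp execOne, const int32_t *ids, int32_t n,
                                    int32_t *stage) {
      for (int32_t i = 0; i < n; ++i) {
        if (execOne(ids[i]) != 0) return false;  /* stay in prepare, retry later */
      }
      *stage = STAGE_REDO_ACTION;
      return true;
    }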
@ -1476,7 +1427,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {

  if (code == 0) {
    pTrans->code = 0;
    pTrans->stage = TRN_STAGE_FINISHED;  // TRN_STAGE_PRE_FINISH is not necessary
    pTrans->stage = TRN_STAGE_FINISH;  // TRN_STAGE_PRE_FINISH is not necessary
    mInfo("trans:%d, stage from commitAction to finished", pTrans->id);
    continueExec = true;
  } else {
@ -1528,14 +1479,14 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
  return continueExec;
}

static bool mndTransPerfromPreFinishedStage(SMnode *pMnode, STrans *pTrans) {
static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans) {
  if (mndCannotExecuteTransAction(pMnode)) return false;

  bool continueExec = true;
  int32_t code = mndTransPreFinish(pMnode, pTrans);

  if (code == 0) {
    pTrans->stage = TRN_STAGE_FINISHED;
    pTrans->stage = TRN_STAGE_FINISH;
    mInfo("trans:%d, stage from pre-finish to finish", pTrans->id);
    continueExec = true;
  } else {
@ -1547,10 +1498,10 @@ static bool mndTransPerfromPreFinishedStage(SMnode *pMnode, STrans *pTrans) {
  return continueExec;
}

static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans) {
  bool continueExec = false;

  SSdbRaw *pRaw = mndTransActionEncode(pTrans);
  SSdbRaw *pRaw = mndTransEncode(pTrans);
  if (pRaw == NULL) {
    mError("trans:%d, failed to encode while finish trans since %s", pTrans->id, terrstr());
    return false;
@ -1567,12 +1518,12 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
  return continueExec;
}

void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) {
  bool continueExec = true;

  while (continueExec) {
    mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " leader:%d", pTrans->id,
    mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf:%d", pTrans->id,
          mndTransStr(pTrans->stage), pTrans->createdTime, isLeader);
          mndTransStr(pTrans->stage), pTrans->createdTime, topHalf);
    pTrans->lastExecTime = taosGetTimestampMs();
    switch (pTrans->stage) {
      case TRN_STAGE_PREPARE:
@ -1582,7 +1533,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
        continueExec = mndTransPerformRedoActionStage(pMnode, pTrans);
        break;
      case TRN_STAGE_COMMIT:
        if (isLeader) {
        if (topHalf) {
          continueExec = mndTransPerformCommitStage(pMnode, pTrans);
        } else {
          mInfo("trans:%d, can not commit since not leader", pTrans->id);
@ -1593,7 +1544,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
        continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
        break;
      case TRN_STAGE_ROLLBACK:
        if (isLeader) {
        if (topHalf) {
          continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
        } else {
          mInfo("trans:%d, can not rollback since not leader", pTrans->id);
|
@ -1604,15 +1555,15 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
|
||||||
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
|
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
|
||||||
break;
|
break;
|
||||||
case TRN_STAGE_PRE_FINISH:
|
case TRN_STAGE_PRE_FINISH:
|
||||||
if (isLeader) {
|
if (topHalf) {
|
||||||
continueExec = mndTransPerfromPreFinishedStage(pMnode, pTrans);
|
continueExec = mndTransPerformPreFinishStage(pMnode, pTrans);
|
||||||
} else {
|
} else {
|
||||||
mInfo("trans:%d, can not pre-finish since not leader", pTrans->id);
|
mInfo("trans:%d, can not pre-finish since not leader", pTrans->id);
|
||||||
continueExec = false;
|
continueExec = false;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TRN_STAGE_FINISHED:
|
case TRN_STAGE_FINISH:
|
||||||
continueExec = mndTransPerfromFinishedStage(pMnode, pTrans);
|
continueExec = mndTransPerformFinishStage(pMnode, pTrans);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
continueExec = false;
|
continueExec = false;
|
||||||
|
@ -1623,6 +1574,16 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
    mndTransSendRpcRsp(pMnode, pTrans);
  }
}

void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
  bool topHalf = true;
  return mndTransExecuteImp(pMnode, pTrans, topHalf);
}

void mndTransRefresh(SMnode *pMnode, STrans *pTrans) {
  bool topHalf = false;
  return mndTransExecuteImp(pMnode, pTrans, topHalf);
}

static int32_t mndProcessTransTimer(SRpcMsg *pReq) {
  mTrace("start to process trans timer");
  mndTransPullup(pReq->info.node);
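Renaming the flag from isLeader to topHalf and hiding it behind two wrappers makes the calling context explicit: mndTransExecute is the timer/RPC (top-half) path that may issue new raft proposals, while mndTransRefresh is the apply (bottom-half) path that may not. A stripped-down sketch of the split (the stage names and stubs are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ST_PREPARE, ST_COMMIT, ST_FINISH } Stage;

    /* Stages that must write new raft entries run only in the top half;
     * the bottom half (raft apply path) just refreshes in-memory state. */
    static bool performStage(Stage s, bool topHalf) {
      if (s == ST_COMMIT && !topHalf) {
        printf("skip commit in bottom half\n");
        return false;
      }
      return true;  /* pretend the stage succeeded */
    }

    static void executeImp(Stage *s, bool topHalf) {
      while (*s != ST_FINISH && performStage(*s, topHalf)) {
        *s = (Stage)(*s + 1);
      }
    }

    /* The two public entry points just pin the flag, as in the diff. */
    void transExecute(Stage *s) { executeImp(s, true); }
    void transRefresh(Stage *s) { executeImp(s, false); }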
@ -1649,7 +1610,7 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
    pAction->errCode = 0;
  }

  mndTransExecute(pMnode, pTrans, true);
  mndTransExecute(pMnode, pTrans);
  return 0;
}
@ -1707,7 +1668,7 @@ void mndTransPullup(SMnode *pMnode) {
    int32_t *pTransId = taosArrayGet(pArray, i);
    STrans *pTrans = mndAcquireTrans(pMnode, *pTransId);
    if (pTrans != NULL) {
      mndTransExecute(pMnode, pTrans, true);
      mndTransExecute(pMnode, pTrans);
    }
    mndReleaseTrans(pMnode, pTrans);
  }
@ -28,7 +28,6 @@
#define VGROUP_VER_NUMBER   1
#define VGROUP_RESERVE_SIZE 64

static SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw);
static int32_t mndVgroupActionInsert(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionDelete(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew);
@ -483,15 +482,15 @@ static void *mndBuildDisableVnodeWriteReq(SMnode *pMnode, SDbObj *pDb, int32_t v
  return pReq;
}

static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, SVgObj *pVgroup, int32_t dstVgId, int32_t *pContLen) {
static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, int32_t srcVgId, SVgObj *pVgroup, int32_t *pContLen) {
  SAlterVnodeHashRangeReq alterReq = {
      .srcVgId = pVgroup->vgId,
      .srcVgId = srcVgId,
      .dstVgId = dstVgId,
      .dstVgId = pVgroup->vgId,
      .hashBegin = pVgroup->hashBegin,
      .hashEnd = pVgroup->hashEnd,
  };

  mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", pVgroup->vgId, dstVgId,
  mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", srcVgId, pVgroup->vgId,
        pVgroup->hashBegin, pVgroup->hashEnd);
  int32_t contLen = tSerializeSAlterVnodeHashRangeReq(NULL, 0, &alterReq);
  if (contLen < 0) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, int32_t dstVgId) {
|
static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, int32_t srcVgId, SVgObj *pVgroup) {
|
||||||
STransAction action = {0};
|
STransAction action = {0};
|
||||||
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||||
|
|
||||||
int32_t contLen = 0;
|
int32_t contLen = 0;
|
||||||
void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, pVgroup, dstVgId, &contLen);
|
void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, srcVgId, pVgroup, &contLen);
|
||||||
if (pReq == NULL) return -1;
|
if (pReq == NULL) return -1;
|
||||||
|
|
||||||
action.pCont = pReq;
|
action.pCont = pReq;
|
||||||
|
@ -1247,6 +1246,21 @@ int32_t mndAddAlterVnodeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb
  return 0;
}

int32_t mndAddPrepareNewVgAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVg) {
  SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
  if (pRaw == NULL) goto _err;

  STransAction action = {.pRaw = pRaw, .msgType = TDMT_MND_CREATE_VG};
  if (mndTransAppendPrepareAction(pTrans, &action) != 0) goto _err;
  (void)sdbSetRawStatus(pRaw, SDB_STATUS_CREATING);
  pRaw = NULL;
  return 0;

_err:
  sdbFreeRaw(pRaw);
  return -1;
}

int32_t mndAddAlterVnodeReplicaAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
  SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
  if (pDnode == NULL) return -1;
@ -2241,10 +2255,13 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans,
  return 0;
}

static int32_t mndTransCommitVgStatus(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus) {
typedef int32_t (*FpTransActionCb)(STrans *pTrans, SSdbRaw *pRaw);

static int32_t mndAddVgStatusAction(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus, ETrnStage stage) {
  FpTransActionCb appendActionCb = (stage == TRN_STAGE_COMMIT_ACTION) ? mndTransAppendCommitlog : mndTransAppendRedolog;
  SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
  if (pRaw == NULL) goto _err;
  if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _err;
  if (appendActionCb(pTrans, pRaw) != 0) goto _err;
  (void)sdbSetRawStatus(pRaw, vgStatus);
  pRaw = NULL;
  return 0;
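mndAddVgStatusAction generalizes the old commit-only helper by selecting the append target from the requested stage through a function pointer, so the same status write can land in either the redo log or the commit log. A minimal sketch of that callback selection (the types and stubs are placeholders for STrans, SSdbRaw and the two real append functions):

    #include <stdint.h>

    typedef struct Trans Trans;
    typedef struct Raw   Raw;
    typedef int32_t (*AppendFp)(Trans *pTrans, Raw *pRaw);

    /* Placeholder targets; the real ones are mndTransAppendRedolog and
     * mndTransAppendCommitlog. */
    static int32_t appendRedoLog(Trans *t, Raw *r)   { (void)t; (void)r; return 0; }
    static int32_t appendCommitLog(Trans *t, Raw *r) { (void)t; (void)r; return 0; }

    enum { STAGE_REDO_ACTION = 0, STAGE_COMMIT_ACTION = 1 };

    /* One helper serves both stages by picking the append callback up front. */
    static int32_t addStatusAction(Trans *t, Raw *r, int32_t stage) {
      AppendFp append = (stage == STAGE_COMMIT_ACTION) ? appendCommitLog : appendRedoLog;
      return append(t, r);
    }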
@ -2253,18 +2270,32 @@ _err:
  return -1;
}

static int32_t mndAddDbStatusAction(STrans *pTrans, SDbObj *pDb, ESdbStatus dbStatus, ETrnStage stage) {
  FpTransActionCb appendActionCb = (stage == TRN_STAGE_COMMIT_ACTION) ? mndTransAppendCommitlog : mndTransAppendRedolog;
  SSdbRaw *pRaw = mndDbActionEncode(pDb);
  if (pRaw == NULL) goto _err;
  if (appendActionCb(pTrans, pRaw) != 0) goto _err;
  (void)sdbSetRawStatus(pRaw, dbStatus);
  pRaw = NULL;
  return 0;
_err:
  sdbFreeRaw(pRaw);
  return -1;
}

int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
  int32_t code = -1;
  STrans *pTrans = NULL;
  SSdbRaw *pRaw = NULL;
  SDbObj dbObj = {0};
  SArray *pArray = mndBuildDnodesArray(pMnode, 0);

  pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "split-vgroup");
  pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "split-vgroup");
  if (pTrans == NULL) goto _OVER;
  mndTransSetSerial(pTrans);
  mInfo("trans:%d, used to split vgroup, vgId:%d", pTrans->id, pVgroup->vgId);

  mndTransSetDbName(pTrans, pDb->name, NULL);

  SVgObj newVg1 = {0};
  memcpy(&newVg1, pVgroup, sizeof(SVgObj));
  mInfo("vgId:%d, vgroup info before split, replica:%d hashBegin:%u hashEnd:%u", newVg1.vgId, newVg1.replica,
@ -2316,32 +2347,25 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro

  // alter vgId and hash range
  int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
  if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
  int32_t srcVgId = newVg1.vgId;
  newVg1.vgId = maxVgId;
  if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg1) != 0) goto _OVER;
  if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg1) != 0) goto _OVER;

  maxVgId++;
  if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg2, maxVgId) != 0) goto _OVER;
  srcVgId = newVg2.vgId;
  newVg2.vgId = maxVgId;
  if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg2) != 0) goto _OVER;
  if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg2) != 0) goto _OVER;

  if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
  if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg2) != 0) goto _OVER;

  // adjust vgroup replica
  if (pDb->cfg.replications != newVg1.replica) {
    if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
  } else {
    if (mndTransCommitVgStatus(pTrans, &newVg1, SDB_STATUS_READY) < 0) goto _OVER;
  }
  if (pDb->cfg.replications != newVg2.replica) {
    if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
  } else {
    if (mndTransCommitVgStatus(pTrans, &newVg2, SDB_STATUS_READY) < 0) goto _OVER;
  }
  if (mndTransCommitVgStatus(pTrans, pVgroup, SDB_STATUS_DROPPED) < 0) goto _OVER;
  if (mndAddVgStatusAction(pTrans, &newVg1, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
  if (mndAddVgStatusAction(pTrans, &newVg2, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
  if (mndAddVgStatusAction(pTrans, pVgroup, SDB_STATUS_DROPPED, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;

  // update db status
  memcpy(&dbObj, pDb, sizeof(SDbObj));
  if (dbObj.cfg.pRetensions != NULL) {
    dbObj.cfg.pRetensions = taosArrayDup(pDb->cfg.pRetensions, NULL);
@ -2350,11 +2374,27 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
|
||||||
dbObj.vgVersion++;
|
dbObj.vgVersion++;
|
||||||
dbObj.updateTime = taosGetTimestampMs();
|
dbObj.updateTime = taosGetTimestampMs();
|
||||||
dbObj.cfg.numOfVgroups++;
|
dbObj.cfg.numOfVgroups++;
|
||||||
pRaw = mndDbActionEncode(&dbObj);
|
if (mndAddDbStatusAction(pTrans, &dbObj, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
|
||||||
if (pRaw == NULL) goto _OVER;
|
|
||||||
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
|
// adjust vgroup replica
|
||||||
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
if (pDb->cfg.replications != newVg1.replica) {
|
||||||
pRaw = NULL;
|
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
|
||||||
|
} else {
|
||||||
|
if (mndAddVgStatusAction(pTrans, &newVg1, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pDb->cfg.replications != newVg2.replica) {
|
||||||
|
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
|
||||||
|
} else {
|
||||||
|
if (mndAddVgStatusAction(pTrans, &newVg2, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mndAddVgStatusAction(pTrans, pVgroup, SDB_STATUS_DROPPED, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
|
||||||
|
|
||||||
|
// commit db status
|
||||||
|
dbObj.vgVersion++;
|
||||||
|
dbObj.updateTime = taosGetTimestampMs();
|
||||||
|
if (mndAddDbStatusAction(pTrans, &dbObj, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
|
||||||
|
|
||||||
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||||
code = 0;
|
code = 0;
|
||||||
|
@ -2362,7 +2402,6 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
|
||||||
_OVER:
|
_OVER:
|
||||||
taosArrayDestroy(pArray);
|
taosArrayDestroy(pArray);
|
||||||
mndTransDrop(pTrans);
|
mndTransDrop(pTrans);
|
||||||
sdbFreeRaw(pRaw);
|
|
||||||
taosArrayDestroy(dbObj.cfg.pRetensions);
|
taosArrayDestroy(dbObj.cfg.pRetensions);
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
|
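Annotation on the mndSplitVgroup hunks above: the rewrite drops the direct sdb commit-log write (pRaw, sdbSetRawStatus) in favor of status actions staged into the transaction's redo and commit phases, and narrows the conflict scope from TRN_CONFLICT_GLOBAL to TRN_CONFLICT_DB. Below is a minimal, self-contained C sketch of that two-stage staging idea; every type and function in it is invented for illustration and is not the real STrans API.

    #include <stdio.h>

    /* Toy status values mirroring the SDB_STATUS_* idea in the diff. */
    typedef enum { ST_CREATING, ST_UPDATE, ST_READY, ST_DROPPED } ToyStatus;

    typedef struct {
      int       id;
      ToyStatus status;
    } ToyVgroup;

    /* Redo stage: make the object visible in an intermediate state. */
    static void redoAction(ToyVgroup *vg, ToyStatus s) {
      vg->status = s;
      printf("redo:   vg %d -> %d\n", vg->id, s);
    }

    /* Commit stage: flip to the final state only once all redo actions succeeded. */
    static void commitAction(ToyVgroup *vg, ToyStatus s) {
      vg->status = s;
      printf("commit: vg %d -> %d\n", vg->id, s);
    }

    int main(void) {
      ToyVgroup newVg1 = {2, ST_CREATING}, newVg2 = {3, ST_CREATING}, oldVg = {1, ST_READY};
      redoAction(&newVg1, ST_UPDATE);  /* new vgroups exist but are not final yet */
      redoAction(&newVg2, ST_UPDATE);
      commitAction(&newVg1, ST_READY); /* the flip happens at commit time */
      commitAction(&newVg2, ST_READY);
      commitAction(&oldVg, ST_DROPPED);
      return 0;
    }

The practical benefit is that a failure between the redo and commit phases leaves the objects in a recognizable intermediate state instead of a half-written commit log.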
@@ -122,6 +122,7 @@ typedef enum {
   SDB_STATUS_DROPPING = 2,
   SDB_STATUS_DROPPED = 3,
   SDB_STATUS_READY = 4,
+  SDB_STATUS_UPDATE = 5,
 } ESdbStatus;

 typedef enum {

@@ -256,6 +256,7 @@ int32_t sdbWriteWithoutFree(SSdb *pSdb, SSdbRaw *pRaw) {
       code = sdbInsertRow(pSdb, hash, pRaw, pRow, keySize);
       break;
     case SDB_STATUS_READY:
+    case SDB_STATUS_UPDATE:
     case SDB_STATUS_DROPPING:
       code = sdbUpdateRow(pSdb, hash, pRaw, pRow, keySize);
       break;
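The new SDB_STATUS_UPDATE value is routed to sdbUpdateRow alongside READY and DROPPING, so a raw written in the UPDATE state modifies an existing row rather than inserting a new one. A toy dispatch in plain C illustrating the same routing; the enum values and names here are stand-ins, not the real ESdbStatus definition:

    #include <stdio.h>

    typedef enum { ROW_CREATING, ROW_DROPPING, ROW_DROPPED, ROW_READY, ROW_UPDATE } RowStatus;

    /* Insert-vs-update dispatch, mirroring the switch in sdbWriteWithoutFree. */
    static const char *applyWrite(RowStatus s) {
      switch (s) {
        case ROW_CREATING:
          return "insert";
        case ROW_READY:
        case ROW_UPDATE:   /* the newly added status takes the update path */
        case ROW_DROPPING:
          return "update";
        default:
          return "reject";
      }
    }

    int main(void) {
      printf("UPDATE -> %s\n", applyWrite(ROW_UPDATE)); /* prints "update" */
      return 0;
    }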
@@ -103,7 +103,7 @@ target_link_libraries(

     # PUBLIC bdb
     # PUBLIC scalar
-    PUBLIC rocksdb-shared
+    PUBLIC rocksdb
     PUBLIC transport
     PUBLIC stream
     PUBLIC index
@@ -879,9 +879,13 @@ static int32_t metaFilterTableByHash(SMeta *pMeta, SArray *uidList) {
     SDecoder dc = {0};
     tDecoderInit(&dc, pData, nData);
     metaDecodeEntry(&dc, &me);

     if (me.type != TSDB_SUPER_TABLE) {
-      int32_t ret = vnodeValidateTableHash(pMeta->pVnode, me.name);
-      if (TSDB_CODE_VND_HASH_MISMATCH == ret) {
+      char tbFName[TSDB_TABLE_FNAME_LEN + 1];
+      snprintf(tbFName, sizeof(tbFName), "%s.%s", pMeta->pVnode->config.dbname, me.name);
+      tbFName[TSDB_TABLE_FNAME_LEN] = '\0';
+      int32_t ret = vnodeValidateTableHash(pMeta->pVnode, tbFName);
+      if (ret < 0 && terrno == TSDB_CODE_VND_HASH_MISMATCH) {
         taosArrayPush(uidList, &me.uid);
       }
     }

@@ -910,6 +914,7 @@ int32_t metaTrimTables(SMeta *pMeta) {
     goto end;
   }

+  metaInfo("vgId:%d, trim %ld tables", TD_VID(pMeta->pVnode), taosArrayGetSize(tbUids));
   metaDropTables(pMeta, tbUids);

 end:

@@ -298,10 +298,8 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
         if (res == TSDB_CODE_SUCCESS) {
           memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName));
         } else {
-          char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId);
-          memcpy(ctbName, tmp, strlen(tmp));
-          memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp));
-          taosMemoryFree(tmp);
+          buildCtbNameByGroupIdImpl(stbFullName, pDataBlock->info.id.groupId, ctbName);
+          memcpy(pTableSinkInfo->tbName, ctbName, strlen(ctbName));
           tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode),
                   pDataBlock->info.id.groupId);
         }

@@ -325,7 +325,7 @@ int vnodeValidateTableHash(SVnode *pVnode, char *tableFName) {

   if (hashValue < pVnode->config.hashBegin || hashValue > pVnode->config.hashEnd) {
     terrno = TSDB_CODE_VND_HASH_MISMATCH;
-    return TSDB_CODE_VND_HASH_MISMATCH;
+    return -1;
   }

   return 0;
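vnodeValidateTableHash now reports failure as -1 with the reason stored in terrno, and metaFilterTableByHash adapts by checking `ret < 0 && terrno == TSDB_CODE_VND_HASH_MISMATCH` against a fully qualified db.table name. A compact, self-contained sketch of that error-reporting convention; the names and the global error slot below are illustrative stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    enum { ERR_NONE = 0, ERR_HASH_MISMATCH = 1 };
    static int g_lastError = ERR_NONE;  /* stand-in for terrno */

    /* Return -1 on failure and record why, so callers can branch on the reason. */
    static int validateHash(uint32_t hashValue, uint32_t begin, uint32_t end) {
      if (hashValue < begin || hashValue > end) {
        g_lastError = ERR_HASH_MISMATCH;
        return -1;
      }
      return 0;
    }

    int main(void) {
      int ret = validateHash(900u, 0u, 499u);
      if (ret < 0 && g_lastError == ERR_HASH_MISMATCH) {
        printf("table hashes outside this vnode's range; schedule it for trimming\n");
      }
      return 0;
    }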
@@ -431,7 +431,7 @@ static int32_t vnodeSyncApplyMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsm
   return tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
 }

-static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
   if (pMsg->code == 0) {
     return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
   }

@@ -451,7 +451,7 @@ static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFs
   return 0;
 }

-static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
   if (pMeta->isWeak == 1) {
     return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
   }

@@ -463,7 +463,7 @@ static SyncIndex vnodeSyncAppliedIndex(const SSyncFSM *pFSM) {
   return atomic_load_64(&pVnode->state.applied);
 }

-static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
   SVnode *pVnode = pFsm->data;
   vTrace("vgId:%d, rollback-cb is excuted, fsm:%p, index:%" PRId64 ", weak:%d, code:%d, state:%d %s, type:%s",
          pVnode->config.vgId, pFsm, pMeta->index, pMeta->isWeak, pMeta->code, pMeta->state, syncStr(pMeta->state),

@@ -457,6 +457,7 @@ typedef struct SStreamIntervalOperatorInfo {
   int64_t     dataVersion;
   SStateStore statestore;
   bool        recvGetAll;
+  SHashObj*   pFinalPullDataMap;
 } SStreamIntervalOperatorInfo;

 typedef struct SDataGroupInfo {

@@ -647,6 +647,8 @@ uint64_t calcGroupId(char* pData, int32_t len) {
   // NOTE: only extract the initial 8 bytes of the final MD5 digest
   uint64_t id = 0;
   memcpy(&id, context.digest, sizeof(uint64_t));
+  if (0 == id)
+    memcpy(&id, context.digest + 8, sizeof(uint64_t));
   return id;
 }
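calcGroupId now avoids returning 0: if the first 8 bytes of the MD5 digest happen to be all zero, it falls back to the second 8 bytes, presumably so that group id 0 can keep a reserved meaning. A self-contained sketch of the same fallback; the digest bytes in the demo are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Derive a 64-bit id from a 16-byte digest, skipping a zero first half. */
    static uint64_t groupIdFromDigest(const uint8_t digest[16]) {
      uint64_t id = 0;
      memcpy(&id, digest, sizeof(id));
      if (id == 0) {
        memcpy(&id, digest + 8, sizeof(id));
      }
      return id;
    }

    int main(void) {
      uint8_t digest[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4};
      printf("group id: %llu\n", (unsigned long long)groupIdFromDigest(digest));
      return 0;
    }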
@@ -319,6 +319,11 @@ void destroyMergeJoinOperator(void* param) {
   }
   nodesDestroyNode(pJoinOperator->pCondAfterMerge);

+  taosArrayDestroy(pJoinOperator->rowCtx.leftCreatedBlocks);
+  taosArrayDestroy(pJoinOperator->rowCtx.rightCreatedBlocks);
+  taosArrayDestroy(pJoinOperator->rowCtx.leftRowLocations);
+  taosArrayDestroy(pJoinOperator->rowCtx.rightRowLocations);
+
   pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);
   taosMemoryFreeClear(param);
 }

@@ -213,6 +213,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
   } else {
     if (limitReached && (pLimitInfo->slimit.limit >= 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
       setOperatorCompleted(pOperator);
+    } else if (limitReached && groupId == 0) {
+      setOperatorCompleted(pOperator);
     }
   }

@@ -1865,7 +1865,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
       TSKEY maxTs = pAPI->stateStore.updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex);
       pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
     } else {
-      pInfo->pUpdateInfo->maxDataVersion = pTaskInfo->streamInfo.fillHistoryVer2;
+      pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pTaskInfo->streamInfo.fillHistoryVer2);
       doCheckUpdate(pInfo, pInfo->pRecoverRes->info.window.ekey, pInfo->pRecoverRes);
     }
   }

@@ -257,7 +257,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp


   // output the result
-  bool hasInterp = true;
+  int32_t fillColIndex = 0;
+  bool    hasInterp = true;
   for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
     SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];

@@ -307,7 +308,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp

       case TSDB_FILL_SET_VALUE:
       case TSDB_FILL_SET_VALUE_F: {
-        SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
+        SVariant* pVar = &pSliceInfo->pFillColInfo[fillColIndex].fillVal;

         if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
           float v = 0;

@@ -342,6 +343,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
         }
         colDataSetVal(pDst, rows, (char*)&v, false);
       }

+        ++fillColIndex;
         break;
       }
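The genInterpolationResult hunks replace the loop index j with a dedicated fillColIndex when reading FILL(VALUE, ...) slots, advancing it only when a fill value is actually consumed. The sketch below illustrates why that matters, under my reading that the output expression list can contain columns (such as pseudo columns) that consume no fill value; the names and data are invented:

    #include <stdio.h>

    int main(void) {
      const char *exprs[] = {"_wstart", "interp(c0)", "interp(c1)"}; /* 3 outputs */
      const int fillVals[] = {10, 20};                               /* 2 values  */
      int fillColIndex = 0;
      for (int j = 0; j < 3; ++j) {
        if (exprs[j][0] == '_') {  /* pseudo column: no fill value consumed */
          printf("%s: no fill value\n", exprs[j]);
          continue;
        }
        /* Indexing by j here would read fillVals[1] and then run past the end;
         * the separate cursor stays aligned with the fill-value list. */
        printf("%s: fill %d\n", exprs[j], fillVals[fillColIndex]);
        ++fillColIndex;
      }
      return 0;
    }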
@@ -1306,6 +1306,8 @@ static bool doDeleteWindow(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId)
   return true;
 }

+static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
+
 static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDataBlock* pBlock, SArray* pUpWins,
                             SSHashObj* pUpdatedMap) {
   SStreamIntervalOperatorInfo* pInfo = pOperator->info;

@@ -1340,8 +1342,14 @@ static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDa
     SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
     void*   chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
     if (chIds) {
-      getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
-      continue;
+      int32_t childId = getChildIndex(pBlock);
+      SArray* chArray = *(void**)chIds;
+      int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
+      if (index != -1) {
+        qDebug("===stream===try push delete window%" PRId64 "chId:%d ,continue", win.skey, childId);
+        getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
+        continue;
+      }
     }
     bool res = doDeleteWindow(pOperator, win.skey, winGpId);
     if (pUpWins && res) {

@@ -1497,6 +1505,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
     taosArrayDestroy(*(void**)pIte);
   }
   taosHashCleanup(pInfo->pPullDataMap);
+  taosHashCleanup(pInfo->pFinalPullDataMap);
   taosArrayDestroy(pInfo->pPullWins);
   blockDataDestroy(pInfo->pPullDataRes);
   taosArrayDestroy(pInfo->pDelWins);

@@ -2067,8 +2076,6 @@ void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
   taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*));
 }

-static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
-
 static void clearStreamIntervalOperator(SStreamIntervalOperatorInfo* pInfo) {
   tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
   clearDiskbasedBuf(pInfo->aggSup.pResultBuf);

@@ -2112,7 +2119,7 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
   blockDataUpdateTsWindow(pBlock, 0);
 }

-void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval) {
+void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SHashObj* pFinalMap, SInterval* pInterval, SArray* pPullWins, int32_t numOfCh, SOperatorInfo* pOperator) {
   SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
   TSKEY*           tsData = (TSKEY*)pStartCol->pData;
   SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);

@@ -2136,6 +2143,22 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
           taosArrayDestroy(chArray);
           taosHashRemove(pMap, &winRes, sizeof(SWinKey));
           qDebug("===stream===retrive pull data over.window %" PRId64 , winRes.ts);
+
+          void* pFinalCh = taosHashGet(pFinalMap, &winRes, sizeof(SWinKey));
+          if (pFinalCh) {
+            taosHashRemove(pFinalMap, &winRes, sizeof(SWinKey));
+            doDeleteWindow(pOperator, winRes.ts, winRes.groupId);
+            STimeWindow nextWin = getFinalTimeWindow(winRes.ts, pInterval);
+            SPullWindowInfo pull = {.window = nextWin,
+                                    .groupId = winRes.groupId,
+                                    .calWin.skey = nextWin.skey,
+                                    .calWin.ekey = nextWin.skey};
+            // add pull data request
+            if (savePullWindow(&pull, pPullWins) == TSDB_CODE_SUCCESS) {
+              addPullWindow(pMap, &winRes, numOfCh);
+              qDebug("===stream===prepare final retrive for delete %" PRId64 ", size:%d", winRes.ts, numOfCh);
+            }
+          }
         }
       }
     }

@@ -2144,7 +2167,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
   }
 }

-static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
+static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo, int32_t childId) {
   int32_t size = taosArrayGetSize(wins);
   for (int32_t i = 0; i < size; i++) {
     SWinKey* winKey = taosArrayGet(wins, i);

@@ -2161,6 +2184,14 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
         addPullWindow(pInfo->pPullDataMap, winKey, pInfo->numOfChild);
         qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, pInfo->numOfChild);
       }
+    } else {
+      SArray* chArray = *(void**)chIds;
+      int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
+      qDebug("===stream===check final retrive %" PRId64",chid:%d", winKey->ts, index);
+      if (index == -1) {
+        qDebug("===stream===add final retrive %" PRId64, winKey->ts);
+        taosHashPut(pInfo->pFinalPullDataMap, winKey, sizeof(SWinKey), NULL, 0);
+      }
     }
   }
 }
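Annotation on the pFinalPullDataMap plumbing above: my reading is that a delete arriving for a window while a retrieve to the child tasks is still outstanding can no longer simply drop the window; it is parked in the new map and re-issued as a final retrieve once every child has answered. A self-contained sketch of that bookkeeping under this assumption; the fixed-size table stands in for the real hash maps, and all names are invented:

    #include <stdio.h>

    #define MAX_WIN 8

    typedef struct {
      long ts;        /* window key                          */
      int  pending;   /* children that have not answered yet */
      int  parkedDel; /* delete arrived while still pending  */
    } WinState;

    static WinState wins[MAX_WIN];

    static WinState *find(long ts) {
      for (int i = 0; i < MAX_WIN; i++)
        if (wins[i].ts == ts) return &wins[i];
      return NULL;
    }

    int main(void) {
      wins[0] = (WinState){.ts = 1000, .pending = 2};  /* retrieve sent to 2 children */
      WinState *w = find(1000);
      w->parkedDel = 1;  /* delete arrives: park it, do not drop the state yet */
      w->pending--;      /* child 0 answers */
      w->pending--;      /* child 1 answers */
      if (w->pending == 0 && w->parkedDel) {
        printf("window %ld: all children answered, apply the parked delete\n", w->ts);
      }
      return 0;
    }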
@@ -2314,7 +2345,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
   }
   while (1) {
     bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
-    if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
+    if ((pInfo->ignoreExpiredData && isClosed && !IS_FINAL_OP(pInfo)) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
       startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
       if (startPos < 0) {
         break;

@@ -2554,7 +2585,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
       SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
       doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pInfo->pUpdatedMap);
       if (IS_FINAL_OP(pInfo)) {
-        addRetriveWindow(delWins, pInfo);
+        int32_t chId = getChildIndex(pBlock);
+        addRetriveWindow(delWins, pInfo, chId);
         if (pBlock->info.type != STREAM_CLEAR) {
           taosArrayAddAll(pInfo->pDelWins, delWins);
         }

@@ -2589,7 +2621,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
       }
       continue;
     } else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) {
-      processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval);
+      processPullOver(pBlock, pInfo->pPullDataMap, pInfo->pFinalPullDataMap, &pInfo->interval, pInfo->pPullWins, pInfo->numOfChild, pOperator);
       continue;
     } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
       return pBlock;

@@ -2772,6 +2804,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
   pInfo->pullIndex = 0;
   _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
   pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
+  pInfo->pFinalPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
   pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
   pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
   pInfo->ignoreExpiredDataSaved = false;

@@ -4963,6 +4996,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys

   pInfo->pPhyNode = NULL;  // create new child
   pInfo->pPullDataMap = NULL;
+  pInfo->pFinalPullDataMap = NULL;
   pInfo->pPullWins = NULL;  // SPullWindowInfo
   pInfo->pullIndex = 0;
   pInfo->pPullDataRes = NULL;

@@ -127,6 +127,7 @@ SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode
 SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues);
 SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode);
 SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd);
+SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint);
 SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen);
 SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse);

@@ -1095,6 +1095,8 @@ having_clause_opt(A) ::= HAVING search_condition(B).
 range_opt(A) ::= .    { A = NULL; }
 range_opt(A) ::=
   RANGE NK_LP expr_or_subquery(B) NK_COMMA expr_or_subquery(C) NK_RP.    { A = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
+range_opt(A) ::=
+  RANGE NK_LP expr_or_subquery(B) NK_RP.    { A = createInterpTimePoint(pCxt, releaseRawExprNode(pCxt, B)); }

 every_opt(A) ::= .    { A = NULL; }
 every_opt(A) ::= EVERY NK_LP duration_literal(B) NK_RP.    { A = releaseRawExprNode(pCxt, B); }

@@ -695,6 +695,11 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
   return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt, NULL), pStart, pEnd);
 }

+SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint) {
+  CHECK_PARSER_STATUS(pCxt);
+  return createOperatorNode(pCxt, OP_TYPE_EQUAL, createPrimaryKeyCol(pCxt, NULL), pPoint);
+}
+
 SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen) {
   CHECK_PARSER_STATUS(pCxt);
   SWhenThenNode* pWhenThen = (SWhenThenNode*)nodesMakeNode(QUERY_NODE_WHEN_THEN);

@@ -713,6 +713,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) {
   return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
 }

+static bool isInterpFunc(const SNode* pNode) {
+  return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpFunc(((SFunctionNode*)pNode)->funcId));
+}
+
 static bool isInterpPseudoColumnFunc(const SNode* pNode) {
   return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
 }

@@ -3036,7 +3040,7 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
 }

 static EDealRes needFillImpl(SNode* pNode, void* pContext) {
-  if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+  if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
     *(bool*)pContext = true;
     return DEAL_RES_END;
   }

@@ -3060,7 +3064,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
     code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
   }
   if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
-    code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant");
+    code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant");
   } else if (TSDB_CODE_SUCCESS != code) {
     code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
   }

@@ -3084,6 +3088,7 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
       if (TSDB_CODE_SUCCESS != code) {
         return code;
       }
+
       ++fillNo;
     }
   }

@@ -3503,6 +3508,22 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) {
   return TSDB_CODE_SUCCESS;
 }

+static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) {
+  SValueNode* pEvery = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+  if (NULL == pEvery) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  pEvery->node.resType.type = TSDB_DATA_TYPE_BIGINT;
+  pEvery->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
+  pEvery->isDuration = true;
+  pEvery->literal = taosStrdup("1s");
+
+  *pOutput = (SNode*)pEvery;
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
   int32_t len = strlen(pInterval->literal);

@@ -3518,7 +3539,12 @@ static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
 static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) {
   int32_t code = TSDB_CODE_SUCCESS;

-  code = checkEvery(pCxt, (SValueNode*)(*pEvery));
+  if (NULL == *pEvery) {
+    code = createDefaultEveryNode(pCxt, pEvery);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = checkEvery(pCxt, (SValueNode*)(*pEvery));
+  }
   if (TSDB_CODE_SUCCESS == code) {
     code = translateExpr(pCxt, pEvery);
   }

@@ -3547,6 +3573,9 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect
   if (TSDB_CODE_SUCCESS == code) {
     code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true);
   }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList);
+  }

   return code;
 }

@@ -3564,8 +3593,12 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
   }

   if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) {
-    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
-                                   "Missing RANGE clause, EVERY clause or FILL clause");
+    if (pSelect->pRange != NULL && QUERY_NODE_OPERATOR == nodeType(pSelect->pRange) && pSelect->pEvery == NULL) {
+      // single point interp every can be omitted
+    } else {
+      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
+                                     "Missing RANGE clause, EVERY clause or FILL clause");
+    }
   }

   int32_t code = translateExpr(pCxt, &pSelect->pRange);

@@ -3792,7 +3825,7 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
   if (TSDB_CODE_SUCCESS == code) {
     code = replaceTbName(pCxt, pSelect);
   }

   return code;
 }
(File diff suppressed because it is too large.)
@@ -11,7 +11,7 @@ if(${BUILD_WITH_ROCKSDB})
     IF (TD_LINUX)
         target_link_libraries(
             stream
-            PUBLIC rocksdb-shared tdb
+            PUBLIC rocksdb tdb
            PRIVATE os util transport qcom executor wal index
        )
     ELSE()

@@ -122,12 +122,17 @@ char* streamDefaultIterKey_rocksdb(void* iter, int32_t* len);
 char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len);

 // batch func
+int     streamStateGetCfIdx(SStreamState* pState, const char* funcName);
 void*   streamStateCreateBatch();
 int32_t streamStateGetBatchSize(void* pBatch);
 void    streamStateClearBatch(void* pBatch);
 void    streamStateDestroyBatch(void* pBatch);
 int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
                             void* val, int32_t vlen, int64_t ttl);
+
+int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
+                                    void* val, int32_t vlen, int64_t ttl, void* tmpBuf);
+
 int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch);
 // int32_t streamDefaultIter_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result);
 #endif

@@ -36,8 +36,9 @@ static SStreamGlobalEnv streamEnv;
 int32_t streamDispatchStreamBlock(SStreamTask* pTask);

 SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);
-SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes);
-void destroyStreamDataBlock(SStreamDataBlock* pBlock);
+SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize,
+                                               SArray* pRes);
+void              destroyStreamDataBlock(SStreamDataBlock* pBlock);

 int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
 int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data);

@@ -53,6 +54,8 @@ int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecov

 SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);

+extern int32_t streamBackendId;
+
 #ifdef __cplusplus
 }
 #endif

@@ -16,7 +16,9 @@
 #include "streamBackendRocksdb.h"
 #include "executor.h"
 #include "query.h"
+#include "streamInc.h"
 #include "tcommon.h"
+#include "tref.h"

 typedef struct SCompactFilteFactory {
   void* status;

@@ -79,8 +81,10 @@ const char* compareParKeyName(void* name);
 const char* comparePartagKeyName(void* name);

 void* streamBackendInit(const char* path) {
-  qDebug("init stream backend");
-  SBackendHandle* pHandle = calloc(1, sizeof(SBackendHandle));
+  uint32_t dbMemLimit = nextPow2(tsMaxStreamBackendCache) << 20;
+
+  qDebug("start to init stream backend at %s", path);
+  SBackendHandle* pHandle = taosMemoryCalloc(1, sizeof(SBackendHandle));
   pHandle->list = tdListNew(sizeof(SCfComparator));
   taosThreadMutexInit(&pHandle->mutex, NULL);
   taosThreadMutexInit(&pHandle->cfMutex, NULL);

@@ -88,19 +92,22 @@ void* streamBackendInit(const char* path) {

   rocksdb_env_t* env = rocksdb_create_default_env();  // rocksdb_envoptions_create();

-  rocksdb_cache_t* cache = rocksdb_cache_create_lru(64 << 20);
+  int32_t nBGThread = tsNumOfSnodeStreamThreads <= 2 ? 1 : tsNumOfSnodeStreamThreads / 2;
+  rocksdb_env_set_low_priority_background_threads(env, nBGThread);
+  rocksdb_env_set_high_priority_background_threads(env, nBGThread);
+
+  rocksdb_cache_t* cache = rocksdb_cache_create_lru(dbMemLimit / 2);

   rocksdb_options_t* opts = rocksdb_options_create();
   rocksdb_options_set_env(opts, env);
   rocksdb_options_set_create_if_missing(opts, 1);
   rocksdb_options_set_create_missing_column_families(opts, 1);
-  rocksdb_options_set_write_buffer_size(opts, 48 << 20);
-  rocksdb_options_set_max_total_wal_size(opts, 128 << 20);
+  rocksdb_options_set_max_total_wal_size(opts, dbMemLimit);
   rocksdb_options_set_recycle_log_file_num(opts, 6);
-  rocksdb_options_set_max_write_buffer_number(opts, 2);
+  rocksdb_options_set_max_write_buffer_number(opts, 3);
   rocksdb_options_set_info_log_level(opts, 0);
-  uint32_t dbLimit = nextPow2(tsMaxStreamBackendCache);
-  rocksdb_options_set_db_write_buffer_size(opts, dbLimit << 20);
+  rocksdb_options_set_db_write_buffer_size(opts, dbMemLimit);
+  rocksdb_options_set_write_buffer_size(opts, dbMemLimit / 2);

   pHandle->env = env;
   pHandle->dbOpt = opts;
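streamBackendInit now derives its RocksDB knobs from a single budget: the configured cache size (in MB) is rounded up to a power of two, converted to bytes, and then used for the WAL cap, the db write buffer, and (halved) the LRU cache and per-memtable write buffer. A self-contained sketch of that sizing scheme; tsMaxStreamBackendCache below is a stand-in variable with an example value:

    #include <stdint.h>
    #include <stdio.h>

    /* Round a 32-bit value up to the next power of two. */
    static uint32_t nextPow2(uint32_t x) {
      if (x <= 1) return 2;
      x--;
      x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
      x |= x >> 8;  x |= x >> 16;
      return x + 1;
    }

    int main(void) {
      uint32_t tsMaxStreamBackendCache = 96;                         /* MB */
      uint32_t dbMemLimit = nextPow2(tsMaxStreamBackendCache) << 20; /* bytes */
      printf("wal cap / db write buffer: %u bytes\n", dbMemLimit);
      printf("lru cache / write buffer:  %u bytes\n", dbMemLimit / 2);
      return 0;
    }

Tying every knob to one number keeps the backend's memory footprint predictable as the configuration changes, instead of the previous mix of hard-coded 48 MB / 64 MB / 128 MB constants.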
@@ -119,6 +126,7 @@ void* streamBackendInit(const char* path) {
     if (err != NULL) {
       qError("failed to open rocksdb, path:%s, reason:%s", path, err);
       taosMemoryFreeClear(err);
+      goto _EXIT;
     }
   } else {
     /*

@@ -129,6 +137,7 @@ void* streamBackendInit(const char* path) {
   if (cfs != NULL) {
     rocksdb_list_column_families_destroy(cfs, nCf);
   }
+  qDebug("succ to init stream backend at %s, backend:%p", path, pHandle);

   return (void*)pHandle;
 _EXIT:

@@ -140,7 +149,8 @@ _EXIT:
   taosHashCleanup(pHandle->cfInst);
   rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory);
   tdListFree(pHandle->list);
-  free(pHandle);
+  taosMemoryFree(pHandle);
+  qDebug("failed to init stream backend at %s", path);
   return NULL;
 }
 void streamBackendCleanup(void* arg) {

@@ -168,19 +178,20 @@ void streamBackendCleanup(void* arg) {
   rocksdb_env_destroy(pHandle->env);
   rocksdb_cache_destroy(pHandle->cache);

-  taosThreadMutexDestroy(&pHandle->mutex);
   SListNode* head = tdListPopHead(pHandle->list);
   while (head != NULL) {
     streamStateDestroyCompar(head->data);
     taosMemoryFree(head);
     head = tdListPopHead(pHandle->list);
   }
-  // rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory);
   tdListFree(pHandle->list);
+  taosThreadMutexDestroy(&pHandle->mutex);

   taosThreadMutexDestroy(&pHandle->cfMutex);

   taosMemoryFree(pHandle);
+  qDebug("destroy stream backend backend:%p", pHandle);
   return;
 }
 SListNode* streamBackendAddCompare(void* backend, void* arg) {

@@ -204,7 +215,6 @@ void streamBackendDelCompare(void* backend, void* arg) {
 }
 void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); }
 static bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len);
-int streamGetInit(SStreamState* pState, const char* funcName);

 // |key|-----value------|
 // |key|ttl|len|userData|

@@ -551,14 +561,20 @@ typedef struct {

 int32_t encodeValueFunc(void* value, int32_t vlen, int64_t ttl, char** dest) {
   SStreamValue key = {.unixTimestamp = ttl, .len = vlen, .data = (char*)(value)};
-  char*   p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
-  char*   buf = p;
-  int32_t len = 0;
-  len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
-  len += taosEncodeFixedI32((void**)&buf, key.len);
-  len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
-  *dest = p;
+  int32_t len = 0;
+  if (*dest == NULL) {
+    char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
+    char* buf = p;
+    len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
+    len += taosEncodeFixedI32((void**)&buf, key.len);
+    len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
+    *dest = p;
+  } else {
+    char* buf = *dest;
+    len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
+    len += taosEncodeFixedI32((void**)&buf, key.len);
+    len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
+  }
   return len;
 }
 /*
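encodeValueFunc becomes dual-mode above: when the caller passes a non-NULL destination it serializes in place with no allocation (the hot batched-write path), otherwise it allocates and hands ownership back. A self-contained sketch of the pattern with a simplified |ttl|len|payload| layout; names are invented, not the TDengine encoders:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static size_t encodeValue(const void *val, int32_t vlen, int64_t ttl, char **dest) {
      size_t need = sizeof(ttl) + sizeof(vlen) + (size_t)vlen;
      int    ownsBuf = (*dest == NULL);
      char  *buf = ownsBuf ? malloc(need) : *dest;
      if (buf == NULL) return 0;
      memcpy(buf, &ttl, sizeof(ttl));
      memcpy(buf + sizeof(ttl), &vlen, sizeof(vlen));
      memcpy(buf + sizeof(ttl) + sizeof(vlen), val, (size_t)vlen);
      if (ownsBuf) *dest = buf;  /* hand ownership back only when we allocated */
      return need;
    }

    int main(void) {
      char   scratch[64];
      char  *p = scratch;
      size_t n = encodeValue("abc", 3, 42, &p);  /* reuse path: no malloc */
      printf("encoded %zu bytes into caller buffer\n", n);
      p = NULL;
      n = encodeValue("abc", 3, 42, &p);         /* alloc path */
      printf("encoded %zu bytes into fresh buffer\n", n);
      free(p);
      return 0;
    }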
@@ -707,7 +723,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
     rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt);
     params[i].tableOpt = tableOpt;

-    int idx = streamGetInit(NULL, funcname);
+    int idx = streamStateGetCfIdx(NULL, funcname);
     SCfInit* cfPara = &ginitDict[idx];

     rocksdb_comparator_t* compare =

@@ -738,7 +754,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
       char idstr[128] = {0};
       sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId);

-      int idx = streamGetInit(NULL, funcname);
+      int idx = streamStateGetCfIdx(NULL, funcname);

       RocksdbCfInst*  inst = NULL;
       RocksdbCfInst** pInst = taosHashGet(handle->cfInst, idstr, strlen(idstr) + 1);

@@ -803,7 +819,8 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
   return 0;
 }
 int streamStateOpenBackend(void* backend, SStreamState* pState) {
-  qInfo("start to open backend, %p 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId);
+  qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId);
+  taosAcquireRef(streamBackendId, pState->streamBackendRid);
   SBackendHandle* handle = backend;

   sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-%d", pState->streamId, pState->taskId);

@@ -865,8 +882,8 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
   taosThreadRwlockInit(&pState->pTdbState->rwLock, NULL);
   SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen};
   pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare);
-  // rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
-  qInfo("succ to open backend, %p, 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId);
+  rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
+  qInfo("succ to open state %p on backend, %p, 0x%" PRIx64 "-%d", pState, handle, pState->streamId, pState->taskId);
   return 0;
 }

@@ -882,8 +899,8 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
   taosThreadMutexUnlock(&pHandle->cfMutex);

   char* status[] = {"close", "drop"};
-  qInfo("start to %s backend, %p, 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pState->streamId,
-        pState->taskId);
+  qInfo("start to close %s state %p on backend %p 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pHandle,
+        pState->streamId, pState->taskId);
   if (pState->pTdbState->rocksdb == NULL) {
     return;
   }

@@ -938,6 +955,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {

   taosThreadRwlockDestroy(&pState->pTdbState->rwLock);
   pState->pTdbState->rocksdb = NULL;
+  taosReleaseRef(streamBackendId, pState->streamBackendRid);
 }
 void streamStateDestroyCompar(void* arg) {
   SCfComparator* comp = (SCfComparator*)arg;

@@ -947,7 +965,7 @@ void streamStateDestroyCompar(void* arg) {
   taosMemoryFree(comp->comp);
 }

-int streamGetInit(SStreamState* pState, const char* funcName) {
+int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
   int    idx = -1;
   size_t len = strlen(funcName);
   for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) {

@@ -994,7 +1012,7 @@ bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len
 }
 rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfName, rocksdb_snapshot_t** snapshot,
                                           rocksdb_readoptions_t** readOpt) {
-  int idx = streamGetInit(pState, cfName);
+  int idx = streamStateGetCfIdx(pState, cfName);

   if (snapshot != NULL) {
     *snapshot = (rocksdb_snapshot_t*)rocksdb_create_snapshot(pState->pTdbState->rocksdb);

@@ -1014,7 +1032,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
    code = 0;                                                    \
    char  buf[128] = {0};                                        \
    char* err = NULL;                                            \
-   int   i = streamGetInit(pState, funcname);                   \
+   int   i = streamStateGetCfIdx(pState, funcname);             \
    if (i < 0) {                                                 \
      qWarn("streamState failed to get cf name: %s", funcname);  \
      code = -1;                                                 \

@@ -1045,7 +1063,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
    code = 0;                                                    \
    char  buf[128] = {0};                                        \
    char* err = NULL;                                            \
-   int   i = streamGetInit(pState, funcname);                   \
+   int   i = streamStateGetCfIdx(pState, funcname);             \
    if (i < 0) {                                                 \
      qWarn("streamState failed to get cf name: %s", funcname);  \
      code = -1;                                                 \

@@ -1093,7 +1111,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
    code = 0;                                                                         \
    char  buf[128] = {0};                                                             \
    char* err = NULL;                                                                 \
-   int   i = streamGetInit(pState, funcname);                                        \
+   int   i = streamStateGetCfIdx(pState, funcname);                                  \
    if (i < 0) {                                                                      \
      qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
      code = -1;                                                                      \

@@ -2033,7 +2051,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_
 void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); }
 int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
                             void* val, int32_t vlen, int64_t ttl) {
-  int i = streamGetInit(pState, cfName);
+  int i = streamStateGetCfIdx(pState, cfName);

   if (i < 0) {
     qError("streamState failed to put to cf name:%s", cfName);

@@ -2049,6 +2067,21 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_wr
   taosMemoryFree(ttlV);
   return 0;
 }
+int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
+                                    void* val, int32_t vlen, int64_t ttl, void* tmpBuf) {
+  char    buf[128] = {0};
+  int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf);
+  char*   ttlV = tmpBuf;
+  int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(val, vlen, ttl, &ttlV);
+
+  rocksdb_column_family_handle_t* pCf = pState->pTdbState->pHandle[ginitDict[cfIdx].idx];
+  rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen);
+
+  if (tmpBuf == NULL) {
+    taosMemoryFree(ttlV);
+  }
+  return 0;
+}
 int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch) {
   char* err = NULL;
   rocksdb_write(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, (rocksdb_writebatch_t*)pBatch, &err);
@ -15,6 +15,13 @@
|
||||||
|
|
||||||
#include "streamInc.h"
|
#include "streamInc.h"
|
||||||
|
|
||||||
|
#define MAX_BLOCK_NAME_NUM 1024
|
||||||
|
|
||||||
|
typedef struct SBlockName {
|
||||||
|
uint32_t hashValue;
|
||||||
|
char parTbName[TSDB_TABLE_NAME_LEN];
|
||||||
|
} SBlockName;
|
||||||
|
|
||||||
int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
|
int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
|
||||||
if (tStartEncode(pEncoder) < 0) return -1;
|
if (tStartEncode(pEncoder) < 0) return -1;
|
||||||
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
|
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
|
||||||
|
@@ -331,26 +338,46 @@ FAIL:
 
 int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
                                 int64_t groupId) {
-  char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
-  if (ctbName == NULL) {
-    return -1;
-  }
-
-  if (pDataBlock->info.parTbName[0]) {
-    snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
-  } else {
-    char* ctbShortName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId);
-    snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, ctbShortName);
-    taosMemoryFree(ctbShortName);
-  }
-
+  uint32_t hashValue = 0;
   SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+  if (pTask->pNameMap == NULL) {
+    pTask->pNameMap = tSimpleHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+  }
 
-  /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
-  SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
-  uint32_t hashValue =
-      taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
-  taosMemoryFree(ctbName);
+  void* pVal = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t));
+  if (pVal) {
+    SBlockName* pBln = (SBlockName*)pVal;
+    hashValue = pBln->hashValue;
+    if (!pDataBlock->info.parTbName[0]) {
+      memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName));
+    }
+  } else {
+    char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
+    if (ctbName == NULL) {
+      return -1;
+    }
+
+    if (pDataBlock->info.parTbName[0]) {
+      snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+    } else {
+      buildCtbNameByGroupIdImpl(pTask->shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName);
+      snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+    }
+
+    SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+
+    /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
+    SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
+    hashValue =
+        taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
+    taosMemoryFree(ctbName);
+    SBlockName bln = {0};
+    bln.hashValue = hashValue;
+    memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName));
+    if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) {
+      tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName));
+    }
+  }
 
   bool found = false;
   // TODO: optimize search
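[Editor's note] The rewrite above memoizes the per-group child-table name and its vgroup hash in pTask->pNameMap, keyed by groupId and capped at MAX_BLOCK_NAME_NUM entries, so repeated blocks for the same group skip both name construction and taosGetTbHashVal(). A toy, self-contained C sketch of that memoize-on-miss shape; compute_hash() and build_name() are hypothetical stand-ins for the TDengine helpers, and a direct-mapped array stands in for tSimpleHash.

/* Sketch only: cache (groupId -> hash, name) so the expensive path runs once per group. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_CAP 1024  /* mirrors MAX_BLOCK_NAME_NUM */

typedef struct {
  int64_t  groupId;
  uint32_t hash;
  char     name[64];
  int      used;
} Entry;

static Entry gCache[CACHE_CAP];

static uint32_t compute_hash(const char* s) {  /* FNV-1a, a placeholder hash */
  uint32_t h = 2166136261u;
  while (*s) h = (h ^ (uint8_t)*s++) * 16777619u;
  return h;
}
static void build_name(int64_t gid, char* out, size_t n) { snprintf(out, n, "stb_%" PRId64, gid); }

uint32_t group_hash(int64_t groupId) {
  Entry* e = &gCache[(uint64_t)groupId % CACHE_CAP];
  if (e->used && e->groupId == groupId) {
    return e->hash;                               /* hit: skip name building and hashing */
  }
  build_name(groupId, e->name, sizeof(e->name));  /* miss: build and hash once, then remember */
  e->hash = compute_hash(e->name);
  e->groupId = groupId;
  e->used = 1;
  return e->hash;
}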
@@ -20,7 +20,7 @@
 #include "ttimer.h"
 
 static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT;
-static int32_t streamBackendId = 0;
+int32_t streamBackendId = 0;
 static void streamMetaEnvInit() { streamBackendId = taosOpenRef(20, streamBackendCleanup); }
 
 void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); }
@@ -79,7 +79,6 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
   pMeta->vgId = vgId;
   pMeta->ahandle = ahandle;
   pMeta->expandFunc = expandFunc;
-  pMeta->streamBackendId = streamBackendId;
 
   memset(streamPath, 0, len);
   sprintf(streamPath, "%s/%s", pMeta->path, "state");
@@ -106,7 +106,7 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
   }
 
   SStreamTask* pStreamTask = pTask;
   char statePath[1024];
   if (!specPath) {
     sprintf(statePath, "%s/%d", path, pStreamTask->id.taskId);
   } else {
@@ -119,10 +119,10 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
 
 #ifdef USE_ROCKSDB
   SStreamMeta* pMeta = pStreamTask->pMeta;
-  taosAcquireRef(pMeta->streamBackendId, pMeta->streamBackendRid);
+  pState->streamBackendRid = pMeta->streamBackendRid;
   int code = streamStateOpenBackend(pMeta->streamBackend, pState);
   if (code == -1) {
-    taosReleaseRef(pMeta->streamBackendId, pMeta->streamBackendRid);
+    taosReleaseRef(streamBackendId, pMeta->streamBackendRid);
     taosMemoryFree(pState);
     pState = NULL;
   }
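[Editor's note] The meta/state hunks above move backend lifetime management to the single module-level streamBackendId ref registry: the state object now remembers the backend's rid and every acquire must be paired with a release through the same registry, or the backend leaks or is destroyed early. A toy C sketch of that acquire/release discipline; Handle and its functions are illustrative, not the taosOpenRef/taosAcquireRef/taosReleaseRef API itself.

/* Sketch only: last release of a shared handle triggers cleanup. */
#include <stdlib.h>

typedef struct {
  int   refs;    /* taken on open/acquire, dropped on release */
  void* backend; /* the shared resource, freed with the last reference */
} Handle;

static Handle* handle_open(void) {
  Handle* h = calloc(1, sizeof(Handle));
  if (h != NULL) h->refs = 1;
  return h;
}
static void handle_acquire(Handle* h) { h->refs++; }
static void handle_release(Handle* h) {
  if (--h->refs == 0) {  /* last user cleans up, like streamBackendCleanup */
    free(h->backend);
    free(h);
  }
}

int main(void) {
  Handle* h = handle_open();  /* the meta holds one reference */
  handle_acquire(h);          /* a stream state takes another */
  handle_release(h);          /* state closes */
  handle_release(h);          /* meta closes; backend destroyed here */
  return 0;
}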
@@ -222,9 +222,7 @@ _err:
 void streamStateClose(SStreamState* pState, bool remove) {
   SStreamTask* pTask = pState->pTdbState->pOwner;
 #ifdef USE_ROCKSDB
-  // streamStateCloseBackend(pState);
   streamStateDestroy(pState, remove);
-  taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid);
 #else
   tdbCommit(pState->pTdbState->db, pState->pTdbState->txn);
   tdbPostCommit(pState->pTdbState->db, pState->pTdbState->txn);
@@ -278,10 +276,10 @@ int32_t streamStateCommit(SStreamState* pState) {
 
 int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
 #ifdef USE_ROCKSDB
   void*    pVal = NULL;
   int32_t  len = 0;
   int32_t  code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len);
   char*    buf = ((SRowBuffPos*)pVal)->pRowBuff;
   uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
   memcpy(buf + len - rowSize, value, vLen);
   return code;
@@ -291,10 +289,10 @@ int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void*
 }
 int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen) {
 #ifdef USE_ROCKSDB
   void*    pVal = NULL;
   int32_t  len = 0;
   int32_t  code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len);
   char*    buf = ((SRowBuffPos*)pVal)->pRowBuff;
   uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
   *ppVal = buf + len - rowSize;
   return code;
@@ -224,5 +224,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
     taosMemoryFree((void*)pTask->id.idStr);
   }
 
+  if (pTask->pNameMap) {
+    tSimpleHashCleanup(pTask->pNameMap);
+  }
+
   taosMemoryFree(pTask);
 }
@@ -350,6 +350,11 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
   const int32_t BATCH_LIMIT = 256;
   SListNode*    pNode = NULL;
 
+  int idx = streamStateGetCfIdx(pFileState->pFileStore, "state");
+
+  int32_t len = pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 1;
+  char*   buf = taosMemoryCalloc(1, len);
+
   void* batch = streamStateCreateBatch();
   while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) {
     SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
@@ -360,9 +365,13 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
     }
 
     SStateKey sKey = {.key = *((SWinKey*)pPos->pKey), .opNum = ((SStreamState*)pFileState->pFileStore)->number};
-    code = streamStatePutBatch(pFileState->pFileStore, "state", batch, &sKey, pPos->pRowBuff, pFileState->rowSize, 0);
+    code = streamStatePutBatchOptimize(pFileState->pFileStore, idx, batch, &sKey, pPos->pRowBuff, pFileState->rowSize,
+                                       0, buf);
+    memset(buf, 0, len);
     qDebug("===stream===put %" PRId64 " to disc, res %d", sKey.key.ts, code);
   }
+  taosMemoryFree(buf);
 
   if (streamStateGetBatchSize(batch) > 0) {
     code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch);
   }
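[Editor's note] flushSnapshot() stages every row into one RocksDB write batch and commits the whole batch with a single rocksdb_write() call, rather than issuing one write per row. A minimal sketch against the stock RocksDB C API (rocksdb/c.h); the already-open database handle `db` is assumed, and real code would check the returned error string rather than just freeing it.

/* Sketch only: stage several puts, commit once. */
#include <stdlib.h>
#include <rocksdb/c.h>

void flush_batch_example(rocksdb_t* db) {
  rocksdb_writeoptions_t* wopts = rocksdb_writeoptions_create();
  rocksdb_writebatch_t*   batch = rocksdb_writebatch_create();
  char*                   err = NULL;

  rocksdb_writebatch_put(batch, "k1", 2, "v1", 2);  /* staged in memory only */
  rocksdb_writebatch_put(batch, "k2", 2, "v2", 2);

  rocksdb_write(db, wopts, batch, &err);  /* one commit for the whole batch */
  if (err != NULL) free(err);             /* the C API allocates error strings */

  rocksdb_writebatch_destroy(batch);
  rocksdb_writeoptions_destroy(wopts);
}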
@@ -419,7 +428,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
   if (code != 0 || len == 0 || val == NULL) {
     return TSDB_CODE_FAILED;
   }
-  memcpy(val, buf, len);
+  memcpy(buf, val, len);
   buf[len] = 0;
   maxCheckPointId = atol((char*)buf);
   taosMemoryFree(val);
@@ -433,7 +442,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
   if (code != 0) {
     return TSDB_CODE_FAILED;
   }
-  memcpy(val, buf, len);
+  memcpy(buf, val, len);
   buf[len] = 0;
   taosMemoryFree(val);
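[Editor's note] Both hunks above fix swapped memcpy() arguments: the destination comes first, so the old memcpy(val, buf, len) overwrote the value just fetched from the backend with uninitialized stack bytes, and the checkpoint id parsed from buf was garbage. A small self-contained demonstration of the corrected direction:

/* memcpy(dst, src, n): copy the fetched value INTO the stack buffer, then parse it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  const char* val = "42";  /* stands in for the value fetched from RocksDB */
  size_t      len = strlen(val);
  char        buf[16];

  memcpy(buf, val, len);   /* correct: buf is the destination */
  buf[len] = 0;
  printf("maxCheckPointId = %ld\n", atol(buf));  /* prints 42 */
  return 0;
}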
@@ -618,8 +618,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_
     return -1;
   }
 
-  // not restored, vnode enable
-  if (!pSyncNode->restoreFinish && pSyncNode->vgId != 1) {
+  if (!pSyncNode->restoreFinish) {
     terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
     sNError(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64,
             TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex);
@@ -275,7 +275,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT, "Conflict transaction
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL,      "Transaction commitlog is null")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL,  "Unable to establish connection While execute transaction and will continue in the background")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED, "Last Transaction not finished")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT,      "Sync timeout While execute transaction and will continue in the background")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT,      "Sync timeout While execute transaction and will continue in the background")
 TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR,      "Unknown transaction error")
 
 // mnode-mq
@@ -616,6 +616,8 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 2
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3
 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/precisionUS.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/precisionNS.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
@@ -92,9 +92,9 @@ class TDTestCase:
         else:
             tdLog.info("taosdump found: %s" % binPath)
 
-        os.system("%s -y --databases db -o ./taosdumptest/tmp1" % binPath)
+        os.system("%s --databases db -o ./taosdumptest/tmp1" % binPath)
         os.system(
-            "%s -y --databases db1 -o ./taosdumptest/tmp2" %
+            "%s --databases db1 -o ./taosdumptest/tmp2" %
             binPath)
 
         tdSql.execute("drop database db")
@@ -172,7 +172,7 @@ class TDTestCase:
         tdSql.query("show stables")
         tdSql.checkRows(2)
         os.system(
-            "%s -y --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" %
+            "%s --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" %
             binPath)
         tdSql.execute("drop database db12312313231231321312312312_323")
         os.system("%s -i ./taosdumptest/tmp1" % binPath)
@@ -97,7 +97,7 @@ class TDTestCase:
         tdSql.query("show databases")
         tdSql.checkRows(2)
 
-        os.system("%s -i ./taosdumptest/tmp -y" % binPath)
+        os.system("%s -i ./taosdumptest/tmp" % binPath)
 
         tdSql.query("show databases")
         tdSql.checkRows(3)
@@ -125,13 +125,13 @@ class TDTestCase:
         os.system("rm ./taosdumptest/tmp/*.sql")
         os.system("rm ./taosdumptest/tmp/*.avro*")
         os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
-        os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
+        os.system("%s -D test -o ./taosdumptest/tmp" % binPath)
 
         tdSql.execute("drop database test")
         tdSql.query("show databases")
         tdSql.checkRows(3)
 
-        os.system("%s -i ./taosdumptest/tmp -y" % binPath)
+        os.system("%s -i ./taosdumptest/tmp" % binPath)
 
         tdSql.execute("use test")
         tdSql.query("show stables")
@@ -134,15 +134,15 @@ class TDTestCase:
         # dump all data
 
         os.system(
-            "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+            "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
            binPath)
 
         # dump part data with -S -E
         os.system(
-            '%s -y -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
+            '%s -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
             binPath)
         os.system(
-            '%s -y -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
+            '%s -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
             binPath)
 
         tdSql.execute("drop database timedb1")
@@ -200,14 +200,14 @@ class TDTestCase:
         self.createdb(precision="us")
 
         os.system(
-            "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+            "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
             binPath)
 
         os.system(
-            '%s -y -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
+            '%s -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
             binPath)
         os.system(
-            '%s -y -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
+            '%s -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
             binPath)
 
         os.system("%s -i ./taosdumptest/dumptmp1" % binPath)
@@ -269,14 +269,14 @@ class TDTestCase:
         self.createdb(precision="ms")
 
         os.system(
-            "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+            "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
             binPath)
 
         os.system(
-            '%s -y -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
+            '%s -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
             binPath)
         os.system(
-            '%s -y -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
+            '%s -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
             binPath)
 
         os.system("%s -i ./taosdumptest/dumptmp1" % binPath)
@@ -1,36 +1,11 @@
 system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
 
 system sh/exec.sh -n dnode1 -s start
 sleep 50
 sql connect
 
-sql create dnode $hostname2 port 7200
-
-system sh/exec.sh -n dnode2 -s start
-
-print ===== step1
-$x = 0
-step1:
-$x = $x + 1
-sleep 1000
-if $x == 10 then
-  print ====> dnode not ready!
-  return -1
-endi
-sql select * from information_schema.ins_dnodes
-print ===> $data00 $data01 $data02 $data03 $data04 $data05
-print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $rows != 2 then
-  return -1
-endi
-if $data(1)[4] != ready then
-  goto step1
-endi
-if $data(2)[4] != ready then
-  goto step1
-endi
-
 print ===== step2
 sql drop stream if exists stream_t1;
@@ -248,10 +223,56 @@ sql insert into ts3 values(1648791223002,2,2,3,1.1);
 sql insert into ts4 values(1648791233003,3,2,3,2.1);
 sql insert into ts3 values(1648791243004,4,2,43,73.1);
 sql insert into ts4 values(1648791213002,24,22,23,4.1);
 
+$loop_count = 0
+loop032:
+
+$loop_count = $loop_count + 1
+if $loop_count == 30 then
+  return -1
+endi
+
+sleep 1000
+print 6-0 select * from streamtST1;
+sql select * from streamtST1;
+
+if $rows != 4 then
+  print =====rows=$rows
+  goto loop032
+endi
+
+if $data01 != 8 then
+  print =6====data01=$data01
+  goto loop032
+endi
+
 sql insert into ts3 values(1648791243005,4,20,3,3.1);
 sql insert into ts4 values(1648791243006,4,2,3,3.1) (1648791243007,4,2,3,3.1) ;
 sql insert into ts3 values(1648791243008,4,2,30,3.1) (1648791243009,4,2,3,3.1) (1648791243010,4,2,3,3.1) ;
 sql insert into ts4 values(1648791243011,4,2,3,3.1) (1648791243012,34,32,33,3.1) (1648791243013,4,2,3,3.1) (1648791243014,4,2,13,3.1);
 
+$loop_count = 0
+loop033:
+
+$loop_count = $loop_count + 1
+if $loop_count == 30 then
+  return -1
+endi
+
+sleep 1000
+print 6-1 select * from streamtST1;
+sql select * from streamtST1;
+
+if $rows != 4 then
+  print =====rows=$rows
+  goto loop033
+endi
+
+if $data01 != 8 then
+  print =6====data01=$data01
+  goto loop033
+endi
+
 sql insert into ts3 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
 
 $loop_count = 0
@@ -132,12 +132,12 @@ if $loop_count == 10 then
   return -1
 endi
 
-if $data01 != 1 then
+if $data01 != 2 then
   print =====data01=$data01
   goto loop4
 endi
 
-if $data02 != 1 then
+if $data02 != 2 then
   print =====data02=$data02
   goto loop4
 endi
@@ -576,13 +576,6 @@ $loop_count = 0
 
 print step 7
 
-loop4:
-sleep 100
-
-$loop_count = $loop_count + 1
-if $loop_count == 10 then
-  return -1
-endi
-
 sql create database test3 vgroups 6;
 sql use test3;
@@ -0,0 +1,293 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+
+    # get col value and total max min ...
+    def getColsValue(self, i, j):
+        # c1 value
+        if random.randint(1, 10) == 5:
+            c1 = None
+        else:
+            c1 = 1
+
+        # c2 value
+        if j % 3200 == 0:
+            c2 = 8764231
+        elif random.randint(1, 10) == 5:
+            c2 = None
+        else:
+            c2 = random.randint(-87654297, 98765321)
+
+        value = f"({self.ts}, "
+
+        # c1
+        if c1 is None:
+            value += "null,"
+        else:
+            self.c1Cnt += 1
+            value += f"{c1},"
+        # c2
+        if c2 is None:
+            value += "null,"
+        else:
+            value += f"{c2},"
+            # total count
+            self.c2Cnt += 1
+            # max
+            if self.c2Max is None:
+                self.c2Max = c2
+            else:
+                if c2 > self.c2Max:
+                    self.c2Max = c2
+            # min
+            if self.c2Min is None:
+                self.c2Min = c2
+            else:
+                if c2 < self.c2Min:
+                    self.c2Min = c2
+            # sum
+            if self.c2Sum is None:
+                self.c2Sum = c2
+            else:
+                self.c2Sum += c2
+
+        # c3 same with ts
+        value += f"{self.ts})"
+
+        # move next
+        self.ts += 1
+
+        return value
+
+    # insert data
+    def insertData(self):
+        tdLog.info("insert data ....")
+        sqls = ""
+        for i in range(self.childCnt):
+            # insert child table
+            values = ""
+            pre_insert = f"insert into t{i} values "
+            for j in range(self.childRow):
+                if values == "":
+                    values = self.getColsValue(i, j)
+                else:
+                    values += "," + self.getColsValue(i, j)
+
+                # batch insert
+                if j % self.batchSize == 0 and values != "":
+                    sql = pre_insert + values
+                    tdSql.execute(sql)
+                    values = ""
+            # append last
+            if values != "":
+                sql = pre_insert + values
+                tdSql.execute(sql)
+                values = ""
+
+        sql = "flush database db;"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+        # insert finished
+        tdLog.info(f"insert data successfully.\n"
+                   f"  inserted child table = {self.childCnt}\n"
+                   f"  inserted child rows  = {self.childRow}\n"
+                   f"  total inserted rows  = {self.childCnt*self.childRow}\n")
+        return
+
+    # prepareEnv
+    def prepareEnv(self):
+        # init
+        self.ts = 1680000000000*1000*1000
+        self.childCnt = 5
+        self.childRow = 10000
+        self.batchSize = 5000
+
+        # total
+        self.c1Cnt = 0
+        self.c2Cnt = 0
+        self.c2Max = None
+        self.c2Min = None
+        self.c2Sum = None
+
+        # create database db
+        sql = f"create database db vgroups 2 precision 'ns' "
+        tdLog.info(sql)
+        tdSql.execute(sql)
+        sql = f"use db"
+        tdSql.execute(sql)
+
+        # create super table st
+        sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+
+        # create child table
+        for i in range(self.childCnt):
+            sql = f"create table t{i} using st tags({i}) "
+            tdSql.execute(sql)
+
+        # create stream
+        sql = "create stream ma into sta as select count(ts) from st interval(100b)"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+
+        # insert data
+        self.insertData()
+
+    # check data correct
+    def checkExpect(self, sql, expectVal):
+        tdSql.query(sql)
+        rowCnt = tdSql.getRows()
+        for i in range(rowCnt):
+            val = tdSql.getData(i,0)
+            if val != expectVal:
+                tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
+                return False
+
+        tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
+        return True
+
+    # check time macro
+    def checkTimeMacro(self):
+        # 2 week
+        val = 2
+        nsval = val*7*24*60*60*1000*1000*1000
+        expectVal = self.childCnt * self.childRow
+        sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+
+        # 20 day
+        val = 20
+        nsval = val*24*60*60*1000*1000*1000
+        uint = "d"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+
+        # 30 hour
+        val = 30
+        nsval = val*60*60*1000*1000*1000
+        uint = "h"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+
+        # 90 minutes
+        val = 90
+        nsval = val*60*1000*1000*1000
+        uint = "m"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+        # 2s
+        val = 2
+        nsval = val*1000*1000*1000
+        uint = "s"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+        # 5a
+        val = 5
+        nsval = val*1000*1000
+        uint = "a"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+        # 300u
+        val = 300
+        nsval = val*1000
+        uint = "u"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+        self.checkExpect(sql, expectVal)
+        # 8b
+        val = 8
+        sql = f"select timediff(ts - {val}b, ts1) from st "
+        self.checkExpect(sql, val)
+
+    # init
+    def init(self, conn, logSql, replicaVar=1):
+        seed = time.clock_gettime(time.CLOCK_REALTIME)
+        random.seed(seed)
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    # where
+    def checkWhere(self):
+        cnt = 300
+        start = self.ts - cnt
+        sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
+        self.checkExpect(sql, cnt)
+
+        for i in range(50):
+            cnt = random.randint(1,40000)
+            base = 2000
+            start = self.ts - cnt - base
+            end = self.ts - base
+            sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
+            self.checkExpect(sql, cnt)
+
+    # stream
+    def checkStream(self):
+        allRows = self.childCnt * self.childRow
+        # ensure write data is expected
+        sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
+        self.checkExpect(sql, allRows - 1)
+
+        # stream count is ok
+        sql =f"select count(*) from sta"
+        cnt = int(allRows / 100) - 1 # last window is not close, so need reduce one
+        self.checkExpect(sql, cnt)
+
+        # check fields
+        sql =f"select count(*) from sta where `count(ts)` != 100"
+        self.checkExpect(sql, 0)
+
+        # check timestamp
+        sql =f"select count(*) from (select diff(`_wstart`) from sta)"
+        self.checkExpect(sql, cnt - 1)
+        sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
+        self.checkExpect(sql, 0)
+
+    # run
+    def run(self):
+        # prepare env
+        self.prepareEnv()
+
+        # time macro like 1w 1d 1h 1m 1s 1a 1u 1b
+        self.checkTimeMacro()
+
+        # check where
+        self.checkWhere()
+
+        # check stream
+        self.checkStream()
+
+    # stop
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,287 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+
+    # get col value and total max min ...
+    def getColsValue(self, i, j):
+        # c1 value
+        if random.randint(1, 10) == 5:
+            c1 = None
+        else:
+            c1 = 1
+
+        # c2 value
+        if j % 3200 == 0:
+            c2 = 8764231
+        elif random.randint(1, 10) == 5:
+            c2 = None
+        else:
+            c2 = random.randint(-87654297, 98765321)
+
+        value = f"({self.ts}, "
+
+        # c1
+        if c1 is None:
+            value += "null,"
+        else:
+            self.c1Cnt += 1
+            value += f"{c1},"
+        # c2
+        if c2 is None:
+            value += "null,"
+        else:
+            value += f"{c2},"
+            # total count
+            self.c2Cnt += 1
+            # max
+            if self.c2Max is None:
+                self.c2Max = c2
+            else:
+                if c2 > self.c2Max:
+                    self.c2Max = c2
+            # min
+            if self.c2Min is None:
+                self.c2Min = c2
+            else:
+                if c2 < self.c2Min:
+                    self.c2Min = c2
+            # sum
+            if self.c2Sum is None:
+                self.c2Sum = c2
+            else:
+                self.c2Sum += c2
+
+        # c3 same with ts
+        value += f"{self.ts})"
+
+        # move next
+        self.ts += 1
+
+        return value
+
+    # insert data
+    def insertData(self):
+        tdLog.info("insert data ....")
+        sqls = ""
+        for i in range(self.childCnt):
+            # insert child table
+            values = ""
+            pre_insert = f"insert into t{i} values "
+            for j in range(self.childRow):
+                if values == "":
+                    values = self.getColsValue(i, j)
+                else:
+                    values += "," + self.getColsValue(i, j)
+
+                # batch insert
+                if j % self.batchSize == 0 and values != "":
+                    sql = pre_insert + values
+                    tdSql.execute(sql)
+                    values = ""
+            # append last
+            if values != "":
+                sql = pre_insert + values
+                tdSql.execute(sql)
+                values = ""
+
+        sql = "flush database db;"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+        # insert finished
+        tdLog.info(f"insert data successfully.\n"
+                   f"  inserted child table = {self.childCnt}\n"
+                   f"  inserted child rows  = {self.childRow}\n"
+                   f"  total inserted rows  = {self.childCnt*self.childRow}\n")
+        return
+
+    # prepareEnv
+    def prepareEnv(self):
+        # init
+        self.ts = 1680000000000*1000
+        self.childCnt = 5
+        self.childRow = 10000
+        self.batchSize = 5000
+
+        # total
+        self.c1Cnt = 0
+        self.c2Cnt = 0
+        self.c2Max = None
+        self.c2Min = None
+        self.c2Sum = None
+
+        # create database db
+        sql = f"create database db vgroups 2 precision 'us' "
+        tdLog.info(sql)
+        tdSql.execute(sql)
+        sql = f"use db"
+        tdSql.execute(sql)
+
+        # create super table st
+        sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+
+        # create child table
+        for i in range(self.childCnt):
+            sql = f"create table t{i} using st tags({i}) "
+            tdSql.execute(sql)
+
+        # create stream
+        sql = "create stream ma into sta as select count(ts) from st interval(100u)"
+        tdLog.info(sql)
+        tdSql.execute(sql)
+
+        # insert data
+        self.insertData()
+
+    # check data correct
+    def checkExpect(self, sql, expectVal):
+        tdSql.query(sql)
+        rowCnt = tdSql.getRows()
+        for i in range(rowCnt):
+            val = tdSql.getData(i,0)
+            if val != expectVal:
+                tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
+                return False
+
+        tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
+        return True
+
+    # check time macro
+    def checkTimeMacro(self):
+        # 2 week
+        val = 2
+        usval = val*7*24*60*60*1000*1000
+        expectVal = self.childCnt * self.childRow
+        sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+
+        # 20 day
+        val = 20
+        usval = val*24*60*60*1000*1000
+        uint = "d"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+
+        # 30 hour
+        val = 30
+        usval = val*60*60*1000*1000
+        uint = "h"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+
+        # 90 minutes
+        val = 90
+        usval = val*60*1000*1000
+        uint = "m"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+        # 2s
+        val = 2
+        usval = val*1000*1000
+        uint = "s"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+        # 20a
+        val = 20
+        usval = val*1000
+        uint = "a"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+        # 300u
+        val = 300
+        usval = val*1
+        uint = "u"
+        sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+        self.checkExpect(sql, expectVal)
+
+    # init
+    def init(self, conn, logSql, replicaVar=1):
+        seed = time.clock_gettime(time.CLOCK_REALTIME)
+        random.seed(seed)
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), True)
+
+    # where
+    def checkWhere(self):
+        cnt = 300
+        start = self.ts - cnt
+        sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
+        self.checkExpect(sql, cnt)
+
+        for i in range(50):
+            cnt = random.randint(1,40000)
+            base = 2000
+            start = self.ts - cnt - base
+            end = self.ts - base
+            sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
+            self.checkExpect(sql, cnt)
+
+    # stream
+    def checkStream(self):
+        allRows = self.childCnt * self.childRow
+        # ensure write data is expected
+        sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
+        self.checkExpect(sql, allRows - 1)
+
+        # stream count is ok
+        sql =f"select count(*) from sta"
+        cnt = int(allRows / 100) - 1 # last window is not close, so need reduce one
+        self.checkExpect(sql, cnt)
+
+        # check fields
+        sql =f"select count(*) from sta where `count(ts)` != 100"
+        self.checkExpect(sql, 0)
+
+        # check timestamp
+        sql =f"select count(*) from (select diff(`_wstart`) from sta)"
+        self.checkExpect(sql, cnt - 1)
+        sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
+        self.checkExpect(sql, 0)
+
+    # run
+    def run(self):
+        # prepare env
+        self.prepareEnv()
+
+        # time macro like 1w 1d 1h 1m 1s 1a 1u
+        self.checkTimeMacro()
+
+        # check where
+        self.checkWhere()
+
+        # check stream
+        self.checkStream()
+
+    # stop
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
File diff suppressed because it is too large.
@@ -121,20 +121,20 @@ ELSE ()
         BUILD_COMMAND
         COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
         COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
-        COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-        # COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-        # COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+        # COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+        COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+        COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
 
         INSTALL_COMMAND
-        # COMMAND cmake -E echo "Comparessing taosadapter.exe"
-        # COMMAND cmake -E time upx taosadapter.exe
+        COMMAND cmake -E echo "Comparessing taosadapter.exe"
+        COMMAND cmake -E time upx taosadapter.exe
         COMMAND cmake -E echo "Copy taosadapter.exe"
         COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin/taosadapter.exe
         COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
         COMMAND cmake -E echo "Copy taosadapter.toml"
         COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
-        # COMMAND cmake -E echo "Copy taosadapter-debug.exe"
-        # COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
+        COMMAND cmake -E echo "Copy taosadapter-debug.exe"
+        COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
         )
     ELSE (TD_WINDOWS)
         MESSAGE("Building taosAdapter on non-Windows")
@@ -149,20 +149,20 @@ ELSE ()
         PATCH_COMMAND
         COMMAND git clean -f -d
         BUILD_COMMAND
-        COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-        # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-        # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+        # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+        COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+        COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
         INSTALL_COMMAND
-        # COMMAND cmake -E echo "Comparessing taosadapter.exe"
-        # COMMAND upx taosadapter || :
+        COMMAND cmake -E echo "Comparessing taosadapter.exe"
+        COMMAND upx taosadapter || :
         COMMAND cmake -E echo "Copy taosadapter"
         COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
         COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
         COMMAND cmake -E echo "Copy taosadapter.toml"
         COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
         COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
-        # COMMAND cmake -E echo "Copy taosadapter-debug"
-        # COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
+        COMMAND cmake -E echo "Copy taosadapter-debug"
+        COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
         )
     ENDIF (TD_WINDOWS)
 ENDIF ()
Some files were not shown because too many files have changed in this diff.