other: merge other branch.

This commit is contained in:
Haojun Liao 2023-06-09 09:41:10 +08:00
commit 53246ed6bb
105 changed files with 4207 additions and 2974 deletions

View File

@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-SET(TD_VER_NUMBER "3.0.4.3")
+SET(TD_VER_NUMBER "3.0.5.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@@ -274,7 +274,7 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
-option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
+option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()

View File

@@ -45,7 +45,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
:::note
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
-- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with a backslash like `\'`
+- The length of BINARY can be up to 16,374 bytes (65,517 bytes for a data column and 16,382 bytes for a tag column since version 3.0.5.0). The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with a backslash like `\'`
- Numeric values in SQL statements will be determined as integer or float type according to whether there is a decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
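A brief illustration of these rules, using a hypothetical table (names and values are placeholders):

```sql
-- BINARY(20) accepts up to 20 bytes; longer values are rejected with an error.
CREATE TABLE sensor_info (ts TIMESTAMP, device_name BINARY(20), note NCHAR(10));
INSERT INTO sensor_info VALUES (NOW, 'probe-01', 'ok');
-- A literal single quote inside the string is escaped with a backslash.
INSERT INTO sensor_info VALUES (NOW, 'probe\'s id', 'ok');
```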

View File

@@ -45,7 +45,7 @@ table_option: {
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
-3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
+3. The maximum length of each row is 48k bytes (64k since version 3.0.5.0), please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
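A short sketch of rule 6, using the reserved word `table` as a hypothetical table name:

```sql
-- Backticks let a reserved keyword be used as a table name; the 192-byte length limit still applies.
CREATE TABLE `table` (ts TIMESTAMP, val INT);
-- Names created with the escape character are case sensitive.
DROP TABLE `table`;
```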

View File

@@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
-RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
+RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...

View File

@@ -889,9 +889,10 @@ ignore_null_values: {
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported after version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported after version 3.0.3.0).
@@ -902,7 +903,7 @@ ignore_null_values: {
- We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
```sql
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
```
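The single-timestamp form of `RANGE` can be sketched the same way; this assumes the same `test.meters` data and interpolates one point per child table at a hypothetical instant (per the note above, `EVERY` may be omitted in this case):

```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM test.meters
PARTITION BY TBNAME
RANGE('2017-07-22 12:00:00')
FILL(LINEAR);
```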
### LAST

View File

@@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum length of database name is 64 bytes
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
-- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of each data row is 48K bytes (64K since version 3.0.5.0). Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- The maximum length of a column name is 64 bytes.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- The maximum length of a tag name is 64 bytes

View File

@@ -32,25 +32,22 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela
Native connections are supported on the same platforms as the TDengine client driver.
REST connection supports all platforms that can run Java.

-## Version support
-Please refer to [version support list](/reference/connector#version-support)

## Recent update logs

| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.2 | data subscription adds a seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
| 3.0.1 - 3.0.4 | Fix occasional incorrect parsing of resultSet data. 3.0.1 is compiled on JDK 11; use another version in a JDK 8 environment | - |
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
| 2.0.42 | Fix wasNull interface return value in WebSocket connection | - |
| 2.0.41 | Fix decode method of username and password in REST connection | - |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
| 2.0.38 | JDBC REST connections add bulk pull function | - |
| 2.0.37 | Support json tags | - |
| 2.0.36 | Support schemaless writing | - |

**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
@@ -102,6 +99,8 @@ For specific error codes, please refer to.
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
| 0x2350 | unknown error | Unknown exception, please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
@@ -117,8 +116,8 @@ For specific error codes, please refer to.
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter. |
| 0x237a | vGroup not found in result set | The subscription is not bound to the VGroup due to the rebalance mechanism. |

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
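The 0x231d and 0x231e errors above are usually addressed by enlarging the corresponding timeouts when the connection is created. A minimal sketch, assuming the `httpConnectTimeout` and `messageWaitTimeout` parameters named in the table can be passed as connection properties (values in milliseconds; host and credentials are placeholders):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class TimeoutDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical REST/WebSocket URL; adjust host and credentials for your environment.
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata&batchfetch=true";
        Properties props = new Properties();
        // Assumed property names, matching the parameters mentioned in the error table.
        props.setProperty("httpConnectTimeout", "60000");
        props.setProperty("messageWaitTimeout", "60000");
        try (Connection connection = DriverManager.getConnection(url, props)) {
            System.out.println("connected: " + !connection.isClosed());
        }
    }
}
```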
@@ -169,7 +168,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
-   <version>3.2.1</version>
+   <version>3.2.2</version>
</dependency>
```
@@ -913,14 +912,15 @@ public class SchemalessWsTest {
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
-Connection connection = DriverManager.getConnection(url);
+try (Connection connection = DriverManager.getConnection(url)) {
init(connection);
-SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
+try (SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")) {
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
-System.exit(0);
+}
+}
}

private static void init(Connection connection) throws SQLException {
@@ -991,6 +991,17 @@ while(true) {

`poll` obtains one message each time it is run.

#### Specify subscription offsets

```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
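A sketch of how these methods might be combined to replay a topic from its earliest offsets; it assumes an already created and subscribed consumer, and `topic_meters` is a placeholder topic name:

```java
// Assumption: `consumer` is an existing, subscribed consumer instance.
String topic = "topic_meters";
Map<TopicPartition, Long> begin = consumer.beginningOffsets(topic);
for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
    // Rewind each partition (vgroup) of the topic to its earliest available offset.
    consumer.seek(entry.getKey(), entry.getValue());
}
// Inspect the current positions after seeking.
Map<TopicPartition, Long> positions = consumer.position(topic);
System.out.println(positions);
```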
#### Close subscriptions

```java
@@ -1256,6 +1267,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
- mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
- consumer-demo: an example of consuming TDengine data; the consumption rate can be controlled by parameters.

[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)

View File

@@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.
## Version support

-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](https://github.com/taosdata/driver-go#remind)

## Supported features
@@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose

Commit information.

* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`

Get Assignment (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`

Note: `ignoredTimeoutMs` is reserved for compatibility purposes.

Seek offset (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Unsubscribe() error`

Unsubscribe.
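A sketch of how `Assignment` and `Seek` might be combined to replay a subscription from the beginning; the import path and the `Offset` field on `tmq.TopicPartition` are assumptions, and consumer creation/subscription is omitted:

```go
package tmqdemo

import (
	tmqdriver "github.com/taosdata/driver-go/v3/af/tmq" // assumed location of the native TMQ consumer
)

// rewindToStart rewinds every assigned vgroup of the current subscription back to offset 0.
func rewindToStart(consumer *tmqdriver.Consumer) error {
	partitions, err := consumer.Assignment()
	if err != nil {
		return err
	}
	for i := range partitions {
		partitions[i].Offset = 0 // assumed Offset field on tmq.TopicPartition
		// The second argument is ignored and is kept only for compatibility.
		if err := consumer.Seek(partitions[i], 0); err != nil {
			return err
		}
	}
	return nil
}
```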
@@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose

Commit information.

* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`

Get Assignment (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`

Note: `ignoredTimeoutMs` is reserved for compatibility purposes.

Seek offset (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Unsubscribe() error`

Unsubscribe.
@@ -476,7 +494,7 @@ Unsubscribe.

Close consumer.

-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### parameter binding via WebSocket
@@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv

Closes the parameter binding.

-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)

## API Reference

View File

@@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
Native connections are supported on the same platforms as the TDengine client driver.
Websocket connections are supported on all platforms that can run Rust.

-## Version support
-Please refer to [version support list](/reference/connector#version-support)

## Version history

| connector-rust version | TDengine version | major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.8 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
@@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in
}
```
Get assignments
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0.
```rust
let assignments = consumer.assignments().await.unwrap();
```
Seek offset
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0.
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
Unsubscribe:

```rust
@@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.

-For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).

For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).

View File

@@ -90,7 +90,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.

:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::

## Time resolution recognition

View File

@@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se

![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp)

-## What is Confluent?
-
-[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including:
-
-1. Schema Registry
-2. REST Proxy
-3. Non-Java Clients
-4. Many packaged Kafka Connect plugins
-5. GUI for managing and monitoring Kafka - Confluent Control Center
-
-Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-
-![TDengine Database Kafka Connector -- Confluent platform](kafka/confluentPlatform.webp)
-
-Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.

## Prerequisites

1. Linux operating system
2. Java 8 and Maven installed
-3. Git is installed
+3. Git/curl/vi are installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)

-## Install Confluent
-
-Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
+## Install Kafka
Execute in any directory:

````
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
````

-Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
+Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.

```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
```

Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)

-After the installation is complete, you can enter `confluent version` for simple verification:
-
-```
-# confluent version
-confluent - Confluent CLI
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```

## Install TDengine Connector plugin

### Install from source code

```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```

The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.

-### Install with confluent-hub
-
-[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
-**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
+### Add configuration file
+
+Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
+```

-## Start Confluent
-
-```
-confluent local services start
-```
-
-:::note
-Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
-:::
-
-:::tip
-If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
-
-```title="Console output log" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
-
-To clear data, execute `rm -rf /tmp/confluent.106668`.
-:::
-
-### Check Confluent Services Status
-
-Use the command below to check the status of all services:
-
-```
-confluent local services status
-```
-
-The expected output is:
-
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
-```
+## Start Kafka Services
+
+Use the commands below to start all services:
+
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
+
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
+
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
+```

### Check Successfully Loaded Plugin

After Kafka Connect has completely started, you can use the command below to check whether our plugins are installed successfully:

-```
-confluent local services connect plugin list
-```
-
-The output should contain `TDengineSinkConnector` and `TDengineSourceConnector` as below:
-
-```
-Available Connect Plugins:
-[
-  {
-    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
-    "type": "sink",
-    "version": "1.0.0"
-  },
-  {
-    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
-    "type": "source",
-    "version": "1.0.0"
-  },
-......
-```
-
-If not, please check the log file of Kafka Connect. To view the log file path, please execute:
-
-```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-
-It should produce a path like: `/tmp/confluent.104086/connect/connect.stdout`
-
-Besides the log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect did not find our plugins, it's probably because the installed path is not included in `plugin.path`.
+```shell
+curl http://localhost:8083/connectors
+```
+
+The output is as follows:
+
+```txt
+[]
+```
## The use of TDengine Sink Connector

@@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.

### Add Sink Connector configuration file

```shell
mkdir ~/test
cd ~/test
vi sink-demo.json
```

The content of sink-demo.json is as follows:

```json title="sink-demo.json"
{
  "name": "TDengineSinkConnector",
  "config": {
    "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
    "tasks.max": "1",
    "topics": "meters",
    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
    "connection.user": "root",
    "connection.password": "taosdata",
    "connection.database": "power",
    "db.schemaless": "line",
    "data.precision": "ns",
    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    "value.converter": "org.apache.kafka.connect.storage.StringConverter",
    "errors.tolerance": "all",
    "errors.deadletterqueue.topic.name": "dead_letter_topic",
    "errors.deadletterqueue.topic.replication.factor": 1
  }
}
```
Key configuration instructions:

1. `"topics": "meters"` and `"connection.database": "power"` mean to subscribe to the data of the topic meters and write to the database power.
2. `"db.schemaless": "line"` means the data is in the InfluxDB Line protocol format.

### Create Sink Connector instance

````shell
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
````

If the above command is executed successfully, the output is as follows:
@@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
"tasks.max": "1",
"topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"name": "TDengineSinkConnector",
"errors.tolerance": "all",
"errors.deadletterqueue.topic.name": "dead_letter_topic",
"errors.deadletterqueue.topic.replication.factor": "1",
},
"tasks": [],
"type": "sink"
@@ -258,7 +182,7 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
Use kafka-console-producer to write test data to the topic `meters`.

```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```
:::note
@@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
Use the TDengine CLI to verify that the sync was successful.

```sql
taos> use power;
Database changed.
taos> select * from meters;
-ts | current | voltage | phase | groupid | location |
+_ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -293,29 +217,34 @@ TDengine Source Connector will convert the data in TDengine data table into [Inf
The following sample program synchronizes the data in the database test to the topic tdengine-source-test.

### Add Source Connector configuration file

```shell
vi source-demo.json
```

Enter the following content:
```json title="source-demo.json"
{
  "name":"TDengineSourceConnector",
  "config":{
    "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
    "tasks.max": 1,
    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
    "connection.username": "root",
    "connection.password": "taosdata",
    "connection.database": "test",
    "connection.attempts": 3,
    "connection.backoff.ms": 5000,
    "topic.prefix": "tdengine-source",
    "poll.interval.ms": 1000,
    "fetch.max.rows": 100,
    "topic.per.stable": true,
    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    "value.converter": "org.apache.kafka.connect.storage.StringConverter"
  }
}
```
### Prepare test data

@@ -340,40 +269,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
Use TDengine CLI to execute SQL script

```shell
taos -f prepare-source-data.sql
```

### Create Connector instance

```shell
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```
### View topic data

Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.

````shell
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
````

output:

```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......
```

All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:

```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
```

Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
@@ -383,16 +312,16 @@ After testing, use the unload command to stop the loaded connector.

View currently active connectors:

```shell
-confluent local services connect connector status
+curl http://localhost:8083/connectors
```

You should now have two active connectors if you followed the previous steps. Use the following commands to unload them:

```shell
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
```
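Before deleting a connector you can also inspect a single instance with the standard Kafka Connect REST endpoint (connector name as created above):

```shell
curl http://localhost:8083/connectors/TDengineSinkConnector/status
```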
## Configuration reference

@@ -430,19 +359,14 @@ The following configuration items apply to TDengine Sink Connector and TDengine
6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
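For example, with the values used earlier on this page (`topic.prefix=tdengine-source`, `connection.database=test`, a supertable named `meters`), the resulting topic names would be:

```txt
topic.per.stable=true   ->  tdengine-source-test-meters
topic.per.stable=false  ->  tdengine-source-test
```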
## Other notes

-1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
-2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
+1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.

## Feedback

<https://github.com/taosdata/kafka-connect-tdengine/issues>

## Reference

-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1. For more information, see <https://kafka.apache.org/documentation/>

View File

@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";

## 3.0.5.0

<Release type="tdengine" version="3.0.5.0" />

## 3.0.4.2

<Release type="tdengine" version="3.0.4.2" />

View File

@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";

## 2.5.1

<Release type="tools" version="2.5.1" />

## 2.5.0

<Release type="tools" version="2.5.0" />

View File

@@ -32,25 +32,22 @@ The TDengine JDBC driver implementation is kept as consistent as possible with relational database drivers
Native connections are supported on the same platforms as the TDengine client driver.
REST connections are supported on all platforms that can run Java.

-## Version support
-Please refer to the [version support list](../#版本支持)
-## Recent update logs
+## Version history

| taos-jdbcdriver version | major changes | TDengine version |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.2 | New feature: data subscription supports the seek function. | 3.0.5.0 or later |
| 3.2.1 | New feature: WebSocket connection supports schemaless and prepareStatement writing. Change: consumer poll now returns a ConsumerRecord result set; the data can be obtained through value(). | 3.0.3.0 or later |
| 3.2.0 | Has connection issues; not recommended | - |
| 3.1.0 | WebSocket connection supports subscription | - |
| 3.0.1 - 3.0.4 | Fix result set data parsing errors in some cases. 3.0.1 is compiled on JDK 11; in a JDK 8 environment use another version | - |
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
| 2.0.42 | Fix the wasNull interface return value in WebSocket connection | - |
| 2.0.41 | Fix the encoding of username and password in REST connection | - |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout settings | - |
| 2.0.38 | Add bulk pull function to JDBC REST connections | - |
| 2.0.37 | Support json tags | - |
| 2.0.36 | Support schemaless writing | - |

**Note**: adding the `batchfetch` parameter to the REST connection and setting it to true will enable the WebSocket connection.
@@ -80,45 +77,47 @@ The error codes that the JDBC connector may report include 4 kinds:
For specific error codes, please refer to:

| Error Code | Description | Suggested Actions |
| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
| 0x2301 | connection already closed | The connection has been closed. Check the connection status, or recreate the connection before executing the statement. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported; consider switching to another connection method. |
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. |
| 0x2304 | statement is closed | The statement has been closed. Check whether it was used after being closed, or whether the connection is normal. |
| 0x2305 | resultSet is closed | The resultSet has been released. Check whether it was used after being released. |
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before calling executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | The parameter is out of range. Check the valid range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Check whether the Connection was used after being closed, or whether the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the data types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | The JNI driver cannot be registered. Check whether the url is correct. |
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
| 0x2314 | numeric value out of range | Check whether the correct interface was used for numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type was specified when converting between TDengine and JDBC data types. |
| 0x2317 | | The wrong request type was used in the REST connection. |
| 0x2318 | | A data transmission error occurred in the REST connection. Check the network and retry. |
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | An execution exception occurred during the REST connection |
| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to taosAdapter. |
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to taosAdapter. |
| 0x2350 | unknown error | Unknown exception, please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement on the native connection. Check the taos log to locate the fault. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when a command was executed on the native connection. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The result set of the native connection is abnormal. Check the connection and retry. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation failed on the native connection. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameter is empty when creating a subscription. Fill in the correct parameters. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains an empty value. Fill in the correct parameters. |
| 0x2373 | failed to set consumer property, | The parameter value contains an empty value. Fill in the correct parameters. |
| 0x2375 | topic reference has been destroyed | The topic reference was released during the creation of the data subscription. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | The subscription topic name is empty during data subscription creation. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create the data subscription. Check the taos log according to the error message to locate the fault. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter. |
| 0x237a | vGroup not found in result set | The VGroup is not assigned to the current consumer; because of the rebalance mechanism, consumers and VGroups are not bound to each other. |

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
@@ -169,7 +168,7 @@ In a Maven project, add the following dependency in pom.xml:
<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
-   <version>3.2.1</version>
+   <version>3.2.2</version>
</dependency>
```
@@ -916,14 +915,15 @@ public class SchemalessWsTest {
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
-Connection connection = DriverManager.getConnection(url);
+try (Connection connection = DriverManager.getConnection(url)) {
init(connection);
-SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
+try (SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")) {
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
-System.exit(0);
+}
+}
}

private static void init(Connection connection) throws SQLException {
@ -994,6 +994,17 @@ while(true) {
`poll` 每次调用获取一个消息。 `poll` 每次调用获取一个消息。
#### 指定订阅 Offset
```
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
```
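The fragment below is an illustrative sketch only, not part of the connector's shipped examples: it assumes an already-subscribed `TaosConsumer` named `consumer` and a hypothetical topic `topic_meters`, rewinds every vgroup to its earliest offset, and prints the position the consumer will read from next.
```java
// Hedged sketch: `consumer` and the topic name "topic_meters" are assumptions.
Map<TopicPartition, Long> begin = consumer.beginningOffsets("topic_meters");
for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
    // move each vgroup back to its earliest available offset
    consumer.seek(entry.getKey(), entry.getValue());
}
// show where consumption will resume for every vgroup of the topic
Map<TopicPartition, Long> positions = consumer.position("topic_meters");
positions.forEach((tp, offset) -> System.out.println(tp + " -> " + offset));
```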
#### 关闭订阅 #### 关闭订阅
```java ```java
@ -1258,6 +1269,7 @@ public static void main(String[] args) throws Exception {
- connectionPoolsHikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。 - connectionPoolsHikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。
- SpringJdbcTemplateSpring JdbcTemplate 中使用 taos-jdbcdriver。 - SpringJdbcTemplateSpring JdbcTemplate 中使用 taos-jdbcdriver。
- mybatisplus-demoSpringboot + Mybatis 中使用 taos-jdbcdriver。 - mybatisplus-demoSpringboot + Mybatis 中使用 taos-jdbcdriver。
- consumer-demoConsumer 消费 TDengine 数据示例,可通过参数控制消费速度。
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC) 请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)

View File

@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
## 版本支持 ## 版本支持
请参考[版本支持列表](../#版本支持) 请参考[版本支持列表](https://github.com/taosdata/driver-go#remind)
## 支持的功能特性 ## 支持的功能特性
@ -383,6 +383,15 @@ func main() {
提交消息。 提交消息。
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
获取消费进度。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
按照指定的进度消费。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Close() error` * `func (c *Consumer) Close() error`
关闭连接。 关闭连接。
@ -468,11 +477,20 @@ func main() {
提交消息。 提交消息。
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
获取消费进度。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
按照指定的进度消费。(需要 TDengine >= 3.0.5.0 driver-go >= v3.5.0)
* `func (c *Consumer) Close() error` * `func (c *Consumer) Close() error`
关闭连接。 关闭连接。
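As an illustration only (not taken from driver-go's own examples), the sketch below hides the consumer behind a tiny local interface so that it applies to both the native and the WebSocket implementation; the import path assumed for `TopicPartition`, `github.com/taosdata/driver-go/v3/common/tmq`, should be checked against your driver-go version.
```go
package main

import (
	"fmt"

	tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
)

// seeker captures only the two methods documented above, so the sketch is not tied
// to a concrete consumer type.
type seeker interface {
	Assignment() ([]tmqcommon.TopicPartition, error)
	Seek(partition tmqcommon.TopicPartition, ignoredTimeoutMs int) error
}

// rewindAll re-seeks every vgroup currently assigned to an already-subscribed consumer.
// The offsets returned by Assignment are replayed unchanged; in a real program set
// TopicPartition.Offset to the position you want to resume from before calling Seek.
func rewindAll(c seeker) error {
	partitions, err := c.Assignment()
	if err != nil {
		return err
	}
	for i := range partitions {
		// The second argument of Seek is kept only for compatibility and is ignored.
		if err := c.Seek(partitions[i], 0); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println("sketch only; pass a subscribed consumer to rewindAll")
}
```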
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go) 完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### 通过 WebSocket 进行参数绑定 ### 通过 WebSocket 进行参数绑定
@ -520,7 +538,7 @@ func main() {
结束参数绑定。 结束参数绑定。
完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go) 完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
## API 参考 ## API 参考

View File

@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。 原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
Websocket 连接支持所有能运行 Rust 的平台。 Websocket 连接支持所有能运行 Rust 的平台。
## 版本支持 ## 版本历史
请参考[版本支持列表](../#版本支持) | Rust 连接器版本 | TDengine 版本 | 主要功能 |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.8 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
| v0.6.0 | 3.0.0.0 | 基础功能。 |
Rust 连接器仍然在快速开发中1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine以避免已知问题。 Rust 连接器仍然在快速开发中1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine以避免已知问题。
@ -502,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur
} }
``` ```
获取消费进度:
版本要求 connector-rust >= v0.8.8 TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
按照指定的进度消费:
版本要求 connector-rust >= v0.8.8 TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
停止订阅: 停止订阅:
```rust ```rust
@ -516,7 +537,7 @@ consumer.unsubscribe().await;
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。 - `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
- `auto.commit.interval.ms`: 自动标记的时间间隔。 - `auto.commit.interval.ms`: 自动标记的时间间隔。
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs). 完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。 其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。

View File

@ -45,9 +45,9 @@ CREATE DATABASE db_name PRECISION 'ns';
:::note :::note
- 表的每行长度不能超过 48KB注意每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。 - 表的每行长度不能超过 48KB从 3.0.5.0 版本开始为 64KB注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。 - 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'` - BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517标签列为 16,382 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`
- SQL 语句中的数值类型将依据是否存在小数点或使用科学计数法表示来判断数值类型是否为整型或者浮点型因此在使用时要注意相应类型越界的情况。例如9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。 - SQL 语句中的数值类型将依据是否存在小数点或使用科学计数法表示来判断数值类型是否为整型或者浮点型因此在使用时要注意相应类型越界的情况。例如9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
::: :::
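A hedged SQL sketch of the notes above (the table `sensor` and its columns are made-up names): a BINARY(20) column holds at most 20 single-byte characters, multi-byte text goes into an NCHAR column, and a literal single quote inside the string is escaped as `\'`.
```sql
-- Hypothetical table; shows BINARY length, NCHAR for multi-byte text, and quote escaping.
CREATE TABLE sensor (ts TIMESTAMP, msg BINARY(20), remark NCHAR(50));
INSERT INTO sensor VALUES (NOW, 'it\'s ok', '温度正常');
```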

View File

@ -43,7 +43,7 @@ table_option: {
1. 表的第一个字段必须是 TIMESTAMP并且系统自动将其设为主键 1. 表的第一个字段必须是 TIMESTAMP并且系统自动将其设为主键
2. 表名最大长度为 192 2. 表名最大长度为 192
3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) 3. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写 4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
5. 使用数据类型 binary 或 nchar需指定其最长的字节数如 binary(20),表示 20 字节; 5. 使用数据类型 binary 或 nchar需指定其最长的字节数如 binary(20),表示 20 字节;
6. 为了兼容支持更多形式的表名TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。 6. 为了兼容支持更多形式的表名TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
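A hedged sketch of rules 5 and 6 (the names are made up): the backtick escape lets a name that would otherwise clash with a keyword pass the checks above while preserving its case, and the BINARY column must declare its maximum byte length.
```sql
-- `table` would normally collide with a keyword; the backtick escape makes it acceptable.
-- model is limited to 20 bytes.
CREATE TABLE `table` (ts TIMESTAMP, model BINARY(20));
```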

View File

@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)] | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause: interp_clause:
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val) RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause: partition_by_clause:
PARTITION BY expr [, expr] ... PARTITION BY expr [, expr] ...

View File

@ -890,9 +890,10 @@ ignore_null_values: {
- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。 - INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
- INTERP 的输入数据为指定列的数据可以通过条件语句where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。 - INTERP 的输入数据为指定列的数据可以通过条件语句where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
- INTERP 需要同时与 RANGEEVERY 和 FILL 关键字一起使用。 - INTERP 需要同时与 RANGEEVERY 和 FILL 关键字一起使用。
- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。 - INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间time_unit 值进行插值time_unit 可取值时间单位1a(毫秒)1s(秒)1m(分)1h(小时)1d(天)1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值. - INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间time_unit 值进行插值time_unit 可取值时间单位1a(毫秒)1s(秒)1m(分)1h(小时)1d(天)1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值.
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句) - INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值在这种情况下EVERY 字段可以省略。例如SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。 - INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。 - INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。 - INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。
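To make the interplay of RANGE, EVERY and FILL concrete, here is a hedged sketch (the table `tb` and column `col` are hypothetical): it interpolates `col` every 500 ms inside a one-minute range, fills gaps linearly, and returns the interpolated timestamp plus a flag marking filled rows.
```sql
SELECT _irowts, INTERP(col), _isfilled
FROM tb
RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00')
EVERY(500a)
FILL(LINEAR);
```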

View File

@ -26,7 +26,7 @@ description: 合法字符集和命名中的限制规则
- 数据库名最大长度为 64 字节 - 数据库名最大长度为 64 字节
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符 - 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置) - 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
- 列名最大长度为 64 字节 - 列名最大长度为 64 字节
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。 - 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
- 标签名最大长度为 64 字节 - 标签名最大长度为 64 字节

View File

@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip :::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
48KB标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit) 48KB(从 3.0.5.0 版本开始为 64KB标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
::: :::

View File

@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) ![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp)
## 什么是 Confluent
[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
1. Schema Registry
2. REST 代理
3. 非 Java 客户端
4. 很多打包好的 Kafka Connect 插件
5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp)
Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
## 前置条件 ## 前置条件
运行本教程中示例的前提条件。 运行本教程中示例的前提条件。
1. Linux 操作系统 1. Linux 操作系统
2. 已安装 Java 8 和 Maven 2. 已安装 Java 8 和 Maven
3. 已安装 Git 3. 已安装 Git、curl、vi
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install) 4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
## 安装 Confluent ## 安装 Kafka
Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。
在任意目录下执行: 在任意目录下执行:
``` ```shell
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
tar xzf confluent-7.1.1.tar.gz -C /opt/ tar xzf kafka_2.13-3.4.0.tgz -C /opt/
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
``` ```
然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。 然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。
```title=".profile" ```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1 export KAFKA_HOME=/opt/kafka
export PATH=$CONFLUENT_HOME/bin:$PATH export PATH=$PATH:$KAFKA_HOME/bin
``` ```
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile 以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile
安装完成之后,可以输入`confluent version`做简单验证:
```
# confluent version
confluent - Confluent CLI
Version: v2.6.1
Git Ref: 6d920590
Build Date: 2022-02-18T06:14:21Z
Go Version: go1.17.6 (linux/amd64)
Development: false
```
## 安装 TDengine Connector 插件 ## 安装 TDengine Connector 插件
### 从源码安装 ### 编译插件
``` ```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine cd kafka-connect-tdengine
mvn clean package mvn clean package -Dmaven.test.skip=true
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
``` ```
以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。 以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`
### 用 confluent-hub 安装 ### 配置插件
[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。 将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中
**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。
## 启动 Confluent ```properties
plugin.path=/usr/share/java,/opt/kafka/components
```
confluent local services start
``` ```
:::note ## 启动 Kafka
一定要先安装插件再启动 Confluent, 否则加载插件会失败。
:::
:::tip ```shell
若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
```title="控制台输出日志" {1} kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
Using CONFLUENT_CURRENT: /tmp/confluent.106668
Starting ZooKeeper connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
ZooKeeper is [UP]
Starting Kafka
Kafka is [UP]
Starting Schema Registry
Schema Registry is [UP]
Starting Kafka REST
Kafka REST is [UP]
Starting Connect
Connect is [UP]
Starting ksqlDB Server
ksqlDB Server is [UP]
Starting Control Center
Control Center is [UP]
``` ```
清空数据可执行 `rm -rf /tmp/confluent.106668` ### 验证 Kafka Connect 是否启动成功
:::
### 验证各个组件是否启动成功
输入命令: 输入命令:
``` ```shell
confluent local services status curl http://localhost:8083/connectors
``` ```
如果各组件都启动成功,会得到如下输出: 如果各组件都启动成功,会得到如下输出:
```txt
[]
``` ```
Connect is [UP]
Control Center is [UP]
Kafka is [UP]
Kafka REST is [UP]
ksqlDB Server is [UP]
Schema Registry is [UP]
ZooKeeper is [UP]
```
### 验证插件是否安装成功
在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
```
confluent local services connect plugin list
```
如果成功安装,会输出如下:
```txt {4,9}
Available Connect Plugins:
[
{
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
"type": "sink",
"version": "1.0.0"
},
{
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
"type": "source",
"version": "1.0.0"
},
......
```
如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
```
echo `cat /tmp/confluent.current`/connect/connect.stdout
```
该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`
与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path` 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
## TDengine Sink Connector 的使用 ## TDengine Sink Connector 的使用
@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。 下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
### 添加配置文件 ### 添加 Sink Connector 配置文件
``` ```shell
mkdir ~/test mkdir ~/test
cd ~/test cd ~/test
vi sink-demo.properties vi sink-demo.json
``` ```
sink-demo.properties 内容如下: sink-demo.json 内容如下:
```ini title="sink-demo.properties" ```json title="sink-demo.json"
name=TDengineSinkConnector {
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector "name": "TDengineSinkConnector",
tasks.max=1 "config": {
topics=meters "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
connection.url=jdbc:TAOS://127.0.0.1:6030 "tasks.max": "1",
connection.user=root "topics": "meters",
connection.password=taosdata "connection.url": "jdbc:TAOS://127.0.0.1:6030",
connection.database=power "connection.user": "root",
db.schemaless=line "connection.password": "taosdata",
data.precision=ns "connection.database": "power",
key.converter=org.apache.kafka.connect.storage.StringConverter "db.schemaless": "line",
value.converter=org.apache.kafka.connect.storage.StringConverter "data.precision": "ns",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
"errors.tolerance": "all",
"errors.deadletterqueue.topic.name": "dead_letter_topic",
"errors.deadletterqueue.topic.replication.factor": 1
}
}
``` ```
关键配置说明: 关键配置说明:
1. `topics=meters``connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。 1. `"topics": "meters"` 和 `"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。
2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。 2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。
### 创建 Connector 实例 ### 创建 Sink Connector 实例
``` ```shell
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
``` ```
若以上命令执行成功,则有如下输出: 若以上命令执行成功,则有如下输出:
@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
"tasks.max": "1", "tasks.max": "1",
"topics": "meters", "topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter", "value.converter": "org.apache.kafka.connect.storage.StringConverter",
"name": "TDengineSinkConnector" "name": "TDengineSinkConnector",
"errors.tolerance": "all",
"errors.deadletterqueue.topic.name": "dead_letter_topic",
"errors.deadletterqueue.topic.replication.factor": "1",
}, },
"tasks": [], "tasks": [],
"type": "sink" "type": "sink"
@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
使用 kafka-console-producer 向主题 meters 添加测试数据。 使用 kafka-console-producer 向主题 meters 添加测试数据。
``` ```shell
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
``` ```
:::note :::note
@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
使用 TDengine CLI 验证同步是否成功。 使用 TDengine CLI 验证同步是否成功。
``` ```sql
taos> use power; taos> use power;
Database changed. Database changed.
taos> select * from meters; taos> select * from meters;
ts | current | voltage | phase | groupid | location | _ts | current | voltage | phase | groupid | location |
=============================================================================================================================================================== ===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@ -297,29 +216,34 @@ TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [Influx
下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。 下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
### 添加配置文件 ### 添加 Source Connector 配置文件
``` ```shell
vi source-demo.properties vi source-demo.json
``` ```
输入以下内容: 输入以下内容:
```ini title="source-demo.properties" ```json title="source-demo.json"
name=TDengineSourceConnector {
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector "name":"TDengineSourceConnector",
tasks.max=1 "config":{
connection.url=jdbc:TAOS://127.0.0.1:6030 "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
connection.username=root "tasks.max": 1,
connection.password=taosdata "connection.url": "jdbc:TAOS://127.0.0.1:6030",
connection.database=test "connection.username": "root",
connection.attempts=3 "connection.password": "taosdata",
connection.backoff.ms=5000 "connection.database": "test",
topic.prefix=tdengine-source- "connection.attempts": 3,
poll.interval.ms=1000 "connection.backoff.ms": 5000,
fetch.max.rows=100 "topic.prefix": "tdengine-source",
key.converter=org.apache.kafka.connect.storage.StringConverter "poll.interval.ms": 1000,
value.converter=org.apache.kafka.connect.storage.StringConverter "fetch.max.rows": 100,
"topic.per.stable": true,
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
}
}
``` ```
### 准备测试数据 ### 准备测试数据
@ -344,27 +268,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
使用 TDengine CLI, 执行 SQL 文件。 使用 TDengine CLI, 执行 SQL 文件。
``` ```shell
taos -f prepare-source-data.sql taos -f prepare-source-data.sql
``` ```
### 创建 Connector 实例 ### 创建 Source Connector 实例
``` ```shell
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
``` ```
### 查看 topic 数据 ### 查看 topic 数据
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。 使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
``` ```shell
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
``` ```
输出: 输出:
``` ```txt
...... ......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
@ -373,7 +297,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
此时会显示所有历史数据。切换到 TDengine CLI 插入两条新的数据: 此时会显示所有历史数据。切换到 TDengine CLI 插入两条新的数据:
``` ```sql
USE test; USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38); INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22); INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
@ -387,15 +311,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
查看当前活跃的 connector 查看当前活跃的 connector
``` ```shell
confluent local services connect connector status curl http://localhost:8083/connectors
``` ```
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload 如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload
``` ```shell
confluent local services connect connector unload TDengineSinkConnector curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
``` ```
## 配置参考 ## 配置参考
@ -442,15 +366,12 @@ confluent local services connect connector unload TDengineSourceConnector
## 其他说明 ## 其他说明
1. 插件的安装位置可以自定义请参考官方文档https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。 1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>
2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
## 问题反馈 ## 问题反馈
无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues 无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:<https://github.com/taosdata/kafka-connect-tdengine/issues>
## 参考 ## 参考
1. https://www.confluent.io/what-is-apache-kafka 1. <https://kafka.apache.org/documentation/>
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
3. https://docs.confluent.io/platform/current/platform.html

View File

@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 3.0.5.0
<Release type="tdengine" version="3.0.5.0" />
## 3.0.4.2 ## 3.0.4.2
<Release type="tdengine" version="3.0.4.2" /> <Release type="tdengine" version="3.0.4.2" />

View File

@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 2.5.1
<Release type="tools" version="2.5.1" />
## 2.5.0 ## 2.5.0
<Release type="tools" version="2.5.0" /> <Release type="tools" version="2.5.0" />

View File

@ -0,0 +1,70 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata</groupId>
<artifactId>consumer</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.1</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>30.1.1-jre</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.3.0</version>
<executions>
<execution>
<id>ConsumerDemo</id>
<configuration>
<finalName>ConsumerDemo</finalName>
<archive>
<manifest>
<mainClass>com.taosdata.ConsumerDemo</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,52 @@
# How to Run the Consumer Demo Code on Linux
The TDengine consumer demo is organized as a Maven project so that you can easily compile, package, and run it. If Maven is not installed on your server, you can install it with:
```
sudo apt-get install maven
```
## Install the TDengine Client and taosAdapter
Make sure the TDengine client is already installed in your development environment.
Download the TDengine package from our website: ``https://www.taosdata.com/cn/all-downloads/`` and install the client.
## Run the Consumer Demo with the Maven exec plugin
Run the command:
```
mvn clean compile exec:java -Dexec.mainClass="com.taosdata.ConsumerDemo"
```
## Custom configuration
```shell
# the host of TDengine server
export TAOS_HOST="127.0.0.1"
# the port of TDengine server
export TAOS_PORT="6041"
# the consumer type, can be "ws" or "jni"
export TAOS_TYPE="ws"
# the number of consumers
export TAOS_JDBC_CONSUMER_NUM="1"
# the max number of poll batches processed concurrently per consumer
export TAOS_JDBC_PROCESSOR_NUM="2"
# the number of records to be consumed per processor per second
export TAOS_JDBC_RATE_PER_PROCESSOR="1000"
# poll wait time in ms
export TAOS_JDBC_POLL_SLEEP="100"
```
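For example (all values below are placeholders, not recommendations), you could point the demo at a remote taosAdapter and lower the per-processor rate before starting it with the Maven command shown above:
```shell
export TAOS_HOST="192.168.1.98"
export TAOS_PORT="6041"
export TAOS_JDBC_RATE_PER_PROCESSOR="200"
mvn clean compile exec:java -Dexec.mainClass="com.taosdata.ConsumerDemo"
```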
## Run the Consumer Demo from the jar
To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/consumer-demo`` and execute:
```
mvn clean package assembly:single
```
To run ConsumerDemo.jar, go to ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
```
java -jar target/ConsumerDemo-jar-with-dependencies.jar
```

View File

@ -0,0 +1,43 @@
package com.taosdata;
import java.sql.Timestamp;
public class Bean {
private Timestamp ts;
private Integer c1;
private String c2;
public Timestamp getTs() {
return ts;
}
public void setTs(Timestamp ts) {
this.ts = ts;
}
public Integer getC1() {
return c1;
}
public void setC1(Integer c1) {
this.c1 = c1;
}
public String getC2() {
return c2;
}
public void setC2(String c2) {
this.c2 = c2;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Bean {");
sb.append("ts=").append(ts);
sb.append(", c1=").append(c1);
sb.append(", c2='").append(c2).append('\'');
sb.append('}');
return sb.toString();
}
}

View File

@ -0,0 +1,6 @@
package com.taosdata;
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
public class BeanDeserializer extends ReferenceDeserializer<Bean> {
}

View File

@ -0,0 +1,78 @@
package com.taosdata;
public class Config {
public static final String TOPIC = "test_consumer";
public static final String TAOS_HOST = "127.0.0.1";
public static final String TAOS_PORT = "6041";
public static final String TAOS_TYPE = "ws";
public static final int TAOS_JDBC_CONSUMER_NUM = 1;
public static final int TAOS_JDBC_PROCESSOR_NUM = 2;
public static final int TAOS_JDBC_RATE_PER_PROCESSOR = 1000;
public static final int TAOS_JDBC_POLL_SLEEP = 100;
private final int consumerNum;
private final int processCapacity;
private final int rate;
private final int pollSleep;
private final String type;
private final String host;
private final String port;
public Config(String type, String host, String port, int consumerNum, int processCapacity, int rate, int pollSleep) {
this.type = type;
this.consumerNum = consumerNum;
this.processCapacity = processCapacity;
this.rate = rate;
this.pollSleep = pollSleep;
this.host = host;
this.port = port;
}
public int getConsumerNum() {
return consumerNum;
}
public int getProcessCapacity() {
return processCapacity;
}
public int getRate() {
return rate;
}
public int getPollSleep() {
return pollSleep;
}
public String getHost() {
return host;
}
public String getPort() {
return port;
}
public String getType() {
return type;
}
public static Config getFromENV() {
String host = System.getenv("TAOS_HOST") != null ? System.getenv("TAOS_HOST") : TAOS_HOST;
String port = System.getenv("TAOS_PORT") != null ? System.getenv("TAOS_PORT") : TAOS_PORT;
String type = System.getenv("TAOS_TYPE") != null ? System.getenv("TAOS_TYPE") : TAOS_TYPE;
String c = System.getenv("TAOS_JDBC_CONSUMER_NUM");
int num = c != null ? Integer.parseInt(c) : TAOS_JDBC_CONSUMER_NUM;
String p = System.getenv("TAOS_JDBC_PROCESSOR_NUM");
int capacity = p != null ? Integer.parseInt(p) : TAOS_JDBC_PROCESSOR_NUM;
String r = System.getenv("TAOS_JDBC_RATE_PER_PROCESSOR");
int rate = r != null ? Integer.parseInt(r) : TAOS_JDBC_RATE_PER_PROCESSOR;
String s = System.getenv("TAOS_JDBC_POLL_SLEEP");
int sleep = s != null ? Integer.parseInt(s) : TAOS_JDBC_POLL_SLEEP;
return new Config(type, host, port, num, capacity, rate, sleep);
}
}

View File

@ -0,0 +1,65 @@
package com.taosdata;
import com.taosdata.jdbc.tmq.TMQConstants;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import static com.taosdata.Config.*;
public class ConsumerDemo {
public static void main(String[] args) throws SQLException {
// Config
Config config = Config.getFromENV();
// Generate mock data before starting the consumers
mockData();
Properties prop = new Properties();
prop.setProperty(TMQConstants.CONNECT_TYPE, config.getType());
prop.setProperty(TMQConstants.BOOTSTRAP_SERVERS, config.getHost() + ":" + config.getPort());
prop.setProperty(TMQConstants.CONNECT_USER, "root");
prop.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
prop.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
prop.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
prop.setProperty(TMQConstants.GROUP_ID, "gId");
prop.setProperty(TMQConstants.VALUE_DESERIALIZER, "com.taosdata.BeanDeserializer");
for (int i = 0; i < config.getConsumerNum() - 1; i++) {
new Thread(new Worker(prop, config)).start();
}
new Worker(prop, config).run();
}
public static void mockData() throws SQLException {
String dbName = "test_consumer";
String tableName = "st";
String url = "jdbc:TAOS-RS://" + TAOS_HOST + ":" + TAOS_PORT + "/?user=root&password=taosdata&batchfetch=true";
Connection connection = DriverManager.getConnection(url);
Statement statement = connection.createStatement();
statement.executeUpdate("create database if not exists " + dbName + " WAL_RETENTION_PERIOD 3650");
statement.executeUpdate("use " + dbName);
statement.executeUpdate("create table if not exists " + tableName + " (ts timestamp, c1 int, c2 nchar(100)) ");
statement.executeUpdate("create topic if not exists " + TOPIC + " as select ts, c1, c2 from " + tableName);
ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(r -> {
Thread t = new Thread(r);
t.setName("mock-data-thread-" + t.getId());
return t;
});
AtomicInteger atomic = new AtomicInteger();
scheduledExecutorService.scheduleWithFixedDelay(() -> {
int i = atomic.getAndIncrement();
try {
statement.executeUpdate("insert into " + tableName + " values(now, " + i + ",'" + i + "')");
} catch (SQLException e) {
// ignore
}
}, 0, 10, TimeUnit.MILLISECONDS);
}
}

View File

@ -0,0 +1,60 @@
package com.taosdata;
import com.google.common.util.concurrent.RateLimiter;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TaosConsumer;
import java.sql.SQLException;
import java.time.Duration;
import java.time.LocalDateTime;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.Semaphore;
public class Worker implements Runnable {
int sleepTime;
int rate;
ForkJoinPool pool = new ForkJoinPool();
Semaphore semaphore;
TaosConsumer<Bean> consumer;
public Worker(Properties prop, Config config) throws SQLException {
consumer = new TaosConsumer<>(prop);
consumer.subscribe(Collections.singletonList(Config.TOPIC));
semaphore = new Semaphore(config.getProcessCapacity());
sleepTime = config.getPollSleep();
rate = config.getRate();
}
@Override
public void run() {
while (!Thread.interrupted()) {
try {
// only poll when a processing slot is available (limits in-flight batches)
if (semaphore.tryAcquire()) {
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
pool.submit(() -> {
RateLimiter limiter = RateLimiter.create(rate);
try {
for (ConsumerRecord<Bean> record : records) {
// rate limiting: cap the number of records processed per second
limiter.acquire();
// business logic: handle the consumed record
System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
}
} finally {
semaphore.release();
}
});
}
} catch (SQLException e) {
e.printStackTrace();
}
}
}
}

View File

@ -5,7 +5,7 @@
#spring.datasource.password=taosdata #spring.datasource.password=taosdata
# datasource config - JDBC-RESTful # datasource config - JDBC-RESTful
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8 spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
spring.datasource.username=root spring.datasource.username=root
spring.datasource.password=taosdata spring.datasource.password=taosdata
spring.datasource.druid.initial-size=5 spring.datasource.druid.initial-size=5

View File

@ -42,27 +42,27 @@ IF (TD_LINUX)
) )
target_link_libraries(tmq target_link_libraries(tmq
taos_static taos
) )
target_link_libraries(stream_demo target_link_libraries(stream_demo
taos_static taos
) )
target_link_libraries(schemaless target_link_libraries(schemaless
taos_static taos
) )
target_link_libraries(prepare target_link_libraries(prepare
taos_static taos
) )
target_link_libraries(demo target_link_libraries(demo
taos_static taos
) )
target_link_libraries(asyncdemo target_link_libraries(asyncdemo
taos_static taos
) )
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)

View File

@ -236,6 +236,7 @@ typedef struct SColumnInfoData {
}; };
SColumnInfo info; // column info SColumnInfo info; // column info
bool hasNull; // if current column data has null value. bool hasNull; // if current column data has null value.
bool reassigned; // if current column data is reassigned.
} SColumnInfoData; } SColumnInfoData;
typedef struct SQueryTableDataCond { typedef struct SQueryTableDataCond {

View File

@ -178,6 +178,7 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue); int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity, int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2); const SColumnInfoData* pSource, int32_t numOfRow2);
@ -244,6 +245,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
tb_uid_t suid); tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId); char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList); void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList);

View File

@ -82,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
// mnode // mnode
extern int64_t tsMndSdbWriteDelta; extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention; extern int64_t tsMndLogRetention;
extern bool tsMndSkipGrant;
// monitor // monitor
extern bool tsEnableMonitor; extern bool tsEnableMonitor;

View File

@ -16,13 +16,13 @@
#ifndef TDENGINE_STORAGEAPI_H #ifndef TDENGINE_STORAGEAPI_H
#define TDENGINE_STORAGEAPI_H #define TDENGINE_STORAGEAPI_H
#include "tsimplehash.h"
#include "tscalablebf.h"
#include "taosdef.h"
#include "tmsg.h"
#include "tcommon.h"
#include "index.h"
#include "function.h" #include "function.h"
#include "index.h"
#include "taosdef.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tscalablebf.h"
#include "tsimplehash.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -46,7 +46,7 @@ typedef struct SMetaEntry {
int8_t type; int8_t type;
int8_t flags; // TODO: need refactor? int8_t flags; // TODO: need refactor?
tb_uid_t uid; tb_uid_t uid;
char * name; char* name;
union { union {
struct { struct {
SSchemaWrapper schemaRow; SSchemaWrapper schemaRow;
@ -57,43 +57,45 @@ typedef struct SMetaEntry {
int64_t ctime; int64_t ctime;
int32_t ttlDays; int32_t ttlDays;
int32_t commentLen; int32_t commentLen;
char * comment; char* comment;
tb_uid_t suid; tb_uid_t suid;
uint8_t *pTags; uint8_t* pTags;
} ctbEntry; } ctbEntry;
struct { struct {
int64_t ctime; int64_t ctime;
int32_t ttlDays; int32_t ttlDays;
int32_t commentLen; int32_t commentLen;
char * comment; char* comment;
int32_t ncid; // next column id int32_t ncid; // next column id
SSchemaWrapper schemaRow; SSchemaWrapper schemaRow;
} ntbEntry; } ntbEntry;
struct { struct {
STSma *tsma; STSma* tsma;
} smaEntry; } smaEntry;
}; };
uint8_t *pBuf; uint8_t* pBuf;
} SMetaEntry; } SMetaEntry;
typedef struct SMetaReader { typedef struct SMetaReader {
int32_t flags; int32_t flags;
void * pMeta; void* pMeta;
SDecoder coder; SDecoder coder;
SMetaEntry me; SMetaEntry me;
void * pBuf; void* pBuf;
int32_t szBuf; int32_t szBuf;
struct SStoreMeta* pAPI; struct SStoreMeta* pAPI;
} SMetaReader; } SMetaReader;
typedef struct SMTbCursor { typedef struct SMTbCursor {
void * pDbc; void* pMeta;
void * pKey; void* pDbc;
void * pVal; void* pKey;
void* pVal;
int32_t kLen; int32_t kLen;
int32_t vLen; int32_t vLen;
SMetaReader mr; SMetaReader mr;
int8_t paused;
} SMTbCursor; } SMTbCursor;
typedef struct SRowBuffPos { typedef struct SRowBuffPos {
@ -107,22 +109,22 @@ typedef struct SRowBuffPos {
typedef struct SMetaTableInfo { typedef struct SMetaTableInfo {
int64_t suid; int64_t suid;
int64_t uid; int64_t uid;
SSchemaWrapper *schema; SSchemaWrapper* schema;
char tbName[TSDB_TABLE_NAME_LEN]; char tbName[TSDB_TABLE_NAME_LEN];
} SMetaTableInfo; } SMetaTableInfo;
typedef struct SSnapContext { typedef struct SSnapContext {
SMeta * pMeta; // todo remove it SMeta* pMeta; // todo remove it
int64_t snapVersion; int64_t snapVersion;
void * pCur; void* pCur;
int64_t suid; int64_t suid;
int8_t subType; int8_t subType;
SHashObj * idVersion; SHashObj* idVersion;
SHashObj * suidInfo; SHashObj* suidInfo;
SArray * idList; SArray* idList;
int32_t index; int32_t index;
bool withMeta; bool withMeta;
bool queryMeta; // true-get meta, false-get data bool queryMeta; // true-get meta, false-get data
} SSnapContext; } SSnapContext;
typedef struct { typedef struct {
@ -139,10 +141,9 @@ typedef struct {
// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id); // int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
// bool tqNextBlockInWal(STqReader* pReader, const char* idstr); // bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
// bool tqNextBlockImpl(STqReader *pReader, const char* idstr); // bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid); // int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
// SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); // *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
// int32_t setForSnapShot(SSnapContext *ctx, int64_t uid); // *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);
// int32_t destroySnapContext(SSnapContext *ctx);
// clang-format off // clang-format off
/*-------------------------------------------------new api format---------------------------------------------------*/ /*-------------------------------------------------new api format---------------------------------------------------*/
@ -219,53 +220,64 @@ typedef struct SStoreTqReader {
bool (*tqReaderIsQueriedTable)(); bool (*tqReaderIsQueriedTable)();
bool (*tqReaderCurrentBlockConsumed)(); bool (*tqReaderCurrentBlockConsumed)();
struct SWalReader *(*tqReaderGetWalReader)(); // todo remove it struct SWalReader* (*tqReaderGetWalReader)(); // todo remove it
int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
int32_t (*tqReaderSetSubmitMsg)(); // todo remove it int32_t (*tqReaderSetSubmitMsg)(); // todo remove it
bool (*tqReaderNextBlockFilterOut)(); bool (*tqReaderNextBlockFilterOut)();
} SStoreTqReader; } SStoreTqReader;
typedef struct SStoreSnapshotFn { typedef struct SStoreSnapshotFn {
int32_t (*createSnapshot)(SSnapContext *ctx, int64_t uid); int32_t (*createSnapshot)(SSnapContext* ctx, int64_t uid);
int32_t (*destroySnapshot)(SSnapContext *ctx); int32_t (*destroySnapshot)(SSnapContext* ctx);
SMetaTableInfo (*getMetaTableInfoFromSnapshot)(SSnapContext* ctx); SMetaTableInfo (*getMetaTableInfoFromSnapshot)(SSnapContext* ctx);
int32_t (*getTableInfoFromSnapshot)(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid); int32_t (*getTableInfoFromSnapshot)(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid);
} SStoreSnapshotFn; } SStoreSnapshotFn;
typedef struct SStoreMeta { typedef struct SStoreMeta {
SMTbCursor *(*openTableMetaCursor)(void *pVnode); // metaOpenTbCursor SMTbCursor* (*openTableMetaCursor)(void* pVnode); // metaOpenTbCursor
void (*closeTableMetaCursor)(SMTbCursor *pTbCur); // metaCloseTbCursor void (*closeTableMetaCursor)(SMTbCursor* pTbCur); // metaCloseTbCursor
int32_t (*cursorNext)(SMTbCursor *pTbCur, ETableType jumpTableType); // metaTbCursorNext void (*pauseTableMetaCursor)(SMTbCursor* pTbCur); // metaPauseTbCursor
int32_t (*cursorPrev)(SMTbCursor *pTbCur, ETableType jumpTableType); // metaTbCursorPrev void (*resumeTableMetaCursor)(SMTbCursor* pTbCur, int8_t first); // metaResumeTbCursor
int32_t (*cursorNext)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorNext
int32_t (*cursorPrev)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorPrev
int32_t (*getTableTags)(void *pVnode, uint64_t suid, SArray *uidList); int32_t (*getTableTags)(void* pVnode, uint64_t suid, SArray* uidList);
int32_t (*getTableTagsByUid)(void *pVnode, int64_t suid, SArray *uidList); int32_t (*getTableTagsByUid)(void* pVnode, int64_t suid, SArray* uidList);
const void *(*extractTagVal)(const void *tag, int16_t type, STagVal *tagVal); // todo remove it const void* (*extractTagVal)(const void* tag, int16_t type, STagVal* tagVal); // todo remove it
int32_t (*getTableUidByName)(void *pVnode, char *tbName, uint64_t *uid); int32_t (*getTableUidByName)(void* pVnode, char* tbName, uint64_t* uid);
int32_t (*getTableTypeByName)(void *pVnode, char *tbName, ETableType *tbType); int32_t (*getTableTypeByName)(void* pVnode, char* tbName, ETableType* tbType);
int32_t (*getTableNameByUid)(void *pVnode, uint64_t uid, char *tbName); int32_t (*getTableNameByUid)(void* pVnode, uint64_t uid, char* tbName);
bool (*isTableExisted)(void *pVnode, tb_uid_t uid); bool (*isTableExisted)(void* pVnode, tb_uid_t uid);
int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList); int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen); int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen);
int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes); int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen, double selectivityRatio); bool* acquireRes);
int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio);
void *(*storeGetIndexInfo)(); void* (*storeGetIndexInfo)();
void *(*getInvertIndex)(void* pVnode); void* (*getInvertIndex)(void* pVnode);
int32_t (*getChildTableList)(void *pVnode, int64_t suid, SArray *list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter] int32_t (*getChildTableList)(
int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList void* pVnode, int64_t suid,
void *storeGetVersionRange; SArray* list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
void *storeGetLastTimestamp; int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
void* storeGetVersionRange;
void* storeGetLastTimestamp;
int32_t (*getTableSchema)(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); // tsdbGetTableSchema int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
// db name, vgId, numOfTables, numOfSTables // db name, vgId, numOfTables, numOfSTables
int32_t (*getNumOfChildTables)(void* pVnode, int64_t uid, int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo); int32_t (*getNumOfChildTables)(
void (*getBasicInfo)(void *pVnode, const char **dbname, int32_t *vgId, int64_t* numOfTables, int64_t* numOfNormalTables);// vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta); void* pVnode, int64_t uid,
int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int64_t (*getNumOfRowsInMem)(void* pVnode); int64_t (*getNumOfRowsInMem)(void* pVnode);
/** /**
@ -276,24 +288,24 @@ int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
} SStoreMeta; } SStoreMeta;
typedef struct SStoreMetaReader { typedef struct SStoreMetaReader {
void (*initReader)(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta* pAPI); void (*initReader)(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI);
void (*clearReader)(SMetaReader *pReader); void (*clearReader)(SMetaReader* pReader);
void (*readerReleaseLock)(SMetaReader *pReader); void (*readerReleaseLock)(SMetaReader* pReader);
int32_t (*getTableEntryByUid)(SMetaReader *pReader, tb_uid_t uid); int32_t (*getTableEntryByUid)(SMetaReader* pReader, tb_uid_t uid);
int32_t (*getTableEntryByName)(SMetaReader *pReader, const char *name); int32_t (*getTableEntryByName)(SMetaReader* pReader, const char* name);
int32_t (*getEntryGetUidCache)(SMetaReader *pReader, tb_uid_t uid); int32_t (*getEntryGetUidCache)(SMetaReader* pReader, tb_uid_t uid);
} SStoreMetaReader; } SStoreMetaReader;
typedef struct SUpdateInfo { typedef struct SUpdateInfo {
SArray *pTsBuckets; SArray* pTsBuckets;
uint64_t numBuckets; uint64_t numBuckets;
SArray *pTsSBFs; SArray* pTsSBFs;
uint64_t numSBFs; uint64_t numSBFs;
int64_t interval; int64_t interval;
int64_t watermark; int64_t watermark;
TSKEY minTS; TSKEY minTS;
SScalableBf *pCloseWinSBF; SScalableBf* pCloseWinSBF;
SHashObj *pMap; SHashObj* pMap;
uint64_t maxDataVersion; uint64_t maxDataVersion;
} SUpdateInfo; } SUpdateInfo;
@ -312,15 +324,15 @@ typedef struct SStateStore {
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal); int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
void (*streamStateFreeVal)(void* val); void (*streamStateFreeVal)(void* val);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen); int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key); bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateGetByPos)(SStreamState* pState, void* pos, void** pVal); int32_t (*streamStateGetByPos)(SStreamState* pState, void* pos, void** pVal);
int32_t (*streamStateDel)(SStreamState* pState, const SWinKey* key); int32_t (*streamStateDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateClear)(SStreamState* pState); int32_t (*streamStateClear)(SStreamState* pState);
void (*streamStateSetNumber)(SStreamState* pState, int32_t number); void (*streamStateSetNumber)(SStreamState* pState, int32_t number);
int32_t (*streamStateSaveInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen); int32_t (*streamStateSaveInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen); int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
@ -331,36 +343,37 @@ typedef struct SStateStore {
int32_t (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur); int32_t (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
int32_t (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur); int32_t (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key); SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key);
SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key); SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key); SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key); SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur); void (*streamStateFreeCur)(SStreamStateCur* pCur);
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen); int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen); int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen); int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen); int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key); int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
int32_t (*streamStateSessionClear)(SStreamState* pState); int32_t (*streamStateSessionClear)(SStreamState* pState);
int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen); int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen, int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen); state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey); int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark); SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol); TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts); bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo *pInfo, int64_t tbUid); bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
void (*updateInfoDestroy)(SUpdateInfo *pInfo); void (*updateInfoDestroy)(SUpdateInfo* pInfo);
SUpdateInfo* (*updateInfoInitP)(SInterval *pInterval, int64_t watermark); SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo *pInfo); void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo *pInfo); void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void *buf, int32_t bufLen, const SUpdateInfo *pInfo); int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);
int32_t (*updateInfoDeserialize)(void *buf, int32_t bufLen, SUpdateInfo *pInfo); int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key); SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key); SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
@ -374,11 +387,11 @@ typedef struct SStateStore {
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState); bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);
SStreamState* (*streamStateOpen)(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages); SStreamState* (*streamStateOpen)(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
void (*streamStateClose)(SStreamState* pState, bool remove); void (*streamStateClose)(SStreamState* pState, bool remove);
int32_t (*streamStateBegin)(SStreamState* pState); int32_t (*streamStateBegin)(SStreamState* pState);
int32_t (*streamStateCommit)(SStreamState* pState); int32_t (*streamStateCommit)(SStreamState* pState);
void (*streamStateDestroy)(SStreamState* pState, bool remove); void (*streamStateDestroy)(SStreamState* pState, bool remove);
int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark); int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark);
} SStateStore; } SStateStore;
typedef struct SStorageAPI { typedef struct SStorageAPI {

View File

@ -163,6 +163,7 @@ typedef struct {
int64_t checkPointId; int64_t checkPointId;
int32_t taskId; int32_t taskId;
int64_t streamId; int64_t streamId;
int64_t streamBackendRid;
} SStreamState; } SStreamState;
typedef struct SFunctionStateStore { typedef struct SFunctionStateStore {

View File

@ -328,6 +328,8 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* p
SNode* nodesListGetNode(SNodeList* pList, int32_t index); SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index); SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList); void nodesDestroyList(SNodeList* pList);
bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList);
// Only clear the linked list structure, without releasing the elements inside // Only clear the linked list structure, without releasing the elements inside
void nodesClearList(SNodeList* pList); void nodesClearList(SNodeList* pList);
@ -346,6 +348,7 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon
void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext); void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext);
bool nodesEqualNode(const SNode* a, const SNode* b); bool nodesEqualNode(const SNode* a, const SNode* b);
bool nodesMatchNode(const SNode* pSub, const SNode* pNode);
SNode* nodesCloneNode(const SNode* pNode); SNode* nodesCloneNode(const SNode* pNode);
SNodeList* nodesCloneList(const SNodeList* pList); SNodeList* nodesCloneList(const SNodeList* pList);
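Note: nodesMatchNode() and nodesListMatch() join the existing nodesEqualNode()/nodesCloneNode() helpers. Below is a minimal sketch of the kind of coverage check they enable; the helper name exprListIsCovered and the exact matching semantics (every node of the sub-list matched by some node of the list) are assumptions, not taken from this commit.

// Sketch only: assumes nodesListMatch(pList, pSubList) returns true when every
// node in pSubList is matched by a node in pList.
static bool exprListIsCovered(const SNodeList* pProvided, const SNodeList* pRequired) {
  if (NULL == pRequired) {
    return true;  // nothing to satisfy
  }
  return nodesListMatch(pProvided, pRequired);
}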

View File

@ -241,6 +241,12 @@ typedef enum EFillMode {
FILL_MODE_NEXT FILL_MODE_NEXT
} EFillMode; } EFillMode;
typedef enum ETimeLineMode {
TIME_LINE_NONE = 1,
TIME_LINE_MULTI,
TIME_LINE_GLOBAL,
} ETimeLineMode;
typedef struct SFillNode { typedef struct SFillNode {
ENodeType type; // QUERY_NODE_FILL ENodeType type; // QUERY_NODE_FILL
EFillMode mode; EFillMode mode;
@ -263,50 +269,50 @@ typedef struct SCaseWhenNode {
} SCaseWhenNode; } SCaseWhenNode;
typedef struct SSelectStmt { typedef struct SSelectStmt {
ENodeType type; // QUERY_NODE_SELECT_STMT ENodeType type; // QUERY_NODE_SELECT_STMT
bool isDistinct; bool isDistinct;
SNodeList* pProjectionList; SNodeList* pProjectionList;
SNode* pFromTable; SNode* pFromTable;
SNode* pWhere; SNode* pWhere;
SNodeList* pPartitionByList; SNodeList* pPartitionByList;
SNodeList* pTags; // for create stream SNodeList* pTags; // for create stream
SNode* pSubtable; // for create stream SNode* pSubtable; // for create stream
SNode* pWindow; SNode* pWindow;
SNodeList* pGroupByList; // SGroupingSetNode SNodeList* pGroupByList; // SGroupingSetNode
SNode* pHaving; SNode* pHaving;
SNode* pRange; SNode* pRange;
SNode* pEvery; SNode* pEvery;
SNode* pFill; SNode* pFill;
SNodeList* pOrderByList; // SOrderByExprNode SNodeList* pOrderByList; // SOrderByExprNode
SLimitNode* pLimit; SLimitNode* pLimit;
SLimitNode* pSlimit; SLimitNode* pSlimit;
STimeWindow timeRange; STimeWindow timeRange;
char stmtName[TSDB_TABLE_NAME_LEN]; char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision; uint8_t precision;
int32_t selectFuncNum; int32_t selectFuncNum;
int32_t returnRows; // EFuncReturnRows int32_t returnRows; // EFuncReturnRows
bool isEmptyResult; ETimeLineMode timeLineResMode;
bool isTimeLineResult; bool isEmptyResult;
bool isSubquery; bool isSubquery;
bool hasAggFuncs; bool hasAggFuncs;
bool hasRepeatScanFuncs; bool hasRepeatScanFuncs;
bool hasIndefiniteRowsFunc; bool hasIndefiniteRowsFunc;
bool hasMultiRowsFunc; bool hasMultiRowsFunc;
bool hasSelectFunc; bool hasSelectFunc;
bool hasSelectValFunc; bool hasSelectValFunc;
bool hasOtherVectorFunc; bool hasOtherVectorFunc;
bool hasUniqueFunc; bool hasUniqueFunc;
bool hasTailFunc; bool hasTailFunc;
bool hasInterpFunc; bool hasInterpFunc;
bool hasInterpPseudoColFunc; bool hasInterpPseudoColFunc;
bool hasLastRowFunc; bool hasLastRowFunc;
bool hasLastFunc; bool hasLastFunc;
bool hasTimeLineFunc; bool hasTimeLineFunc;
bool hasUdaf; bool hasUdaf;
bool hasStateKey; bool hasStateKey;
bool onlyHasKeepOrderFunc; bool onlyHasKeepOrderFunc;
bool groupSort; bool groupSort;
bool tagScan; bool tagScan;
} SSelectStmt; } SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType; typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@ -321,6 +327,7 @@ typedef struct SSetOperator {
SNode* pLimit; SNode* pLimit;
char stmtName[TSDB_TABLE_NAME_LEN]; char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision; uint8_t precision;
ETimeLineMode timeLineResMode;
} SSetOperator; } SSetOperator;
typedef enum ESqlClause { typedef enum ESqlClause {
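Note: timeLineResMode replaces the old boolean isTimeLineResult on SSelectStmt (and is also added to SSetOperator), so the planner can distinguish "no timeline", "ordered per partition/group", and "one globally ordered timeline". The sketch below shows the kind of check this enables; the helper name and the interpretation of TIME_LINE_MULTI are assumptions, not code from this commit.

// Sketch only, not from this commit.
static bool resultHasGlobalTimeline(const SSelectStmt* pSelect) {
  switch (pSelect->timeLineResMode) {
    case TIME_LINE_GLOBAL:
      return true;          // rows form a single ordered timeline
    case TIME_LINE_MULTI:   // assumed: ordered only within each partition/group
    case TIME_LINE_NONE:
    default:
      return false;
  }
}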

View File

@ -343,6 +343,7 @@ struct SStreamTask {
int64_t checkpointingId; int64_t checkpointingId;
int32_t checkpointAlignCnt; int32_t checkpointAlignCnt;
struct SStreamMeta* pMeta; struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
}; };
// meta // meta
@ -360,7 +361,6 @@ typedef struct SStreamMeta {
SRWLatch lock; SRWLatch lock;
int32_t walScanCounter; int32_t walScanCounter;
void* streamBackend; void* streamBackend;
int32_t streamBackendId;
int64_t streamBackendRid; int64_t streamBackendRid;
SHashObj* pTaskBackendUnique; SHashObj* pTaskBackendUnique;
} SStreamMeta; } SStreamMeta;
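Note: SStreamTask gains pNameMap, and the sink path further down switches to buildCtbNameByGroupIdImpl(), which suggests caching generated child-table names per group id. The sketch below is an assumption of how such a cache could be consulted, using the simple-hash API (tSimpleHashGet/tSimpleHashPut) and the buildCtbNameByGroupIdImpl() signature introduced in this change set; it is not code from the commit.

// Sketch under the assumptions stated above.
static const char* lookupOrBuildCtbName(SStreamTask* pTask, const char* stbFullName, uint64_t groupId) {
  char* pName = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(groupId));
  if (pName != NULL) {
    return pName;  // name already generated for this group id
  }
  char name[TSDB_TABLE_NAME_LEN + 1] = {0};
  if (buildCtbNameByGroupIdImpl(stbFullName, groupId, name) != TSDB_CODE_SUCCESS) {
    return NULL;
  }
  tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(groupId), name, strlen(name) + 1);
  return tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(groupId));
}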

View File

@ -22,21 +22,20 @@ extern "C" {
// If the error is in a third-party library, place this header file under the third-party library header file. // If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section. // When you want to use this feature, you should find or add the same function in the following section.
// #if !defined(WINDOWS) #if !defined(WINDOWS)
// #ifndef ALLOW_FORBID_FUNC #ifndef ALLOW_FORBID_FUNC
// #define malloc MALLOC_FUNC_TAOS_FORBID #define malloc MALLOC_FUNC_TAOS_FORBID
// #define calloc CALLOC_FUNC_TAOS_FORBID #define calloc CALLOC_FUNC_TAOS_FORBID
// #define realloc REALLOC_FUNC_TAOS_FORBID #define realloc REALLOC_FUNC_TAOS_FORBID
// #define free FREE_FUNC_TAOS_FORBID #define free FREE_FUNC_TAOS_FORBID
// #ifdef strdup #ifdef strdup
// #undef strdup #undef strdup
// #define strdup STRDUP_FUNC_TAOS_FORBID #define strdup STRDUP_FUNC_TAOS_FORBID
// #endif #endif
// #endif // ifndef ALLOW_FORBID_FUNC #endif // ifndef ALLOW_FORBID_FUNC
// #endif // if !defined(WINDOWS) #endif // if !defined(WINDOWS)
// // #define taosMemoryFree malloc
// #define taosMemoryMalloc malloc // #define taosMemoryMalloc malloc
// #define taosMemoryCalloc calloc // #define taosMemoryCalloc calloc
// #define taosMemoryRealloc realloc // #define taosMemoryRealloc realloc
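Note: this change un-comments the "forbidden function" macros for non-Windows builds, so any file that includes this header and calls the libc allocators directly stops compiling unless it defines ALLOW_FORBID_FUNC; code is pushed toward the taosMemory* wrappers. The fragment below is only an illustration of that effect, not code from the repository.

/* Illustration only: with the forbid macros active on non-Windows builds,
 *   void* p = malloc(128);
 * no longer compiles in files including this header, because it expands to
 * MALLOC_FUNC_TAOS_FORBID(128), an intentionally undefined identifier.
 * The sanctioned wrapper still works: */
void* good_alloc(void) { return taosMemoryMalloc(128); }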

View File

@ -32,7 +32,7 @@ extern "C" {
#define TD_VER_MAX UINT64_MAX // TODO: use the real max version from query handle #define TD_VER_MAX UINT64_MAX // TODO: use the real max version from query handle
// Bytes for each type. // Bytes for each type.
extern const int32_t TYPE_BYTES[17]; extern const int32_t TYPE_BYTES[21];
// TODO: replace and remove code below // TODO: replace and remove code below
#define CHAR_BYTES sizeof(char) #define CHAR_BYTES sizeof(char)

View File

@ -80,5 +80,4 @@ fi
# there can not libtaos.so*, otherwise ln -s error # there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || : ${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || : [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :

View File

@ -40,7 +40,6 @@ else
${csudo}rm -f ${inc_link_dir}/taosudf.h || : ${csudo}rm -f ${inc_link_dir}/taosudf.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || : [ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
[ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : [ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
${csudo}rm -f ${log_link_dir} || : ${csudo}rm -f ${log_link_dir} || :

View File

@ -31,7 +31,6 @@ cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}" libfile="libtaos.so.${tdengine_ver}"
wslibfile="libtaosws.so" wslibfile="libtaosws.so"
rocksdblib="librocksdb.so.8"
# create install dir # create install dir
install_home_path="/usr/local/taos" install_home_path="/usr/local/taos"
@ -95,7 +94,6 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||:
[ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||: [ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include

View File

@ -45,7 +45,6 @@ echo buildroot: %{buildroot}
libfile="libtaos.so.%{_version}" libfile="libtaos.so.%{_version}"
wslibfile="libtaosws.so" wslibfile="libtaosws.so"
rocksdblib="librocksdb.so.8"
# create install path, and cp file # create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin mkdir -p %{buildroot}%{homepath}/bin
@ -93,7 +92,6 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
fi fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||: [ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||:
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
@ -176,7 +174,6 @@ fi
# there can not libtaos.so*, otherwise ln -s error # there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f %{homepath}/driver/libtaos* || : ${csudo}rm -f %{homepath}/driver/libtaos* || :
${csudo}rm -f %{homepath}/driver/librocksdb* || :
#Scripts executed after installation #Scripts executed after installation
%post %post
@ -222,7 +219,6 @@ if [ $1 -eq 0 ];then
${csudo}rm -f ${inc_link_dir}/taoserror.h || : ${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || : ${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
${csudo}rm -f ${log_link_dir} || : ${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || : ${csudo}rm -f ${data_link_dir} || :

View File

@ -250,30 +250,18 @@ function install_lib() {
# Remove links # Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
#${csudo}rm -rf ${v15_java_app_dir} || : #${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || : [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || :
${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || :
[ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || : [ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi fi

View File

@ -111,11 +111,9 @@ fi
if [ "$osType" == "Darwin" ]; then if [ "$osType" == "Darwin" ]; then
lib_files="${build_dir}/lib/libtaos.${version}.dylib" lib_files="${build_dir}/lib/libtaos.${version}.dylib"
wslib_files="${build_dir}/lib/libtaosws.dylib" wslib_files="${build_dir}/lib/libtaosws.dylib"
rocksdb_lib_files="${build_dir}/lib/librocksdb.dylib.8.1.1"
else else
lib_files="${build_dir}/lib/libtaos.so.${version}" lib_files="${build_dir}/lib/libtaos.so.${version}"
wslib_files="${build_dir}/lib/libtaosws.so" wslib_files="${build_dir}/lib/libtaosws.so"
rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1"
fi fi
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h" header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
@ -338,7 +336,6 @@ fi
# Copy driver # Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || : [ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || :
# Copy connector # Copy connector
if [ "$verMode" == "cluster" ]; then if [ "$verMode" == "cluster" ]; then

View File

@ -202,19 +202,10 @@ function install_lib() {
log_print "start install lib from ${lib_dir} to ${lib_link_dir}" log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
${csudo}rm -f ${lib_link_dir}/libtaos* || : ${csudo}rm -f ${lib_link_dir}/libtaos* || :
${csudo}rm -f ${lib64_link_dir}/libtaos* || : ${csudo}rm -f ${lib64_link_dir}/libtaos* || :
#rocksdb
[ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
[ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
#rocksdb
[ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
[ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
[ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || : [ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
[ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || : [ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
@ -223,7 +214,6 @@ function install_lib() {
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path} [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
fi fi

View File

@ -142,11 +142,9 @@ function clean_local_bin() {
function clean_lib() { function clean_lib() {
# Remove link # Remove link
${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || : [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
[ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || : [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
#${csudo}rm -rf ${v15_java_app_dir} || : #${csudo}rm -rf ${v15_java_app_dir} || :

View File

@ -23,6 +23,20 @@
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) { int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
if (pColumnInfoData->reassigned) {
int32_t totalSize = 0;
for (int32_t row = 0; row < numOfRows; ++row) {
char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
int32_t colSize = 0;
if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
totalSize += colSize;
}
return totalSize;
}
return pColumnInfoData->varmeta.length; return pColumnInfoData->varmeta.length;
} else { } else {
if (pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) { if (pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) {
@ -122,6 +136,29 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
return 0; return 0;
} }
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) {
int32_t type = pColumnInfoData->info.type;
if (IS_VAR_DATA_TYPE(type)) {
int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
}
SVarColAttr* pAttr = &pColumnInfoData->varmeta;
pColumnInfoData->varmeta.offset[dstRowIdx] = pColumnInfoData->varmeta.offset[srcRowIdx];
pColumnInfoData->reassigned = true;
} else {
memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * dstRowIdx, pData, pColumnInfoData->info.bytes);
colDataClearNull_f(pColumnInfoData->nullbitmap, dstRowIdx);
}
return 0;
}
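Note: colDataReassignVal() lets a destination row of a var-length column reuse the payload of a source row by copying only the offset and flagging the column as reassigned; once offsets can repeat, varmeta.length no longer reflects the serialized size, which is why colDataGetLength(), blockDataToBuf() and blockEncode() in this file now walk the offsets row by row. The snippet below is a usage sketch with assumed row indexes, not code from the commit.

// Sketch: duplicate the payload of row 0 into row 5 without copying bytes.
void duplicateVarPayload(SColumnInfoData* pCol, int32_t numOfRows) {
  const char* pSrc = pCol->pData + pCol->varmeta.offset[0];  // payload of row 0
  colDataReassignVal(pCol, 5, 0, pSrc);                      // row 5 now shares row 0's offset
  // pCol->reassigned is set, so the true serialized size must be summed per row:
  int32_t bytes = colDataGetLength(pCol, numOfRows);
  (void)bytes;
}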
static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) { static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -576,8 +613,22 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
*(int32_t*)pStart = dataSize; *(int32_t*)pStart = dataSize;
pStart += sizeof(int32_t); pStart += sizeof(int32_t);
memcpy(pStart, pCol->pData, dataSize); if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) {
pStart += dataSize; for (int32_t row = 0; row < numOfRows; ++row) {
char* pColData = pCol->pData + pCol->varmeta.offset[row];
int32_t colSize = 0;
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
memcpy(pStart, pColData, colSize);
pStart += colSize;
}
} else {
memcpy(pStart, pCol->pData, dataSize);
pStart += dataSize;
}
} }
return 0; return 0;
@ -1587,7 +1638,20 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
int32_t len = colDataGetLength(pColData, rows); int32_t len = colDataGetLength(pColData, rows);
tlen += taosEncodeFixedI32(buf, len); tlen += taosEncodeFixedI32(buf, len);
tlen += taosEncodeBinary(buf, pColData->pData, len); if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) {
for (int32_t row = 0; row < rows; ++row) {
char* pData = pColData->pData + pColData->varmeta.offset[row];
int32_t colSize = 0;
if (pColData->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pData);
} else {
colSize = varDataTLen(pData);
}
tlen += taosEncodeBinary(buf, pData, colSize);
}
} else {
tlen += taosEncodeBinary(buf, pColData->pData, len);
}
} }
return tlen; return tlen;
} }
@ -1950,19 +2014,31 @@ _end:
} }
char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
if (stbFullName[0] == 0) { char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (!pBuf) {
return NULL; return NULL;
} }
int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(pBuf);
return NULL;
}
return pBuf;
}
int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) {
if (stbFullName[0] == 0) {
return TSDB_CODE_FAILED;
}
SArray* tags = taosArrayInit(0, sizeof(SSmlKv)); SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
if (tags == NULL) { if (tags == NULL) {
return NULL; return TSDB_CODE_FAILED;
} }
void* cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (cname == NULL) { if (cname == NULL) {
taosArrayDestroy(tags); taosArrayDestroy(tags);
return NULL; return TSDB_CODE_FAILED;
} }
SSmlKv pTag = {.key = "group_id", SSmlKv pTag = {.key = "group_id",
@ -1984,9 +2060,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
taosArrayDestroy(tags); taosArrayDestroy(tags);
if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) { if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {
return NULL; return TSDB_CODE_FAILED;
} }
return rname.ctbShortName; return TSDB_CODE_SUCCESS;
} }
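Note: the allocating buildCtbNameByGroupId() wrapper is kept, while buildCtbNameByGroupIdImpl() writes into a caller-supplied buffer and returns a code; the tqSinkToTablePipeline() hunk further down uses the latter to avoid an allocation per block. Both call styles, sketched from the signatures above (the surrounding function is illustrative only):

void nameBothWays(const char* stbFullName, uint64_t groupId) {
  // allocating wrapper: caller frees the returned name
  char* pName = buildCtbNameByGroupId(stbFullName, groupId);
  if (pName != NULL) {
    taosMemoryFree(pName);
  }
  // in-place variant: caller owns the buffer, nothing to free on success
  char name[TSDB_TABLE_NAME_LEN + 1] = {0};
  if (buildCtbNameByGroupIdImpl(stbFullName, groupId, name) == TSDB_CODE_SUCCESS) {
    /* use name ... */
  }
}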
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
@ -2051,12 +2127,29 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
data += metaSize; data += metaSize;
dataLen += metaSize; dataLen += metaSize;
colSizes[col] = colDataGetLength(pColRes, numOfRows); if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
dataLen += colSizes[col]; colSizes[col] = 0;
if (pColRes->pData != NULL) { for (int32_t row = 0; row < numOfRows; ++row) {
memmove(data, pColRes->pData, colSizes[col]); char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
int32_t colSize = 0;
if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
colSizes[col] += colSize;
dataLen += colSize;
memmove(data, pColData, colSize);
data += colSize;
}
} else {
colSizes[col] = colDataGetLength(pColRes, numOfRows);
dataLen += colSizes[col];
if (pColRes->pData != NULL) {
memmove(data, pColRes->pData, colSizes[col]);
}
data += colSizes[col];
} }
data += colSizes[col];
colSizes[col] = htonl(colSizes[col]); colSizes[col] = htonl(colSizes[col]);
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]); // uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]);

View File

@ -73,6 +73,7 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000;
// mnode // mnode
int64_t tsMndSdbWriteDelta = 200; int64_t tsMndSdbWriteDelta = 200;
int64_t tsMndLogRetention = 2000; int64_t tsMndLogRetention = 2000;
bool tsMndSkipGrant = false;
// monitor // monitor
bool tsEnableMonitor = true; bool tsEnableMonitor = true;
@ -491,6 +492,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, 0) != 0) return -1; if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, 0) != 0) return -1;
if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, 0) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "skipGrant", tsMndSkipGrant, 0) != 0) return -1;
if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1; if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1; if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1;
@ -890,6 +892,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64; tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64;
tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64; tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;
tsMndSkipGrant = cfgGetItem(pCfg, "skipGrant")->bval;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval; tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs)); tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
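Note: tsMndSkipGrant follows the usual three steps for a server option: a global with its default, cfgAddBool() at registration time, and cfgGetItem()->bval when the config is applied; the mndProcessConnectReq() hunk further down then consults the flag to let a connection through even if the password check fails. The sketch restates the recipe for a hypothetical option named "myFlag"; only the cfgAddBool/cfgGetItem calls mirror what this hunk does.

bool tsMyFlag = false;                                          // 1. global with its default

static int32_t addMyCfg(SConfig *pCfg) {
  if (cfgAddBool(pCfg, "myFlag", tsMyFlag, 0) != 0) return -1;  // 2. register the item
  return 0;
}

static void applyMyCfg(SConfig *pCfg) {
  tsMyFlag = cfgGetItem(pCfg, "myFlag")->bval;                  // 3. read it back when applying
}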

View File

@ -17,7 +17,7 @@
#include "ttypes.h" #include "ttypes.h"
#include "tcompression.h" #include "tcompression.h"
const int32_t TYPE_BYTES[17] = { const int32_t TYPE_BYTES[21] = {
-1, // TSDB_DATA_TYPE_NULL -1, // TSDB_DATA_TYPE_NULL
CHAR_BYTES, // TSDB_DATA_TYPE_BOOL CHAR_BYTES, // TSDB_DATA_TYPE_BOOL
CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT
@ -34,6 +34,10 @@ const int32_t TYPE_BYTES[17] = {
INT_BYTES, // TSDB_DATA_TYPE_UINT INT_BYTES, // TSDB_DATA_TYPE_UINT
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
TSDB_MAX_JSON_TAG_LEN, // TSDB_DATA_TYPE_JSON TSDB_MAX_JSON_TAG_LEN, // TSDB_DATA_TYPE_JSON
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_VARBINARY: placeholder, not implemented
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_DECIMAL: placeholder, not implemented
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_BLOB: placeholder, not implemented
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_MEDIUMBLOB: placeholder, not implemented
sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_GEOMETRY sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_GEOMETRY
}; };
@ -57,6 +61,10 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt}, {TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint}, {TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString}, {TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
{TSDB_DATA_TYPE_VARBINARY, 9, 1, "VARBINARY", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_DECIMAL, 7, 1, "DECIMAL", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_BLOB, 4, 1, "BLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_MEDIUMBLOB, 10, 1, "MEDIUMBLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_GEOMETRY, 8, 1, "GEOMETRY", 0, 0, tsCompressString, tsDecompressString}, {TSDB_DATA_TYPE_GEOMETRY, 8, 1, "GEOMETRY", 0, 0, tsCompressString, tsDecompressString},
}; };
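Note: both TYPE_BYTES and tDataTypes are indexed directly by the TSDB_DATA_TYPE_* value, so adding GEOMETRY after the VARBINARY/DECIMAL/BLOB/MEDIUMBLOB ids forces both tables to grow from 17 to 21 slots; otherwise a lookup for one of the new ids would read past the end. Minimal illustration (not repository code):

int32_t fixedBytesOfType(int8_t type) {
  // With only 17 entries, a GEOMETRY type id would index out of bounds here;
  // with 21 entries every defined id, placeholders included, has a slot.
  return TYPE_BYTES[type];
}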

View File

@ -40,6 +40,8 @@ int32_t mndValidateUserPassInfo(SMnode *pMnode, SUserPassVersion *pUsers, int3
int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db); int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db);
int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic); int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic);
int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew);
void mndUserFreeObj(SUserObj *pUser);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -446,7 +446,8 @@ static int32_t mndSetCreateDbUndoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0; return 0;
} }
static int32_t mndSetCreateDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) { static int32_t mndSetCreateDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups,
SUserObj *pUserDuped) {
SSdbRaw *pDbRaw = mndDbActionEncode(pDb); SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
if (pDbRaw == NULL) return -1; if (pDbRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pDbRaw) != 0) return -1; if (mndTransAppendCommitlog(pTrans, pDbRaw) != 0) return -1;
@ -459,6 +460,13 @@ static int32_t mndSetCreateDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *
if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1; if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1;
} }
if (pUserDuped) {
SSdbRaw *pUserRaw = mndUserActionEncode(pUserDuped);
if (pUserRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pUserRaw) != 0) return -1;
if (sdbSetRawStatus(pUserRaw, SDB_STATUS_READY) != 0) return -1;
}
return 0; return 0;
} }
@ -565,6 +573,15 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
return -1; return -1;
} }
// add database privileges for user
SUserObj newUserObj = {0}, *pNewUserDuped = NULL;
if (!pUser->superUser) {
if (mndUserDupObj(pUser, &newUserObj) != 0) goto _OVER;
taosHashPut(newUserObj.readDbs, dbObj.name, strlen(dbObj.name) + 1, dbObj.name, TSDB_FILENAME_LEN);
taosHashPut(newUserObj.writeDbs, dbObj.name, strlen(dbObj.name) + 1, dbObj.name, TSDB_FILENAME_LEN);
pNewUserDuped = &newUserObj;
}
int32_t code = -1; int32_t code = -1;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-db"); STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-db");
if (pTrans == NULL) goto _OVER; if (pTrans == NULL) goto _OVER;
@ -577,7 +594,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
mndTransSetOper(pTrans, MND_OPER_CREATE_DB); mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups, pNewUserDuped) != 0) goto _OVER;
if (mndSetCreateDbRedoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbRedoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER; if (mndSetCreateDbUndoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@ -586,6 +603,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
_OVER: _OVER:
taosMemoryFree(pVgroups); taosMemoryFree(pVgroups);
mndUserFreeObj(&newUserObj);
mndTransDrop(pTrans); mndTransDrop(pTrans);
return code; return code;
} }
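Note: mndCreateDb() now duplicates the creating user, adds the new database to the duplicate's readDbs/writeDbs hashes, and lets mndSetCreateDbCommitLogs() persist that duplicate in the same transaction as the database; mndUserFreeObj() releases the copy on every exit path. The helper below condenses that dup -> modify -> append-commit-log pattern as a sketch only (the function name is invented, error handling is trimmed, and raw ownership on failure is glossed over).

static int32_t grantDbToCreator(STrans *pTrans, SUserObj *pUser, const char *dbFName) {
  SUserObj newUser = {0};
  if (mndUserDupObj(pUser, &newUser) != 0) return -1;                  // deep-copies the privilege hashes
  taosHashPut(newUser.readDbs,  dbFName, strlen(dbFName) + 1, dbFName, TSDB_FILENAME_LEN);
  taosHashPut(newUser.writeDbs, dbFName, strlen(dbFName) + 1, dbFName, TSDB_FILENAME_LEN);

  SSdbRaw *pRaw = mndUserActionEncode(&newUser);                       // rides in the same transaction
  int32_t  code = (pRaw != NULL && mndTransAppendCommitlog(pTrans, pRaw) == 0 &&
                   sdbSetRawStatus(pRaw, SDB_STATUS_READY) == 0) ? 0 : -1;
  mndUserFreeObj(&newUser);                                            // always release the duplicate
  return code;
}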

View File

@ -245,7 +245,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
goto _OVER; goto _OVER;
} }
if (strncmp(connReq.passwd, pUser->pass, TSDB_PASSWORD_LEN - 1) != 0) { if (strncmp(connReq.passwd, pUser->pass, TSDB_PASSWORD_LEN - 1) != 0 && !tsMndSkipGrant) {
mGError("user:%s, failed to login from %s since invalid pass, input:%s", pReq->info.conn.user, ip, connReq.passwd); mGError("user:%s, failed to login from %s since invalid pass, input:%s", pReq->info.conn.user, ip, connReq.passwd);
code = TSDB_CODE_MND_AUTH_FAILURE; code = TSDB_CODE_MND_AUTH_FAILURE;
goto _OVER; goto _OVER;

View File

@ -488,7 +488,7 @@ SHashObj *mndDupUseDbHash(SHashObj *pOld) {
return pNew; return pNew;
} }
static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) { int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) {
memcpy(pNew, pUser, sizeof(SUserObj)); memcpy(pNew, pUser, sizeof(SUserObj));
pNew->authVersion++; pNew->authVersion++;
pNew->updateTime = taosGetTimestampMs(); pNew->updateTime = taosGetTimestampMs();
@ -508,7 +508,7 @@ static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) {
return 0; return 0;
} }
static void mndUserFreeObj(SUserObj *pUser) { void mndUserFreeObj(SUserObj *pUser) {
taosHashCleanup(pUser->readDbs); taosHashCleanup(pUser->readDbs);
taosHashCleanup(pUser->writeDbs); taosHashCleanup(pUser->writeDbs);
taosHashCleanup(pUser->topics); taosHashCleanup(pUser->topics);

View File

@ -103,7 +103,7 @@ target_link_libraries(
# PUBLIC bdb # PUBLIC bdb
# PUBLIC scalar # PUBLIC scalar
PUBLIC rocksdb-shared PUBLIC rocksdb
PUBLIC transport PUBLIC transport
PUBLIC stream PUBLIC stream
PUBLIC index PUBLIC index

View File

@ -107,10 +107,12 @@ struct SQueryNode {
typedef SVCreateTbReq STbCfg; typedef SVCreateTbReq STbCfg;
typedef SVCreateTSmaReq SSmaCfg; typedef SVCreateTSmaReq SSmaCfg;
SMTbCursor *metaOpenTbCursor(void *pVnode); SMTbCursor* metaOpenTbCursor(void* pVnode);
void metaCloseTbCursor(SMTbCursor *pTbCur); void metaCloseTbCursor(SMTbCursor* pTbCur);
int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType); void metaPauseTbCursor(SMTbCursor* pTbCur);
int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType); void metaResumeTbCursor(SMTbCursor* pTbCur, int8_t first);
int32_t metaTbCursorNext(SMTbCursor* pTbCur, ETableType jumpTableType);
int32_t metaTbCursorPrev(SMTbCursor* pTbCur, ETableType jumpTableType);
#endif #endif
@ -146,6 +148,7 @@ int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* p
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList); int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList);
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp); int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid); int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid);
int32_t metaTrimTables(SMeta* pMeta);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids); int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
@ -154,8 +157,8 @@ int32_t metaGetTbTSchemaEx(SMeta* pMeta, tb_uid_t suid, tb_uid_t uid, in
int metaGetTableEntryByName(SMetaReader* pReader, const char* name); int metaGetTableEntryByName(SMetaReader* pReader, const char* name);
int metaAlterCache(SMeta* pMeta, int32_t nPage); int metaAlterCache(SMeta* pMeta, int32_t nPage);
int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid); int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid);
int32_t metaTbGroupCacheClear(SMeta *pMeta, uint64_t suid); int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid);
int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq); int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq);
@ -492,7 +495,6 @@ struct SCompactInfo {
void initStorageAPI(SStorageAPI* pAPI); void initStorageAPI(SStorageAPI* pAPI);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -71,7 +71,7 @@ _err:
} }
bool metaIsTableExist(void *pVnode, tb_uid_t uid) { bool metaIsTableExist(void *pVnode, tb_uid_t uid) {
SVnode* pVnodeObj = pVnode; SVnode *pVnodeObj = pVnode;
metaRLock(pVnodeObj->pMeta); // query uid.idx metaRLock(pVnodeObj->pMeta); // query uid.idx
if (tdbTbGet(pVnodeObj->pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) { if (tdbTbGet(pVnodeObj->pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
@ -143,7 +143,7 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) { int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) {
int code = 0; int code = 0;
SMetaReader mr = {0}; SMetaReader mr = {0};
metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); metaReaderInit(&mr, ((SVnode*)pVnode)->pMeta, 0);
code = metaReaderGetTableEntryByUid(&mr, uid); code = metaReaderGetTableEntryByUid(&mr, uid);
if (code < 0) { if (code < 0) {
metaReaderClear(&mr); metaReaderClear(&mr);
@ -179,7 +179,7 @@ int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) {
SMetaReader *pReader = &mr; SMetaReader *pReader = &mr;
// query name.idx // query name.idx
if (tdbTbGet(((SMeta*)pReader->pMeta)->pNameIdx, tbName, strlen(tbName) + 1, &pReader->pBuf, &pReader->szBuf) < 0) { if (tdbTbGet(((SMeta *)pReader->pMeta)->pNameIdx, tbName, strlen(tbName) + 1, &pReader->pBuf, &pReader->szBuf) < 0) {
terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST; terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
metaReaderClear(&mr); metaReaderClear(&mr);
return -1; return -1;
@ -195,7 +195,7 @@ int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) {
int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) { int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) {
int code = 0; int code = 0;
SMetaReader mr = {0}; SMetaReader mr = {0};
metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); metaReaderInit(&mr, ((SVnode*)pVnode)->pMeta, 0);
code = metaGetTableEntryByName(&mr, tbName); code = metaGetTableEntryByName(&mr, tbName);
if (code == 0) *tbType = mr.me.type; if (code == 0) *tbType = mr.me.type;
@ -222,11 +222,12 @@ SMTbCursor *metaOpenTbCursor(void *pVnode) {
} }
SVnode* pVnodeObj = pVnode; SVnode* pVnodeObj = pVnode;
metaReaderDoInit(&pTbCur->mr, pVnodeObj->pMeta, 0); metaReaderInit(&pTbCur->mr, pVnodeObj->pMeta, 0);
tdbTbcOpen(pVnodeObj->pMeta->pUidIdx, (TBC **)&pTbCur->pDbc, NULL); // tdbTbcMoveToFirst((TBC *)pTbCur->pDbc);
pTbCur->pMeta = pVnodeObj->pMeta;
tdbTbcMoveToFirst((TBC *)pTbCur->pDbc); pTbCur->paused = 1;
metaResumeTbCursor(pTbCur, 1);
return pTbCur; return pTbCur;
} }
@ -234,14 +235,45 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {
if (pTbCur) { if (pTbCur) {
tdbFree(pTbCur->pKey); tdbFree(pTbCur->pKey);
tdbFree(pTbCur->pVal); tdbFree(pTbCur->pVal);
metaReaderClear(&pTbCur->mr); if (!pTbCur->paused) {
if (pTbCur->pDbc) { metaReaderClear(&pTbCur->mr);
tdbTbcClose((TBC *)pTbCur->pDbc); if (pTbCur->pDbc) {
tdbTbcClose((TBC *)pTbCur->pDbc);
}
} }
taosMemoryFree(pTbCur); taosMemoryFree(pTbCur);
} }
} }
void metaPauseTbCursor(SMTbCursor *pTbCur) {
if (!pTbCur->paused) {
metaReaderClear(&pTbCur->mr);
tdbTbcClose((TBC *)pTbCur->pDbc);
pTbCur->paused = 1;
}
}
void metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first) {
if (pTbCur->paused) {
metaReaderInit(&pTbCur->mr, pTbCur->pMeta, 0);
tdbTbcOpen(((SMeta *)pTbCur->pMeta)->pUidIdx, (TBC **)&pTbCur->pDbc, NULL);
if (first) {
tdbTbcMoveToFirst((TBC *)pTbCur->pDbc);
} else {
int c = 0;
tdbTbcMoveTo(pTbCur->pDbc, pTbCur->pKey, pTbCur->kLen, &c);
if (c < 0) {
tdbTbcMoveToPrev(pTbCur->pDbc);
} else {
tdbTbcMoveToNext(pTbCur->pDbc);
}
}
pTbCur->paused = 0;
}
}
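Note: metaPauseTbCursor() releases the reader and the TDB cursor while the cursor object keeps its last key, and metaResumeTbCursor() reopens them, either from the start (first == 1) or repositioned around the remembered key (first == 0). The loop below sketches a long scan that yields periodically; the batch size, the yield point, and the use of jumpTableType to skip super-table entries are assumptions, not code from this commit.

void scanAllTables(void *pVnode) {
  SMTbCursor *pCur = metaOpenTbCursor(pVnode);
  int32_t     n = 0;
  while (metaTbCursorNext(pCur, TSDB_SUPER_TABLE) == 0) {
    /* ... consume pCur->mr.me ... */
    if (++n % 1000 == 0) {
      metaPauseTbCursor(pCur);       // drop the reader/TDB cursor, keep the position
      /* ... let other work run ... */
      metaResumeTbCursor(pCur, 0);   // reopen and continue near the remembered key
    }
  }
  metaCloseTbCursor(pCur);
}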
int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) { int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) {
int ret; int ret;
void *pBuf; void *pBuf;
@ -974,7 +1006,7 @@ typedef struct {
} SIdxCursor; } SIdxCursor;
int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) { int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SMeta *pMeta = ((SVnode*)pVnode)->pMeta; SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
SMetaFltParam *param = arg; SMetaFltParam *param = arg;
int32_t ret = 0; int32_t ret = 0;
@ -1034,7 +1066,7 @@ END:
} }
int32_t metaFilterTableName(void *pVnode, SMetaFltParam *arg, SArray *pUids) { int32_t metaFilterTableName(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SMeta *pMeta = ((SVnode*)pVnode)->pMeta; SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
SMetaFltParam *param = arg; SMetaFltParam *param = arg;
int32_t ret = 0; int32_t ret = 0;
char *buf = NULL; char *buf = NULL;
@ -1101,7 +1133,7 @@ END:
return ret; return ret;
} }
int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) { int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SMeta *pMeta = ((SVnode*)pVnode)->pMeta; SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
SMetaFltParam *param = arg; SMetaFltParam *param = arg;
int32_t ret = 0; int32_t ret = 0;
char *buf = NULL; char *buf = NULL;
@ -1132,7 +1164,7 @@ END:
return 0; return 0;
} }
int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) { int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
SMeta *pMeta = ((SVnode*)pVnode)->pMeta; SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
SMetaFltParam *param = arg; SMetaFltParam *param = arg;
SMetaEntry oStbEntry = {0}; SMetaEntry oStbEntry = {0};
@ -1318,7 +1350,7 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi
} }
int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
SMeta* pMeta = ((SVnode*) pVnode)->pMeta; SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
const int32_t LIMIT = 128; const int32_t LIMIT = 128;
int32_t isLock = false; int32_t isLock = false;
@ -1350,8 +1382,8 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
return 0; return 0;
} }
int32_t metaGetTableTags(void* pVnode, uint64_t suid, SArray *pUidTagInfo) { int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) {
SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode*)pVnode)->pMeta, suid, 1); SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1);
// If len > 0 means there already have uids, and we only want the // If len > 0 means there already have uids, and we only want the
// tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept // tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept
@ -1456,11 +1488,11 @@ _exit:
return code; return code;
} }
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t* numOfTables) { int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables) {
int32_t code = 0; int32_t code = 0;
*numOfTables = 0; *numOfTables = 0;
SVnode* pVnodeObj = pVnode; SVnode *pVnodeObj = pVnode;
metaRLock(pVnodeObj->pMeta); metaRLock(pVnodeObj->pMeta);
// fast path: search cache // fast path: search cache

View File

@ -838,22 +838,96 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
return 0; return 0;
} }
static void metaDropTables(SMeta *pMeta, SArray *tbUids) {
metaWLock(pMeta);
for (int i = 0; i < TARRAY_SIZE(tbUids); ++i) {
tb_uid_t uid = *(tb_uid_t *)taosArrayGet(tbUids, i);
metaDropTableByUid(pMeta, uid, NULL);
metaDebug("batch drop table:%" PRId64, uid);
}
metaULock(pMeta);
}
static int32_t metaFilterTableByHash(SMeta *pMeta, SArray *uidList) {
int32_t code = 0;
// 1. traverse the table entries
// 2. validate each table name using vnodeValidateTableHash
// 3. push the uid of each table that no longer hashes to this vnode into uidList
TBC *pCur;
code = tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
if (code < 0) {
return code;
}
code = tdbTbcMoveToFirst(pCur);
if (code) {
tdbTbcClose(pCur);
return code;
}
void *pData = NULL, *pKey = NULL;
int nData = 0, nKey = 0;
while (1) {
int32_t ret = tdbTbcNext(pCur, &pKey, &nKey, &pData, &nData);
if (ret < 0) {
break;
}
SMetaEntry me = {0};
SDecoder dc = {0};
tDecoderInit(&dc, pData, nData);
metaDecodeEntry(&dc, &me);
if (me.type != TSDB_SUPER_TABLE) {
int32_t ret = vnodeValidateTableHash(pMeta->pVnode, me.name);
if (TSDB_CODE_VND_HASH_MISMATCH == ret) {
taosArrayPush(uidList, &me.uid);
}
}
tDecoderClear(&dc);
}
tdbFree(pData);
tdbFree(pKey);
tdbTbcClose(pCur);
return 0;
}
int32_t metaTrimTables(SMeta *pMeta) {
int32_t code = 0;
SArray *tbUids = taosArrayInit(8, sizeof(int64_t));
if (tbUids == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
code = metaFilterTableByHash(pMeta, tbUids);
if (code != 0) {
goto end;
}
if (TARRAY_SIZE(tbUids) == 0) {
goto end;
}
metaDropTables(pMeta, tbUids);
end:
taosArrayDestroy(tbUids);
return code;
}
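Note: metaTrimTables() chains the two new helpers: metaFilterTableByHash() walks every non-super-table entry, asks vnodeValidateTableHash() whether the name still hashes to this vnode, and collects the uids that return TSDB_CODE_VND_HASH_MISMATCH; metaDropTables() then removes them under one write lock. A consumer only needs the single entry point; when it would run (for example after a vgroup split moves hash ranges) is an assumption here, as is the wrapper name.

int32_t vnodeTrimStaleTables(SMeta *pMeta) {
  int32_t code = metaTrimTables(pMeta);  // detect hash-mismatched tables and batch-drop them
  if (code != 0) {
    terrno = code;                       // surface the failure to the caller
    return -1;
  }
  return 0;
}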
int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) { int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) {
int ret = metaTtlSmaller(pMeta, ttl, tbUids); int ret = metaTtlSmaller(pMeta, ttl, tbUids);
if (ret != 0) { if (ret != 0) {
return ret; return ret;
} }
if (taosArrayGetSize(tbUids) == 0) { if (TARRAY_SIZE(tbUids) == 0) {
return 0; return 0;
} }
metaWLock(pMeta); metaDropTables(pMeta, tbUids);
for (int i = 0; i < taosArrayGetSize(tbUids); ++i) {
tb_uid_t *uid = (tb_uid_t *)taosArrayGet(tbUids, i);
metaDropTableByUid(pMeta, *uid, NULL);
metaDebug("ttl drop table:%" PRId64, *uid);
}
metaULock(pMeta);
return 0; return 0;
} }
@ -999,7 +1073,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
metaUpdateStbStats(pMeta, e.ctbEntry.suid, -1); metaUpdateStbStats(pMeta, e.ctbEntry.suid, -1);
metaUidCacheClear(pMeta, e.ctbEntry.suid); metaUidCacheClear(pMeta, e.ctbEntry.suid);
metaTbGroupCacheClear(pMeta, e.ctbEntry.suid); metaTbGroupCacheClear(pMeta, e.ctbEntry.suid);
} else if (e.type == TSDB_NORMAL_TABLE) { } else if (e.type == TSDB_NORMAL_TABLE) {
// drop schema.db (todo) // drop schema.db (todo)
@ -1011,7 +1085,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
metaStatsCacheDrop(pMeta, uid); metaStatsCacheDrop(pMeta, uid);
metaUidCacheClear(pMeta, uid); metaUidCacheClear(pMeta, uid);
metaTbGroupCacheClear(pMeta, uid); metaTbGroupCacheClear(pMeta, uid);
--pMeta->pVnode->config.vndStats.numOfSTables; --pMeta->pVnode->config.vndStats.numOfSTables;
} }
@ -1432,7 +1506,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn); ((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn);
metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid); metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid);
metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid); metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid);
metaULock(pMeta); metaULock(pMeta);

View File

@ -298,10 +298,8 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
if (res == TSDB_CODE_SUCCESS) { if (res == TSDB_CODE_SUCCESS) {
memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName)); memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName));
} else { } else {
char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId); buildCtbNameByGroupIdImpl(stbFullName, pDataBlock->info.id.groupId, ctbName);
memcpy(ctbName, tmp, strlen(tmp)); memcpy(pTableSinkInfo->tbName, ctbName, strlen(ctbName));
memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp));
taosMemoryFree(tmp);
tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode), tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode),
pDataBlock->info.id.groupId); pDataBlock->info.id.groupId);
} }

View File

@ -13,10 +13,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "storageapi.h"
#include "vnodeInt.h"
#include "tstreamUpdate.h"
#include "meta.h" #include "meta.h"
#include "storageapi.h"
#include "tstreamUpdate.h"
#include "vnodeInt.h"
static void initTsdbReaderAPI(TsdReader* pReader); static void initTsdbReaderAPI(TsdReader* pReader);
static void initMetadataAPI(SStoreMeta* pMeta); static void initMetadataAPI(SStoreMeta* pMeta);
@ -56,10 +56,10 @@ void initTsdbReaderAPI(TsdReader* pReader) {
pReader->tsdReaderResetStatus = tsdbReaderReset; pReader->tsdReaderResetStatus = tsdbReaderReset;
pReader->tsdReaderGetDataBlockDistInfo = tsdbGetFileBlocksDistInfo; pReader->tsdReaderGetDataBlockDistInfo = tsdbGetFileBlocksDistInfo;
pReader->tsdReaderGetNumOfInMemRows = tsdbGetNumOfRowsInMemTable; // todo this function should be moved away pReader->tsdReaderGetNumOfInMemRows = tsdbGetNumOfRowsInMemTable; // todo this function should be moved away
pReader->tsdSetQueryTableList = tsdbSetTableList; pReader->tsdSetQueryTableList = tsdbSetTableList;
pReader->tsdSetReaderTaskId = (void (*)(void *, const char *))tsdbReaderSetId; pReader->tsdSetReaderTaskId = (void (*)(void*, const char*))tsdbReaderSetId;
} }
void initMetadataAPI(SStoreMeta* pMeta) { void initMetadataAPI(SStoreMeta* pMeta) {
@ -67,6 +67,8 @@ void initMetadataAPI(SStoreMeta* pMeta) {
pMeta->openTableMetaCursor = metaOpenTbCursor; pMeta->openTableMetaCursor = metaOpenTbCursor;
pMeta->closeTableMetaCursor = metaCloseTbCursor; pMeta->closeTableMetaCursor = metaCloseTbCursor;
pMeta->pauseTableMetaCursor = metaPauseTbCursor;
pMeta->resumeTableMetaCursor = metaResumeTbCursor;
pMeta->cursorNext = metaTbCursorNext; pMeta->cursorNext = metaTbCursorNext;
pMeta->cursorPrev = metaTbCursorPrev; pMeta->cursorPrev = metaTbCursorPrev;
@ -78,7 +80,7 @@ void initMetadataAPI(SStoreMeta* pMeta) {
pMeta->storeGetIndexInfo = vnodeGetIdx; pMeta->storeGetIndexInfo = vnodeGetIdx;
pMeta->getInvertIndex = vnodeGetIvtIdx; pMeta->getInvertIndex = vnodeGetIvtIdx;
pMeta->extractTagVal = (const void *(*)(const void *, int16_t, STagVal *))metaGetTableTagVal; pMeta->extractTagVal = (const void* (*)(const void*, int16_t, STagVal*))metaGetTableTagVal;
pMeta->getTableTags = metaGetTableTags; pMeta->getTableTags = metaGetTableTags;
pMeta->getTableTagsByUid = metaGetTableTagsByUids; pMeta->getTableTagsByUid = metaGetTableTagsByUids;
@ -86,7 +88,7 @@ void initMetadataAPI(SStoreMeta* pMeta) {
pMeta->getTableTypeByName = metaGetTableTypeByName; pMeta->getTableTypeByName = metaGetTableTypeByName;
pMeta->getTableNameByUid = metaGetTableNameByUid; pMeta->getTableNameByUid = metaGetTableNameByUid;
pMeta->getTableSchema = tsdbGetTableSchema; // todo refactor pMeta->getTableSchema = tsdbGetTableSchema; // todo refactor
pMeta->storeGetTableList = vnodeGetTableList; pMeta->storeGetTableList = vnodeGetTableList;
pMeta->getCachedTableList = metaGetCachedTableUidList; pMeta->getCachedTableList = metaGetCachedTableUidList;
@ -106,7 +108,7 @@ void initTqAPI(SStoreTqReader* pTq) {
pTq->tqReaderNextBlockInWal = tqNextBlockInWal; pTq->tqReaderNextBlockInWal = tqNextBlockInWal;
pTq->tqNextBlockImpl = tqNextBlockImpl;// todo remove it pTq->tqNextBlockImpl = tqNextBlockImpl; // todo remove it
pTq->tqReaderAddTables = tqReaderAddTbUidList; pTq->tqReaderAddTables = tqReaderAddTbUidList;
pTq->tqReaderSetQueryTableList = tqReaderSetTbUidList; pTq->tqReaderSetQueryTableList = tqReaderSetTbUidList;
@ -116,10 +118,10 @@ void initTqAPI(SStoreTqReader* pTq) {
pTq->tqReaderIsQueriedTable = tqReaderIsQueriedTable; pTq->tqReaderIsQueriedTable = tqReaderIsQueriedTable;
pTq->tqReaderCurrentBlockConsumed = tqCurrentBlockConsumed; pTq->tqReaderCurrentBlockConsumed = tqCurrentBlockConsumed;
pTq->tqReaderGetWalReader = tqGetWalReader; // todo remove it pTq->tqReaderGetWalReader = tqGetWalReader; // todo remove it
pTq->tqReaderRetrieveTaosXBlock = tqRetrieveTaosxBlock; // todo remove it pTq->tqReaderRetrieveTaosXBlock = tqRetrieveTaosxBlock; // todo remove it
pTq->tqReaderSetSubmitMsg = tqReaderSetSubmitMsg; // todo remove it pTq->tqReaderSetSubmitMsg = tqReaderSetSubmitMsg; // todo remove it
pTq->tqGetResultBlock = tqGetResultBlock; pTq->tqGetResultBlock = tqGetResultBlock;
pTq->tqReaderNextBlockFilterOut = tqNextDataBlockFilterOut; pTq->tqReaderNextBlockFilterOut = tqNextDataBlockFilterOut;
@ -199,7 +201,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStateClose = streamStateClose; pStore->streamStateClose = streamStateClose;
pStore->streamStateBegin = streamStateBegin; pStore->streamStateBegin = streamStateBegin;
pStore->streamStateCommit = streamStateCommit; pStore->streamStateCommit = streamStateCommit;
pStore->streamStateDestroy= streamStateDestroy; pStore->streamStateDestroy = streamStateDestroy;
pStore->streamStateDeleteCheckPoint = streamStateDeleteCheckPoint; pStore->streamStateDeleteCheckPoint = streamStateDeleteCheckPoint;
} }
@ -239,4 +241,4 @@ void initSnapshotFn(SStoreSnapshotFn* pSnapshot) {
pSnapshot->destroySnapshot = destroySnapContext; pSnapshot->destroySnapshot = destroySnapContext;
pSnapshot->getMetaTableInfoFromSnapshot = getMetaTableInfoFromSnapshot; pSnapshot->getMetaTableInfoFromSnapshot = getMetaTableInfoFromSnapshot;
pSnapshot->getTableInfoFromSnapshot = getTableInfoFromSnapshot; pSnapshot->getTableInfoFromSnapshot = getTableInfoFromSnapshot;
} }


@ -245,11 +245,11 @@ _exit:
static int32_t vnodePreProcessDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) { static int32_t vnodePreProcessDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0; int32_t code = 0;
int32_t size; int32_t size;
int32_t ret; int32_t ret;
uint8_t *pCont; uint8_t *pCont;
SEncoder *pCoder = &(SEncoder){0}; SEncoder *pCoder = &(SEncoder){0};
SDeleteRes res = {0}; SDeleteRes res = {0};
SReadHandle handle = {.config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb}; SReadHandle handle = {.config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
initStorageAPI(&handle.api); initStorageAPI(&handle.api);
@ -316,8 +316,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
return -1; return -1;
} }
vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), ver);
ver);
ASSERT(pVnode->state.applyTerm <= pMsg->info.conn.applyTerm); ASSERT(pVnode->state.applyTerm <= pMsg->info.conn.applyTerm);
ASSERT(pVnode->state.applied + 1 == ver); ASSERT(pVnode->state.applied + 1 == ver);
@ -1495,6 +1494,7 @@ static int32_t vnodeConsolidateAlterHashRange(SVnode *pVnode, int64_t ver) {
pVnode->config.hashBegin, pVnode->config.hashEnd, ver); pVnode->config.hashBegin, pVnode->config.hashEnd, ver);
// TODO: trim meta of tables from TDB per hash range [pVnode->config.hashBegin, pVnode->config.hashEnd] // TODO: trim meta of tables from TDB per hash range [pVnode->config.hashBegin, pVnode->config.hashEnd]
code = metaTrimTables(pVnode->pMeta);
return code; return code;
} }
@ -1508,8 +1508,7 @@ static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t ver, void *pR
code = vnodeConsolidateAlterHashRange(pVnode, ver); code = vnodeConsolidateAlterHashRange(pVnode, ver);
if (code < 0) { if (code < 0) {
vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(), vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(), ver);
ver);
goto _exit; goto _exit;
} }
pVnode->config.hashChange = false; pVnode->config.hashChange = false;


@ -1555,10 +1555,13 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
char dbFName[TSDB_DB_FNAME_LEN]; char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(&pReq->tbName, dbFName); tNameGetFullDbName(&pReq->tbName, dbFName);
// since that we add read/write previliges when create db, there is no need to check createdDbs
#if 0
if (pInfo->createdDbs && taosHashGet(pInfo->createdDbs, dbFName, strlen(dbFName))) { if (pInfo->createdDbs && taosHashGet(pInfo->createdDbs, dbFName, strlen(dbFName))) {
pRes->pass = true; pRes->pass = true;
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
#endif
switch (pReq->type) { switch (pReq->type) {
case AUTH_TYPE_READ: { case AUTH_TYPE_READ: {


@ -457,6 +457,7 @@ typedef struct SStreamIntervalOperatorInfo {
int64_t dataVersion; int64_t dataVersion;
SStateStore statestore; SStateStore statestore;
bool recvGetAll; bool recvGetAll;
SHashObj* pFinalPullDataMap;
} SStreamIntervalOperatorInfo; } SStreamIntervalOperatorInfo;
typedef struct SDataGroupInfo { typedef struct SDataGroupInfo {


@ -647,6 +647,8 @@ uint64_t calcGroupId(char* pData, int32_t len) {
// NOTE: only extract the initial 8 bytes of the final MD5 digest // NOTE: only extract the initial 8 bytes of the final MD5 digest
uint64_t id = 0; uint64_t id = 0;
memcpy(&id, context.digest, sizeof(uint64_t)); memcpy(&id, context.digest, sizeof(uint64_t));
if (0 == id)
memcpy(&id, context.digest + 8, sizeof(uint64_t));
return id; return id;
} }
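The two lines added above only take effect when the low 8 bytes of the MD5 digest happen to be zero; the id then falls back to the high 8 bytes, presumably so that a group id of 0 keeps a reserved meaning. A self-contained sketch of the same fallback, with illustrative names rather than the taos MD5 context:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Derive a 64-bit group id from a 16-byte digest; if the low 8 bytes are all
 * zero, fall back to the high 8 bytes so that 0 stays available as a sentinel. */
static uint64_t groupIdFromDigest(const unsigned char digest[16]) {
  uint64_t id = 0;
  memcpy(&id, digest, sizeof(uint64_t));
  if (id == 0) {
    memcpy(&id, digest + 8, sizeof(uint64_t));
  }
  return id;
}

int main(void) {
  unsigned char digest[16] = {0};
  digest[12] = 0x5a; /* low 8 bytes zero, high 8 bytes non-zero */
  printf("group id: %llu\n", (unsigned long long)groupIdFromDigest(digest));
  return 0;
}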


@ -319,6 +319,11 @@ void destroyMergeJoinOperator(void* param) {
} }
nodesDestroyNode(pJoinOperator->pCondAfterMerge); nodesDestroyNode(pJoinOperator->pCondAfterMerge);
taosArrayDestroy(pJoinOperator->rowCtx.leftCreatedBlocks);
taosArrayDestroy(pJoinOperator->rowCtx.rightCreatedBlocks);
taosArrayDestroy(pJoinOperator->rowCtx.leftRowLocations);
taosArrayDestroy(pJoinOperator->rowCtx.rightRowLocations);
pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes); pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);
taosMemoryFreeClear(param); taosMemoryFreeClear(param);
} }


@ -213,6 +213,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
} else { } else {
if (limitReached && (pLimitInfo->slimit.limit >= 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { if (limitReached && (pLimitInfo->slimit.limit >= 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
setOperatorCompleted(pOperator); setOperatorCompleted(pOperator);
} else if (limitReached && groupId == 0) {
setOperatorCompleted(pOperator);
} }
} }


@ -1867,7 +1867,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
TSKEY maxTs = pAPI->stateStore.updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex); TSKEY maxTs = pAPI->stateStore.updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
} else { } else {
-      pInfo->pUpdateInfo->maxDataVersion = pTaskInfo->streamInfo.fillHistoryVer2;
+      pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pTaskInfo->streamInfo.fillHistoryVer2);
doCheckUpdate(pInfo, pInfo->pRecoverRes->info.window.ekey, pInfo->pRecoverRes); doCheckUpdate(pInfo, pInfo->pRecoverRes->info.window.ekey, pInfo->pRecoverRes);
} }
} }


@ -153,6 +153,7 @@ SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, i
colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info); colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info);
} }
pDataBlock->info.dataLoad = 1;
pDataBlock->info.rows = p->info.rows; pDataBlock->info.rows = p->info.rows;
} }


@ -25,6 +25,7 @@
#include "tdatablock.h" #include "tdatablock.h"
#include "tmsg.h" #include "tmsg.h"
#include "index.h"
#include "operator.h" #include "operator.h"
#include "query.h" #include "query.h"
#include "querytask.h" #include "querytask.h"
@ -32,7 +33,6 @@
#include "tcompare.h" #include "tcompare.h"
#include "thash.h" #include "thash.h"
#include "ttypes.h" #include "ttypes.h"
#include "index.h"
typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype); typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype);
typedef int32_t (*__sys_filte)(void* pMeta, SNode* cond, SArray* result); typedef int32_t (*__sys_filte)(void* pMeta, SNode* cond, SArray* result);
@ -540,12 +540,12 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
int32_t restore = pInfo->restore; int32_t restore = pInfo->restore;
pInfo->restore = false; pInfo->restore = false;
while (restore || ((ret = pAPI->metaFn.cursorNext(pInfo->pCur, TSDB_TABLE_MAX)) == 0)) { while (restore || ((ret = pAPI->metaFn.cursorNext(pInfo->pCur, TSDB_TABLE_MAX)) == 0)) {
if (restore) { if (restore) {
restore = false; restore = false;
} }
char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
@ -626,8 +626,8 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
} }
static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) { static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStorageAPI* pAPI = &pTaskInfo->storageAPI;
SSysTableScanInfo* pInfo = pOperator->info; SSysTableScanInfo* pInfo = pOperator->info;
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
@ -1100,8 +1100,8 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
} }
static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) { static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStorageAPI* pAPI = &pTaskInfo->storageAPI;
SSysTableScanInfo* pInfo = pOperator->info; SSysTableScanInfo* pInfo = pOperator->info;
@ -1288,11 +1288,16 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) { static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStorageAPI* pAPI = &pTaskInfo->storageAPI;
int8_t firstMetaCursor = 0;
SSysTableScanInfo* pInfo = pOperator->info; SSysTableScanInfo* pInfo = pOperator->info;
if (pInfo->pCur == NULL) { if (pInfo->pCur == NULL) {
pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode); pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode);
firstMetaCursor = 1;
}
if (!firstMetaCursor) {
pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0);
} }
blockDataCleanup(pInfo->pRes); blockDataCleanup(pInfo->pRes);
@ -1436,12 +1441,14 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
numOfRows = 0; numOfRows = 0;
if (pInfo->pRes->info.rows > 0) { if (pInfo->pRes->info.rows > 0) {
pAPI->metaFn.pauseTableMetaCursor(pInfo->pCur);
break; break;
} }
} }
} }
if (numOfRows > 0) { if (numOfRows > 0) {
pAPI->metaFn.pauseTableMetaCursor(pInfo->pCur);
p->info.rows = numOfRows; p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows; pInfo->pRes->info.rows = numOfRows;
@ -1485,7 +1492,8 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
} else { } else {
if (pInfo->showRewrite == false) { if (pInfo->showRewrite == false) {
if (pCondition != NULL && pInfo->pIdx == NULL) { if (pCondition != NULL && pInfo->pIdx == NULL) {
SSTabFltArg arg = {.pMeta = pInfo->readHandle.vnode, .pVnode = pInfo->readHandle.vnode, .pAPI = &pTaskInfo->storageAPI}; SSTabFltArg arg = {
.pMeta = pInfo->readHandle.vnode, .pVnode = pInfo->readHandle.vnode, .pAPI = &pTaskInfo->storageAPI};
SSysTableIndex* idx = taosMemoryMalloc(sizeof(SSysTableIndex)); SSysTableIndex* idx = taosMemoryMalloc(sizeof(SSysTableIndex));
idx->init = 0; idx->init = 0;
@ -1827,7 +1835,7 @@ void destroySysScanOperator(void* param) {
pInfo->pIdx = NULL; pInfo->pIdx = NULL;
} }
if(pInfo->pSchema) { if (pInfo->pSchema) {
taosHashCleanup(pInfo->pSchema); taosHashCleanup(pInfo->pSchema);
pInfo->pSchema = NULL; pInfo->pSchema = NULL;
} }
@ -2144,7 +2152,7 @@ static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result) {
return -1; return -1;
} }
static int32_t doGetTableRowSize(SReadHandle *pHandle, uint64_t uid, int32_t* rowLen, const char* idstr) { static int32_t doGetTableRowSize(SReadHandle* pHandle, uint64_t uid, int32_t* rowLen, const char* idstr) {
*rowLen = 0; *rowLen = 0;
SMetaReader mr = {0}; SMetaReader mr = {0};
@ -2194,17 +2202,17 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
SBlockDistInfo* pBlockScanInfo = pOperator->info; SBlockDistInfo* pBlockScanInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStorageAPI* pAPI = &pTaskInfo->storageAPI;
STableBlockDistInfo blockDistInfo = {.minRows = INT_MAX, .maxRows = INT_MIN}; STableBlockDistInfo blockDistInfo = {.minRows = INT_MAX, .maxRows = INT_MIN};
int32_t code = doGetTableRowSize(&pBlockScanInfo->readHandle, pBlockScanInfo->uid, int32_t code = doGetTableRowSize(&pBlockScanInfo->readHandle, pBlockScanInfo->uid, (int32_t*)&blockDistInfo.rowSize,
(int32_t*)&blockDistInfo.rowSize, GET_TASKID(pTaskInfo)); GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code); T_LONG_JMP(pTaskInfo->env, code);
} }
pAPI->tsdReader.tsdReaderGetDataBlockDistInfo(pBlockScanInfo->pHandle, &blockDistInfo); pAPI->tsdReader.tsdReaderGetDataBlockDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
blockDistInfo.numOfInmemRows = (int32_t) pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle); blockDistInfo.numOfInmemRows = (int32_t)pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle);
SSDataBlock* pBlock = pBlockScanInfo->pResBlock; SSDataBlock* pBlock = pBlockScanInfo->pResBlock;
@ -2289,7 +2297,8 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
size_t num = tableListGetSize(pTableListInfo); size_t num = tableListGetSize(pTableListInfo);
void* pList = tableListGetInfo(pTableListInfo, 0); void* pList = tableListGetInfo(pTableListInfo, 0);
code = readHandle->api.tsdReader.tsdReaderOpen(readHandle->vnode, &cond, pList, num, pInfo->pResBlock, (void**)&pInfo->pHandle, pTaskInfo->id.str, false, NULL); code = readHandle->api.tsdReader.tsdReaderOpen(readHandle->vnode, &cond, pList, num, pInfo->pResBlock,
(void**)&pInfo->pHandle, pTaskInfo->id.str, false, NULL);
cleanupQueryTableDataCond(&cond); cleanupQueryTableDataCond(&cond);
if (code != 0) { if (code != 0) {
goto _error; goto _error;
@ -2316,4 +2325,4 @@ _error:
taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator); taosMemoryFreeClear(pOperator);
return NULL; return NULL;
} }


@@ -257,7 +257,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
   // output the result
-  bool hasInterp = true;
+  int32_t fillColIndex = 0;
+  bool    hasInterp = true;
   for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
     SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
@@ -307,7 +308,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
       case TSDB_FILL_SET_VALUE:
       case TSDB_FILL_SET_VALUE_F: {
-        SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
+        SVariant* pVar = &pSliceInfo->pFillColInfo[fillColIndex].fillVal;
         if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
           float v = 0;
@@ -342,6 +343,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
         }
         colDataSetVal(pDst, rows, (char*)&v, false);
       }
+      ++fillColIndex;
       break;
     }


@@ -1306,6 +1306,8 @@ static bool doDeleteWindow(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId)
   return true;
 }
+static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
+
 static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDataBlock* pBlock, SArray* pUpWins,
                             SSHashObj* pUpdatedMap) {
   SStreamIntervalOperatorInfo* pInfo = pOperator->info;
@@ -1340,8 +1342,14 @@ static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDa
     SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
     void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
     if (chIds) {
-      getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
-      continue;
+      int32_t childId = getChildIndex(pBlock);
+      SArray* chArray = *(void**)chIds;
+      int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
+      if (index != -1) {
+        qDebug("===stream===try push delete window%" PRId64 "chId:%d ,continue", win.skey, childId);
+        getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
+        continue;
+      }
     }
     bool res = doDeleteWindow(pOperator, win.skey, winGpId);
     if (pUpWins && res) {
@ -1497,6 +1505,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
taosArrayDestroy(*(void**)pIte); taosArrayDestroy(*(void**)pIte);
} }
taosHashCleanup(pInfo->pPullDataMap); taosHashCleanup(pInfo->pPullDataMap);
taosHashCleanup(pInfo->pFinalPullDataMap);
taosArrayDestroy(pInfo->pPullWins); taosArrayDestroy(pInfo->pPullWins);
blockDataDestroy(pInfo->pPullDataRes); blockDataDestroy(pInfo->pPullDataRes);
taosArrayDestroy(pInfo->pDelWins); taosArrayDestroy(pInfo->pDelWins);
@ -2067,8 +2076,6 @@ void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*)); taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*));
} }
static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
static void clearStreamIntervalOperator(SStreamIntervalOperatorInfo* pInfo) { static void clearStreamIntervalOperator(SStreamIntervalOperatorInfo* pInfo) {
tSimpleHashClear(pInfo->aggSup.pResultRowHashTable); tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf); clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
@ -2112,7 +2119,7 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
blockDataUpdateTsWindow(pBlock, 0); blockDataUpdateTsWindow(pBlock, 0);
} }
-void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval) {
+void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SHashObj* pFinalMap, SInterval* pInterval, SArray* pPullWins, int32_t numOfCh, SOperatorInfo* pOperator) {
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* tsData = (TSKEY*)pStartCol->pData; TSKEY* tsData = (TSKEY*)pStartCol->pData;
SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
@ -2136,6 +2143,22 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
taosArrayDestroy(chArray); taosArrayDestroy(chArray);
taosHashRemove(pMap, &winRes, sizeof(SWinKey)); taosHashRemove(pMap, &winRes, sizeof(SWinKey));
qDebug("===stream===retrive pull data over.window %" PRId64 , winRes.ts); qDebug("===stream===retrive pull data over.window %" PRId64 , winRes.ts);
void* pFinalCh = taosHashGet(pFinalMap, &winRes, sizeof(SWinKey));
if (pFinalCh) {
taosHashRemove(pFinalMap, &winRes, sizeof(SWinKey));
doDeleteWindow(pOperator, winRes.ts, winRes.groupId);
STimeWindow nextWin = getFinalTimeWindow(winRes.ts, pInterval);
SPullWindowInfo pull = {.window = nextWin,
.groupId = winRes.groupId,
.calWin.skey = nextWin.skey,
.calWin.ekey = nextWin.skey};
// add pull data request
if (savePullWindow(&pull, pPullWins) == TSDB_CODE_SUCCESS) {
addPullWindow(pMap, &winRes, numOfCh);
qDebug("===stream===prepare final retrive for delete %" PRId64 ", size:%d", winRes.ts, numOfCh);
}
}
} }
} }
} }
@ -2144,7 +2167,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
} }
} }
-static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
+static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo, int32_t childId) {
int32_t size = taosArrayGetSize(wins); int32_t size = taosArrayGetSize(wins);
for (int32_t i = 0; i < size; i++) { for (int32_t i = 0; i < size; i++) {
SWinKey* winKey = taosArrayGet(wins, i); SWinKey* winKey = taosArrayGet(wins, i);
@ -2161,6 +2184,14 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
addPullWindow(pInfo->pPullDataMap, winKey, pInfo->numOfChild); addPullWindow(pInfo->pPullDataMap, winKey, pInfo->numOfChild);
qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, pInfo->numOfChild); qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, pInfo->numOfChild);
} }
} else {
SArray* chArray = *(void**)chIds;
int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
qDebug("===stream===check final retrive %" PRId64",chid:%d", winKey->ts, index);
if (index == -1) {
qDebug("===stream===add final retrive %" PRId64, winKey->ts);
taosHashPut(pInfo->pFinalPullDataMap, winKey, sizeof(SWinKey), NULL, 0);
}
} }
} }
} }
@ -2314,7 +2345,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
} }
while (1) { while (1) {
bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup); bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
-    if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
+    if ((pInfo->ignoreExpiredData && isClosed && !IS_FINAL_OP(pInfo)) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin); startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
if (startPos < 0) { if (startPos < 0) {
break; break;
@ -2554,7 +2585,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SArray* delWins = taosArrayInit(8, sizeof(SWinKey)); SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pInfo->pUpdatedMap); doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pInfo->pUpdatedMap);
if (IS_FINAL_OP(pInfo)) { if (IS_FINAL_OP(pInfo)) {
-        addRetriveWindow(delWins, pInfo);
+        int32_t chId = getChildIndex(pBlock);
+        addRetriveWindow(delWins, pInfo, chId);
if (pBlock->info.type != STREAM_CLEAR) { if (pBlock->info.type != STREAM_CLEAR) {
taosArrayAddAll(pInfo->pDelWins, delWins); taosArrayAddAll(pInfo->pDelWins, delWins);
} }
@ -2589,7 +2621,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
} }
continue; continue;
} else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) { } else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) {
-      processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval);
+      processPullOver(pBlock, pInfo->pPullDataMap, pInfo->pFinalPullDataMap, &pInfo->interval, pInfo->pPullWins, pInfo->numOfChild, pOperator);
continue; continue;
} else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) { } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
return pBlock; return pBlock;
@ -2772,6 +2804,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->pullIndex = 0; pInfo->pullIndex = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK); pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
pInfo->pFinalPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE); pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired; pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
pInfo->ignoreExpiredDataSaved = false; pInfo->ignoreExpiredDataSaved = false;
@ -4963,6 +4996,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pPhyNode = NULL; // create new child pInfo->pPhyNode = NULL; // create new child
pInfo->pPullDataMap = NULL; pInfo->pPullDataMap = NULL;
pInfo->pFinalPullDataMap = NULL;
pInfo->pPullWins = NULL; // SPullWindowInfo pInfo->pPullWins = NULL; // SPullWindowInfo
pInfo->pullIndex = 0; pInfo->pullIndex = 0;
pInfo->pPullDataRes = NULL; pInfo->pPullDataRes = NULL;


@ -791,7 +791,21 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
memcpy(udfCol->colData.varLenCol.varOffsets, col->varmeta.offset, udfCol->colData.varLenCol.varOffsetsLen); memcpy(udfCol->colData.varLenCol.varOffsets, col->varmeta.offset, udfCol->colData.varLenCol.varOffsetsLen);
udfCol->colData.varLenCol.payloadLen = colDataGetLength(col, udfBlock->numOfRows); udfCol->colData.varLenCol.payloadLen = colDataGetLength(col, udfBlock->numOfRows);
udfCol->colData.varLenCol.payload = taosMemoryMalloc(udfCol->colData.varLenCol.payloadLen); udfCol->colData.varLenCol.payload = taosMemoryMalloc(udfCol->colData.varLenCol.payloadLen);
-      memcpy(udfCol->colData.varLenCol.payload, col->pData, udfCol->colData.varLenCol.payloadLen);
+      if (col->reassigned) {
for (int32_t row = 0; row < udfCol->colData.numOfRows; ++row) {
char* pColData = col->pData + col->varmeta.offset[row];
int32_t colSize = 0;
if (col->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
memcpy(udfCol->colData.varLenCol.payload, pColData, colSize);
udfCol->colData.varLenCol.payload += colSize;
}
} else {
memcpy(udfCol->colData.varLenCol.payload, col->pData, udfCol->colData.varLenCol.payloadLen);
}
} else { } else {
udfCol->colData.fixLenCol.nullBitmapLen = BitmapLen(udfCol->colData.numOfRows); udfCol->colData.fixLenCol.nullBitmapLen = BitmapLen(udfCol->colData.numOfRows);
int32_t bitmapLen = udfCol->colData.fixLenCol.nullBitmapLen; int32_t bitmapLen = udfCol->colData.fixLenCol.nullBitmapLen;


@ -669,7 +669,7 @@ static int32_t selectStmtCopy(const SSelectStmt* pSrc, SSelectStmt* pDst) {
COPY_CHAR_ARRAY_FIELD(stmtName); COPY_CHAR_ARRAY_FIELD(stmtName);
COPY_SCALAR_FIELD(precision); COPY_SCALAR_FIELD(precision);
COPY_SCALAR_FIELD(isEmptyResult); COPY_SCALAR_FIELD(isEmptyResult);
-  COPY_SCALAR_FIELD(isTimeLineResult);
+  COPY_SCALAR_FIELD(timeLineResMode);
COPY_SCALAR_FIELD(hasAggFuncs); COPY_SCALAR_FIELD(hasAggFuncs);
COPY_SCALAR_FIELD(hasRepeatScanFuncs); COPY_SCALAR_FIELD(hasRepeatScanFuncs);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;


@ -0,0 +1,180 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "querynodes.h"
#define MATCH_SCALAR_FIELD(fldname) \
do { \
if (p->fldname != pSub->fldname) return false; \
} while (0)
#define MATCH_STRING(a, b) (((a) != NULL && (b) != NULL) ? (strcmp((a), (b)) == 0) : (a) == (b))
#define MATCH_VARDATA(a, b) \
(((a) != NULL && (b) != NULL) \
? (varDataLen((a)) == varDataLen((b)) && memcmp(varDataVal((a)), varDataVal((b)), varDataLen((a))) == 0) \
: (a) == (b))
#define MATCH_STRING_FIELD(fldname) \
do { \
if (!MATCH_STRING(p->fldname, pSub->fldname)) return false; \
} while (0)
#define MATCH_VARDATA_FIELD(fldname) \
do { \
if (!MATCH_VARDATA(p->fldname, pSub->fldname)) return false; \
} while (0)
#define MATCH_OBJECT_FIELD(fldname, matchFunc) \
do { \
if (!matchFunc(p->fldname, pSub->fldname)) return false; \
} while (0)
#define MATCH_NODE_FIELD(fldname) \
do { \
if (!nodesMatchNode(pSub->fldname, p->fldname)) return false; \
} while (0)
#define MATCH_NODE_LIST_FIELD(fldname) \
do { \
if (!nodesListMatch(p->fldname, pSub->fldname)) return false; \
} while (0)
bool nodesListMatchExists(const SNodeList* pList, const SNode* pTarget) {
if (NULL == pList || NULL == pTarget) {
return false;
}
SNode* node = NULL;
bool exists = false;
FOREACH(node, pList) {
if (nodesMatchNode(node, pTarget)) {
exists = true;
break;
}
}
return exists;
}
bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList) {
if (pList == pSubList) {
return true;
}
if (NULL == pList || NULL == pSubList) {
return false;
}
if (pList->length != pSubList->length) {
return false;
}
SNode* node = NULL;
bool match = true;
FOREACH(node, pList) {
if (!nodesListMatchExists(pSubList, node)) {
match = false;
break;
}
}
return match;
}
static bool columnNodeMatch(const SColumnNode* pSub, const SColumnNode* p) {
if (0 == strcmp(p->colName, pSub->node.aliasName)) {
return true;
}
return false;
}
static bool valueNodeMatch(const SValueNode* pSub, const SValueNode* p) {
return nodesEqualNode((SNode*)pSub, (SNode*)p);
}
static bool operatorNodeMatch(const SOperatorNode* pSub, const SOperatorNode* p) {
MATCH_SCALAR_FIELD(opType);
MATCH_NODE_FIELD(pLeft);
MATCH_NODE_FIELD(pRight);
return true;
}
static bool logicConditionNodeMatch(const SLogicConditionNode* pSub, const SLogicConditionNode* p) {
MATCH_SCALAR_FIELD(condType);
MATCH_NODE_LIST_FIELD(pParameterList);
return true;
}
static bool functionNodeMatch(const SFunctionNode* pSub, const SFunctionNode* p) {
MATCH_SCALAR_FIELD(funcId);
MATCH_STRING_FIELD(functionName);
MATCH_NODE_LIST_FIELD(pParameterList);
return true;
}
static bool whenThenNodeMatch(const SWhenThenNode* pSub, const SWhenThenNode* p) {
MATCH_NODE_FIELD(pWhen);
MATCH_NODE_FIELD(pThen);
return true;
}
static bool caseWhenNodeMatch(const SCaseWhenNode* pSub, const SCaseWhenNode* p) {
MATCH_NODE_FIELD(pCase);
MATCH_NODE_FIELD(pElse);
MATCH_NODE_LIST_FIELD(pWhenThenList);
return true;
}
bool nodesMatchNode(const SNode* pSub, const SNode* p) {
if (pSub == p) {
return true;
}
if (NULL == pSub || NULL == p) {
return false;
}
if (nodeType(pSub) != nodeType(p)) {
return false;
}
switch (nodeType(p)) {
case QUERY_NODE_COLUMN:
return columnNodeMatch((const SColumnNode*)pSub, (const SColumnNode*)p);
case QUERY_NODE_VALUE:
return valueNodeMatch((const SValueNode*)pSub, (const SValueNode*)p);
case QUERY_NODE_OPERATOR:
return operatorNodeMatch((const SOperatorNode*)pSub, (const SOperatorNode*)p);
case QUERY_NODE_LOGIC_CONDITION:
return logicConditionNodeMatch((const SLogicConditionNode*)pSub, (const SLogicConditionNode*)p);
case QUERY_NODE_FUNCTION:
return functionNodeMatch((const SFunctionNode*)pSub, (const SFunctionNode*)p);
case QUERY_NODE_WHEN_THEN:
return whenThenNodeMatch((const SWhenThenNode*)pSub, (const SWhenThenNode*)p);
case QUERY_NODE_CASE_WHEN:
return caseWhenNodeMatch((const SCaseWhenNode*)pSub, (const SCaseWhenNode*)p);
case QUERY_NODE_REAL_TABLE:
case QUERY_NODE_TEMP_TABLE:
case QUERY_NODE_JOIN_TABLE:
case QUERY_NODE_GROUPING_SET:
case QUERY_NODE_ORDER_BY_EXPR:
case QUERY_NODE_LIMIT:
return false;
default:
break;
}
return false;
}
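This new matcher is what the translator leans on further down when deciding whether an outer PARTITION BY list lines up with the one inside a subquery (see isTimeLineAlignedQuery below). A rough usage sketch, not part of the commit: nodesListMatch/nodesMatchNode come from this file, the construction helpers are the existing nodes API, and the exact field assignments are assumptions that mirror columnNodeMatch() above.

#include <string.h>
#include "querynodes.h"

/* Per columnNodeMatch(), a column node matches when its colName equals the
 * other node's aliasName, so an outer "tag1" lines up with a subquery column
 * aliased as "tag1". */
static bool partitionListsAligned(void) {
  SColumnNode* pOuter = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
  SColumnNode* pInner = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
  strcpy(pOuter->colName, "tag1");
  strcpy(pInner->node.aliasName, "tag1");

  SNodeList* pOuterList = NULL;
  SNodeList* pInnerList = NULL;
  nodesListMakeStrictAppend(&pOuterList, (SNode*)pOuter);
  nodesListMakeStrictAppend(&pInnerList, (SNode*)pInner);

  bool aligned = nodesListMatch(pOuterList, pInnerList); /* true here */

  nodesDestroyList(pOuterList);
  nodesDestroyList(pInnerList);
  return aligned;
}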


@ -127,6 +127,7 @@ SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode
SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues); SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues);
SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode); SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode);
SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd); SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd);
SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint);
SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen); SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen);
SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse); SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse);


@ -84,6 +84,7 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta); int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta); STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta); STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int32_t getTableTypeFromTableNode(SNode *pTable);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen); int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
int32_t getVnodeSysTableTargetName(int32_t acctId, SNode* pWhere, SName* pName); int32_t getVnodeSysTableTargetName(int32_t acctId, SNode* pWhere, SName* pName);


@ -1095,6 +1095,8 @@ having_clause_opt(A) ::= HAVING search_condition(B).
range_opt(A) ::= . { A = NULL; } range_opt(A) ::= . { A = NULL; }
range_opt(A) ::= range_opt(A) ::=
RANGE NK_LP expr_or_subquery(B) NK_COMMA expr_or_subquery(C) NK_RP. { A = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); } RANGE NK_LP expr_or_subquery(B) NK_COMMA expr_or_subquery(C) NK_RP. { A = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
range_opt(A) ::=
RANGE NK_LP expr_or_subquery(B) NK_RP. { A = createInterpTimePoint(pCxt, releaseRawExprNode(pCxt, B)); }
every_opt(A) ::= . { A = NULL; } every_opt(A) ::= . { A = NULL; }
every_opt(A) ::= EVERY NK_LP duration_literal(B) NK_RP. { A = releaseRawExprNode(pCxt, B); } every_opt(A) ::= EVERY NK_LP duration_literal(B) NK_RP. { A = releaseRawExprNode(pCxt, B); }


@ -695,6 +695,11 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt, NULL), pStart, pEnd); return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt, NULL), pStart, pEnd);
} }
SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint) {
CHECK_PARSER_STATUS(pCxt);
return createOperatorNode(pCxt, OP_TYPE_EQUAL, createPrimaryKeyCol(pCxt, NULL), pPoint);
}
SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen) { SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen) {
CHECK_PARSER_STATUS(pCxt); CHECK_PARSER_STATUS(pCxt);
SWhenThenNode* pWhenThen = (SWhenThenNode*)nodesMakeNode(QUERY_NODE_WHEN_THEN); SWhenThenNode* pWhenThen = (SWhenThenNode*)nodesMakeNode(QUERY_NODE_WHEN_THEN);


@ -707,6 +707,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
} }
static bool isInterpFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isInterpPseudoColumnFunc(const SNode* pNode) { static bool isInterpPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
} }
@ -758,18 +762,40 @@ static SNodeList* getProjectList(const SNode* pNode) {
static bool isTimeLineQuery(SNode* pStmt) { static bool isTimeLineQuery(SNode* pStmt) {
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
-    return ((SSelectStmt*)pStmt)->isTimeLineResult;
+    return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode);
} else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) {
return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode;
} else { } else {
return false; return false;
} }
} }
static bool isGlobalTimeLineQuery(SNode* pStmt) { static bool isGlobalTimeLineQuery(SNode* pStmt) {
if (!isTimeLineQuery(pStmt)) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
return TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode;
} else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) {
return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode;
} else {
return false; return false;
} }
SSelectStmt* pSelect = (SSelectStmt*)pStmt; }
return NULL == pSelect->pPartitionByList || NULL != pSelect->pOrderByList;
static bool isTimeLineAlignedQuery(SNode* pStmt) {
SSelectStmt *pSelect = (SSelectStmt *)pStmt;
if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
return true;
}
if (!isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
return false;
}
if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
return false;
}
SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery;
if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) {
return true;
}
return false;
} }
static bool isPrimaryKeyImpl(SNode* pExpr) { static bool isPrimaryKeyImpl(SNode* pExpr) {
@ -1575,7 +1601,7 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu
} }
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
-      !isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
+      !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s function requires valid time series input", pFunc->functionName); "%s function requires valid time series input", pFunc->functionName);
} }
@ -2289,7 +2315,7 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
if (!pSelect->onlyHasKeepOrderFunc) { if (!pSelect->onlyHasKeepOrderFunc) {
-    pSelect->isTimeLineResult = false;
+    pSelect->timeLineResMode = TIME_LINE_NONE;
} }
CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, .existCol = false}; CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, .existCol = false};
nodesRewriteExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt); nodesRewriteExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt);
@ -2636,9 +2662,9 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) {
static int32_t checkJoinTable(STranslateContext* pCxt, SJoinTableNode* pJoinTable) { static int32_t checkJoinTable(STranslateContext* pCxt, SJoinTableNode* pJoinTable) {
if ((QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pLeft) && if ((QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pLeft) &&
!isTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) || !isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) ||
(QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pRight) && (QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pRight) &&
!isTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery))) { !isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN, return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN,
"Join requires valid time series input"); "Join requires valid time series input");
} }
@ -2673,7 +2699,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
} }
if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) { if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
if (isSelectStmt(pCxt->pCurrStmt)) { if (isSelectStmt(pCxt->pCurrStmt)) {
((SSelectStmt*)pCxt->pCurrStmt)->isTimeLineResult = false; ((SSelectStmt*)pCxt->pCurrStmt)->timeLineResMode = TIME_LINE_NONE;
} else if (isDeleteStmt(pCxt->pCurrStmt)) { } else if (isDeleteStmt(pCxt->pCurrStmt)) {
code = TSDB_CODE_TSC_INVALID_OPERATION; code = TSDB_CODE_TSC_INVALID_OPERATION;
break; break;
@ -3008,7 +3034,7 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
} }
static EDealRes needFillImpl(SNode* pNode, void* pContext) { static EDealRes needFillImpl(SNode* pNode, void* pContext) {
if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) { if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
*(bool*)pContext = true; *(bool*)pContext = true;
return DEAL_RES_END; return DEAL_RES_END;
} }
@ -3032,7 +3058,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
code = scalarCalculateConstants(pCaseFunc, &pCell->pNode); code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
} }
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant"); code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant");
} else if (TSDB_CODE_SUCCESS != code) { } else if (TSDB_CODE_SUCCESS != code) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
} }
@ -3056,6 +3082,7 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
if (TSDB_CODE_SUCCESS != code) { if (TSDB_CODE_SUCCESS != code) {
return code; return code;
} }
++fillNo; ++fillNo;
} }
} }
@ -3152,7 +3179,7 @@ static int32_t translateGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_WINDOW_COEXIST); return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_WINDOW_COEXIST);
} }
pCxt->currClause = SQL_CLAUSE_GROUP_BY; pCxt->currClause = SQL_CLAUSE_GROUP_BY;
pSelect->isTimeLineResult = false; pSelect->timeLineResMode = TIME_LINE_NONE;
return translateExprList(pCxt, pSelect->pGroupByList); return translateExprList(pCxt, pSelect->pGroupByList);
} }
@ -3475,6 +3502,22 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) {
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) {
SValueNode* pEvery = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
if (NULL == pEvery) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pEvery->node.resType.type = TSDB_DATA_TYPE_BIGINT;
pEvery->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
pEvery->isDuration = true;
pEvery->literal = taosStrdup("1s");
*pOutput = (SNode*)pEvery;
return TSDB_CODE_SUCCESS;
}
static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) { static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
int32_t len = strlen(pInterval->literal); int32_t len = strlen(pInterval->literal);
@ -3490,7 +3533,12 @@ static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) { static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
code = checkEvery(pCxt, (SValueNode*)(*pEvery)); if (NULL == *pEvery) {
code = createDefaultEveryNode(pCxt, pEvery);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkEvery(pCxt, (SValueNode*)(*pEvery));
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = translateExpr(pCxt, pEvery); code = translateExpr(pCxt, pEvery);
} }
@ -3519,6 +3567,9 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true); code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true);
} }
if (TSDB_CODE_SUCCESS == code) {
code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList);
}
return code; return code;
} }
@ -3536,8 +3587,12 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
} }
if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) { if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE, if (pSelect->pRange != NULL && QUERY_NODE_OPERATOR == nodeType(pSelect->pRange) && pSelect->pEvery == NULL) {
"Missing RANGE clause, EVERY clause or FILL clause"); // single point interp every can be omitted
} else {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
"Missing RANGE clause, EVERY clause or FILL clause");
}
} }
int32_t code = translateExpr(pCxt, &pSelect->pRange); int32_t code = translateExpr(pCxt, &pSelect->pRange);
@ -3552,7 +3607,18 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
pCxt->currClause = SQL_CLAUSE_PARTITION_BY; pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
int32_t code = translateExprList(pCxt, pSelect->pPartitionByList); int32_t code = TSDB_CODE_SUCCESS;
if (pSelect->pPartitionByList) {
int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable);
SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0);
if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) &&
1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) {
pSelect->timeLineResMode = TIME_LINE_MULTI;
}
code = translateExprList(pCxt, pSelect->pPartitionByList);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = translateExprList(pCxt, pSelect->pTags); code = translateExprList(pCxt, pSelect->pTags);
} }
@ -3681,9 +3747,9 @@ static void resetResultTimeline(SSelectStmt* pSelect) {
if ((QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && if ((QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
isPrimaryKey((STempTableNode*)pSelect->pFromTable, pOrder)) || isPrimaryKey((STempTableNode*)pSelect->pFromTable, pOrder)) ||
(QUERY_NODE_TEMP_TABLE != nodeType(pSelect->pFromTable) && isPrimaryKeyImpl(pOrder))) { (QUERY_NODE_TEMP_TABLE != nodeType(pSelect->pFromTable) && isPrimaryKeyImpl(pOrder))) {
pSelect->isTimeLineResult = true; pSelect->timeLineResMode = TIME_LINE_GLOBAL;
} else { } else {
pSelect->isTimeLineResult = false; pSelect->timeLineResMode = TIME_LINE_NONE;
} }
} }
@ -3753,7 +3819,7 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = replaceTbName(pCxt, pSelect); code = replaceTbName(pCxt, pSelect);
} }
return code; return code;
} }
@ -3813,8 +3879,13 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS
pLeftExpr = pLeftFuncExpr; pLeftExpr = pLeftFuncExpr;
} }
snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName);
-    if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList,
-                                                       createSetOperProject(pSetOperator->stmtName, pLeft))) {
+    SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft);
+    if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight)
+        && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID
+        && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
+      ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+    }
+    if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) {
return TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_OUT_OF_MEMORY;
} }
} }
@ -3826,6 +3897,10 @@ static uint8_t calcSetOperatorPrecision(SSetOperator* pSetOperator) {
} }
static int32_t translateSetOperOrderBy(STranslateContext* pCxt, SSetOperator* pSetOperator) { static int32_t translateSetOperOrderBy(STranslateContext* pCxt, SSetOperator* pSetOperator) {
if (NULL == pSetOperator->pOrderByList || pSetOperator->pOrderByList->length <= 0) {
return TSDB_CODE_SUCCESS;
}
bool other; bool other;
int32_t code = translateOrderByPosition(pCxt, pSetOperator->pProjectionList, pSetOperator->pOrderByList, &other); int32_t code = translateOrderByPosition(pCxt, pSetOperator->pProjectionList, pSetOperator->pOrderByList, &other);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
@ -3838,6 +3913,14 @@ static int32_t translateSetOperOrderBy(STranslateContext* pCxt, SSetOperator* pS
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAlias(pCxt, pSetOperator->pProjectionList, pSetOperator->pOrderByList); code = replaceOrderByAlias(pCxt, pSetOperator->pProjectionList, pSetOperator->pOrderByList);
} }
if (TSDB_CODE_SUCCESS == code) {
SNode* pOrder = ((SOrderByExprNode*)nodesListGetNode(pSetOperator->pOrderByList, 0))->pExpr;
if (isPrimaryKeyImpl(pOrder)) {
pSetOperator->timeLineResMode = TIME_LINE_GLOBAL;
} else {
pSetOperator->timeLineResMode = TIME_LINE_NONE;
}
}
return code; return code;
} }
@ -6324,7 +6407,7 @@ static int32_t subtableExprHasColumnOrPseudoColumn(SNode* pNode) {
static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type || if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type ||
-      !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
+      !isTimeLineQuery(pStmt->pQuery) || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
crossTableWithUdaf(pSelect) || isEventWindowQuery(pSelect) || hasJsonTypeProjection(pSelect)) { crossTableWithUdaf(pSelect) || isEventWindowQuery(pSelect) || hasJsonTypeProjection(pSelect)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
} }


@ -138,7 +138,7 @@ static char* getSyntaxErrFormat(int32_t errCode) {
case TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY: case TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY:
return "Primary timestamp column cannot be dropped"; return "Primary timestamp column cannot be dropped";
case TSDB_CODE_PAR_INVALID_MODIFY_COL: case TSDB_CODE_PAR_INVALID_MODIFY_COL:
return "Only binary/nchar column length could be modified, and the length can only be increased, not decreased"; return "Only binary/nchar/geometry column length could be modified, and the length can only be increased, not decreased";
case TSDB_CODE_PAR_INVALID_TBNAME: case TSDB_CODE_PAR_INVALID_TBNAME:
return "Invalid tbname pseudo column"; return "Invalid tbname pseudo column";
case TSDB_CODE_PAR_INVALID_FUNCTION_NAME: case TSDB_CODE_PAR_INVALID_FUNCTION_NAME:
@ -249,6 +249,17 @@ int32_t getNumOfTags(const STableMeta* pTableMeta) { return getTableInfo(pTableM
STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; } STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; }
int32_t getTableTypeFromTableNode(SNode *pTable) {
if (NULL == pTable) {
return -1;
}
if (QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
return -1;
}
return ((SRealTableNode *)pTable)->pMeta->tableType;
}
STableMeta* tableMetaDup(const STableMeta* pTableMeta) { STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
int32_t numOfFields = TABLE_TOTAL_COL_NUM(pTableMeta); int32_t numOfFields = TABLE_TOTAL_COL_NUM(pTableMeta);
if (numOfFields > TSDB_MAX_COLUMNS || numOfFields < TSDB_MIN_COLUMNS) { if (numOfFields > TSDB_MAX_COLUMNS || numOfFields < TSDB_MIN_COLUMNS) {
@ -684,7 +695,7 @@ SNode* createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode*
select->pProjectionList = pProjectionList; select->pProjectionList = pProjectionList;
select->pFromTable = pTable; select->pFromTable = pTable;
sprintf(select->stmtName, "%p", select); sprintf(select->stmtName, "%p", select);
-  select->isTimeLineResult = true;
+  select->timeLineResMode = select->isDistinct ? TIME_LINE_NONE : TIME_LINE_GLOBAL;
select->onlyHasKeepOrderFunc = true; select->onlyHasKeepOrderFunc = true;
select->timeRange = TSWINDOW_INITIALIZER; select->timeRange = TSWINDOW_INITIALIZER;
return (SNode*)select; return (SNode*)select;

File diff suppressed because it is too large


@ -117,6 +117,15 @@ TEST_F(ParserSelectTest, timelineFunc) {
run("SELECT LAST(*), FIRST(*) FROM t1 INTERVAL(10s)"); run("SELECT LAST(*), FIRST(*) FROM t1 INTERVAL(10s)");
run("SELECT diff(c1) FROM t1"); run("SELECT diff(c1) FROM t1");
run("select diff(ts) from (select _wstart as ts, count(*) from st1 partition by tbname interval(1d))", TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
run("select diff(ts) from (select _wstart as ts, count(*) from st1 partition by tbname interval(1d) order by ts)");
run("select t1.* from st1s1 t1, (select _wstart as ts, count(*) from st1s2 partition by tbname interval(1d)) WHERE t1.ts = t2.ts", TSDB_CODE_PAR_NOT_SUPPORT_JOIN);
run("select t1.* from st1s1 t1, (select _wstart as ts, count(*) from st1s2 partition by tbname interval(1d) order by ts) t2 WHERE t1.ts = t2.ts");
} }
TEST_F(ParserSelectTest, selectFunc) { TEST_F(ParserSelectTest, selectFunc) {
@ -325,6 +334,10 @@ TEST_F(ParserSelectTest, subquery) {
run("SELECT SUM(a) FROM (SELECT MAX(c1) a, _wstart FROM st1s1 PARTITION BY TBNAME INTERVAL(1m) ORDER BY _WSTART) " run("SELECT SUM(a) FROM (SELECT MAX(c1) a, _wstart FROM st1s1 PARTITION BY TBNAME INTERVAL(1m) ORDER BY _WSTART) "
"INTERVAL(1n)"); "INTERVAL(1n)");
run("SELECT diff(a) FROM (SELECT _wstart, tag1, tag2, MAX(c1) a FROM st1 PARTITION BY tag1 INTERVAL(1m)) PARTITION BY tag1");
run("SELECT diff(a) FROM (SELECT _wstart, tag1, tag2, MAX(c1) a FROM st1 PARTITION BY tag1 INTERVAL(1m)) PARTITION BY tag2", TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
run("SELECT _C0 FROM (SELECT _ROWTS, ts FROM st1s1)"); run("SELECT _C0 FROM (SELECT _ROWTS, ts FROM st1s1)");
run("SELECT ts FROM (SELECT t1.ts FROM st1s1 t1)"); run("SELECT ts FROM (SELECT t1.ts FROM st1s1 t1)");


@ -11,7 +11,7 @@ if(${BUILD_WITH_ROCKSDB})
IF (TD_LINUX) IF (TD_LINUX)
target_link_libraries( target_link_libraries(
stream stream
PUBLIC rocksdb-shared tdb PUBLIC rocksdb tdb
PRIVATE os util transport qcom executor wal index PRIVATE os util transport qcom executor wal index
) )
ELSE() ELSE()


@ -122,12 +122,17 @@ char* streamDefaultIterKey_rocksdb(void* iter, int32_t* len);
char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len); char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len);
// batch func // batch func
int streamStateGetCfIdx(SStreamState* pState, const char* funcName);
void* streamStateCreateBatch(); void* streamStateCreateBatch();
int32_t streamStateGetBatchSize(void* pBatch); int32_t streamStateGetBatchSize(void* pBatch);
void streamStateClearBatch(void* pBatch); void streamStateClearBatch(void* pBatch);
void streamStateDestroyBatch(void* pBatch); void streamStateDestroyBatch(void* pBatch);
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key, int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl); void* val, int32_t vlen, int64_t ttl);
int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl, void* tmpBuf);
int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch); int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch);
// int32_t streamDefaultIter_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result); // int32_t streamDefaultIter_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result);
#endif #endif
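These declarations expose the batched write path: streamStateGetCfIdx() resolves a column-family index once, and streamStatePutBatchOptimize() encodes into a caller-supplied scratch buffer instead of allocating per put. A minimal usage sketch under those assumptions (the "state" column-family name and the 256-byte scratch buffer are illustrative; flushSnapshot() later in this diff follows the same shape):

```c
/* Minimal sketch of the batch API declared above; the cf name and buffer size
 * are illustrative assumptions, not part of the declarations themselves. */
static int32_t putOneRowBatched(SStreamState* pState, void* key, void* val, int32_t vlen) {
  int   cfIdx  = streamStateGetCfIdx(pState, "state");    /* resolve the cf index once        */
  void* pBatch = streamStateCreateBatch();
  char  tmpBuf[256] = {0};                                 /* reused encode buffer, no per-put */
                                                           /* allocation                       */
  streamStatePutBatchOptimize(pState, cfIdx, pBatch, key, val, vlen, 0, tmpBuf);

  int32_t code = 0;
  if (streamStateGetBatchSize(pBatch) > 0) {
    code = streamStatePutBatch_rocksdb(pState, pBatch);    /* one rocksdb write for the batch  */
  }
  streamStateDestroyBatch(pBatch);
  return code;
}
```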


@ -37,8 +37,9 @@ void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration)
int32_t streamDispatchStreamBlock(SStreamTask* pTask); int32_t streamDispatchStreamBlock(SStreamTask* pTask);
SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg); SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);
SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize,
void destroyStreamDataBlock(SStreamDataBlock* pBlock); SArray* pRes);
void destroyStreamDataBlock(SStreamDataBlock* pBlock);
int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData); int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock); int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock);
@ -53,6 +54,8 @@ int32_t streamDoDispatchRecoverFinishMsg(SStreamTask* pTask, const SStreamRecove
SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem); SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);
extern int32_t streamBackendId;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@ -16,7 +16,9 @@
#include "streamBackendRocksdb.h" #include "streamBackendRocksdb.h"
#include "executor.h" #include "executor.h"
#include "query.h" #include "query.h"
#include "streamInc.h"
#include "tcommon.h" #include "tcommon.h"
#include "tref.h"
typedef struct SCompactFilteFactory { typedef struct SCompactFilteFactory {
void* status; void* status;
@ -79,8 +81,8 @@ const char* compareParKeyName(void* name);
const char* comparePartagKeyName(void* name); const char* comparePartagKeyName(void* name);
void* streamBackendInit(const char* path) { void* streamBackendInit(const char* path) {
qDebug("init stream backend"); qDebug("start to init stream backend at %s", path);
SBackendHandle* pHandle = calloc(1, sizeof(SBackendHandle)); SBackendHandle* pHandle = taosMemoryCalloc(1, sizeof(SBackendHandle));
pHandle->list = tdListNew(sizeof(SCfComparator)); pHandle->list = tdListNew(sizeof(SCfComparator));
taosThreadMutexInit(&pHandle->mutex, NULL); taosThreadMutexInit(&pHandle->mutex, NULL);
taosThreadMutexInit(&pHandle->cfMutex, NULL); taosThreadMutexInit(&pHandle->cfMutex, NULL);
@ -119,6 +121,7 @@ void* streamBackendInit(const char* path) {
if (err != NULL) { if (err != NULL) {
qError("failed to open rocksdb, path:%s, reason:%s", path, err); qError("failed to open rocksdb, path:%s, reason:%s", path, err);
taosMemoryFreeClear(err); taosMemoryFreeClear(err);
goto _EXIT;
} }
} else { } else {
/* /*
@ -129,6 +132,7 @@ void* streamBackendInit(const char* path) {
if (cfs != NULL) { if (cfs != NULL) {
rocksdb_list_column_families_destroy(cfs, nCf); rocksdb_list_column_families_destroy(cfs, nCf);
} }
qDebug("succ to init stream backend at %s, backend:%p", path, pHandle);
return (void*)pHandle; return (void*)pHandle;
_EXIT: _EXIT:
@ -140,7 +144,8 @@ _EXIT:
taosHashCleanup(pHandle->cfInst); taosHashCleanup(pHandle->cfInst);
rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory); rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory);
tdListFree(pHandle->list); tdListFree(pHandle->list);
free(pHandle); taosMemoryFree(pHandle);
qDebug("failed to init stream backend at %s", path);
return NULL; return NULL;
} }
void streamBackendCleanup(void* arg) { void streamBackendCleanup(void* arg) {
@ -168,19 +173,20 @@ void streamBackendCleanup(void* arg) {
rocksdb_env_destroy(pHandle->env); rocksdb_env_destroy(pHandle->env);
rocksdb_cache_destroy(pHandle->cache); rocksdb_cache_destroy(pHandle->cache);
taosThreadMutexDestroy(&pHandle->mutex);
SListNode* head = tdListPopHead(pHandle->list); SListNode* head = tdListPopHead(pHandle->list);
while (head != NULL) { while (head != NULL) {
streamStateDestroyCompar(head->data); streamStateDestroyCompar(head->data);
taosMemoryFree(head); taosMemoryFree(head);
head = tdListPopHead(pHandle->list); head = tdListPopHead(pHandle->list);
} }
// rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory);
tdListFree(pHandle->list); tdListFree(pHandle->list);
taosThreadMutexDestroy(&pHandle->mutex);
taosThreadMutexDestroy(&pHandle->cfMutex); taosThreadMutexDestroy(&pHandle->cfMutex);
taosMemoryFree(pHandle); taosMemoryFree(pHandle);
qDebug("destroy stream backend backend:%p", pHandle);
return; return;
} }
SListNode* streamBackendAddCompare(void* backend, void* arg) { SListNode* streamBackendAddCompare(void* backend, void* arg) {
@ -204,7 +210,6 @@ void streamBackendDelCompare(void* backend, void* arg) {
} }
void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); } void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); }
static bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len); static bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len);
int streamGetInit(SStreamState* pState, const char* funcName);
// |key|-----value------| // |key|-----value------|
// |key|ttl|len|userData| // |key|ttl|len|userData|
@ -551,14 +556,20 @@ typedef struct {
int32_t encodeValueFunc(void* value, int32_t vlen, int64_t ttl, char** dest) { int32_t encodeValueFunc(void* value, int32_t vlen, int64_t ttl, char** dest) {
SStreamValue key = {.unixTimestamp = ttl, .len = vlen, .data = (char*)(value)}; SStreamValue key = {.unixTimestamp = ttl, .len = vlen, .data = (char*)(value)};
int32_t len = 0;
char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len); if (*dest == NULL) {
char* buf = p; char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
int32_t len = 0; char* buf = p;
len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp); len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
len += taosEncodeFixedI32((void**)&buf, key.len); len += taosEncodeFixedI32((void**)&buf, key.len);
len += taosEncodeBinary((void**)&buf, (char*)value, vlen); len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
*dest = p; *dest = p;
} else {
char* buf = *dest;
len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
len += taosEncodeFixedI32((void**)&buf, key.len);
len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
}
return len; return len;
} }
/* /*
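encodeValueFunc() now writes into a caller-provided buffer when *dest is non-NULL and only falls back to heap allocation when it is NULL, which is what lets the batch flush reuse one scratch buffer per batch instead of allocating per value. A sketch of both call patterns, assuming the payload fits in 128 bytes:

```c
/* Sketch of the two encode paths, assuming vlen <= 128; the layout written
 * (i64 timestamp | i32 len | payload) is taken from the function above. */
static void encodeBothWays(void* val, int32_t vlen, int64_t ttl) {
  char*   owned = NULL;                           /* path 1: encoder allocates, caller frees */
  int32_t n1 = encodeValueFunc(val, vlen, ttl, &owned);
  taosMemoryFree(owned);

  char  scratch[sizeof(int64_t) + sizeof(int32_t) + 128] = {0};
  char* p = scratch;                              /* path 2: caller-owned buffer, no malloc  */
  int32_t n2 = encodeValueFunc(val, vlen, ttl, &p);
  (void)n1;
  (void)n2;
}
```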
@ -707,7 +718,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt); rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt);
params[i].tableOpt = tableOpt; params[i].tableOpt = tableOpt;
int idx = streamGetInit(NULL, funcname); int idx = streamStateGetCfIdx(NULL, funcname);
SCfInit* cfPara = &ginitDict[idx]; SCfInit* cfPara = &ginitDict[idx];
rocksdb_comparator_t* compare = rocksdb_comparator_t* compare =
@ -738,7 +749,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
char idstr[128] = {0}; char idstr[128] = {0};
sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId); sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId);
int idx = streamGetInit(NULL, funcname); int idx = streamStateGetCfIdx(NULL, funcname);
RocksdbCfInst* inst = NULL; RocksdbCfInst* inst = NULL;
RocksdbCfInst** pInst = taosHashGet(handle->cfInst, idstr, strlen(idstr) + 1); RocksdbCfInst** pInst = taosHashGet(handle->cfInst, idstr, strlen(idstr) + 1);
@ -803,7 +814,8 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
return 0; return 0;
} }
int streamStateOpenBackend(void* backend, SStreamState* pState) { int streamStateOpenBackend(void* backend, SStreamState* pState) {
qInfo("start to open backend, %p 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId); qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId);
taosAcquireRef(streamBackendId, pState->streamBackendRid);
SBackendHandle* handle = backend; SBackendHandle* handle = backend;
sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-%d", pState->streamId, pState->taskId); sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-%d", pState->streamId, pState->taskId);
@ -865,8 +877,8 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
taosThreadRwlockInit(&pState->pTdbState->rwLock, NULL); taosThreadRwlockInit(&pState->pTdbState->rwLock, NULL);
SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen}; SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen};
pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare); pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare);
// rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1); rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
qInfo("succ to open backend, %p, 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId); qInfo("succ to open state %p on backend, %p, 0x%" PRIx64 "-%d", pState, handle, pState->streamId, pState->taskId);
return 0; return 0;
} }
@ -882,8 +894,8 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
taosThreadMutexUnlock(&pHandle->cfMutex); taosThreadMutexUnlock(&pHandle->cfMutex);
char* status[] = {"close", "drop"}; char* status[] = {"close", "drop"};
qInfo("start to %s backend, %p, 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pState->streamId, qInfo("start to close %s state %p on backend %p 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pHandle,
pState->taskId); pState->streamId, pState->taskId);
if (pState->pTdbState->rocksdb == NULL) { if (pState->pTdbState->rocksdb == NULL) {
return; return;
} }
@ -938,6 +950,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
taosThreadRwlockDestroy(&pState->pTdbState->rwLock); taosThreadRwlockDestroy(&pState->pTdbState->rwLock);
pState->pTdbState->rocksdb = NULL; pState->pTdbState->rocksdb = NULL;
taosReleaseRef(streamBackendId, pState->streamBackendRid);
} }
void streamStateDestroyCompar(void* arg) { void streamStateDestroyCompar(void* arg) {
SCfComparator* comp = (SCfComparator*)arg; SCfComparator* comp = (SCfComparator*)arg;
@ -947,7 +960,7 @@ void streamStateDestroyCompar(void* arg) {
taosMemoryFree(comp->comp); taosMemoryFree(comp->comp);
} }
int streamGetInit(SStreamState* pState, const char* funcName) { int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
int idx = -1; int idx = -1;
size_t len = strlen(funcName); size_t len = strlen(funcName);
for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) { for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) {
@ -994,7 +1007,7 @@ bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len
} }
rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfName, rocksdb_snapshot_t** snapshot, rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfName, rocksdb_snapshot_t** snapshot,
rocksdb_readoptions_t** readOpt) { rocksdb_readoptions_t** readOpt) {
int idx = streamGetInit(pState, cfName); int idx = streamStateGetCfIdx(pState, cfName);
if (snapshot != NULL) { if (snapshot != NULL) {
*snapshot = (rocksdb_snapshot_t*)rocksdb_create_snapshot(pState->pTdbState->rocksdb); *snapshot = (rocksdb_snapshot_t*)rocksdb_create_snapshot(pState->pTdbState->rocksdb);
@ -1014,7 +1027,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \ code = 0; \
char buf[128] = {0}; \ char buf[128] = {0}; \
char* err = NULL; \ char* err = NULL; \
int i = streamGetInit(pState, funcname); \ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \ if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \ qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \ code = -1; \
@ -1045,7 +1058,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \ code = 0; \
char buf[128] = {0}; \ char buf[128] = {0}; \
char* err = NULL; \ char* err = NULL; \
int i = streamGetInit(pState, funcname); \ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \ if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \ qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \ code = -1; \
@ -1093,7 +1106,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \ code = 0; \
char buf[128] = {0}; \ char buf[128] = {0}; \
char* err = NULL; \ char* err = NULL; \
int i = streamGetInit(pState, funcname); \ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \ if (i < 0) { \
qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \ qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
code = -1; \ code = -1; \
@ -2033,7 +2046,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_
void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); } void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); }
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key, int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl) { void* val, int32_t vlen, int64_t ttl) {
int i = streamGetInit(pState, cfName); int i = streamStateGetCfIdx(pState, cfName);
if (i < 0) { if (i < 0) {
qError("streamState failed to put to cf name:%s", cfName); qError("streamState failed to put to cf name:%s", cfName);
@ -2049,6 +2062,21 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_wr
taosMemoryFree(ttlV); taosMemoryFree(ttlV);
return 0; return 0;
} }
int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl, void* tmpBuf) {
char buf[128] = {0};
int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf);
char* ttlV = tmpBuf;
int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(val, vlen, ttl, &ttlV);
rocksdb_column_family_handle_t* pCf = pState->pTdbState->pHandle[ginitDict[cfIdx].idx];
rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen);
if (tmpBuf == NULL) {
taosMemoryFree(ttlV);
}
return 0;
}
int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch) { int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch) {
char* err = NULL; char* err = NULL;
rocksdb_write(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, (rocksdb_writebatch_t*)pBatch, &err); rocksdb_write(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, (rocksdb_writebatch_t*)pBatch, &err);


@ -16,6 +16,13 @@
#include <util/ttimer.h> #include <util/ttimer.h>
#include "streamInc.h" #include "streamInc.h"
#define MAX_BLOCK_NAME_NUM 1024
typedef struct SBlockName {
uint32_t hashValue;
char parTbName[TSDB_TABLE_NAME_LEN];
} SBlockName;
static int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) { static int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1; if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@ -352,26 +359,46 @@ FAIL:
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz, int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
int64_t groupId) { int64_t groupId) {
char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); uint32_t hashValue = 0;
if (ctbName == NULL) {
return -1;
}
if (pDataBlock->info.parTbName[0]) {
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
} else {
char* ctbShortName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId);
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, ctbShortName);
taosMemoryFree(ctbShortName);
}
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
if (pTask->pNameMap == NULL) {
pTask->pNameMap = tSimpleHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
}
/*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/ void* pVal = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t));
SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo; if (pVal) {
uint32_t hashValue = SBlockName* pBln = (SBlockName*)pVal;
taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix); hashValue = pBln->hashValue;
taosMemoryFree(ctbName); if (!pDataBlock->info.parTbName[0]) {
memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName));
}
} else {
char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
if (ctbName == NULL) {
return -1;
}
if (pDataBlock->info.parTbName[0]) {
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
} else {
buildCtbNameByGroupIdImpl(pTask->shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName);
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
}
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
/*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
hashValue =
taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
taosMemoryFree(ctbName);
SBlockName bln = {0};
bln.hashValue = hashValue;
memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName));
if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) {
tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName));
}
}
bool found = false; bool found = false;
// TODO: optimize search // TODO: optimize search
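The rewrite above memoizes, per task, the vgroup hash and child-table name for each groupId in a small tSimpleHash map, so repeated blocks from the same group skip rebuilding the fully-qualified table name and rehashing it; MAX_BLOCK_NAME_NUM bounds the map, and it is released in tFreeStreamTask() later in this diff. A condensed sketch of the lookup path, where computeVgHash() is a hypothetical stand-in for building "db.childtable" and calling taosGetTbHashVal():

```c
/* Condensed sketch of the cache added above; computeVgHash() is hypothetical. */
extern uint32_t computeVgHash(SStreamTask* pTask, SSDataBlock* pBlock, int64_t groupId);

static uint32_t lookupGroupHash(SStreamTask* pTask, SSDataBlock* pBlock, int64_t groupId) {
  if (pTask->pNameMap == NULL) {
    pTask->pNameMap = tSimpleHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
  }
  SBlockName* pBln = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t));
  if (pBln != NULL) {                                             /* hit: no name rebuild, no rehash */
    return pBln->hashValue;
  }
  SBlockName bln = {.hashValue = computeVgHash(pTask, pBlock, groupId)};
  memcpy(bln.parTbName, pBlock->info.parTbName, strlen(pBlock->info.parTbName));
  if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) {  /* bounded cache */
    tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName));
  }
  return bln.hashValue;
}
```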


@ -20,7 +20,7 @@
#include "ttimer.h" #include "ttimer.h"
static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT; static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT;
static int32_t streamBackendId = 0; int32_t streamBackendId = 0;
static void streamMetaEnvInit() { streamBackendId = taosOpenRef(20, streamBackendCleanup); } static void streamMetaEnvInit() { streamBackendId = taosOpenRef(20, streamBackendCleanup); }
void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); } void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); }
@ -79,7 +79,6 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
pMeta->vgId = vgId; pMeta->vgId = vgId;
pMeta->ahandle = ahandle; pMeta->ahandle = ahandle;
pMeta->expandFunc = expandFunc; pMeta->expandFunc = expandFunc;
pMeta->streamBackendId = streamBackendId;
memset(streamPath, 0, len); memset(streamPath, 0, len);
sprintf(streamPath, "%s/%s", pMeta->path, "state"); sprintf(streamPath, "%s/%s", pMeta->path, "state");


@ -106,7 +106,7 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
} }
SStreamTask* pStreamTask = pTask; SStreamTask* pStreamTask = pTask;
char statePath[1024]; char statePath[1024];
if (!specPath) { if (!specPath) {
sprintf(statePath, "%s/%d", path, pStreamTask->id.taskId); sprintf(statePath, "%s/%d", path, pStreamTask->id.taskId);
} else { } else {
@ -119,10 +119,10 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
#ifdef USE_ROCKSDB #ifdef USE_ROCKSDB
SStreamMeta* pMeta = pStreamTask->pMeta; SStreamMeta* pMeta = pStreamTask->pMeta;
taosAcquireRef(pMeta->streamBackendId, pMeta->streamBackendRid); pState->streamBackendRid = pMeta->streamBackendRid;
int code = streamStateOpenBackend(pMeta->streamBackend, pState); int code = streamStateOpenBackend(pMeta->streamBackend, pState);
if (code == -1) { if (code == -1) {
taosReleaseRef(pMeta->streamBackendId, pMeta->streamBackendRid); taosReleaseRef(streamBackendId, pMeta->streamBackendRid);
taosMemoryFree(pState); taosMemoryFree(pState);
pState = NULL; pState = NULL;
} }
@ -222,9 +222,7 @@ _err:
void streamStateClose(SStreamState* pState, bool remove) { void streamStateClose(SStreamState* pState, bool remove) {
SStreamTask* pTask = pState->pTdbState->pOwner; SStreamTask* pTask = pState->pTdbState->pOwner;
#ifdef USE_ROCKSDB #ifdef USE_ROCKSDB
// streamStateCloseBackend(pState);
streamStateDestroy(pState, remove); streamStateDestroy(pState, remove);
taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid);
#else #else
tdbCommit(pState->pTdbState->db, pState->pTdbState->txn); tdbCommit(pState->pTdbState->db, pState->pTdbState->txn);
tdbPostCommit(pState->pTdbState->db, pState->pTdbState->txn); tdbPostCommit(pState->pTdbState->db, pState->pTdbState->txn);
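Taken together, these hunks move backend lifetime onto the taosOpenRef/taosAcquireRef/taosReleaseRef reference set: streamBackendId is now a module-level ref set, every open SStreamState pins the shared RocksDB backend through streamBackendRid, and streamBackendCleanup() only runs once the meta and all states have released their references. A minimal pairing sketch with the same names (error handling, including the release on a failed open, omitted):

```c
/* Minimal sketch of the acquire/release pairing implied by the hunks above. */
static void metaModuleInitSketch(void) {
  streamBackendId = taosOpenRef(20, streamBackendCleanup);     /* ref set + destructor           */
}

static int32_t stateOpenSketch(SStreamMeta* pMeta, SStreamState* pState) {
  pState->streamBackendRid = pMeta->streamBackendRid;          /* remember which backend we pin  */
  taosAcquireRef(streamBackendId, pState->streamBackendRid);   /* done in streamStateOpenBackend */
  /* ... open this state's column families on pMeta->streamBackend ... */
  return 0;
}

static void stateCloseSketch(SStreamState* pState) {
  taosReleaseRef(streamBackendId, pState->streamBackendRid);   /* done in streamStateCloseBackend;
                                                                  last release frees the backend */
}
```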
@ -278,10 +276,10 @@ int32_t streamStateCommit(SStreamState* pState) {
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
#ifdef USE_ROCKSDB #ifdef USE_ROCKSDB
void* pVal = NULL; void* pVal = NULL;
int32_t len = 0; int32_t len = 0;
int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len); int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len);
char* buf = ((SRowBuffPos*)pVal)->pRowBuff; char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState); uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
memcpy(buf + len - rowSize, value, vLen); memcpy(buf + len - rowSize, value, vLen);
return code; return code;
@ -291,10 +289,10 @@ int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void*
} }
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen) { int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen) {
#ifdef USE_ROCKSDB #ifdef USE_ROCKSDB
void* pVal = NULL; void* pVal = NULL;
int32_t len = 0; int32_t len = 0;
int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len); int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len);
char* buf = ((SRowBuffPos*)pVal)->pRowBuff; char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState); uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
*ppVal = buf + len - rowSize; *ppVal = buf + len - rowSize;
return code; return code;


@ -239,5 +239,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
taosMemoryFree((void*)pTask->id.idStr); taosMemoryFree((void*)pTask->id.idStr);
} }
if (pTask->pNameMap) {
tSimpleHashCleanup(pTask->pNameMap);
}
taosMemoryFree(pTask); taosMemoryFree(pTask);
} }


@ -365,6 +365,11 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
int32_t numOfElems = listNEles(pSnapshot); int32_t numOfElems = listNEles(pSnapshot);
SListNode* pNode = NULL; SListNode* pNode = NULL;
int idx = streamStateGetCfIdx(pFileState->pFileStore, "state");
int32_t len = pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 1;
char* buf = taosMemoryCalloc(1, len);
void* batch = streamStateCreateBatch(); void* batch = streamStateCreateBatch();
while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) { while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) {
SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data; SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
@ -376,10 +381,13 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
} }
SStateKey sKey = {.key = *((SWinKey*)pPos->pKey), .opNum = ((SStreamState*)pFileState->pFileStore)->number}; SStateKey sKey = {.key = *((SWinKey*)pPos->pKey), .opNum = ((SStreamState*)pFileState->pFileStore)->number};
code = streamStatePutBatch(pFileState->pFileStore, "state", batch, &sKey, pPos->pRowBuff, pFileState->rowSize, 0); code = streamStatePutBatchOptimize(pFileState->pFileStore, idx, batch, &sKey, pPos->pRowBuff, pFileState->rowSize,
0, buf);
// todo handle failure // todo handle failure
// qDebug("===stream===put %" PRId64 " to disc, code:%d, size:%d", sKey.key.ts, code, pFileState->rowSize); memset(buf, 0, len);
// qDebug("===stream===put %" PRId64 " to disc, res %d", sKey.key.ts, code);
} }
taosMemoryFree(buf);
if (streamStateGetBatchSize(batch) > 0) { if (streamStateGetBatchSize(batch) > 0) {
code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch); code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch);
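flushSnapshot() now resolves the "state" column-family index once and reuses a single scratch buffer for every row it batches; the buffer size mirrors what the value encoder prepends to each payload. A small helper spelling out that arithmetic (an assumption drawn from the encoder, not a function in this diff):

```c
/* Assumed breakdown of the scratch-buffer size used above: 8-byte timestamp/ttl
 * plus a 4-byte length prefix plus the fixed row payload, with one spare byte. */
static int32_t encodedRowBufLen(int32_t rowSize) {
  return rowSize + (int32_t)sizeof(uint64_t) + (int32_t)sizeof(int32_t) + 1;
}
```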
@ -442,7 +450,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
if (code != 0 || len == 0 || val == NULL) { if (code != 0 || len == 0 || val == NULL) {
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
memcpy(val, buf, len); memcpy(buf, val, len);
buf[len] = 0; buf[len] = 0;
maxCheckPointId = atol((char*)buf); maxCheckPointId = atol((char*)buf);
taosMemoryFree(val); taosMemoryFree(val);
@ -456,7 +464,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
if (code != 0) { if (code != 0) {
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
memcpy(val, buf, len); memcpy(buf, val, len);
buf[len] = 0; buf[len] = 0;
taosMemoryFree(val); taosMemoryFree(val);
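The two one-line changes above fix a swapped memcpy: the bytes fetched from the backend (val, len) have to be copied into the local parse buffer, not the other way around, before the buffer is NUL-terminated and parsed. Reduced to a sketch with the same names and the same 128-byte buffer assumption as the original code:

```c
/* Sketch of the corrected copy direction; assumes len < 128, as the original
 * buffer does. */
static int64_t parseCheckpointId(const void* val, int32_t len) {
  char buf[128] = {0};
  memcpy(buf, val, len);     /* dest = local buffer, src = fetched bytes */
  buf[len] = 0;
  return atol(buf);
}
```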


@ -542,7 +542,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TIMELINE_FUNC, "Invalid timeline fu
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PASSWD, "Invalid password") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PASSWD, "Invalid password")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid alter table statement") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid alter table statement")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY, "Primary timestamp column cannot be dropped") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY, "Primary timestamp column cannot be dropped")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_MODIFY_COL, "Only binary/nchar column length could be modified, and the length can only be increased, not decreased") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_MODIFY_COL, "Only binary/nchar/geometry column length could be modified, and the length can only be increased, not decreased")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TBNAME, "Invalid tbname pseudo column") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TBNAME, "Invalid tbname pseudo column")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FUNCTION_NAME, "Invalid function name") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FUNCTION_NAME, "Invalid function name")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_COMMENT_TOO_LONG, "Comment too long") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_COMMENT_TOO_LONG, "Comment too long")


@ -1,36 +1,11 @@
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start
sleep 50 sleep 50
sql connect sql connect
sql create dnode $hostname2 port 7200
system sh/exec.sh -n dnode2 -s start
print ===== step1
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 2 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
if $data(2)[4] != ready then
goto step1
endi
print ===== step2 print ===== step2
sql drop stream if exists stream_t1; sql drop stream if exists stream_t1;
@ -248,10 +223,56 @@ sql insert into ts3 values(1648791223002,2,2,3,1.1);
sql insert into ts4 values(1648791233003,3,2,3,2.1); sql insert into ts4 values(1648791233003,3,2,3,2.1);
sql insert into ts3 values(1648791243004,4,2,43,73.1); sql insert into ts3 values(1648791243004,4,2,43,73.1);
sql insert into ts4 values(1648791213002,24,22,23,4.1); sql insert into ts4 values(1648791213002,24,22,23,4.1);
$loop_count = 0
loop032:
$loop_count = $loop_count + 1
if $loop_count == 30 then
return -1
endi
sleep 1000
print 6-0 select * from streamtST1;
sql select * from streamtST1;
if $rows != 4 then
print =====rows=$rows
goto loop032
endi
if $data01 != 8 then
print =6====data01=$data01
goto loop032
endi
sql insert into ts3 values(1648791243005,4,20,3,3.1); sql insert into ts3 values(1648791243005,4,20,3,3.1);
sql insert into ts4 values(1648791243006,4,2,3,3.1) (1648791243007,4,2,3,3.1) ; sql insert into ts4 values(1648791243006,4,2,3,3.1) (1648791243007,4,2,3,3.1) ;
sql insert into ts3 values(1648791243008,4,2,30,3.1) (1648791243009,4,2,3,3.1) (1648791243010,4,2,3,3.1) ; sql insert into ts3 values(1648791243008,4,2,30,3.1) (1648791243009,4,2,3,3.1) (1648791243010,4,2,3,3.1) ;
sql insert into ts4 values(1648791243011,4,2,3,3.1) (1648791243012,34,32,33,3.1) (1648791243013,4,2,3,3.1) (1648791243014,4,2,13,3.1); sql insert into ts4 values(1648791243011,4,2,3,3.1) (1648791243012,34,32,33,3.1) (1648791243013,4,2,3,3.1) (1648791243014,4,2,13,3.1);
$loop_count = 0
loop033:
$loop_count = $loop_count + 1
if $loop_count == 30 then
return -1
endi
sleep 1000
print 6-1 select * from streamtST1;
sql select * from streamtST1;
if $rows != 4 then
print =====rows=$rows
goto loop033
endi
if $data01 != 8 then
print =6====data01=$data01
goto loop033
endi
sql insert into ts3 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ; sql insert into ts3 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
$loop_count = 0 $loop_count = 0


@ -132,12 +132,12 @@ if $loop_count == 10 then
return -1 return -1
endi endi
if $data01 != 1 then if $data01 != 2 then
print =====data01=$data01 print =====data01=$data01
goto loop4 goto loop4
endi endi
if $data02 != 1 then if $data02 != 2 then
print =====data02=$data02 print =====data02=$data02
goto loop4 goto loop4
endi endi

Some files were not shown because too many files have changed in this diff.