diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index d951923de5..578f38e73d 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
+ def assignment(self):
+ pass
+
+ def seek(self, partition):
+ pass
+
def close(self):
pass
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index db49e5f395..9c5a852c70 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -32,25 +32,22 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela
Native connections are supported on the same platforms as the TDengine client driver.
REST connection supports all platforms that can run Java.
-## Version support
-
-Please refer to [version support list](/reference/connector#version-support)
-
## Recent update logs
-| taos-jdbcdriver version | major changes |
-| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
-| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
-| 3.2.0 | This version has been deprecated |
-| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
-| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
-| 3.0.0 | Support for TDengine 3.0 |
-| 2.0.42 | fix wasNull interface return value in WebSocket connection |
-| 2.0.41 | fix decode method of username and password in REST connection |
-| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
-| 2.0.38 | JDBC REST connections add bulk pull function |
-| 2.0.37 | Support json tags |
-| 2.0.36 | Support schemaless writing |
+| taos-jdbcdriver version | major changes | TDengine version |
+| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
+| 3.2.2 | subscription adds the seek function | 3.0.5.0 or later |
+| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
+| 3.2.0 | This version has been deprecated | - |
+| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
+| 3.0.1 - 3.0.4 | fix the resultSet data being parsed incorrectly in some cases. 3.0.1 is compiled on JDK 11; other versions are recommended in a JDK 8 environment | - |
+| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
+| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
+| 2.0.41 | fix decode method of username and password in REST connection | - |
+| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
+| 2.0.38 | JDBC REST connections add bulk pull function | - |
+| 2.0.37 | Support json tags | - |
+| 2.0.36 | Support schemaless writing | - |
**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
@@ -102,6 +99,8 @@ For specific error codes, please refer to.
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
+| 0x231d | can't create connection with server within | Increase the connection timeout by adding the httpConnectTimeout parameter, or check the connection to taosAdapter. See the example after this table. |
+| 0x231e | failed to complete the task within the specified time | Increase the execution timeout by adding the messageWaitTimeout parameter, or check the connection to taosAdapter. |
| 0x2350 | unknown error | Unknown exception, please return to the developer on github. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
@@ -117,8 +116,8 @@ For specific error codes, please refer to.
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
-| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
-| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
+| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter. |
+| 0x237a | vGroup not found in result set | The vGroup is not assigned to the current consumer; because of the rebalance mechanism, consumers and vGroups are not statically bound. |
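+
+For example, when error 0x231d or 0x231e occurs, the two timeouts above can be enlarged when opening the connection. A minimal sketch, assuming both `httpConnectTimeout` and `messageWaitTimeout` are accepted as URL parameters in milliseconds:
+
+```java
+// Open a REST connection with larger connect/message-wait timeouts.
+String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata"
+        + "&httpConnectTimeout=60000&messageWaitTimeout=60000";
+try (Connection connection = DriverManager.getConnection(url)) {
+    // execute statements here
+}
+```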
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
@@ -169,7 +168,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
com.taosdata.jdbc
taos-jdbcdriver
- 3.2.1
+ 3.2.2
```
@@ -913,14 +912,15 @@ public class SchemalessWsTest {
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
- Connection connection = DriverManager.getConnection(url);
- init(connection);
+ try(Connection connection = DriverManager.getConnection(url)){
+ init(connection);
- SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
- writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
- writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
- writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
- System.exit(0);
+ try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
+ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
+ writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
+ writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
+ }
+ }
}
private static void init(Connection connection) throws SQLException {
@@ -991,6 +991,17 @@ while(true) {
`poll` obtains one message each time it is run.
+#### Specify the subscription offset
+
+```java
+long position(TopicPartition partition) throws SQLException;
+Map position(String topic) throws SQLException;
+Map beginningOffsets(String topic) throws SQLException;
+Map endOffsets(String topic) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+```
+
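+A minimal usage sketch of the interfaces above, assuming the consumer is already subscribed to a topic named `topic_meters` and that the returned maps are keyed by `TopicPartition` with `Long` offset values:
+
+```java
+// Rewind every partition of the topic to its earliest offset, then print the new position.
+Map<TopicPartition, Long> begin = consumer.beginningOffsets("topic_meters");
+for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
+    consumer.seek(entry.getKey(), entry.getValue());
+    System.out.println(entry.getKey() + " position: " + consumer.position(entry.getKey()));
+}
+```
+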
#### Close subscriptions
```java
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
index f32e32f2ad..344bd3590e 100644
--- a/docs/en/14-reference/03-connector/06-rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -31,7 +31,7 @@ Websocket connections are supported on all platforms that can run Go.
| connector-rust version | TDengine version | major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
-| v0.8.8 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
+| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index b263af8ea6..6bd02644d4 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -453,6 +453,170 @@ As the way to connect introduced above but add `req_id` argument.
+### Subscription
+
+The connector supports data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
+
+
+
+
+The `Consumer` class in the connector provides the data subscription API.
+
+#### Create Consumer
+
+The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription API parameters, please refer to [Data Subscription](../../../develop/tmq/).
+
+```python
+from taos.tmq import Consumer
+
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
+#### Subscribe to topics
+
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume
+
+The `poll` function is used to consume data from TMQ. It takes a float parameter representing the timeout in seconds. It returns a `Message` if one is received before the timeout, or `None` if it times out. You must check the error information in the returned message.
+
+```python
+while True:
+ res = consumer.poll(1)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
+```
+
+#### Assignment
+
+The `assignment` function returns the consumption progress of all topics subscribed by the consumer, as a list of `TopicPartition` objects.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Seek
+
+The `seek` function resets the consumption position to a specified offset; its parameter is a `TopicPartition`.
+
+```python
+from taos.tmq import TopicPartition
+
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
+```
+
+#### After consuming data
+
+You should unsubscribe from the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### TMQ subscription example
+
+```python
+{{#include docs/examples/python/tmq_example.py}}
+```
+
+#### Assignment and seek example
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+```
+
+
+
+
+
+In addition to native connections, the connector also supports subscriptions over WebSocket.
+
+#### Create Consumer
+
+The syntax for creating a consumer is `consumer = Consumer(conf=configs)`. You need to set the `td.connect.websocket.scheme` parameter to "ws" in the configuration. For more subscription API parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
+
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+```
+
+#### Subscribe to topics
+
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume
+
+The `poll` function is used to consume data from TMQ. It takes a float parameter representing the timeout in seconds. It returns a `Message` if one is received before the timeout, or `None` if it times out. You must check the error information in the returned message.
+
+```python
+while True:
+ res = consumer.poll(timeout=1.0)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ for block in res:
+ for row in block:
+ print(row)
+```
+
+#### Assignment
+
+The `assignment` function returns the consumption progress of all topics subscribed by the consumer.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Seek
+
+The `seek` function resets the consumption position to a specified offset.
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
+```
+
+#### After consuming data
+
+You should unsubscribe from the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### Subscription example
+
+```python
+{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+
+#### Assignment and seek example
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+```
+
+
+
+
### Schemaless Insert
Connector support schemaless insert.
@@ -507,7 +671,8 @@ Insert with req_id argument
| Example program links | Example program content |
| ------------------------------------------------------------------------------------------------------------- | ------------------- ---- |
-| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
+| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | bind_row.py
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 52ded6208a..2d290a5f49 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -5,7 +5,7 @@ description: This document describes the configuration parameters for the TDengi
## Configuration File on Server Side
-On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
+On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in the file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos` on Linux systems and `C:\TDengine` on Windows systems. The location of the configuration file can be specified with the `-c` parameter on the `taosd` CLI. For example, on Linux the configuration file can be put under `/home/user` and used as below:
```
taosd -c /home/user
diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
index 1fc2b57a13..d40efc702c 100644
--- a/docs/en/20-third-party/11-kafka.md
+++ b/docs/en/20-third-party/11-kafka.md
@@ -27,11 +27,11 @@ TDengine Source Connector is used to read data from TDengine in real-time and se
Execute in any directory:
-````
+```shell
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
-````
+```
Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
@@ -181,7 +181,7 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
Use kafka-console-producer to write test data to the topic `meters`.
-```
+```shell
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```
@@ -215,7 +215,7 @@ The role of the TDengine Source Connector is to push all the data of a specific
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
-The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
+The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.
### Add Source Connector configuration file
@@ -237,10 +237,13 @@ Input following content:
"connection.database": "test",
"connection.attempts": 3,
"connection.backoff.ms": 5000,
- "topic.prefix": "tdengine-source",
+ "topic.prefix": "tdengine",
+ "topic.delimiter": "-",
"poll.interval.ms": 1000,
"fetch.max.rows": 100,
"topic.per.stable": true,
+ "topic.ignore.db": false,
+ "out.format": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
}
@@ -281,10 +284,10 @@ curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-T
### View topic data
-Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
+Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-test-meters. In the beginning, all historical data will be output. After two new rows of data are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
````shell
-kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
````
output:
@@ -356,8 +359,11 @@ The following configuration items apply to TDengine Sink Connector and TDengine
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
-6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
-7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `--`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `-`.
+6. `query.interval.ms`: The time span of data read from TDengine in each query, in milliseconds. It should be adjusted according to the data ingestion rate. The default value is 0, which means all data up to the current time is retrieved.
+7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is JSON. The default is `line`.
+8. `topic.per.stable`: If it's set to true, one supertable in TDengine corresponds to one topic in Kafka and the topic naming rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if it's set to false, the whole DB corresponds to one topic in Kafka and the topic naming rule is `<topic.prefix><topic.delimiter><connection.database>`.
+9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix><topic.delimiter><stable.name>`, false indicates that the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`, and the default is false. Does not take effect when `topic.per.stable` is set to false.
+10. `topic.delimiter`: topic name delimiter, default is `-`. For example, with the sample configuration above (`topic.prefix` = `tdengine`, `topic.delimiter` = `-`, database `test`, supertable `meters`), the resulting topic name is `tdengine-test-meters`.
## Other notes
diff --git a/docs/examples/python/tmq_assignment_example.py b/docs/examples/python/tmq_assignment_example.py
new file mode 100644
index 0000000000..a07347a9b9
--- /dev/null
+++ b/docs/examples/python/tmq_assignment_example.py
@@ -0,0 +1,58 @@
+import taos
+from taos.tmq import Consumer
+
+
+def prepare():
+ conn = taos.connect()
+ conn.execute("drop topic if exists tmq_assignment_demo_topic")
+ conn.execute("drop database if exists tmq_assignment_demo_db")
+ conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+ conn.select_db("tmq_assignment_demo_db")
+ conn.execute(
+ "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+ conn.execute(
+ "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+
+def taos_get_assignment_and_seek_demo():
+ prepare()
+ consumer = Consumer(
+ {
+ "group.id": "0",
+ # should disable snapshot,
+ # otherwise it will cause invalid params error
+ "experimental.snapshot.enable": "false",
+ }
+ )
+ consumer.subscribe(["tmq_assignment_demo_topic"])
+
+ # get topic assignment
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment)
+
+ # poll
+ consumer.poll(1)
+ consumer.poll(1)
+
+ # get topic assignment again
+ after_poll_assignments = consumer.assignment()
+ for assignment in after_poll_assignments:
+ print(assignment)
+
+ # seek to the beginning
+ for assignment in assignments:
+ consumer.seek(assignment)
+
+ # now the assignment should be the same as before poll
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment)
+
+
+if __name__ == '__main__':
+ taos_get_assignment_and_seek_demo()
diff --git a/docs/examples/python/tmq_websocket_assgnment_example.py b/docs/examples/python/tmq_websocket_assgnment_example.py
new file mode 100644
index 0000000000..0f8e4a2804
--- /dev/null
+++ b/docs/examples/python/tmq_websocket_assgnment_example.py
@@ -0,0 +1,57 @@
+import taos
+import taosws
+
+
+def prepare():
+ conn = taos.connect()
+ conn.execute("drop topic if exists tmq_assignment_demo_topic")
+ conn.execute("drop database if exists tmq_assignment_demo_db")
+ conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+ conn.select_db("tmq_assignment_demo_db")
+ conn.execute(
+ "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+ conn.execute(
+ "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+
+def taosws_get_assignment_and_seek_demo():
+ prepare()
+ consumer = taosws.Consumer(conf={
+ "td.connect.websocket.scheme": "ws",
+ # should disable snapshot,
+ # otherwise it will cause invalid params error
+ "experimental.snapshot.enable": "false",
+ "group.id": "0",
+ })
+ consumer.subscribe(["tmq_assignment_demo_topic"])
+
+ # get topic assignment
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment.to_string())
+
+ # poll
+ consumer.poll(1)
+ consumer.poll(1)
+
+ # get topic assignment again
+ after_poll_assignments = consumer.assignment()
+ for assignment in after_poll_assignments:
+ print(assignment.to_string())
+
+ # seek to the beginning
+ for assignment in assignments:
+ for a in assignment.assignments():
+ consumer.seek(assignment.topic(), a.vg_id(), a.offset())
+
+ # now the assignment should be the same as before poll
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment.to_string())
+
+
+if __name__ == '__main__':
+ taosws_get_assignment_and_seek_demo()
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index bfea926f53..a87a1f64f8 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
+ def assignment(self):
+ pass
+
+ def seek(self, partition):
+ pass
+
def close(self):
pass
diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx
index 46800226d7..1588159b57 100644
--- a/docs/zh/08-connector/14-java.mdx
+++ b/docs/zh/08-connector/14-java.mdx
@@ -32,25 +32,22 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
REST 连接支持所有能运行 Java 的平台。
-## 版本支持
+## 版本历史
-请参考[版本支持列表](../#版本支持)
-
-## 最近更新记录
-
-| taos-jdbcdriver 版本 | 主要变化 |
-| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: |
-| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 |
-| 3.2.0 | 存在连接问题,不推荐使用 |
-| 3.1.0 | WebSocket 连接支持订阅功能 |
-| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 |
-| 3.0.0 | 支持 TDengine 3.0 |
-| 2.0.42 | 修在 WebSocket 连接中 wasNull 接口返回值 |
-| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
-| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
-| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
-| 2.0.37 | 增加对 json tag 支持 |
-| 2.0.36 | 增加对 schemaless 写入支持 |
+| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
+| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
+| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
+| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 | 3.0.3.0 及更高版本 |
+| 3.2.0 | 存在连接问题,不推荐使用 | - |
+| 3.1.0 | WebSocket 连接支持订阅功能 | - |
+| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 | - |
+| 3.0.0 | 支持 TDengine 3.0 | 3.0.0.0 及更高版本 |
+| 2.0.42 | 修正 WebSocket 连接中 wasNull 接口返回值 | - |
+| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 | - |
+| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 | - |
+| 2.0.38 | JDBC REST 连接增加批量拉取功能 | - |
+| 2.0.37 | 增加对 json tag 支持 | - |
+| 2.0.36 | 增加对 schemaless 写入支持 | - |
**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。
@@ -80,45 +77,47 @@ JDBC 连接器可能报错的错误码包括 4 种:
具体的错误码请参考:
-| Error Code | Description | Suggested Actions |
-| ---------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
-| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
-| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
-| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
-| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
-| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
-| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
-| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
-| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
-| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
-| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
-| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
-| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
-| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
-| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
-| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
-| 0x2317 | | REST 连接中使用了错误的请求类型。 |
-| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
-| 0x2319 | user is required | 创建连接时缺少用户名信息 |
-| 0x231a | password is required | 创建连接时缺少密码信息 |
-| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
-| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
-| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
-| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
-| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
-| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
-| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
-| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
-| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
-| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
-| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
-| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
-| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
-| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
-| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
-| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
-| - | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
-| - | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
+| Error Code | Description | Suggested Actions |
+| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
+| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
+| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
+| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
+| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
+| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
+| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
+| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
+| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
+| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
+| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
+| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
+| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
+| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
+| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
+| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
+| 0x2317 | | REST 连接中使用了错误的请求类型。 |
+| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
+| 0x2319 | user is required | 创建连接时缺少用户名信息 |
+| 0x231a | password is required | 创建连接时缺少密码信息 |
+| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
+| 0x231d | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况(参见表格下方示例)。 |
+| 0x231e | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
+| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
+| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
+| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
+| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
+| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
+| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
+| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
+| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
+| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
+| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
+| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
+| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
+| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
+| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
+| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
+| 0x2379 | seek offset must not be a negative number | seek 接口参数不能为负值,请使用正确的参数 |
+| 0x237a | vGroup not found in result set | VGroup 没有分配给当前 consumer,由于 Rebalance 机制导致 Consumer 与 VGroup 不是绑定的关系 |
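+
+例如,遇到 0x231d 或 0x231e 错误时,可以在建立连接时调大上述两个超时参数。以下为一个示意写法(假设 `httpConnectTimeout` 与 `messageWaitTimeout` 均可作为 URL 参数传入,单位为毫秒):
+
+```java
+// 以更大的连接超时与消息等待超时建立 REST 连接
+String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata"
+        + "&httpConnectTimeout=60000&messageWaitTimeout=60000";
+try (Connection connection = DriverManager.getConnection(url)) {
+    // 在此执行 SQL
+}
+```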
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
@@ -169,7 +168,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
com.taosdata.jdbc
taos-jdbcdriver
- 3.2.1
+ 3.2.2
```
@@ -916,14 +915,15 @@ public class SchemalessWsTest {
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
- Connection connection = DriverManager.getConnection(url);
- init(connection);
+ try(Connection connection = DriverManager.getConnection(url)){
+ init(connection);
- SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
- writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
- writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
- writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
- System.exit(0);
+ try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
+ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
+ writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
+ writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
+ }
+ }
}
private static void init(Connection connection) throws SQLException {
@@ -994,6 +994,17 @@ while(true) {
`poll` 每次调用获取一个消息。
+#### 指定订阅 Offset
+
+```java
+long position(TopicPartition partition) throws SQLException;
+Map position(String topic) throws SQLException;
+Map beginningOffsets(String topic) throws SQLException;
+Map endOffsets(String topic) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+```
+
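+上述接口的一个最小使用示意(假设 consumer 已订阅名为 `topic_meters` 的 topic,且返回的 Map 以 `TopicPartition` 为 key、`Long` 类型的 offset 为 value):
+
+```java
+// 将该 topic 的每个分区回退到最早的 offset,并打印当前消费位置
+Map<TopicPartition, Long> begin = consumer.beginningOffsets("topic_meters");
+for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
+    consumer.seek(entry.getKey(), entry.getValue());
+    System.out.println(entry.getKey() + " position: " + consumer.position(entry.getKey()));
+}
+```
+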
#### 关闭订阅
```java
diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx
index a02757b14e..c23228c8cf 100644
--- a/docs/zh/08-connector/26-rust.mdx
+++ b/docs/zh/08-connector/26-rust.mdx
@@ -30,7 +30,7 @@ Websocket 连接支持所有能运行 Rust 的平台。
| Rust 连接器版本 | TDengine 版本 | 主要功能 |
| :----------------: | :--------------: | :--------------------------------------------------: |
-| v0.8.8 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
+| v0.8.10 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
| v0.6.0 | 3.0.0.0 | 基础功能。 |
diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx
index 1037d66f17..10fb2238ee 100644
--- a/docs/zh/08-connector/30-python.mdx
+++ b/docs/zh/08-connector/30-python.mdx
@@ -456,27 +456,169 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
### 数据订阅
-连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅](../../develop/tmq/)。
+连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅文档](../../develop/tmq/)。
-`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API,相关 API 定义请参考 [数据订阅文档](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api)。
+`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API。
+
+#### 创建 Consumer
+
+创建 Consumer 语法为 `consumer = Consumer(configs)`,参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
+
+```python
+from taos.tmq import Consumer
+
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
+#### 订阅 topics
+
+Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### 消费数据
+
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+
+```python
+while True:
+ res = consumer.poll(1)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
+```
+
+#### 获取消费进度
+
+Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+
+```python
+assignments = consumer.assignment()
+```
+
+#### 重置消费进度
+
+Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置,方法参数类型为 TopicPartition。
+
+```python
+from taos.tmq import TopicPartition
+
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
+```
+
+#### 结束消费
+
+消费结束后,应当取消订阅,并关闭 Consumer。
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### tmq 订阅示例代码
```python
{{#include docs/examples/python/tmq_example.py}}
```
+#### 获取和重置消费进度示例代码
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+```
+
-除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据。
+除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据,使用 websocket 方式订阅 TMQ 数据需要安装 `taos-ws-py`。
+
+taosws `Consumer` API 提供了基于 Websocket 订阅 TMQ 数据的 API。
+
+#### 创建 Consumer
+
+创建 Consumer 语法为 `consumer = Consumer(conf=configs)`,使用时需要指定 `td.connect.websocket.scheme` 参数值为 "ws",参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
+
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+```
+
+#### 订阅 topics
+
+Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### 消费数据
+
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+
+```python
+while True:
+ res = consumer.poll(timeout=1.0)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ for block in res:
+ for row in block:
+ print(row)
+```
+
+#### 获取消费进度
+
+Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+
+```python
+assignments = consumer.assignment()
+```
+
+#### 重置消费进度
+
+Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置。
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
+```
+
+#### 结束消费
+
+消费结束后,应当取消订阅,并关闭 Consumer。
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### tmq 订阅示例代码
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
+连接器提供了 `assignment` 接口,用于获取订阅 topic 的消费进度;并提供 `seek` 接口,用于重置 topic 的消费进度。
+
+#### 获取和重置消费进度示例代码
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+```
+
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 68f44d1e65..115d0ca2c7 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -5,7 +5,7 @@ description: "TDengine 客户端和服务配置列表"
## 为服务端指定配置文件
-TDengine 系统后台服务由 taosd 提供,可以在配置文件 taos.cfg 里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos 目录,可以通过 taosd 命令行执行参数 -c 指定配置文件目录。比如,指定配置文件位于`/home/user` 这个目录:
+TDengine 系统后台服务由 taosd 提供,可以在配置文件 taos.cfg 里修改配置参数,以满足不同场景的需求。在 Linux 系统上,配置文件的缺省位置在 `/etc/taos` 目录,在 Windows 系统上缺省位置在 `C:\TDengine` 。可以通过 taosd 命令行执行参数 -c 指定配置文件所在目录。比如,在 Linux 系统上可以指定配置文件位于 `/home/user` 这个目录:
```
taosd -c /home/user
diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md
index 641e2d5174..76e546c345 100644
--- a/docs/zh/20-third-party/11-kafka.md
+++ b/docs/zh/20-third-party/11-kafka.md
@@ -214,7 +214,7 @@ TDengine Source Connector 的作用是将 TDengine 某个数据库某一时刻
TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [InfluxDB Line 协议格式](/develop/insert-data/influxdb-line/) 或 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json), 然后写入 Kafka。
-下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
+下面的示例程序同步数据库 test 中的数据到主题 tdengine-test-meters。
### 添加 Source Connector 配置文件
@@ -236,10 +236,13 @@ vi source-demo.json
"connection.database": "test",
"connection.attempts": 3,
"connection.backoff.ms": 5000,
- "topic.prefix": "tdengine-source",
+ "topic.prefix": "tdengine",
+ "topic.delimiter": "-",
"poll.interval.ms": 1000,
"fetch.max.rows": 100,
"topic.per.stable": true,
+ "topic.ignore.db": false,
+ "out.format": "line",
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
}
@@ -280,10 +283,10 @@ curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-T
### 查看 topic 数据
-使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
+使用 kafka-console-consumer 命令行工具监控主题 tdengine-test-meters 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
```shell
-kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
```
输出:
@@ -361,8 +364,11 @@ curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始。
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
-6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000.
-7. `topic.per.stable`: 如果设置为true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `--`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `-`
+6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 0,即获取到当前最新时间的所有数据。
+7. `out.format` : 结果集输出格式。`line` 表示输出格式为 InfluxDB Line 协议格式,`json` 表示输出格式是 json。默认为 line。
+8. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic 的命名规则为 `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix><topic.delimiter><connection.database>`
+9. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 `<topic.prefix><topic.delimiter><stable.name>`,false 表示规则为 `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`,默认 false。此配置项在 `topic.per.stable` 设置为 false 时不生效。
+10. `topic.delimiter`: topic 名称分割符,默认为 `-`。
## 其他说明
diff --git a/examples/JDBC/springbootdemo/src/main/resources/application.properties b/examples/JDBC/springbootdemo/src/main/resources/application.properties
index bf21047395..c523952fb6 100644
--- a/examples/JDBC/springbootdemo/src/main/resources/application.properties
+++ b/examples/JDBC/springbootdemo/src/main/resources/application.properties
@@ -5,7 +5,7 @@
#spring.datasource.password=taosdata
# datasource config - JDBC-RESTful
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
-spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
spring.datasource.username=root
spring.datasource.password=taosdata
spring.datasource.druid.initial-size=5
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index d78e771fcf..fa2d4984bf 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -1975,6 +1975,7 @@ typedef struct {
SArray* fillNullCols; // array of SColLocation
int64_t deleteMark;
int8_t igUpdate;
+ int64_t lastTs;
} SCMCreateStreamReq;
typedef struct {
@@ -2033,6 +2034,12 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
char clientId[256];
SArray* topicNames; // SArray
+
+ int8_t withTbName;
+ int8_t useSnapshot;
+ int8_t autoCommit;
+ int32_t autoCommitInterval;
+ int8_t resetOffsetCfg;
} SCMSubscribeReq;
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
@@ -2047,6 +2054,13 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
for (int32_t i = 0; i < topicNum; i++) {
tlen += taosEncodeString(buf, (char*)taosArrayGetP(pReq->topicNames, i));
}
+
+ tlen += taosEncodeFixedI8(buf, pReq->withTbName);
+ tlen += taosEncodeFixedI8(buf, pReq->useSnapshot);
+ tlen += taosEncodeFixedI8(buf, pReq->autoCommit);
+ tlen += taosEncodeFixedI32(buf, pReq->autoCommitInterval);
+ tlen += taosEncodeFixedI8(buf, pReq->resetOffsetCfg);
+
return tlen;
}
@@ -2064,6 +2078,12 @@ static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq
buf = taosDecodeString(buf, &name);
taosArrayPush(pReq->topicNames, &name);
}
+
+ buf = taosDecodeFixedI8(buf, &pReq->withTbName);
+ buf = taosDecodeFixedI8(buf, &pReq->useSnapshot);
+ buf = taosDecodeFixedI8(buf, &pReq->autoCommit);
+ buf = taosDecodeFixedI32(buf, &pReq->autoCommitInterval);
+ buf = taosDecodeFixedI8(buf, &pReq->resetOffsetCfg);
return buf;
}
@@ -2455,15 +2475,6 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
} SMqAskEpReq;
-typedef struct {
- int64_t consumerId;
- int32_t epoch;
-} SMqHbReq;
-
-typedef struct {
- int8_t reserved;
-} SMqHbRsp;
-
typedef struct {
int32_t key;
int32_t valueLen;
@@ -2487,6 +2498,7 @@ typedef struct {
int64_t stime; // timestamp precision ms
int64_t reqRid;
bool stableQuery;
+ bool isSubQuery;
char fqdn[TSDB_FQDN_LEN];
int32_t subPlanNum;
SArray* subDesc; // SArray
@@ -2891,7 +2903,7 @@ int32_t tDecodeSMqCMCommitOffsetReq(SDecoder* decoder, SMqCMCommitOffsetReq* pRe
// tqOffset
enum {
TMQ_OFFSET__RESET_NONE = -3,
- TMQ_OFFSET__RESET_EARLIEAST = -2,
+ TMQ_OFFSET__RESET_EARLIEST = -2,
TMQ_OFFSET__RESET_LATEST = -1,
TMQ_OFFSET__LOG = 1,
TMQ_OFFSET__SNAPSHOT_DATA = 2,
@@ -3354,6 +3366,28 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (FDelete)tDeleteMqSubTopicEp);
}
+typedef struct {
+ int32_t vgId;
+ STqOffsetVal offset;
+ int64_t rows;
+} OffsetRows;
+
+typedef struct {
+ char topicName[TSDB_TOPIC_FNAME_LEN];
+ SArray* offsetRows;
+} TopicOffsetRows;
+
+typedef struct {
+ int64_t consumerId;
+ int32_t epoch;
+ SArray* topics;
+} SMqHbReq;
+
+typedef struct {
+ int8_t reserved;
+} SMqHbRsp;
+
+
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
@@ -3478,10 +3512,8 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
-int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
-int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
-int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
-int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
+int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
+
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 6d849e164c..0880613c46 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -150,7 +150,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
- TD_DEF_MSG_TYPE(TDMT_MND_UNUSED2, "unused2", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_MND_CREATE_VG, "create-vg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_TIMER, "tmq-tmr", SMTimerReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h
index 9300deeb9a..7a7a13b285 100644
--- a/include/libs/catalog/catalog.h
+++ b/include/libs/catalog/catalog.h
@@ -87,6 +87,7 @@ typedef struct SCatalogReq {
bool dNodeRequired; // valid dnode
bool svrVerRequired;
bool forceUpdate;
+ bool cloned;
} SCatalogReq;
typedef struct SMetaRes {
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index f0c9cffd0f..55af50e0bc 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -233,6 +233,7 @@ bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType);
+SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index c8ce9634f5..3a36601b11 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -425,16 +425,18 @@ typedef struct SStreamOptions {
} SStreamOptions;
typedef struct SCreateStreamStmt {
- ENodeType type;
- char streamName[TSDB_TABLE_NAME_LEN];
- char targetDbName[TSDB_DB_NAME_LEN];
- char targetTabName[TSDB_TABLE_NAME_LEN];
- bool ignoreExists;
- SStreamOptions* pOptions;
- SNode* pQuery;
- SNodeList* pTags;
- SNode* pSubtable;
- SNodeList* pCols;
+ ENodeType type;
+ char streamName[TSDB_TABLE_NAME_LEN];
+ char targetDbName[TSDB_DB_NAME_LEN];
+ char targetTabName[TSDB_TABLE_NAME_LEN];
+ bool ignoreExists;
+ SStreamOptions* pOptions;
+ SNode* pQuery;
+ SNode* pPrevQuery;
+ SNodeList* pTags;
+ SNode* pSubtable;
+ SNodeList* pCols;
+ SCMCreateStreamReq* pReq;
} SCreateStreamStmt;
typedef struct SDropStreamStmt {
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 02459ed951..f44b622cc0 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -617,6 +617,7 @@ typedef struct SQueryPlan {
int32_t numOfSubplans;
SNodeList* pSubplans; // Element is SNodeListNode. The execution level of subplan, starting from 0.
SExplainInfo explainInfo;
+ void* pPostPlan;
} SQueryPlan;
const char* dataOrderStr(EDataOrderLevel order);
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 12890571f9..f570698395 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -441,7 +441,9 @@ typedef struct SQuery {
EQueryExecStage execStage;
EQueryExecMode execMode;
bool haveResultSet;
+ SNode* pPrevRoot;
SNode* pRoot;
+ SNode* pPostRoot;
int32_t numOfResCols;
SSchema* pResSchema;
int8_t precision;
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 94fb6824d2..f253b47e50 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -74,6 +74,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
const struct SMetaData* pMetaData, SQuery* pQuery);
int32_t qContinueParseSql(SParseContext* pCxt, struct SCatalogReq* pCatalogReq, const struct SMetaData* pMetaData,
SQuery* pQuery);
+int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pResRow);
void qDestroyParseContext(SParseContext* pCxt);
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index 41c0e98084..1b523c0323 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -52,6 +52,7 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
// @groupId id of a group of datasource subplans of this @pSubplan
// @pSource one execution location of this group of datasource subplans
int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstreamSourceNode* pSource);
+int32_t qContinuePlanPostQuery(void *pPostPlan);
void qClearSubplanExecutionNode(SSubplan* pSubplan);
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index e86a4f9690..2a0a4b0f63 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -154,14 +154,14 @@ typedef struct SSnapshotMeta {
typedef struct SSyncFSM {
void* data;
- int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+ int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
SyncIndex (*FpAppliedIndexCb)(const struct SSyncFSM* pFsm);
- int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
- void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+ int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
+ void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
void (*FpRestoreFinishCb)(const struct SSyncFSM* pFsm, const SyncIndex commitIdx);
- void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SReConfigCbMeta* pMeta);
- void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+ void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SReConfigCbMeta* pMeta);
+ void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
bool (*FpApplyQueueEmptyCb)(const struct SSyncFSM* pFsm);
int32_t (*FpApplyQueueItems)(const struct SSyncFSM* pFsm);
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index 1aa08ff802..47230bc95c 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -214,7 +214,7 @@ int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
void walRefFirstVer(SWal *, SWalRef *);
void walRefLastVer(SWal *, SWalRef *);
-SWalRef *walRefCommittedVer(SWal *);
+void walRefCommitVer(SWal *, SWalRef *);
SWalRef *walOpenRef(SWal *);
void walCloseRef(SWal *pWal, int64_t refId);
diff --git a/include/util/talgo.h b/include/util/talgo.h
index f9d51c4b5b..7c92c0fe87 100644
--- a/include/util/talgo.h
+++ b/include/util/talgo.h
@@ -31,7 +31,7 @@ typedef void *(*__array_item_dup_fn_t)(void *);
typedef void (*FDelete)(void *);
typedef int32_t (*FEncode)(void **buf, const void *dst);
-typedef void *(*FDecode)(const void *buf, void *dst);
+typedef void *(*FDecode)(const void *buf, void *dst, int8_t sver);
#define TD_EQ 0x1
#define TD_GT 0x2
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 9e5229870e..889ee41a29 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -345,7 +345,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_TRANS_CLOG_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x03D4)
#define TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL TAOS_DEF_ERROR_CODE(0, 0x03D5)
#define TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x03D6) //internal
-#define TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7)
+#define TSDB_CODE_MND_TRANS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7)
#define TSDB_CODE_MND_TRANS_UNKNOW_ERROR TAOS_DEF_ERROR_CODE(0, 0x03DF)
// mnode-mq
diff --git a/include/util/tarray.h b/include/util/tarray.h
index 4bf24b46b9..a93c695370 100644
--- a/include/util/tarray.h
+++ b/include/util/tarray.h
@@ -244,7 +244,7 @@ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t
void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param);
int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode);
-void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz);
+void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver);
#ifdef __cplusplus
}
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 37eeb87fdd..69b012ecea 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -195,6 +195,7 @@ typedef enum ELogicConditionType {
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
+#define TSDB_OFFSET_LEN 64 // it is a null-terminated string
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 65
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 18891bb932..fa444779f3 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -227,6 +227,12 @@ typedef struct {
STaosxRsp rsp;
} SMqTaosxRspObj;
+typedef struct SReqRelInfo {
+ uint64_t userRefId;
+ uint64_t prevRefId;
+ uint64_t nextRefId;
+} SReqRelInfo;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -250,10 +256,14 @@ typedef struct SRequestObj {
bool validateOnly; // todo refactor
bool killed;
bool inRetry;
+ bool isSubReq;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
int64_t allocatorRefId;
SQuery* pQuery;
+ void* pPostPlan;
+ SReqRelInfo relation;
+ void* pWrapper;
} SRequestObj;
typedef struct SSyncQueryParam {
@@ -279,6 +289,7 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly,
int64_t reqid);
+void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param);
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
@@ -368,6 +379,7 @@ typedef struct SSqlCallbackWrapper {
SParseContext* pParseCtx;
SCatalogReq* pCatalogReq;
SRequestObj* pRequest;
+ void* pPlanInfo;
} SSqlCallbackWrapper;
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res);
@@ -382,6 +394,12 @@ int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
void continueInsertFromCsv(SSqlCallbackWrapper* pWrapper, SRequestObj* pRequest);
void destorySqlCallbackWrapper(SSqlCallbackWrapper* pWrapper);
+void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code);
+void restartAsyncQuery(SRequestObj *pRequest, int32_t code);
+int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest);
+int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce);
+void returnToUser(SRequestObj* pRequest);
+void stopAllQueries(SRequestObj *pRequest);
#ifdef __cplusplus
}
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 045642c2c2..c64bbfbdb6 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -358,6 +358,49 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri
int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); }
+
+void destroySubRequests(SRequestObj *pRequest) {
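+  // Tear down the whole chain of sub-requests linked to this request. Only the
+  // user-facing request (or an unchained one) is allowed to drive the cleanup.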
+ int32_t reqIdx = -1;
+ SRequestObj *pReqList[16] = {NULL};
+ uint64_t tmpRefId = 0;
+
+ if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) {
+ return;
+ }
+
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ tmpRefId = pTmp->relation.prevRefId;
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ pReqList[++reqIdx] = pTmp;
+ releaseRequest(tmpRefId);
+ } else {
+      tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+               tmpRefId, pRequest->requestId);
+ break;
+ }
+ }
+
+ for (int32_t i = reqIdx; i >= 0; i--) {
+ removeRequest(pReqList[i]->self);
+ }
+
+ tmpRefId = pRequest->relation.nextRefId;
+ while (tmpRefId) {
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ tmpRefId = pTmp->relation.nextRefId;
+ removeRequest(pTmp->self);
+ releaseRequest(pTmp->self);
+ } else {
+ tscError("0x%" PRIx64 " is not there", tmpRefId);
+ break;
+ }
+ }
+}
+
+
void doDestroyRequest(void *p) {
if (NULL == p) {
return;
@@ -368,10 +411,14 @@ void doDestroyRequest(void *p) {
uint64_t reqId = pRequest->requestId;
tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, pRequest);
+ destroySubRequests(pRequest);
+
taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
schedulerFreeJob(&pRequest->body.queryJob, 0);
+ destorySqlCallbackWrapper(pRequest->pWrapper);
+
taosMemoryFreeClear(pRequest->msgBuf);
taosMemoryFreeClear(pRequest->pDb);
@@ -412,6 +459,63 @@ void destroyRequest(SRequestObj *pRequest) {
removeRequest(pRequest->self);
}
+void taosStopQueryImpl(SRequestObj *pRequest) {
+ pRequest->killed = true;
+
+ // It is not a query, no need to stop.
+ if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
+ tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
+ return;
+ }
+
+ schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
+ tscDebug("request %" PRIx64 " killed", pRequest->requestId);
+}
+
+void stopAllQueries(SRequestObj *pRequest) {
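+  // Kill the query on every request in the chain: predecessors first, then this
+  // request, then any successors that are still linked.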
+ int32_t reqIdx = -1;
+ SRequestObj *pReqList[16] = {NULL};
+ uint64_t tmpRefId = 0;
+
+ if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) {
+ return;
+ }
+
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ tmpRefId = pTmp->relation.prevRefId;
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ pReqList[++reqIdx] = pTmp;
+ releaseRequest(tmpRefId);
+ } else {
+      tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+               tmpRefId, pRequest->requestId);
+ break;
+ }
+ }
+
+ for (int32_t i = reqIdx; i >= 0; i--) {
+ taosStopQueryImpl(pReqList[i]);
+ }
+
+ taosStopQueryImpl(pRequest);
+
+ tmpRefId = pRequest->relation.nextRefId;
+ while (tmpRefId) {
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ tmpRefId = pTmp->relation.nextRefId;
+ taosStopQueryImpl(pTmp);
+ releaseRequest(pTmp->self);
+ } else {
+ tscError("0x%" PRIx64 " is not there", tmpRefId);
+ break;
+ }
+ }
+}
+
+
void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }
static void *tscCrashReportThreadFp(void *param) {
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 2dddfec2bd..cbfa48b322 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -464,6 +464,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
desc.useconds = now - pRequest->metric.start;
desc.reqRid = pRequest->self;
desc.stableQuery = pRequest->stableQuery;
+ desc.isSubQuery = pRequest->isSubReq;
taosGetFqdn(desc.fqdn);
desc.subPlanNum = pRequest->body.subplanNum;
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 5963e419e1..2a73156e8a 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -237,6 +237,17 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
return TSDB_CODE_SUCCESS;
}
+int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest) {
+ int32_t code = buildRequest(pRequest->pTscObj->id, sql, strlen(sql), pRequest, pRequest->validateOnly, pNewRequest, 0);
+ if (TSDB_CODE_SUCCESS == code) {
+ pRequest->relation.prevRefId = (*pNewRequest)->self;
+ (*pNewRequest)->relation.nextRefId = pRequest->self;
+ (*pNewRequest)->relation.userRefId = pRequest->self;
+ (*pNewRequest)->isSubReq = true;
+ }
+ return code;
+}
+
int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb) {
STscObj* pTscObj = pRequest->pTscObj;
@@ -878,6 +889,81 @@ static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
+void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) {
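+  // Feed the result row of the finished sub-query into this request's deferred
+  // parsing and planning, then hand it back to the normal analyse-result path.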
+ SSqlCallbackWrapper* pWrapper = pRequest->pWrapper;
+ int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ int64_t analyseStart = taosGetTimestampUs();
+ code = qContinueParsePostQuery(pWrapper->pParseCtx, pRequest->pQuery, (void**)row);
+ pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = qContinuePlanPostQuery(pRequest->pPostPlan);
+ }
+ nodesReleaseAllocator(pWrapper->pParseCtx->allocatorId);
+
+ handleQueryAnslyseRes(pWrapper, NULL, code);
+}
+
+void returnToUser(SRequestObj* pRequest) {
+ if (pRequest->relation.userRefId == pRequest->self || 0 == pRequest->relation.userRefId) {
+ // return to client
+ pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
+ return;
+ }
+
+ SRequestObj* pUserReq = acquireRequest(pRequest->relation.userRefId);
+ if (pUserReq) {
+ pUserReq->code = pRequest->code;
+ // return to client
+ pUserReq->body.queryFp(pUserReq->body.param, pUserReq, pUserReq->code);
+ releaseRequest(pRequest->relation.userRefId);
+ return;
+ } else {
+ tscError("0x%" PRIx64 ", user ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+ pRequest->relation.userRefId, pRequest->requestId);
+ }
+}
+
+void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
+ SRequestObj* pRequest = (SRequestObj*)res;
+ if (pRequest->code) {
+ returnToUser(pRequest);
+ return;
+ }
+
+ TAOS_ROW row = NULL;
+ if (rowNum > 0) {
+ row = taos_fetch_row(res); // for single row only now
+ }
+
+ SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
+ if (pNextReq) {
+ continuePostSubQuery(pNextReq, row);
+ releaseRequest(pRequest->relation.nextRefId);
+ } else {
+ tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+ pRequest->relation.nextRefId, pRequest->requestId);
+ }
+}
+
+void handlePostSubQuery(SSqlCallbackWrapper* pWrapper) {
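+  // If the finished sub-query produced a result set, fetch its row first;
+  // otherwise resume the next chained request right away.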
+ SRequestObj* pRequest = pWrapper->pRequest;
+ if (TD_RES_QUERY(pRequest)) {
+ taosAsyncFetchImpl(pRequest, postSubQueryFetchCb, pWrapper);
+ return;
+ }
+
+ SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
+ if (pNextReq) {
+ continuePostSubQuery(pNextReq, NULL);
+ releaseRequest(pRequest->relation.nextRefId);
+ } else {
+ tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+ pRequest->relation.nextRefId, pRequest->requestId);
+ }
+}
+
// todo refacto the error code mgmt
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SSqlCallbackWrapper* pWrapper = param;
@@ -912,12 +998,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->retry, pRequest->requestId);
- pRequest->prevCode = code;
- schedulerFreeJob(&pRequest->body.queryJob, 0);
- qDestroyQuery(pRequest->pQuery);
- pRequest->pQuery = NULL;
- destorySqlCallbackWrapper(pWrapper);
- doAsyncQuery(pRequest, true);
+ restartAsyncQuery(pRequest, code);
return;
}
@@ -938,10 +1019,15 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
return;
}
- destorySqlCallbackWrapper(pWrapper);
+ if (pRequest->relation.nextRefId) {
+ handlePostSubQuery(pWrapper);
+ } else {
+ destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
- // return to client
- pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ // return to client
+ pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ }
}
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res) {
@@ -1049,6 +1135,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
pRequest->requestId);
} else {
pRequest->body.subplanNum = pDag->numOfSubplans;
+ TSWAP(pRequest->pPostPlan, pDag->pPostPlan);
}
pRequest->metric.execStart = taosGetTimestampUs();
@@ -1084,6 +1171,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
if (TSDB_CODE_SUCCESS != code) {
pRequest->code = terrno;
}
@@ -1103,6 +1191,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.execMode = pQuery->execMode;
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
}
if (pQuery->pRoot && !pRequest->inRetry) {
@@ -2402,3 +2491,90 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
return pRequest;
}
+
+
+static void fetchCallback(void *pResult, void *param, int32_t code) {
+ SRequestObj *pRequest = (SRequestObj *)param;
+
+ SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
+
+ tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
+ tstrerror(code), pRequest->requestId);
+
+ pResultInfo->pData = pResult;
+ pResultInfo->numOfRows = 0;
+
+ if (code != TSDB_CODE_SUCCESS) {
+ pRequest->code = code;
+ taosMemoryFreeClear(pResultInfo->pData);
+ pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
+ return;
+ }
+
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ taosMemoryFreeClear(pResultInfo->pData);
+ pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
+ return;
+ }
+
+ pRequest->code =
+ setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ pRequest->code = code;
+ tscError("0x%" PRIx64 " fetch results failed, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
+ pRequest->requestId);
+ } else {
+ tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
+ pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
+ pRequest->requestId);
+
+ STscObj *pTscObj = pRequest->pTscObj;
+ SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ }
+
+ pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
+}
+
+void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param) {
+ pRequest->body.fetchFp = fp;
+ pRequest->body.param = param;
+
+ SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
+
+  // this query has no result set or an error occurred, return directly
+ if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
+ return;
+ }
+
+  // all data has already been returned to the app, no need to try again
+ if (pResultInfo->completed) {
+ // it is a local executed query, no need to do async fetch
+ if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
+ if (pResultInfo->localResultFetched) {
+ pResultInfo->numOfRows = 0;
+ pResultInfo->current = 0;
+ } else {
+ pResultInfo->localResultFetched = true;
+ }
+ } else {
+ pResultInfo->numOfRows = 0;
+ }
+
+ pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
+ return;
+ }
+
+ SSchedulerReq req = {
+ .syncReq = false,
+ .fetchFp = fetchCallback,
+ .cbParam = pRequest,
+ };
+
+ schedulerFetchRows(pRequest->body.queryJob, &req);
+}
+
+
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 63a4e5d2e5..7573fd5968 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -563,22 +563,13 @@ int taos_select_db(TAOS *taos, const char *db) {
return code;
}
+
void taos_stop_query(TAOS_RES *res) {
if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res) || TD_RES_TMQ_METADATA(res)) {
return;
}
- SRequestObj *pRequest = (SRequestObj *)res;
- pRequest->killed = true;
-
- // It is not a query, no need to stop.
- if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
- tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
- return;
- }
-
- schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
- tscDebug("request %" PRIx64 " killed", pRequest->requestId);
+ stopAllQueries((SRequestObj*)res);
}
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
@@ -774,8 +765,13 @@ static void destoryCatalogReq(SCatalogReq *pCatalogReq) {
taosArrayDestroy(pCatalogReq->pDbVgroup);
taosArrayDestroy(pCatalogReq->pDbCfg);
taosArrayDestroy(pCatalogReq->pDbInfo);
- taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
- taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
+ if (pCatalogReq->cloned) {
+ taosArrayDestroy(pCatalogReq->pTableMeta);
+ taosArrayDestroy(pCatalogReq->pTableHash);
+ } else {
+ taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
+ taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
+ }
taosArrayDestroy(pCatalogReq->pUdf);
taosArrayDestroy(pCatalogReq->pIndex);
taosArrayDestroy(pCatalogReq->pUser);
@@ -794,26 +790,108 @@ void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) {
taosMemoryFree(pWrapper);
}
+void destroyCtxInRequest(SRequestObj* pRequest) {
+ schedulerFreeJob(&pRequest->body.queryJob, 0);
+ qDestroyQuery(pRequest->pQuery);
+ pRequest->pQuery = NULL;
+ destorySqlCallbackWrapper(pRequest->pWrapper);
+ pRequest->pWrapper = NULL;
+}
+
+
static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t code) {
SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)param;
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
- int64_t analyseStart = taosGetTimestampUs();
- pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
- if (code == TSDB_CODE_SUCCESS) {
+ int64_t analyseStart = taosGetTimestampUs();
+ pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
+
+ if (TSDB_CODE_SUCCESS == code) {
code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
+ }
+
+ pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
+
+ handleQueryAnslyseRes(pWrapper, pResultMeta, code);
+}
+
+int32_t cloneCatalogReq(SCatalogReq** ppTarget, SCatalogReq* pSrc) {
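+  // Shallow copy: the arrays are duplicated but their elements are shared with
+  // pSrc, so the 'cloned' flag tells destoryCatalogReq() not to free them twice.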
+ int32_t code = TSDB_CODE_SUCCESS;
+ SCatalogReq* pTarget = taosMemoryCalloc(1, sizeof(SCatalogReq));
+ if (pTarget == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ pTarget->pDbVgroup = taosArrayDup(pSrc->pDbVgroup, NULL);
+ pTarget->pDbCfg = taosArrayDup(pSrc->pDbCfg, NULL);
+ pTarget->pDbInfo = taosArrayDup(pSrc->pDbInfo, NULL);
+ pTarget->pTableMeta = taosArrayDup(pSrc->pTableMeta, NULL);
+ pTarget->pTableHash = taosArrayDup(pSrc->pTableHash, NULL);
+ pTarget->pUdf = taosArrayDup(pSrc->pUdf, NULL);
+ pTarget->pIndex = taosArrayDup(pSrc->pIndex, NULL);
+ pTarget->pUser = taosArrayDup(pSrc->pUser, NULL);
+ pTarget->pTableIndex = taosArrayDup(pSrc->pTableIndex, NULL);
+ pTarget->pTableCfg = taosArrayDup(pSrc->pTableCfg, NULL);
+ pTarget->pTableTag = taosArrayDup(pSrc->pTableTag, NULL);
+ pTarget->qNodeRequired = pSrc->qNodeRequired;
+ pTarget->dNodeRequired = pSrc->dNodeRequired;
+ pTarget->svrVerRequired = pSrc->svrVerRequired;
+ pTarget->forceUpdate = pSrc->forceUpdate;
+ pTarget->cloned = true;
+
+ *ppTarget = pTarget;
+ }
+
+ return code;
+}
+
+
+void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, SNode* pRoot) {
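+  // The analyser produced a statement (pRoot) that must run before the user's
+  // query. Build a chained "previous" request for it and push it through the same
+  // async analyse-and-execute path; handlePostSubQuery resumes the original
+  // request once it completes.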
+ SRequestObj* pNewRequest = NULL;
+ SSqlCallbackWrapper* pNewWrapper = NULL;
+ int32_t code = buildPreviousRequest(pWrapper->pRequest, pWrapper->pRequest->sqlstr, &pNewRequest);
+ if (code) {
+ handleQueryAnslyseRes(pWrapper, pResultMeta, code);
+ return;
+ }
+
+ pNewRequest->pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pNewRequest->pQuery) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ pNewRequest->pQuery->pRoot = pRoot;
+ pRoot = NULL;
+ pNewRequest->pQuery->execStage = QUERY_EXEC_STAGE_ANALYSE;
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = prepareAndParseSqlSyntax(&pNewWrapper, pNewRequest, false);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = cloneCatalogReq(&pNewWrapper->pCatalogReq, pWrapper->pCatalogReq);
+ }
+ doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
+ nodesDestroyNode(pRoot);
+}
+
+void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code) {
+ SRequestObj *pRequest = pWrapper->pRequest;
+ SQuery *pQuery = pRequest->pQuery;
+
+ if (code == TSDB_CODE_SUCCESS && pQuery->pPrevRoot) {
+ SNode* prevRoot = pQuery->pPrevRoot;
+ pQuery->pPrevRoot = NULL;
+ handleSubQueryFromAnalyse(pWrapper, pResultMeta, prevRoot);
+ return;
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
pRequest->stableQuery = pQuery->stableQuery;
if (pQuery->pRoot) {
pRequest->stmtType = pQuery->pRoot->type;
}
- }
- pRequest->metric.analyseCostUs = taosGetTimestampUs() - analyseStart;
-
- if (code == TSDB_CODE_SUCCESS) {
if (pQuery->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);
setResPrecision(&pRequest->body.resInfo, pQuery->precision);
@@ -826,14 +904,14 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper);
} else {
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
- pRequest->prevCode = code;
- doAsyncQuery(pRequest, true);
+ restartAsyncQuery(pRequest, code);
return;
}
@@ -841,7 +919,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
tscError("0x%" PRIx64 " error occurs, code:%s, return to user app, reqId:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->requestId);
pRequest->code = code;
- pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ returnToUser(pRequest);
}
}
@@ -904,6 +982,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -920,6 +999,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -967,27 +1047,16 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
return TSDB_CODE_SUCCESS;
}
-void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
+int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce) {
+ int32_t code = TSDB_CODE_SUCCESS;
STscObj *pTscObj = pRequest->pTscObj;
- SSqlCallbackWrapper *pWrapper = NULL;
- int32_t code = TSDB_CODE_SUCCESS;
-
- if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
- code = pRequest->prevCode;
- terrno = code;
- pRequest->code = code;
- tscDebug("call sync query cb with code: %s", tstrerror(code));
- pRequest->body.queryFp(pRequest->body.param, pRequest, code);
- return;
- }
-
- if (TSDB_CODE_SUCCESS == code) {
- pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
- if (pWrapper == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- } else {
- pWrapper->pRequest = pRequest;
- }
+ SSqlCallbackWrapper *pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
+ if (pWrapper == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ pWrapper->pRequest = pRequest;
+ pRequest->pWrapper = pWrapper;
+ *ppWrapper = pWrapper;
}
if (TSDB_CODE_SUCCESS == code) {
@@ -999,7 +1068,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pWrapper->pParseCtx->pCatalog);
}
- if (TSDB_CODE_SUCCESS == code) {
+ if (TSDB_CODE_SUCCESS == code && NULL == pRequest->pQuery) {
int64_t syntaxStart = taosGetTimestampUs();
pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq));
@@ -1014,6 +1083,27 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
pRequest->metric.parseCostUs += taosGetTimestampUs() - syntaxStart;
}
+ return code;
+}
+
+
+void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
+ SSqlCallbackWrapper *pWrapper = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
+ code = pRequest->prevCode;
+ terrno = code;
+ pRequest->code = code;
+ tscDebug("call sync query cb with code: %s", tstrerror(code));
+ pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ return;
+ }
+
+ if (TSDB_CODE_SUCCESS == code) {
+ code = prepareAndParseSqlSyntax(&pWrapper, pRequest, updateMetaForce);
+ }
+
if (TSDB_CODE_SUCCESS == code) {
pRequest->stmtType = pRequest->pQuery->pRoot->type;
code = phaseAsyncQuery(pWrapper);
@@ -1023,6 +1113,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
@@ -1040,48 +1131,57 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
}
-static void fetchCallback(void *pResult, void *param, int32_t code) {
- SRequestObj *pRequest = (SRequestObj *)param;
-
- SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
-
- tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
- tstrerror(code), pRequest->requestId);
-
- pResultInfo->pData = pResult;
- pResultInfo->numOfRows = 0;
-
- if (code != TSDB_CODE_SUCCESS) {
- pRequest->code = code;
- taosMemoryFreeClear(pResultInfo->pData);
- pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
- return;
+void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
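+  // A retriable error occurred somewhere in the chain: release the execution
+  // context of every chained request, drop the sub-requests and restart the query
+  // from the user-facing request.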
+ int32_t reqIdx = 0;
+ SRequestObj *pReqList[16] = {NULL};
+ SRequestObj *pUserReq = NULL;
+ pReqList[0] = pRequest;
+ uint64_t tmpRefId = 0;
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ tmpRefId = pTmp->relation.prevRefId;
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ pReqList[++reqIdx] = pTmp;
+ releaseRequest(tmpRefId);
+ } else {
+      tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+               tmpRefId, pRequest->requestId);
+ break;
+ }
}
- if (pRequest->code != TSDB_CODE_SUCCESS) {
- taosMemoryFreeClear(pResultInfo->pData);
- pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
- return;
+ tmpRefId = pRequest->relation.nextRefId;
+ while (tmpRefId) {
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ tmpRefId = pTmp->relation.nextRefId;
+ removeRequest(pTmp->self);
+ releaseRequest(pTmp->self);
+ } else {
+ tscError("0x%" PRIx64 " is not there", tmpRefId);
+ break;
+ }
}
- pRequest->code =
- setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
- if (pRequest->code != TSDB_CODE_SUCCESS) {
- pResultInfo->numOfRows = 0;
- pRequest->code = code;
- tscError("0x%" PRIx64 " fetch results failed, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
- pRequest->requestId);
+ for (int32_t i = reqIdx; i >= 0; i--) {
+ destroyCtxInRequest(pReqList[i]);
+ if (pReqList[i]->relation.userRefId == pReqList[i]->self || 0 == pReqList[i]->relation.userRefId) {
+ pUserReq = pReqList[i];
+ } else {
+ removeRequest(pReqList[i]->self);
+ }
+ }
+
+ if (pUserReq) {
+ pUserReq->prevCode = code;
+ memset(&pUserReq->relation, 0, sizeof(pUserReq->relation));
} else {
- tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
- pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
- pRequest->requestId);
-
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ tscError("user req is missing");
+ return;
}
- pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
+ doAsyncQuery(pUserReq, true);
}
void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
@@ -1095,43 +1195,8 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}
SRequestObj *pRequest = res;
- pRequest->body.fetchFp = fp;
- pRequest->body.param = param;
- SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
-
- // this query has no results or error exists, return directly
- if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
- pResultInfo->numOfRows = 0;
- pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
- return;
- }
-
- // all data has returned to App already, no need to try again
- if (pResultInfo->completed) {
- // it is a local executed query, no need to do async fetch
- if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
- if (pResultInfo->localResultFetched) {
- pResultInfo->numOfRows = 0;
- pResultInfo->current = 0;
- } else {
- pResultInfo->localResultFetched = true;
- }
- } else {
- pResultInfo->numOfRows = 0;
- }
-
- pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
- return;
- }
-
- SSchedulerReq req = {
- .syncReq = false,
- .fetchFp = fetchCallback,
- .cbParam = pRequest,
- };
-
- schedulerFetchRows(pRequest->body.queryJob, &req);
+ taosAsyncFetchImpl(pRequest, fp, param);
}
void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index bea237d09e..efa955ec84 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -1756,9 +1756,8 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
request->code = code;
info->cost.endTime = taosGetTimestampUs();
info->cost.code = code;
- if (code == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || code == TSDB_CODE_SDB_OBJ_CREATING ||
- code == TSDB_CODE_PAR_VALUE_TOO_LONG || code == TSDB_CODE_MND_TRANS_CONFLICT ||
- code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ if (NEED_CLIENT_HANDLE_ERROR(code) || code == TSDB_CODE_SDB_OBJ_CREATING ||
+ code == TSDB_CODE_PAR_VALUE_TOO_LONG || code == TSDB_CODE_MND_TRANS_CONFLICT) {
if (cnt++ >= 10) {
uInfo("SML:%" PRIx64 " retry:%d/10 end code:%d, msg:%s", info->id, cnt, code, tstrerror(code));
break;
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index e1b2b9c48b..26887e2ade 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -82,7 +82,7 @@ struct tmq_t {
int8_t useSnapshot;
int8_t autoCommit;
int32_t autoCommitInterval;
- int32_t resetOffsetCfg;
+ int8_t resetOffsetCfg;
uint64_t consumerId;
bool hbBgEnable;
tmq_commit_cb* commitCb;
@@ -99,6 +99,7 @@ struct tmq_t {
// poll info
int64_t pollCnt;
int64_t totalRows;
+ bool needReportOffsetRows;
// timer
tmr_h hbLiveTimer;
@@ -264,7 +265,7 @@ tmq_conf_t* tmq_conf_new() {
conf->withTbName = false;
conf->autoCommit = true;
conf->autoCommitInterval = DEFAULT_AUTO_COMMIT_INTERVAL;
- conf->resetOffset = TMQ_OFFSET__RESET_EARLIEAST;
+ conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
conf->hbBgEnable = true;
return conf;
@@ -318,7 +319,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
conf->resetOffset = TMQ_OFFSET__RESET_NONE;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "earliest") == 0) {
- conf->resetOffset = TMQ_OFFSET__RESET_EARLIEAST;
+ conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "latest") == 0) {
conf->resetOffset = TMQ_OFFSET__RESET_LATEST;
@@ -567,10 +568,10 @@ static int32_t doSendCommitMsg(tmq_t* tmq, SMqClientVg* pVg, const char* pTopicN
atomic_add_fetch_32(&pParamSet->totalRspNum, 1);
SEp* pEp = GET_ACTIVE_EP(&pVg->epSet);
- char offsetBuf[80] = {0};
+ char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffset->offset.val);
- char commitBuf[80] = {0};
+ char commitBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(commitBuf, tListLen(commitBuf), &pVg->offsetInfo.committedOffset);
tscDebug("consumer:0x%" PRIx64 " topic:%s on vgId:%d send offset:%s prev:%s, ep:%s:%d, ordinal:%d/%d, req:0x%" PRIx64,
tmq->consumerId, pOffset->offset.subKey, pVg->vgId, offsetBuf, commitBuf, pEp->fqdn, pEp->port, index + 1,
@@ -796,6 +797,25 @@ void tmqSendHbReq(void* param, void* tmrId) {
SMqHbReq req = {0};
req.consumerId = tmq->consumerId;
req.epoch = tmq->epoch;
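+  // Piggyback each vgroup's committed offset and row count on the heartbeat so the
+  // mnode can show them in the subscriptions system table.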
+ if(tmq->needReportOffsetRows){
+ req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows));
+ for(int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++){
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs);
+ TopicOffsetRows* data = taosArrayReserve(req.topics, 1);
+ strcpy(data->topicName, pTopic->topicName);
+ data->offsetRows = taosArrayInit(numOfVgroups, sizeof(OffsetRows));
+ for(int j = 0; j < numOfVgroups; j++){
+ SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
+ OffsetRows* offRows = taosArrayReserve(data->offsetRows, 1);
+ offRows->vgId = pVg->vgId;
+ offRows->rows = pVg->numOfRows;
+ offRows->offset = pVg->offsetInfo.committedOffset;
+ tscDebug("report offset: %d", offRows->offset.type);
+ }
+ }
+ tmq->needReportOffsetRows = false;
+ }
int32_t tlen = tSerializeSMqHbReq(NULL, 0, &req);
if (tlen < 0) {
@@ -835,6 +855,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
OVER:
+ tDeatroySMqHbReq(&req);
taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
taosReleaseRef(tmqMgmt.rsetId, refId);
}
@@ -969,6 +990,14 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
+ if (tmq->autoCommit) {
+ int32_t rsp = tmq_commit_sync(tmq, NULL);
+ if (rsp != 0) {
+ return rsp;
+ }
+ }
+ taosSsleep(2); // sleep 2s for hb to send offset and rows to server
+
int32_t rsp;
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
@@ -1063,6 +1092,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->status = TMQ_CONSUMER_STATUS__INIT;
pTmq->pollCnt = 0;
pTmq->epoch = 0;
+ pTmq->needReportOffsetRows = true;
// set conf
strcpy(pTmq->clientId, conf->clientId);
@@ -1107,7 +1137,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
- char buf[80] = {0};
+ char buf[TSDB_OFFSET_LEN] = {0};
STqOffsetVal offset = {.type = pTmq->resetOffsetCfg};
tFormatOffset(buf, tListLen(buf), &offset);
tscInfo("consumer:0x%" PRIx64 " is setup, refId:%" PRId64
@@ -1123,7 +1153,7 @@ _failed:
}
int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
- const int32_t MAX_RETRY_COUNT = 120 * 60; // let's wait for 2 mins at most
+ const int32_t MAX_RETRY_COUNT = 120 * 2; // let's wait for 2 mins at most
const SArray* container = &topic_list->container;
int32_t sz = taosArrayGetSize(container);
void* buf = NULL;
@@ -1143,6 +1173,12 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
goto FAIL;
}
+ req.withTbName = tmq->withTbName;
+ req.useSnapshot = tmq->useSnapshot;
+ req.autoCommit = tmq->autoCommit;
+ req.autoCommitInterval = tmq->autoCommitInterval;
+ req.resetOffsetCfg = tmq->resetOffsetCfg;
+
for (int32_t i = 0; i < sz; i++) {
char* topic = taosArrayGetP(container, i);
@@ -1375,8 +1411,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- char buf[80];
- tFormatOffset(buf, 80, &pRspWrapper->dataRsp.rspOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pRspWrapper->dataRsp.rspOffset);
tscDebug("consumer:0x%" PRIx64 " recv poll rsp, vgId:%d, req ver:%" PRId64 ", rsp:%s type %d, reqId:0x%" PRIx64,
tmq->consumerId, vgId, pRspWrapper->dataRsp.reqOffset.version, buf, rspType, requestId);
} else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
@@ -1523,8 +1559,8 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
makeTopicVgroupKey(vgKey, pTopicCur->topicName, pVgCur->vgId);
- char buf[80];
- tFormatOffset(buf, 80, &pVgCur->offsetInfo.currentOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pVgCur->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
vgKey, buf);
@@ -1673,7 +1709,7 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg,
return pRspObj;
}
-SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg, int64_t* numOfRows) {
SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
pRspObj->resType = RES_TYPE__TMQ_METADATA;
tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
@@ -1688,6 +1724,13 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
}
+ // extract the rows in this data packet
+ for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
+ int64_t rows = htobe64(pRetrieve->numOfRows);
+ pVg->numOfRows += rows;
+ (*numOfRows) += rows;
+ }
return pRspObj;
}
@@ -1745,7 +1788,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
sendInfo->msgType = TDMT_VND_TMQ_CONSUME;
int64_t transporterId = 0;
- char offsetFormatBuf[80];
+ char offsetFormatBuf[TSDB_OFFSET_LEN];
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId,
@@ -1882,8 +1925,8 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pVg->offsetInfo.walVerEnd = pDataRsp->head.walever;
pVg->receivedInfoFromVnode = true;
- char buf[80];
- tFormatOffset(buf, 80, &pDataRsp->rspOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset);
if (pDataRsp->blockNum == 0) {
tscDebug("consumer:0x%" PRIx64 " empty block received, vgId:%d, offset:%s, vg total:%" PRId64
" total:%" PRId64 " reqId:0x%" PRIx64,
@@ -1985,13 +2028,13 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
if (pollRspWrapper->taosxRsp.createTableNum == 0) {
pRsp = tmqBuildRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
} else {
- pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
+ pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
}
tmq->totalRows += numOfRows;
- char buf[80];
- tFormatOffset(buf, 80, &pVg->offsetInfo.currentOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
", vg total:%" PRId64 " total:%" PRId64 " reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows,
@@ -2110,6 +2153,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
return rsp;
}
}
+ taosSsleep(2); // sleep 2s for hb to send offset and rows to server
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
@@ -2411,6 +2455,7 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
// if no more waiting rsp
pParamSet->callbackFn(tmq, pParamSet->code, pParamSet->userParam);
taosMemoryFree(pParamSet);
+ tmq->needReportOffsetRows = true;
taosReleaseRef(tmqMgmt.rsetId, refId);
return 0;
@@ -2608,7 +2653,7 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
sendInfo->msgType = TDMT_VND_TMQ_VG_WALINFO;
int64_t transporterId = 0;
- char offsetFormatBuf[80];
+ char offsetFormatBuf[TSDB_OFFSET_LEN];
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pClientVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
@@ -2645,7 +2690,7 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
pOffsetInfo->currentOffset.type = TMQ_OFFSET__LOG;
- char offsetBuf[80] = {0};
+ char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffsetInfo->currentOffset);
tscDebug("vgId:%d offset is update to:%s", p->vgId, offsetBuf);
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index db0cc78de6..4b27f50f41 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -291,6 +291,8 @@ static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "offset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysDbTableSchema vnodesSchema[] = {
@@ -359,6 +361,11 @@ static const SSysDbTableSchema consumerSchema[] = {
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "msg.with.table.name", .bytes = 1, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = false},
+ {.name = "experimental.snapshot.enable", .bytes = 1, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = false},
+ {.name = "enable.auto.commit", .bytes = 1, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = false},
+ {.name = "auto.commit.interval.ms", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "auto.offset.reset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};
static const SSysDbTableSchema offsetSchema[] = {
@@ -381,6 +388,7 @@ static const SSysDbTableSchema querySchema[] = {
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
+ {.name = "sub_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
{.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index ac035e0a2b..4cc6b34ca2 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -224,6 +224,7 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
if (tEncodeI64(pEncoder, desc->stime) < 0) return -1;
if (tEncodeI64(pEncoder, desc->reqRid) < 0) return -1;
if (tEncodeI8(pEncoder, desc->stableQuery) < 0) return -1;
+ if (tEncodeI8(pEncoder, desc->isSubQuery) < 0) return -1;
if (tEncodeCStr(pEncoder, desc->fqdn) < 0) return -1;
if (tEncodeI32(pEncoder, desc->subPlanNum) < 0) return -1;
@@ -291,6 +292,7 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
if (tDecodeI64(pDecoder, &desc.stime) < 0) return -1;
if (tDecodeI64(pDecoder, &desc.reqRid) < 0) return -1;
if (tDecodeI8(pDecoder, (int8_t *)&desc.stableQuery) < 0) return -1;
+ if (tDecodeI8(pDecoder, (int8_t *)&desc.isSubQuery) < 0) return -1;
if (tDecodeCStrTo(pDecoder, desc.fqdn) < 0) return -1;
if (tDecodeI32(pDecoder, &desc.subPlanNum) < 0) return -1;
@@ -5338,6 +5340,15 @@ int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
return 0;
}
+int32_t tDeatroySMqHbReq(SMqHbReq* pReq){
+ for(int i = 0; i < taosArrayGetSize(pReq->topics); i++){
+ TopicOffsetRows* vgs = taosArrayGet(pReq->topics, i);
+ if(vgs) taosArrayDestroy(vgs->offsetRows);
+ }
+ taosArrayDestroy(pReq->topics);
+ return 0;
+}
+
int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -5346,6 +5357,21 @@ int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->epoch) < 0) return -1;
+ int32_t sz = taosArrayGetSize(pReq->topics);
+ if (tEncodeI32(&encoder, sz) < 0) return -1;
+ for (int32_t i = 0; i < sz; ++i) {
+ TopicOffsetRows* vgs = (TopicOffsetRows*)taosArrayGet(pReq->topics, i);
+ if (tEncodeCStr(&encoder, vgs->topicName) < 0) return -1;
+ int32_t szVgs = taosArrayGetSize(vgs->offsetRows);
+ if (tEncodeI32(&encoder, szVgs) < 0) return -1;
+ for (int32_t j = 0; j < szVgs; ++j) {
+ OffsetRows *offRows = taosArrayGet(vgs->offsetRows, j);
+ if (tEncodeI32(&encoder, offRows->vgId) < 0) return -1;
+ if (tEncodeI64(&encoder, offRows->rows) < 0) return -1;
+ if (tEncodeSTqOffsetVal(&encoder, &offRows->offset) < 0) return -1;
+ }
+ }
+
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -5362,7 +5388,28 @@ int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tDecodeI64(&decoder, &pReq->consumerId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->epoch) < 0) return -1;
-
+ int32_t sz = 0;
+ if (tDecodeI32(&decoder, &sz) < 0) return -1;
+ if(sz > 0){
+ pReq->topics = taosArrayInit(sz, sizeof(TopicOffsetRows));
+ if (NULL == pReq->topics) return -1;
+ for (int32_t i = 0; i < sz; ++i) {
+ TopicOffsetRows* data = taosArrayReserve(pReq->topics, 1);
+      if (tDecodeCStrTo(&decoder, data->topicName) < 0) return -1;
+ int32_t szVgs = 0;
+ if (tDecodeI32(&decoder, &szVgs) < 0) return -1;
+ if(szVgs > 0){
+ data->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
+ if (NULL == data->offsetRows) return -1;
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows* offRows = taosArrayReserve(data->offsetRows, 1);
+ if (tDecodeI32(&decoder, &offRows->vgId) < 0) return -1;
+ if (tDecodeI64(&decoder, &offRows->rows) < 0) return -1;
+ if (tDecodeSTqOffsetVal(&decoder, &offRows->offset) < 0) return -1;
+ }
+ }
+ }
+ }
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -6122,6 +6169,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
}
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->lastTs) < 0) return -1;
tEndEncode(&encoder);
@@ -6207,6 +6255,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->lastTs) < 0) return -1;
tEndDecode(&decoder);
@@ -6273,6 +6322,9 @@ int32_t tDeserializeSMRecoverStreamReq(void *buf, int32_t bufLen, SMRecoverStrea
}
void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
+ if (NULL == pReq) {
+ return;
+ }
taosArrayDestroy(pReq->pTags);
taosMemoryFreeClear(pReq->sql);
taosMemoryFreeClear(pReq->ast);
@@ -7086,15 +7138,15 @@ int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
if (pVal->type == TMQ_OFFSET__RESET_NONE) {
- snprintf(buf, maxLen, "offset(reset to none)");
- } else if (pVal->type == TMQ_OFFSET__RESET_EARLIEAST) {
- snprintf(buf, maxLen, "offset(reset to earlieast)");
+ snprintf(buf, maxLen, "none");
+ } else if (pVal->type == TMQ_OFFSET__RESET_EARLIEST) {
+ snprintf(buf, maxLen, "earliest");
} else if (pVal->type == TMQ_OFFSET__RESET_LATEST) {
- snprintf(buf, maxLen, "offset(reset to latest)");
+ snprintf(buf, maxLen, "latest");
} else if (pVal->type == TMQ_OFFSET__LOG) {
- snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
+ snprintf(buf, maxLen, "log:%" PRId64, pVal->version);
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
- snprintf(buf, maxLen, "offset(snapshot) uid:%" PRId64 " ts:%" PRId64, pVal->uid, pVal->ts);
+ snprintf(buf, maxLen, "snapshot:%" PRId64 "|%" PRId64, pVal->uid, pVal->ts);
} else {
return TSDB_CODE_INVALID_PARA;
}
@@ -7112,7 +7164,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
return pLeft->uid == pRight->uid;
} else {
ASSERT(0);
- /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
+ /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEST ||*/
/*pLeft->type == TMQ_OFFSET__RESET_LATEST);*/
/*return true;*/
}
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index 86210983bd..1e59dd1805 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -165,7 +165,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_TOPIC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_SUBSCRIBE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_ASK_EP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_HB, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_HB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_CGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_CGROUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_TRANS, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 0f11273e6e..da26753404 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -264,6 +264,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
+#if 0
if (pMgmt->pTfs) {
if (tfsDirExistAt(pMgmt->pTfs, path, (SDiskID){0})) {
terrno = TSDB_CODE_VND_DIR_ALREADY_EXIST;
@@ -277,6 +278,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
}
+#endif
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
tFreeSCreateVnodeReq(&req);
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 82b714e6eb..62beee0303 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -108,7 +108,7 @@ typedef enum {
TRN_STAGE_UNDO_ACTION = 3,
TRN_STAGE_COMMIT = 4,
TRN_STAGE_COMMIT_ACTION = 5,
- TRN_STAGE_FINISHED = 6,
+ TRN_STAGE_FINISH = 6,
TRN_STAGE_PRE_FINISH = 7
} ETrnStage;
@@ -157,6 +157,7 @@ typedef struct {
void* rpcRsp;
int32_t rpcRspLen;
int32_t redoActionPos;
+ SArray* prepareActions;
SArray* redoActions;
SArray* undoActions;
SArray* commitActions;
@@ -550,33 +551,40 @@ typedef struct {
int64_t upTime;
int64_t subscribeTime;
int64_t rebalanceTime;
+
+ int8_t withTbName;
+ int8_t useSnapshot;
+ int8_t autoCommit;
+ int32_t autoCommitInterval;
+ int32_t resetOffsetCfg;
} SMqConsumerObj;
SMqConsumerObj* tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]);
void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer);
int32_t tEncodeSMqConsumerObj(void** buf, const SMqConsumerObj* pConsumer);
-void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer);
+void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer, int8_t sver);
typedef struct {
int32_t vgId;
- char* qmsg; // SubPlanToString
+// char* qmsg; // SubPlanToString
SEpSet epSet;
} SMqVgEp;
SMqVgEp* tCloneSMqVgEp(const SMqVgEp* pVgEp);
void tDeleteSMqVgEp(SMqVgEp* pVgEp);
int32_t tEncodeSMqVgEp(void** buf, const SMqVgEp* pVgEp);
-void* tDecodeSMqVgEp(const void* buf, SMqVgEp* pVgEp);
+void* tDecodeSMqVgEp(const void* buf, SMqVgEp* pVgEp, int8_t sver);
typedef struct {
int64_t consumerId; // -1 for unassigned
SArray* vgs; // SArray
+ SArray* offsetRows; // SArray
} SMqConsumerEp;
-SMqConsumerEp* tCloneSMqConsumerEp(const SMqConsumerEp* pEp);
-void tDeleteSMqConsumerEp(void* pEp);
+//SMqConsumerEp* tCloneSMqConsumerEp(const SMqConsumerEp* pEp);
+//void tDeleteSMqConsumerEp(void* pEp);
int32_t tEncodeSMqConsumerEp(void** buf, const SMqConsumerEp* pEp);
-void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp);
+void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp, int8_t sver);
typedef struct {
char key[TSDB_SUBSCRIBE_KEY_LEN];
@@ -588,14 +596,16 @@ typedef struct {
int64_t stbUid;
SHashObj* consumerHash; // consumerId -> SMqConsumerEp
SArray* unassignedVgs; // SArray
+ SArray* offsetRows;
char dbName[TSDB_DB_FNAME_LEN];
+ char* qmsg; // SubPlanToString
} SMqSubscribeObj;
SMqSubscribeObj* tNewSubscribeObj(const char key[TSDB_SUBSCRIBE_KEY_LEN]);
SMqSubscribeObj* tCloneSubscribeObj(const SMqSubscribeObj* pSub);
void tDeleteSubscribeObj(SMqSubscribeObj* pSub);
int32_t tEncodeSubscribeObj(void** buf, const SMqSubscribeObj* pSub);
-void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub);
+void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub, int8_t sver);
typedef struct {
int32_t epoch;
@@ -687,12 +697,12 @@ int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj, int32_t sver);
void tFreeStreamObj(SStreamObj* pObj);
-typedef struct {
- char streamName[TSDB_STREAM_FNAME_LEN];
- int64_t uid;
- int64_t streamUid;
- SArray* childInfo; // SArray
-} SStreamCheckpointObj;
+//typedef struct {
+// char streamName[TSDB_STREAM_FNAME_LEN];
+// int64_t uid;
+// int64_t streamUid;
+// SArray* childInfo; // SArray
+//} SStreamCheckpointObj;
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h
index 03434573c4..625546aa55 100644
--- a/source/dnode/mnode/impl/inc/mndTrans.h
+++ b/source/dnode/mnode/impl/inc/mndTrans.h
@@ -70,6 +70,7 @@ int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendNullLog(STrans *pTrans);
+int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
@@ -78,15 +79,23 @@ void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbnam
void mndTransSetSerial(STrans *pTrans);
void mndTransSetParallel(STrans *pTrans);
void mndTransSetOper(STrans *pTrans, EOperType oper);
-int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans);
-
+int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans);
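+// forwards the old (misspelled) name to the renamed mndTransCheckConflict()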
+static inline int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
+ return mndTransCheckConflict(pMnode, pTrans);
+}
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
int32_t mndTransProcessRsp(SRpcMsg *pRsp);
void mndTransPullup(SMnode *pMnode);
int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans);
-void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader);
+void mndTransExecute(SMnode *pMnode, STrans *pTrans);
+void mndTransRefresh(SMnode *pMnode, STrans *pTrans);
int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname);
+SSdbRaw *mndTransEncode(STrans *pTrans);
+SSdbRow *mndTransDecode(SSdbRaw *pRaw);
+void mndTransDropData(STrans *pTrans);
+
+bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h
index 0cd1228f25..7c2f8b5b65 100644
--- a/source/dnode/mnode/impl/inc/mndVgroup.h
+++ b/source/dnode/mnode/impl/inc/mndVgroup.h
@@ -27,6 +27,7 @@ void mndCleanupVgroup(SMnode *pMnode);
SVgObj *mndAcquireVgroup(SMnode *pMnode, int32_t vgId);
void mndReleaseVgroup(SMnode *pMnode, SVgObj *pVgroup);
SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup);
+SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw);
SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup);
int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId);
void mndSortVnodeGid(SVgObj *pVgroup);
@@ -36,6 +37,7 @@ int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup);
SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId);
int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups);
+int32_t mndAddPrepareNewVgAction(SMnode *, STrans *pTrans, SVgObj *pVg);
int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid);
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 117c1082a5..c8ee35f402 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -23,7 +23,7 @@
#include "tcompare.h"
#include "tname.h"
-#define MND_CONSUMER_VER_NUMBER 1
+#define MND_CONSUMER_VER_NUMBER 2
#define MND_CONSUMER_RESERVE_SIZE 64
#define MND_CONSUMER_LOST_HB_CNT 6
@@ -391,12 +391,13 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
}
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
+ int32_t code = 0;
SMnode *pMnode = pMsg->info.node;
SMqHbReq req = {0};
- if (tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
+ if ((code = tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
+ goto end;
}
int64_t consumerId = req.consumerId;
@@ -404,7 +405,8 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
if (pConsumer == NULL) {
mError("consumer:0x%" PRIx64 " not exist", consumerId);
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
- return -1;
+ code = -1;
+ goto end;
}
atomic_store_32(&pConsumer->hbStatus, 0);
@@ -424,9 +426,28 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg);
}
+ for(int i = 0; i < taosArrayGetSize(req.topics); i++){
+ TopicOffsetRows* data = taosArrayGet(req.topics, i);
+ mDebug("heartbeat report offset rows.%s:%s", pConsumer->cgroup, data->topicName);
+
+    SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName);
+    if (pSub == NULL) {  // the subscription may already be gone (e.g. topic dropped); skip it to avoid a NULL dereference
+      continue;
+    }
+    taosWLockLatch(&pSub->lock);
+ SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &consumerId, sizeof(int64_t));
+ if(pConsumerEp){
+ taosArrayDestroy(pConsumerEp->offsetRows);
+ pConsumerEp->offsetRows = data->offsetRows;
+ data->offsetRows = NULL;
+ }
+ taosWUnLockLatch(&pSub->lock);
+
+ mndReleaseSubscribe(pMnode, pSub);
+ }
+
mndReleaseConsumer(pMnode, pConsumer);
- return 0;
+end:
+ tDeatroySMqHbReq(&req);
+ return code;
}
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
@@ -644,7 +665,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
SCMSubscribeReq subscribe = {0};
tDeserializeSCMSubscribeReq(msgStr, &subscribe);
- uint64_t consumerId = subscribe.consumerId;
+ int64_t consumerId = subscribe.consumerId;
char *cgroup = subscribe.cgroup;
SMqConsumerObj *pExistedConsumer = NULL;
SMqConsumerObj *pConsumerNew = NULL;
@@ -675,6 +696,12 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup);
tstrncpy(pConsumerNew->clientId, subscribe.clientId, tListLen(pConsumerNew->clientId));
+ pConsumerNew->withTbName = subscribe.withTbName;
+ pConsumerNew->useSnapshot = subscribe.useSnapshot;
+ pConsumerNew->autoCommit = subscribe.autoCommit;
+ pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval;
+ pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg;
+
// set the update type
pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE;
taosArrayDestroy(pConsumerNew->assignedTopics);
@@ -822,7 +849,7 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw) {
goto CM_DECODE_OVER;
}
- if (sver != MND_CONSUMER_VER_NUMBER) {
+ if (sver < 1 || sver > MND_CONSUMER_VER_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto CM_DECODE_OVER;
}
@@ -849,7 +876,7 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, buf, len, CM_DECODE_OVER);
SDB_GET_RESERVE(pRaw, dataPos, MND_CONSUMER_RESERVE_SIZE, CM_DECODE_OVER);
- if (tDecodeSMqConsumerObj(buf, pConsumer) == NULL) {
+ if (tDecodeSMqConsumerObj(buf, pConsumer, sver) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY; // TODO set correct error code
goto CM_DECODE_OVER;
}
@@ -1159,6 +1186,26 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->rebalanceTime, pConsumer->rebalanceTime == 0);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->withTbName, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->useSnapshot, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->autoCommit, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->autoCommitInterval, false);
+
+ char buf[TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0};
+ STqOffsetVal pVal = {.type = pConsumer->resetOffsetCfg};
+ tFormatOffset(varDataVal(buf), TSDB_OFFSET_LEN, &pVal);
+ varDataSetLen(buf, strlen(varDataVal(buf)));
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)buf, false);
+
numOfRows++;
}
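In the mndConsumer.c changes above, the heartbeat handler now copies each topic's reported offset rows onto the matching consumer ep under the subscription's write latch, and takes ownership of the array by nulling the pointer inside the request so the trailing `tDeatroySMqHbReq(&req)` cannot free it a second time. A minimal sketch of that hand-off, assuming nothing beyond plain C (the names below are illustrative, not TDengine structures):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int *rows; } Holder;   /* long-lived owner, e.g. the consumer ep */
typedef struct { int *rows; } Report;   /* short-lived request payload */

static void adoptRows(Holder *h, Report *r) {
  free(h->rows);        /* drop the previous snapshot */
  h->rows = r->rows;    /* take ownership of the reported data */
  r->rows = NULL;       /* the report no longer owns it */
}

int main(void) {
  Holder h = { malloc(4 * sizeof(int)) };
  Report r = { malloc(8 * sizeof(int)) };
  adoptRows(&h, &r);
  printf("holder now owns %p, report cleared to %p\n", (void *)h.rows, (void *)r.rows);
  free(h.rows);         /* the reported buffer is freed exactly once, here */
  free(r.rows);         /* no-op: r.rows is NULL, so no double free */
  return 0;
}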
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 1a6b401918..47619f89ce 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -414,6 +414,13 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
}
+static int32_t mndSetPrepareNewVgActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
+ for (int32_t v = 0; v < pDb->cfg.numOfVgroups; ++v) {
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, (pVgroups + v)) != 0) return -1;
+ }
+ return 0;
+}
+
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
if (pDbRaw == NULL) return -1;
@@ -424,7 +431,7 @@ static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroups + v);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
- if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_UPDATE) != 0) return -1;
}
return 0;
@@ -589,9 +596,10 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
mInfo("trans:%d, used to create db:%s", pTrans->id, pCreate->db);
mndTransSetDbName(pTrans, dbObj.name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
+ if (mndSetPrepareNewVgActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups, pNewUserDuped) != 0) goto _OVER;
@@ -832,7 +840,7 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p
int32_t code = -1;
mndTransSetDbName(pTrans, pOld->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
@@ -1129,7 +1137,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
mInfo("trans:%d start to drop db:%s", pTrans->id, pDb->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
goto _OVER;
}
diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c
index 6dab018236..f0fa40cacf 100644
--- a/source/dnode/mnode/impl/src/mndDef.c
+++ b/source/dnode/mnode/impl/src/mndDef.c
@@ -187,14 +187,14 @@ SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
if (pVgEpNew == NULL) return NULL;
pVgEpNew->vgId = pVgEp->vgId;
- pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
+// pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
pVgEpNew->epSet = pVgEp->epSet;
return pVgEpNew;
}
void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
if (pVgEp) {
- taosMemoryFreeClear(pVgEp->qmsg);
+// taosMemoryFreeClear(pVgEp->qmsg);
taosMemoryFree(pVgEp);
}
}
@@ -202,14 +202,18 @@ void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
- tlen += taosEncodeString(buf, pVgEp->qmsg);
+// tlen += taosEncodeString(buf, pVgEp->qmsg);
tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
return tlen;
}
-void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp) {
+void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) {
buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
- buf = taosDecodeString(buf, &pVgEp->qmsg);
+ if(sver == 1){
+ uint64_t size = 0;
+ buf = taosDecodeVariantU64(buf, &size);
+ buf = POINTER_SHIFT(buf, size);
+ }
buf = taosDecodeSEpSet(buf, &pVgEp->epSet);
return (void *)buf;
}
@@ -321,10 +325,15 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) {
tlen += taosEncodeFixedI32(buf, 0);
}
+ tlen += taosEncodeFixedI8(buf, pConsumer->withTbName);
+ tlen += taosEncodeFixedI8(buf, pConsumer->useSnapshot);
+ tlen += taosEncodeFixedI8(buf, pConsumer->autoCommit);
+ tlen += taosEncodeFixedI32(buf, pConsumer->autoCommitInterval);
+ tlen += taosEncodeFixedI32(buf, pConsumer->resetOffsetCfg);
return tlen;
}
-void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer) {
+void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t sver) {
int32_t sz;
buf = taosDecodeFixedI64(buf, &pConsumer->consumerId);
buf = taosDecodeStringTo(buf, pConsumer->clientId);
@@ -375,50 +384,95 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer) {
taosArrayPush(pConsumer->assignedTopics, &topic);
}
+ if(sver > 1){
+ buf = taosDecodeFixedI8(buf, &pConsumer->withTbName);
+ buf = taosDecodeFixedI8(buf, &pConsumer->useSnapshot);
+ buf = taosDecodeFixedI8(buf, &pConsumer->autoCommit);
+ buf = taosDecodeFixedI32(buf, &pConsumer->autoCommitInterval);
+ buf = taosDecodeFixedI32(buf, &pConsumer->resetOffsetCfg);
+ }
return (void *)buf;
}
-SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
- SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
- if (pConsumerEpNew == NULL) return NULL;
- pConsumerEpNew->consumerId = pConsumerEpOld->consumerId;
- pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, (__array_item_dup_fn_t)tCloneSMqVgEp);
- return pConsumerEpNew;
-}
-
-void tDeleteSMqConsumerEp(void *data) {
- SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data;
- taosArrayDestroyP(pConsumerEp->vgs, (FDelete)tDeleteSMqVgEp);
-}
+//SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
+// SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
+// if (pConsumerEpNew == NULL) return NULL;
+// pConsumerEpNew->consumerId = pConsumerEpOld->consumerId;
+// pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, NULL);
+// return pConsumerEpNew;
+//}
+//
+//void tDeleteSMqConsumerEp(void *data) {
+// SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data;
+// taosArrayDestroy(pConsumerEp->vgs);
+//}
int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pConsumerEp->consumerId);
tlen += taosEncodeArray(buf, pConsumerEp->vgs, (FEncode)tEncodeSMqVgEp);
-#if 0
- int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
- tlen += taosEncodeFixedI32(buf, sz);
- for (int32_t i = 0; i < sz; i++) {
- SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
- tlen += tEncodeSMqVgEp(buf, pVgEp);
+ int32_t szVgs = taosArrayGetSize(pConsumerEp->offsetRows);
+ tlen += taosEncodeFixedI32(buf, szVgs);
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows *offRows = taosArrayGet(pConsumerEp->offsetRows, j);
+ tlen += taosEncodeFixedI32(buf, offRows->vgId);
+ tlen += taosEncodeFixedI64(buf, offRows->rows);
+ tlen += taosEncodeFixedI8(buf, offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.uid);
+ tlen += taosEncodeFixedI64(buf, offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.version);
+ } else {
+ // do nothing
+ }
}
-#endif
+//#if 0
+// int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
+// tlen += taosEncodeFixedI32(buf, sz);
+// for (int32_t i = 0; i < sz; i++) {
+// SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
+// tlen += tEncodeSMqVgEp(buf, pVgEp);
+// }
+//#endif
return tlen;
}
-void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) {
+void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t sver) {
buf = taosDecodeFixedI64(buf, &pConsumerEp->consumerId);
- buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp));
-#if 0
- int32_t sz;
- buf = taosDecodeFixedI32(buf, &sz);
- pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *));
- for (int32_t i = 0; i < sz; i++) {
- SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
- buf = tDecodeSMqVgEp(buf, pVgEp);
- taosArrayPush(pConsumerEp->vgs, &pVgEp);
+ buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
+ if (sver > 1){
+ int32_t szVgs = 0;
+ buf = taosDecodeFixedI32(buf, &szVgs);
+ if(szVgs > 0){
+ pConsumerEp->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
+ if (NULL == pConsumerEp->offsetRows) return NULL;
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows* offRows = taosArrayReserve(pConsumerEp->offsetRows, 1);
+ buf = taosDecodeFixedI32(buf, &offRows->vgId);
+ buf = taosDecodeFixedI64(buf, &offRows->rows);
+ buf = taosDecodeFixedI8(buf, &offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.uid);
+ buf = taosDecodeFixedI64(buf, &offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.version);
+ } else {
+ // do nothing
+ }
+ }
+ }
}
-#endif
+//#if 0
+// int32_t sz;
+// buf = taosDecodeFixedI32(buf, &sz);
+// pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *));
+// for (int32_t i = 0; i < sz; i++) {
+// SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
+// buf = tDecodeSMqVgEp(buf, pVgEp);
+// taosArrayPush(pConsumerEp->vgs, &pVgEp);
+// }
+//#endif
return (void *)buf;
}
@@ -468,7 +522,9 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {
taosHashPut(pSubNew->consumerHash, &newEp.consumerId, sizeof(int64_t), &newEp, sizeof(SMqConsumerEp));
}
pSubNew->unassignedVgs = taosArrayDup(pSub->unassignedVgs, (__array_item_dup_fn_t)tCloneSMqVgEp);
+ pSubNew->offsetRows = taosArrayDup(pSub->offsetRows, NULL);
memcpy(pSubNew->dbName, pSub->dbName, TSDB_DB_FNAME_LEN);
+ pSubNew->qmsg = taosStrdup(pSub->qmsg);
return pSubNew;
}
@@ -479,9 +535,12 @@ void tDeleteSubscribeObj(SMqSubscribeObj *pSub) {
if (pIter == NULL) break;
SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
taosArrayDestroyP(pConsumerEp->vgs, (FDelete)tDeleteSMqVgEp);
+ taosArrayDestroy(pConsumerEp->offsetRows);
}
taosHashCleanup(pSub->consumerHash);
taosArrayDestroyP(pSub->unassignedVgs, (FDelete)tDeleteSMqVgEp);
+ taosMemoryFreeClear(pSub->qmsg);
+ taosArrayDestroy(pSub->offsetRows);
}
int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
@@ -508,10 +567,28 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
if (cnt != sz) return -1;
tlen += taosEncodeArray(buf, pSub->unassignedVgs, (FEncode)tEncodeSMqVgEp);
tlen += taosEncodeString(buf, pSub->dbName);
+
+ int32_t szVgs = taosArrayGetSize(pSub->offsetRows);
+ tlen += taosEncodeFixedI32(buf, szVgs);
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows *offRows = taosArrayGet(pSub->offsetRows, j);
+ tlen += taosEncodeFixedI32(buf, offRows->vgId);
+ tlen += taosEncodeFixedI64(buf, offRows->rows);
+ tlen += taosEncodeFixedI8(buf, offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.uid);
+ tlen += taosEncodeFixedI64(buf, offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.version);
+ } else {
+ // do nothing
+ }
+ }
+ tlen += taosEncodeString(buf, pSub->qmsg);
return tlen;
}
-void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
+void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) {
//
buf = taosDecodeStringTo(buf, pSub->key);
buf = taosDecodeFixedI64(buf, &pSub->dbUid);
@@ -526,74 +603,98 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
pSub->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
for (int32_t i = 0; i < sz; i++) {
SMqConsumerEp consumerEp = {0};
- buf = tDecodeSMqConsumerEp(buf, &consumerEp);
+ buf = tDecodeSMqConsumerEp(buf, &consumerEp, sver);
taosHashPut(pSub->consumerHash, &consumerEp.consumerId, sizeof(int64_t), &consumerEp, sizeof(SMqConsumerEp));
}
- buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp));
+ buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
buf = taosDecodeStringTo(buf, pSub->dbName);
+
+ if (sver > 1){
+ int32_t szVgs = 0;
+ buf = taosDecodeFixedI32(buf, &szVgs);
+ if(szVgs > 0){
+ pSub->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
+ if (NULL == pSub->offsetRows) return NULL;
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows* offRows = taosArrayReserve(pSub->offsetRows, 1);
+ buf = taosDecodeFixedI32(buf, &offRows->vgId);
+ buf = taosDecodeFixedI64(buf, &offRows->rows);
+ buf = taosDecodeFixedI8(buf, &offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.uid);
+ buf = taosDecodeFixedI64(buf, &offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.version);
+ } else {
+ // do nothing
+ }
+ }
+ }
+ buf = taosDecodeString(buf, &pSub->qmsg);
+ }
return (void *)buf;
}
-SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
- SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
- if (pEntryNew == NULL) return NULL;
- pEntryNew->epoch = pEntry->epoch;
- pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
- return pEntryNew;
-}
+//SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
+// SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
+// if (pEntryNew == NULL) return NULL;
+// pEntryNew->epoch = pEntry->epoch;
+// pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
+// return pEntryNew;
+//}
+//
+//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
+// taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
+//}
-void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
- taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
-}
+//int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
+// int32_t tlen = 0;
+// tlen += taosEncodeFixedI32(buf, pEntry->epoch);
+// tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
+// return tlen;
+//}
+//
+//void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
+// buf = taosDecodeFixedI32(buf, &pEntry->epoch);
+// buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
+// return (void *)buf;
+//}
-int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI32(buf, pEntry->epoch);
- tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
- return tlen;
-}
+//SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
+// SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
+// if (pLogNew == NULL) return pLogNew;
+// memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
+// pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
+// return pLogNew;
+//}
+//
+//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
+// taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
+//}
-void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
- buf = taosDecodeFixedI32(buf, &pEntry->epoch);
- buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
- return (void *)buf;
-}
-
-SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
- SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
- if (pLogNew == NULL) return pLogNew;
- memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
- pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
- return pLogNew;
-}
-
-void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
- taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
-}
-
-int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
- int32_t tlen = 0;
- tlen += taosEncodeString(buf, pLog->key);
- tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
- return tlen;
-}
-
-void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
- buf = taosDecodeStringTo(buf, pLog->key);
- buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
- return (void *)buf;
-}
-
-int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
- int32_t tlen = 0;
- tlen += taosEncodeString(buf, pOffset->key);
- tlen += taosEncodeFixedI64(buf, pOffset->offset);
- return tlen;
-}
-
-void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
- buf = taosDecodeStringTo(buf, pOffset->key);
- buf = taosDecodeFixedI64(buf, &pOffset->offset);
- return buf;
-}
+//int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
+// int32_t tlen = 0;
+// tlen += taosEncodeString(buf, pLog->key);
+// tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
+// return tlen;
+//}
+//
+//void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
+// buf = taosDecodeStringTo(buf, pLog->key);
+// buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
+// return (void *)buf;
+//}
+//
+//int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
+// int32_t tlen = 0;
+// tlen += taosEncodeString(buf, pOffset->key);
+// tlen += taosEncodeFixedI64(buf, pOffset->offset);
+// return tlen;
+//}
+//
+//void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
+// buf = taosDecodeStringTo(buf, pOffset->key);
+// buf = taosDecodeFixedI64(buf, &pOffset->offset);
+// return buf;
+//}
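The mndDef.c changes above follow one serialization rule: the encoders always append the new consumer flags and the per-vgroup OffsetRows records, while the decoders read them only when the stored record's `sver` is greater than 1, so rows persisted by the previous format still load cleanly. A minimal, self-contained sketch of that sver-gated layout (plain C, not the TDengine codec):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct { int32_t vgId; int64_t rows; } Rec;

static size_t encodeRec(uint8_t *buf, const Rec *r) {
  memcpy(buf, &r->vgId, sizeof(r->vgId));
  memcpy(buf + sizeof(r->vgId), &r->rows, sizeof(r->rows));  /* field added in v2, always written */
  return sizeof(r->vgId) + sizeof(r->rows);
}

static size_t decodeRec(const uint8_t *buf, Rec *r, int8_t sver) {
  size_t off = 0;
  memcpy(&r->vgId, buf + off, sizeof(r->vgId)); off += sizeof(r->vgId);
  if (sver > 1) {                                   /* field absent in v1 records */
    memcpy(&r->rows, buf + off, sizeof(r->rows)); off += sizeof(r->rows);
  } else {
    r->rows = 0;                                    /* sensible default for old rows */
  }
  return off;
}

int main(void) {
  uint8_t buf[64];
  Rec in = {.vgId = 3, .rows = 120}, out = {0};
  encodeRec(buf, &in);
  decodeRec(buf, &out, 2);
  printf("vgId=%d rows=%lld\n", (int)out.vgId, (long long)out.rows);
  return 0;
}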
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 73dbb243a1..bb92bfb4c7 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -632,7 +632,7 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq, "create-dnode");
if (pTrans == NULL) goto _OVER;
mInfo("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
pRaw = mndDnodeActionEncode(&dnodeObj);
if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
@@ -889,7 +889,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to drop dnode:%d, force:%d", pTrans->id, pDnode->id, force);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
pRaw = mndDnodeActionEncode(pDnode);
if (pRaw == NULL) goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c
index 83172acf64..2d2637b8ce 100644
--- a/source/dnode/mnode/impl/src/mndIndex.c
+++ b/source/dnode/mnode/impl/src/mndIndex.c
@@ -645,7 +645,7 @@ int32_t mndAddIndexImpl(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pSt
// mInfo("trans:%d, used to add index to stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
@@ -721,7 +721,7 @@ static int32_t mndDropIdx(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SIdxObj *p
mInfo("trans:%d, used to drop idx:%s", pTrans->id, pIdx->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
if (mndSetDropIdxRedoLogs(pMnode, pTrans, pIdx) != 0) goto _OVER;
@@ -860,4 +860,4 @@ int32_t mndDropIdxsByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index d0b10a5768..91fe1257d2 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -578,7 +578,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
SMnodeObj mnodeObj = {0};
mnodeObj.id = pDnode->id;
@@ -732,7 +732,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj, false) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index a1d815189c..460e75b422 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -834,6 +834,9 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stableQuery, false);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->isSubQuery, false);
+
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->subPlanNum, false);
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 64082536da..9a611fe46a 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -570,25 +570,21 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
mDebug("init subscription %s for topic:%s assign vgId:%d", pSub->key, pTopic->name, pVgEp->vgId);
- if (pSubplan) {
- int32_t msgLen;
-
- pSubplan->execNode.epSet = pVgEp->epSet;
- pSubplan->execNode.nodeId = pVgEp->vgId;
-
- if (qSubPlanToString(pSubplan, &pVgEp->qmsg, &msgLen) < 0) {
- sdbRelease(pSdb, pVgroup);
- qDestroyQueryPlan(pPlan);
- terrno = TSDB_CODE_QRY_INVALID_INPUT;
- return -1;
- }
- } else {
- pVgEp->qmsg = taosStrdup("");
- }
-
sdbRelease(pSdb, pVgroup);
}
+ if (pSubplan) {
+ int32_t msgLen;
+
+ if (qSubPlanToString(pSubplan, &pSub->qmsg, &msgLen) < 0) {
+ qDestroyQueryPlan(pPlan);
+ terrno = TSDB_CODE_QRY_INVALID_INPUT;
+ return -1;
+ }
+ } else {
+ pSub->qmsg = taosStrdup("");
+ }
+
qDestroyQueryPlan(pPlan);
return 0;
}
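In mndScheduler.c above, the subscription's query plan is no longer serialized into every SMqVgEp at subscribe time; a single `pSub->qmsg` string is kept, and (as the mndSubscribe.c changes further down show) it is re-rendered per vgroup only when a rebalance request is actually built. A toy sketch of that "serialize once, specialize per target" shape, with made-up names and plain C only:

#include <stdio.h>
#include <string.h>

typedef struct { char tmpl[64]; } Plan;          /* one template kept per subscription */

static void renderForVgroup(const Plan *p, int vgId, char *out, size_t cap) {
  snprintf(out, cap, "%s@vg%d", p->tmpl, vgId);  /* specialize for one vgroup at send time */
}

int main(void) {
  Plan plan;
  strcpy(plan.tmpl, "scan-topic-plan");
  char req[96];
  for (int vgId = 1; vgId <= 3; ++vgId) {
    renderForVgroup(&plan, vgId, req, sizeof(req));
    printf("send to vg %d: %s\n", vgId, req);
  }
  return 0;
}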
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 42ad9e24d5..c337d85b68 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -388,7 +388,7 @@ static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVg
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
- if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_UPDATE) != 0) return -1;
return 0;
}
@@ -622,11 +622,11 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-sma");
if (pTrans == NULL) goto _OVER;
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to create sma:%s stream:%s", pTrans->id, pCreate->name, streamObj.name);
-
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
@@ -845,7 +845,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
mInfo("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 04a1af1509..3c8b93192d 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -874,7 +874,7 @@ _OVER:
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) return -1;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) return -1;
if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
@@ -1970,7 +1970,7 @@ static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbOb
mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (needRsp) {
void *pCont = NULL;
@@ -2000,7 +2000,7 @@ static int32_t mndAlterStbAndUpdateTagIdxImp(SMnode *pMnode, SRpcMsg *pReq, SDbO
mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (needRsp) {
void *pCont = NULL;
@@ -2244,7 +2244,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p
mInfo("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
@@ -3300,7 +3300,7 @@ static int32_t mndCheckIndexReq(SCreateTagIndexReq *pReq) {
mInfo("trans:%d, used to add index to stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetAlterStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
if (mndSetAlterStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index bcd31c2906..7faaa42ffe 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -740,7 +740,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name);
mndTransSetDbName(pTrans, createStreamReq.sourceDB, streamObj.targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndTransDrop(pTrans);
goto _OVER;
}
@@ -1012,6 +1012,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
mError("failed to checkpoint since stream %s", tstrerror(TSDB_CODE_MND_TRANS_CONFLICT));
+
mndReleaseStream(pMnode, pStream);
mndTransDrop(pTrans);
return -1;
@@ -1142,7 +1143,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to drop stream:%s", pTrans->id, dropReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
@@ -1510,7 +1511,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
@@ -1617,7 +1618,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 74421afa33..61691a30d5 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -24,7 +24,7 @@
#include "tcompare.h"
#include "tname.h"
-#define MND_SUBSCRIBE_VER_NUMBER 1
+#define MND_SUBSCRIBE_VER_NUMBER 2
#define MND_SUBSCRIBE_RESERVE_SIZE 64
#define MND_SUBSCRIBE_REBALANCE_CNT 3
@@ -99,13 +99,23 @@ static SMqSubscribeObj *mndCreateSubscription(SMnode *pMnode, const SMqTopicObj
return pSub;
}
-static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscribeObj *pSub,
- const SMqRebOutputVg *pRebVg) {
+static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj *pSub,
+ const SMqRebOutputVg *pRebVg, SSubplan* pPlan) {
SMqRebVgReq req = {0};
req.oldConsumerId = pRebVg->oldConsumerId;
req.newConsumerId = pRebVg->newConsumerId;
req.vgId = pRebVg->pVgEp->vgId;
- req.qmsg = pRebVg->pVgEp->qmsg;
+ if(pPlan){
+ pPlan->execNode.epSet = pRebVg->pVgEp->epSet;
+ pPlan->execNode.nodeId = pRebVg->pVgEp->vgId;
+ int32_t msgLen;
+ if (qSubPlanToString(pPlan, &req.qmsg, &msgLen) < 0) {
+ terrno = TSDB_CODE_QRY_INVALID_INPUT;
+ return -1;
+ }
+ }else{
+ req.qmsg = taosStrdup("");
+ }
req.subType = pSub->subType;
req.withMeta = pSub->withMeta;
req.suid = pSub->stbUid;
@@ -115,6 +125,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
int32_t ret = 0;
tEncodeSize(tEncodeSMqRebVgReq, &req, tlen, ret);
if (ret < 0) {
+ taosMemoryFree(req.qmsg);
return -1;
}
@@ -122,6 +133,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
void *buf = taosMemoryMalloc(tlen);
if (buf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(req.qmsg);
return -1;
}
@@ -135,17 +147,19 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
if (tEncodeSMqRebVgReq(&encoder, &req) < 0) {
taosMemoryFreeClear(buf);
tEncoderClear(&encoder);
+ taosMemoryFree(req.qmsg);
return -1;
}
tEncoderClear(&encoder);
*pBuf = buf;
*pLen = tlen;
+ taosMemoryFree(req.qmsg);
return 0;
}
-static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SMqSubscribeObj *pSub,
- const SMqRebOutputVg *pRebVg) {
+static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub,
+ const SMqRebOutputVg *pRebVg, SSubplan* pPlan) {
// if (pRebVg->oldConsumerId == pRebVg->newConsumerId) {
// terrno = TSDB_CODE_MND_INVALID_SUB_OPTION;
// return -1;
@@ -153,7 +167,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM
void *buf;
int32_t tlen;
- if (mndBuildSubChangeReq(&buf, &tlen, pSub, pRebVg) < 0) {
+ if (mndBuildSubChangeReq(&buf, &tlen, pSub, pRebVg, pPlan) < 0) {
return -1;
}
@@ -255,7 +269,7 @@ static void doAddNewConsumers(SMqRebOutputObj *pOutput, const SMqRebInputObj *pI
for (int32_t i = 0; i < numOfNewConsumers; i++) {
int64_t consumerId = *(int64_t *)taosArrayGet(pInput->pRebInfo->newConsumers, i);
- SMqConsumerEp newConsumerEp;
+ SMqConsumerEp newConsumerEp = {0};
newConsumerEp.consumerId = consumerId;
newConsumerEp.vgs = taosArrayInit(0, sizeof(void *));
@@ -449,8 +463,44 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
SMqRebOutputVg* pRebOutput = (SMqRebOutputVg *)pRemovedIter;
taosArrayPush(pOutput->rebVgs, pRebOutput);
- if(taosHashGetSize(pOutput->pSub->consumerHash) == 0){ // if all consumer is removed, put all vg into unassigned
- taosArrayPush(pOutput->pSub->unassignedVgs, &pRebOutput->pVgEp);
+    if(taosHashGetSize(pOutput->pSub->consumerHash) == 0){    // if all consumers have been removed
+      taosArrayPush(pOutput->pSub->unassignedVgs, &pRebOutput->pVgEp);   // put every vgroup back into unassigned
+ }
+ }
+
+  if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) {   // if all consumers have been removed
+    SMqSubscribeObj *pSub = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key);  // preserve their offset rows on the subscription
+ if (pSub) {
+ taosRLockLatch(&pSub->lock);
+ bool init = false;
+ if (pOutput->pSub->offsetRows == NULL) {
+ pOutput->pSub->offsetRows = taosArrayInit(4, sizeof(OffsetRows));
+ init = true;
+ }
+ pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pSub->consumerHash, pIter);
+ if (pIter == NULL) break;
+ SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
+ if (init) {
+ taosArrayAddAll(pOutput->pSub->offsetRows, pConsumerEp->offsetRows);
+// mDebug("pSub->offsetRows is init");
+ } else {
+ for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) {
+ OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j);
+ for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) {
+ OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i);
+ if (d1->vgId == d2->vgId) {
+ d2->rows += d1->rows;
+ d2->offset = d1->offset;
+// mDebug("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows);
+ }
+ }
+ }
+ }
+ }
+ taosRUnLockLatch(&pSub->lock);
+ mndReleaseSubscribe(pMnode, pSub);
}
}
@@ -483,14 +533,25 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
}
static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) {
+ struct SSubplan* pPlan = NULL;
+ if(strcmp(pOutput->pSub->qmsg, "") != 0){
+ int32_t code = qStringToSubplan(pOutput->pSub->qmsg, &pPlan);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ return -1;
+ }
+ }
+
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg, "tmq-reb");
if (pTrans == NULL) {
+ nodesDestroyNode((SNode*)pPlan);
return -1;
}
mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndTransDrop(pTrans);
+ nodesDestroyNode((SNode*)pPlan);
return -1;
}
@@ -500,11 +561,13 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
int32_t vgNum = taosArrayGetSize(rebVgs);
for (int32_t i = 0; i < vgNum; i++) {
SMqRebOutputVg *pRebVg = taosArrayGet(rebVgs, i);
- if (mndPersistSubChangeVgReq(pMnode, pTrans, pOutput->pSub, pRebVg) < 0) {
+ if (mndPersistSubChangeVgReq(pMnode, pTrans, pOutput->pSub, pRebVg, pPlan) < 0) {
mndTransDrop(pTrans);
+ nodesDestroyNode((SNode*)pPlan);
return -1;
}
}
+ nodesDestroyNode((SNode*)pPlan);
// 2. redo log: subscribe and vg assignment
// subscribe
@@ -809,7 +872,7 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) {
int8_t sver = 0;
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto SUB_DECODE_OVER;
- if (sver != MND_SUBSCRIBE_VER_NUMBER) {
+ if (sver > MND_SUBSCRIBE_VER_NUMBER || sver < 1) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto SUB_DECODE_OVER;
}
@@ -828,7 +891,7 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, buf, tlen, SUB_DECODE_OVER);
SDB_GET_RESERVE(pRaw, dataPos, MND_SUBSCRIBE_RESERVE_SIZE, SUB_DECODE_OVER);
- if (tDecodeSubscribeObj(buf, pSub) == NULL) {
+ if (tDecodeSubscribeObj(buf, pSub, sver) == NULL) {
goto SUB_DECODE_OVER;
}
@@ -890,6 +953,10 @@ static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubsc
pOldSub->unassignedVgs = pNewSub->unassignedVgs;
pNewSub->unassignedVgs = tmp1;
+ SArray *tmp2 = pOldSub->offsetRows;
+ pOldSub->offsetRows = pNewSub->offsetRows;
+ pNewSub->offsetRows = tmp2;
+
taosWUnLockLatch(&pOldSub->lock);
return 0;
}
@@ -1028,6 +1095,61 @@ END:
return code;
}
+static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t consumerId, const char* topic, const char* cgroup, SArray* vgs, SArray *offsetRows){
+ int32_t sz = taosArrayGetSize(vgs);
+ for (int32_t j = 0; j < sz; j++) {
+ SMqVgEp *pVgEp = taosArrayGetP(vgs, j);
+
+ SColumnInfoData *pColInfo;
+ int32_t cols = 0;
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)topic, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)cgroup, false);
+
+ // vg id
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false);
+
+ // consumer id
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)&consumerId, consumerId == -1);
+
+ mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
+ consumerId, varDataVal(cgroup), pVgEp->vgId);
+
+ // offset
+ OffsetRows *data = NULL;
+ for(int i = 0; i < taosArrayGetSize(offsetRows); i++){
+ OffsetRows *tmp = taosArrayGet(offsetRows, i);
+ if(tmp->vgId != pVgEp->vgId){
+ continue;
+ }
+ data = tmp;
+ }
+ if(data){
+ // vg id
+ char buf[TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0};
+ tFormatOffset(varDataVal(buf), TSDB_OFFSET_LEN, &data->offset);
+ varDataSetLen(buf, strlen(varDataVal(buf)));
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)buf, false);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)&data->rows, false);
+ }else{
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetNULL(pColInfo, *numOfRows);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetNULL(pColInfo, *numOfRows);
+ mError("mnd show subscriptions: do not find vgId:%d in offsetRows", pVgEp->vgId);
+ }
+ (*numOfRows)++;
+ }
+ return 0;
+}
+
int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
@@ -1048,6 +1170,13 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock
blockDataEnsureCapacity(pBlock, numOfRows + pSub->vgNum);
}
+ // topic and cgroup
+ char topic[TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
+ mndSplitSubscribeKey(pSub->key, varDataVal(topic), varDataVal(cgroup), false);
+ varDataSetLen(topic, strlen(varDataVal(topic)));
+ varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
+
SMqConsumerEp *pConsumerEp = NULL;
void *pIter = NULL;
while (1) {
@@ -1055,97 +1184,11 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock
if (pIter == NULL) break;
pConsumerEp = (SMqConsumerEp *)pIter;
- int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
- for (int32_t j = 0; j < sz; j++) {
- SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j);
-
- SColumnInfoData *pColInfo;
- int32_t cols = 0;
-
- // topic and cgroup
- char topic[TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
- char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
- mndSplitSubscribeKey(pSub->key, varDataVal(topic), varDataVal(cgroup), false);
- varDataSetLen(topic, strlen(varDataVal(topic)));
- varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)topic, false);
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)cgroup, false);
-
- // vg id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pVgEp->vgId, false);
-
- // consumer id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumerEp->consumerId, false);
-
- mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
- pConsumerEp->consumerId, varDataVal(cgroup), pVgEp->vgId);
-
- // offset
-#if 0
- // subscribe time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->subscribeTime, false);
-
- // rebalance time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->rebalanceTime, pConsumer->rebalanceTime == 0);
-#endif
-
- numOfRows++;
- }
+ buildResult(pBlock, &numOfRows, pConsumerEp->consumerId, topic, cgroup, pConsumerEp->vgs, pConsumerEp->offsetRows);
}
// do not show for cleared subscription
- int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
- for (int32_t i = 0; i < sz; i++) {
- SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
-
- SColumnInfoData *pColInfo;
- int32_t cols = 0;
-
- // topic and cgroup
- char topic[TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
- char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
- mndSplitSubscribeKey(pSub->key, varDataVal(topic), varDataVal(cgroup), false);
- varDataSetLen(topic, strlen(varDataVal(topic)));
- varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)topic, false);
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)cgroup, false);
-
- // vg id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pVgEp->vgId, false);
-
- // consumer id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, NULL, true);
-
- mDebug("mnd show subscriptions(unassigned): topic %s, cgroup %s vgid %d", varDataVal(topic), varDataVal(cgroup),
- pVgEp->vgId);
-
- // offset
-#if 0
- // subscribe time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->subscribeTime, false);
-
- // rebalance time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->rebalanceTime, pConsumer->rebalanceTime == 0);
-#endif
-
- numOfRows++;
- }
+ buildResult(pBlock, &numOfRows, -1, topic, cgroup, pSub->unassignedVgs, pSub->offsetRows);
pBlock->info.rows = numOfRows;
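Two patterns in the mndSubscribe.c changes above are worth calling out: `buildResult` folds the duplicated SHOW SUBSCRIPTIONS loops into one helper that fills the offset and rows columns when a matching vgId is found and sets them to NULL otherwise, and `mndDoRebalance` merges each removed consumer's offset rows into the subscription, accumulating row counts and keeping the latest offset per vgroup. A rough sketch of that per-vgId merge (illustrative names, not the mnode structures):

#include <stdio.h>

typedef struct { int vgId; long long rows; long long offset; } OffRow;

static void mergeRow(OffRow *dst, int ndst, const OffRow *src) {
  for (int i = 0; i < ndst; ++i) {
    if (dst[i].vgId == src->vgId) {
      dst[i].rows  += src->rows;    /* accumulate consumed row counts */
      dst[i].offset = src->offset;  /* keep the most recently reported offset */
      return;
    }
  }
}

int main(void) {
  OffRow total[] = {{1, 10, 100}, {2, 5, 50}};
  OffRow report  = {2, 7, 70};
  mergeRow(total, 2, &report);
  printf("vgId=2 rows=%lld offset=%lld\n", total[1].rows, total[1].offset);
  return 0;
}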
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index 0a6df02f5f..68bfe09b5e 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -17,6 +17,7 @@
#include "mndSync.h"
#include "mndCluster.h"
#include "mndTrans.h"
+#include "mndVgroup.h"
static int32_t mndSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
if (pMsg == NULL || pMsg->pCont == NULL) {
@@ -73,76 +74,200 @@ static int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
return code;
}
-int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
- SMnode *pMnode = pFsm->data;
+static int32_t mndTransValidatePrepareAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
+ SSdbRow *pRow = NULL;
+ int32_t code = -1;
+
+ if (pAction->msgType == TDMT_MND_CREATE_VG) {
+ pRow = mndVgroupActionDecode(pAction->pRaw);
+ if (pRow == NULL) goto _OUT;
+
+ SVgObj *pVgroup = sdbGetRowObj(pRow);
+ if (pVgroup == NULL) goto _OUT;
+
+ int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
+ if (maxVgId > pVgroup->vgId) {
+ mError("trans:%d, failed to satisfy vgroup id %d of prepare action. maxVgId:%d", pTrans->id, pVgroup->vgId,
+ maxVgId);
+ goto _OUT;
+ }
+ }
+
+ code = 0;
+_OUT:
+ taosMemoryFreeClear(pRow);
+ return code;
+}
+
+static int32_t mndTransValidatePrepareStage(SMnode *pMnode, STrans *pTrans) {
+ int32_t code = -1;
+ int32_t action = 0;
+
+ int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions);
+ if (numOfActions == 0) {
+ code = 0;
+ goto _OUT;
+ }
+
+ mInfo("trans:%d, validate %d prepare actions.", pTrans->id, numOfActions);
+
+ for (action = 0; action < numOfActions; ++action) {
+ STransAction *pAction = taosArrayGet(pTrans->prepareActions, action);
+
+ if (pAction->actionType != TRANS_ACTION_RAW) {
+ mError("trans:%d, prepare action:%d of unexpected type:%d", pTrans->id, action, pAction->actionType);
+ goto _OUT;
+ }
+
+ code = mndTransValidatePrepareAction(pMnode, pTrans, pAction);
+ if (code != 0) {
+ mError("trans:%d, failed to validate prepare action: %d, numOfActions:%d", pTrans->id, action, numOfActions);
+ goto _OUT;
+ }
+ }
+
+ code = 0;
+_OUT:
+ return code;
+}
+
+static int32_t mndTransValidateImp(SMnode *pMnode, STrans *pTrans) {
+ if (pTrans->stage == TRN_STAGE_PREPARE) {
+ if (mndTransCheckConflict(pMnode, pTrans) < 0) {
+ mError("trans:%d, failed to validate trans conflicts.", pTrans->id);
+ return -1;
+ }
+
+ return mndTransValidatePrepareStage(pMnode, pTrans);
+ }
+ return 0;
+}
+
+static int32_t mndTransValidate(SMnode *pMnode, SSdbRaw *pRaw) {
+ STrans *pTrans = NULL;
+ int32_t code = -1;
+
+ SSdbRow *pRow = mndTransDecode(pRaw);
+ if (pRow == NULL) goto _OUT;
+
+ pTrans = sdbGetRowObj(pRow);
+ if (pTrans == NULL) goto _OUT;
+
+ code = mndTransValidateImp(pMnode, pTrans);
+
+_OUT:
+ if (pTrans) mndTransDropData(pTrans);
+ if (pRow) taosMemoryFreeClear(pRow);
+ if (code) terrno = (terrno ? terrno : TSDB_CODE_MND_TRANS_CONFLICT);
+ return code;
+}
+
+int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
+ terrno = TSDB_CODE_SUCCESS;
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
SSdbRaw *pRaw = pMsg->pCont;
-
+ STrans *pTrans = NULL;
+ int32_t code = -1;
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
+
+ if (transId <= 0) {
+ mError("trans:%d, invalid commit msg, cache transId:%d seq:%" PRId64, transId, pMgmt->transId, pMgmt->transSeq);
+ terrno = TSDB_CODE_INVALID_MSG;
+ goto _OUT;
+ }
+
mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
" role:%s raw:%p sec:%d seq:%" PRId64,
transId, pMgmt->transId, pMeta->code, pMeta->index, pMeta->term, pMeta->lastConfigIndex, syncStr(pMeta->state),
pRaw, pMgmt->transSec, pMgmt->transSeq);
- if (pMeta->code == 0) {
- int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
- if (code != 0) {
- mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
- return 0;
- }
- sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
+ code = mndTransValidate(pMnode, pRaw);
+ if (code != 0) {
+ mError("trans:%d, failed to validate requested trans since %s", transId, terrstr());
+ code = 0;
+ pMeta->code = terrno;
+ goto _OUT;
}
- taosThreadMutexLock(&pMgmt->lock);
- pMgmt->errCode = pMeta->code;
-
- if (transId <= 0) {
- taosThreadMutexUnlock(&pMgmt->lock);
- mError("trans:%d, invalid commit msg, cache transId:%d seq:%" PRId64, transId, pMgmt->transId, pMgmt->transSeq);
- } else if (transId == pMgmt->transId) {
- if (pMgmt->errCode != 0) {
- mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
- } else {
- mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, transId, pMgmt->transSeq);
- }
- pMgmt->transId = 0;
- pMgmt->transSec = 0;
- pMgmt->transSeq = 0;
- tsem_post(&pMgmt->syncSem);
- taosThreadMutexUnlock(&pMgmt->lock);
- } else {
- taosThreadMutexUnlock(&pMgmt->lock);
- STrans *pTrans = mndAcquireTrans(pMnode, transId);
- if (pTrans != NULL) {
- mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
- transId, pTrans->createdTime, pMgmt->transId);
- mndTransExecute(pMnode, pTrans, false);
- mndReleaseTrans(pMnode, pTrans);
- } else {
- mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
- }
+ code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
+ if (code != 0) {
+ mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
+ code = 0;
+ pMeta->code = terrno;
+ goto _OUT;
}
+ pTrans = mndAcquireTrans(pMnode, transId);
+ if (pTrans == NULL) {
+ mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
+ goto _OUT;
+ }
+
+ if (pTrans->stage == TRN_STAGE_PREPARE) {
+ bool continueExec = mndTransPerformPrepareStage(pMnode, pTrans);
+ if (!continueExec) goto _OUT;
+ }
+
+ if (pTrans->id != pMgmt->transId) {
+ mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
+ pTrans->id, pTrans->createdTime, pMgmt->transId);
+ mndTransRefresh(pMnode, pTrans);
+ }
+
+ sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
sdbWriteFile(pMnode->pSdb, tsMndSdbWriteDelta);
+ code = 0;
+
+_OUT:
+ if (pTrans) mndReleaseTrans(pMnode, pTrans);
+ return code;
+}
+
+static int32_t mndPostMgmtCode(SMnode *pMnode, int32_t code) {
+ SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+ taosThreadMutexLock(&pMgmt->lock);
+ if (pMgmt->transId == 0) {
+ goto _OUT;
+ }
+
+ pMgmt->transId = 0;
+ pMgmt->transSec = 0;
+ pMgmt->transSeq = 0;
+ pMgmt->errCode = code;
+ tsem_post(&pMgmt->syncSem);
+
+ if (pMgmt->errCode != 0) {
+ mError("trans:%d, failed to propose since %s, post sem", pMgmt->transId, tstrerror(pMgmt->errCode));
+ } else {
+ mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, pMgmt->transId, pMgmt->transSeq);
+ }
+
+_OUT:
+ taosThreadMutexUnlock(&pMgmt->lock);
return 0;
}
-int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
- int32_t code = 0;
+int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
+ SMnode *pMnode = pFsm->data;
+ int32_t code = pMsg->code;
+ if (code != 0) {
+ goto _OUT;
+ }
+
pMsg->info.conn.applyIndex = pMeta->index;
pMsg->info.conn.applyTerm = pMeta->term;
+ pMeta->code = 0;
- if (pMsg->code == 0) {
- SMnode *pMnode = pFsm->data;
- atomic_store_64(&pMnode->applied, pMsg->info.conn.applyIndex);
- }
+ atomic_store_64(&pMnode->applied, pMsg->info.conn.applyIndex);
if (!syncUtilUserCommit(pMsg->msgType)) {
- goto _out;
+ goto _OUT;
}
- code = mndProcessWriteMsg(pFsm, pMsg, pMeta);
-_out:
+ code = mndProcessWriteMsg(pMnode, pMsg, pMeta);
+
+_OUT:
+ mndPostMgmtCode(pMnode, code ? code : pMeta->code);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
return code;
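
The rewritten apply path above routes every outcome (validation failure, sdb write failure, missing trans) through a single `_OUT` label, and `mndPostMgmtCode` then wakes the blocked proposer at most once under the management lock. A minimal standalone sketch of that wake-once handshake, using plain pthreads and hypothetical names (`PendingTrans`, `post_result`, `wait_result`) rather than the real `SSyncMgmt`/`tsem` types:

```c
#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t  done;
  int             transId;   /* 0 means nothing is pending */
  int             errCode;
  int             finished;
} PendingTrans;

/* Apply side: record the outcome and wake the proposer exactly once. */
static void post_result(PendingTrans *p, int transId, int code) {
  pthread_mutex_lock(&p->lock);
  if (p->transId == transId && !p->finished) {  /* drop stale or repeated completions */
    p->errCode  = code;
    p->finished = 1;
    pthread_cond_signal(&p->done);
  }
  pthread_mutex_unlock(&p->lock);
}

/* Propose side: block until the apply side has posted a result. */
static int wait_result(PendingTrans *p) {
  pthread_mutex_lock(&p->lock);
  while (!p->finished) pthread_cond_wait(&p->done, &p->lock);
  int code = p->errCode;
  p->transId = 0;  /* clear the slot only after the result has been read */
  pthread_mutex_unlock(&p->lock);
  return code;
}

int main(void) {
  PendingTrans p = {0};
  pthread_mutex_init(&p.lock, NULL);
  pthread_cond_init(&p.done, NULL);
  p.transId = 7;
  post_result(&p, 7, 0);  /* in the real flow this runs on the apply thread */
  printf("trans:7 finished with code %d\n", wait_result(&p));
  return 0;
}
```

The guard on `transId` mirrors the stale-completion check in the patch: a result for a transaction that is no longer pending is simply dropped.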
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index f1ee7bca3b..91bcbf5f4e 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -753,7 +753,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
}
mndTransSetDbName(pTrans, pTopic->db, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndReleaseTopic(pMnode, pTopic);
mndTransDrop(pTrans);
return -1;
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index cfb5bef9d0..7ebaf6dda5 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -23,28 +23,25 @@
#include "mndSync.h"
#include "mndUser.h"
-#define TRANS_VER_NUMBER 1
+#define TRANS_VER1_NUMBER 1
+#define TRANS_VER2_NUMBER 2
#define TRANS_ARRAY_SIZE 8
#define TRANS_RESERVE_SIZE 48
-static SSdbRaw *mndTransActionEncode(STrans *pTrans);
-static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw);
static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans);
static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *OldTrans, STrans *pOld);
-static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc);
+static int32_t mndTransDelete(SSdb *pSdb, STrans *pTrans, bool callFunc);
static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw);
static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction);
static void mndTransDropLogs(SArray *pArray);
static void mndTransDropActions(SArray *pArray);
-static void mndTransDropData(STrans *pTrans);
static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray);
static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans);
-static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans);
@@ -52,7 +49,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans);
-static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans);
+static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans);
static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsLeader(pMnode); }
static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
@@ -67,11 +64,11 @@ int32_t mndInitTrans(SMnode *pMnode) {
SSdbTable table = {
.sdbType = SDB_TRANS,
.keyType = SDB_KEY_INT32,
- .encodeFp = (SdbEncodeFp)mndTransActionEncode,
- .decodeFp = (SdbDecodeFp)mndTransActionDecode,
+ .encodeFp = (SdbEncodeFp)mndTransEncode,
+ .decodeFp = (SdbDecodeFp)mndTransDecode,
.insertFp = (SdbInsertFp)mndTransActionInsert,
.updateFp = (SdbUpdateFp)mndTransActionUpdate,
- .deleteFp = (SdbDeleteFp)mndTransActionDelete,
+ .deleteFp = (SdbDeleteFp)mndTransDelete,
};
mndSetMsgHandle(pMnode, TDMT_MND_TRANS_TIMER, mndProcessTransTimer);
@@ -103,15 +100,55 @@ static int32_t mndTransGetActionsSize(SArray *pArray) {
return rawDataLen;
}
-static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+
+static int32_t mndTransEncodeAction(SSdbRaw *pRaw, int32_t *offset, SArray *pActions, int32_t actionsNum) {
+ int32_t dataPos = *offset;
+ int8_t unused = 0;
+ int32_t ret = -1;
+
+ for (int32_t i = 0; i < actionsNum; ++i) {
+ STransAction *pAction = taosArrayGet(pActions, i);
+ SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
+ if (pAction->actionType == TRANS_ACTION_RAW) {
+ int32_t len = sdbGetRawTotalSize(pAction->pRaw);
+ SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, len, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
+ } else if (pAction->actionType == TRANS_ACTION_MSG) {
+ SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
+ } else {
+ // nothing
+ }
+ }
+ ret = 0;
+
+_OVER:
+ *offset = dataPos;
+ return ret;
+}
+
+SSdbRaw *mndTransEncode(STrans *pTrans) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ int8_t sver = taosArrayGetSize(pTrans->prepareActions) ? TRANS_VER2_NUMBER : TRANS_VER1_NUMBER;
int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE + pTrans->paramLen;
+ rawDataLen += mndTransGetActionsSize(pTrans->prepareActions);
rawDataLen += mndTransGetActionsSize(pTrans->redoActions);
rawDataLen += mndTransGetActionsSize(pTrans->undoActions);
rawDataLen += mndTransGetActionsSize(pTrans->commitActions);
- SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, TRANS_VER_NUMBER, rawDataLen);
+ SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, sver, rawDataLen);
if (pRaw == NULL) {
mError("trans:%d, failed to alloc raw since %s", pTrans->id, terrstr());
return NULL;
@@ -131,91 +168,22 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER)
+ int32_t prepareActionNum = taosArrayGetSize(pTrans->prepareActions);
int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);
int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions);
int32_t commitActionNum = taosArrayGetSize(pTrans->commitActions);
+
+ if (sver > TRANS_VER1_NUMBER) {
+ SDB_SET_INT32(pRaw, dataPos, prepareActionNum, _OVER)
+ }
SDB_SET_INT32(pRaw, dataPos, redoActionNum, _OVER)
SDB_SET_INT32(pRaw, dataPos, undoActionNum, _OVER)
SDB_SET_INT32(pRaw, dataPos, commitActionNum, _OVER)
- int8_t unused = 0;
- for (int32_t i = 0; i < redoActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->redoActions, i);
- SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
- if (pAction->actionType == TRANS_ACTION_RAW) {
- int32_t len = sdbGetRawTotalSize(pAction->pRaw);
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
- } else if (pAction->actionType == TRANS_ACTION_MSG) {
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
- } else {
- // nothing
- }
- }
-
- for (int32_t i = 0; i < undoActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->undoActions, i);
- SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
- if (pAction->actionType == TRANS_ACTION_RAW) {
- int32_t len = sdbGetRawTotalSize(pAction->pRaw);
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
- } else if (pAction->actionType == TRANS_ACTION_MSG) {
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
- } else {
- // nothing
- }
- }
-
- for (int32_t i = 0; i < commitActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->commitActions, i);
- SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
- if (pAction->actionType == TRANS_ACTION_RAW) {
- int32_t len = sdbGetRawTotalSize(pAction->pRaw);
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
- } else if (pAction->actionType == TRANS_ACTION_MSG) {
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
- } else {
- // nothing
- }
- }
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->prepareActions, prepareActionNum) < 0) goto _OVER;
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->redoActions, redoActionNum) < 0) goto _OVER;
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->undoActions, undoActionNum) < 0) goto _OVER;
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->commitActions, commitActionNum) < 0) goto _OVER;
SDB_SET_INT32(pRaw, dataPos, pTrans->startFunc, _OVER)
SDB_SET_INT32(pRaw, dataPos, pTrans->stopFunc, _OVER)
@@ -242,23 +210,76 @@ _OVER:
return pRaw;
}
-static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+static int32_t mndTransDecodeAction(SSdbRaw *pRaw, int32_t *offset, SArray *pActions, int32_t actionNum) {
+ STransAction action = {0};
+ int32_t dataPos = *offset;
+ int8_t unused = 0;
+ int8_t stage = 0;
+ int8_t actionType = 0;
+ int32_t dataLen = 0;
+ int32_t ret = -1;
+
+ for (int32_t i = 0; i < actionNum; ++i) {
+ memset(&action, 0, sizeof(action));
+ SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
+ action.actionType = actionType;
+ SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
+ action.stage = stage;
+ SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
+ if (action.actionType == TRANS_ACTION_RAW) {
+ SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
+ action.pRaw = taosMemoryMalloc(dataLen);
+ if (action.pRaw == NULL) goto _OVER;
+ mTrace("raw:%p, is created", action.pRaw);
+ SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
+ if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
+ action.pRaw = NULL;
+ } else if (action.actionType == TRANS_ACTION_MSG) {
+ SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
+ tmsgUpdateDnodeEpSet(&action.epSet);
+ SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
+ action.pCont = taosMemoryMalloc(action.contLen);
+ if (action.pCont == NULL) goto _OVER;
+ SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
+ if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
+ action.pCont = NULL;
+ } else {
+ if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
+ }
+ }
+ ret = 0;
+
+_OVER:
+ *offset = dataPos;
+ taosMemoryFreeClear(action.pCont);
+ return ret;
+}
+
+SSdbRow *mndTransDecode(SSdbRaw *pRaw) {
+ terrno = TSDB_CODE_INVALID_MSG;
SSdbRow *pRow = NULL;
STrans *pTrans = NULL;
char *pData = NULL;
int32_t dataLen = 0;
int8_t sver = 0;
+ int32_t prepareActionNum = 0;
int32_t redoActionNum = 0;
int32_t undoActionNum = 0;
int32_t commitActionNum = 0;
int32_t dataPos = 0;
- STransAction action = {0};
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto _OVER;
- if (sver != TRANS_VER_NUMBER) {
+ if (sver != TRANS_VER1_NUMBER && sver != TRANS_VER2_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto _OVER;
}
@@ -294,127 +315,28 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER)
+
+ if (sver > TRANS_VER1_NUMBER) {
+ SDB_GET_INT32(pRaw, dataPos, &prepareActionNum, _OVER)
+ }
SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER)
SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER)
SDB_GET_INT32(pRaw, dataPos, &commitActionNum, _OVER)
+ pTrans->prepareActions = taosArrayInit(prepareActionNum, sizeof(STransAction));
pTrans->redoActions = taosArrayInit(redoActionNum, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(undoActionNum, sizeof(STransAction));
pTrans->commitActions = taosArrayInit(commitActionNum, sizeof(STransAction));
+ if (pTrans->prepareActions == NULL) goto _OVER;
if (pTrans->redoActions == NULL) goto _OVER;
if (pTrans->undoActions == NULL) goto _OVER;
if (pTrans->commitActions == NULL) goto _OVER;
- int8_t unused = 0;
- for (int32_t i = 0; i < redoActionNum; ++i) {
- memset(&action, 0, sizeof(action));
- SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
- action.actionType = actionType;
- SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
- action.stage = stage;
- SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
- if (action.actionType == TRANS_ACTION_RAW) {
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- action.pRaw = taosMemoryMalloc(dataLen);
- if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", action.pRaw);
- SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- action.pRaw = NULL;
- } else if (action.actionType == TRANS_ACTION_MSG) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- tmsgUpdateDnodeEpSet(&action.epSet);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
- } else {
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- }
- }
-
- for (int32_t i = 0; i < undoActionNum; ++i) {
- memset(&action, 0, sizeof(action));
- SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
- action.actionType = actionType;
- SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
- action.stage = stage;
- SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
- if (action.actionType == TRANS_ACTION_RAW) {
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- action.pRaw = taosMemoryMalloc(dataLen);
- if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", action.pRaw);
- SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- action.pRaw = NULL;
- } else if (action.actionType == TRANS_ACTION_MSG) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
- } else {
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- }
- }
-
- for (int32_t i = 0; i < commitActionNum; ++i) {
- memset(&action, 0, sizeof(action));
- SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
- action.actionType = actionType;
- SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
- action.stage = stage;
- SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
- if (action.actionType) {
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- action.pRaw = taosMemoryMalloc(dataLen);
- if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", action.pRaw);
- SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
- if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
- action.pRaw = NULL;
- } else if (action.actionType == TRANS_ACTION_MSG) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
- } else {
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- }
- }
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->prepareActions, prepareActionNum) < 0) goto _OVER;
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->redoActions, redoActionNum) < 0) goto _OVER;
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->undoActions, undoActionNum) < 0) goto _OVER;
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->commitActions, commitActionNum) < 0) goto _OVER;
SDB_GET_INT32(pRaw, dataPos, &pTrans->startFunc, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pTrans->stopFunc, _OVER)
@@ -434,7 +356,6 @@ _OVER:
mError("trans:%d, failed to parse from raw:%p since %s", pTrans->id, pRaw, terrstr());
mndTransDropData(pTrans);
taosMemoryFreeClear(pRow);
- taosMemoryFreeClear(action.pCont);
return NULL;
}
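
The encode/decode split above also bumps the raw-record version: `mndTransEncode` writes `TRANS_VER2_NUMBER` only when prepare actions exist, and `mndTransDecode` accepts both layouts, defaulting the prepare count to zero for old records. A self-contained sketch of that versioned-record idea, with hypothetical `put_i32`/`get_i32` helpers standing in for the `SDB_SET_*`/`SDB_GET_*` macros:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VER1 1
#define VER2 2  /* adds a prepare-action count ahead of the other counts */

static void put_i32(uint8_t *buf, int *pos, int32_t v) { memcpy(buf + *pos, &v, 4); *pos += 4; }
static int32_t get_i32(const uint8_t *buf, int *pos) { int32_t v; memcpy(&v, buf + *pos, 4); *pos += 4; return v; }

/* Encode: only emit the new field when there is something to store. */
static int encode(uint8_t *buf, int32_t prepareNum, int32_t redoNum) {
  int8_t ver = prepareNum > 0 ? VER2 : VER1;
  int    pos = 0;
  buf[pos++] = (uint8_t)ver;
  if (ver >= VER2) put_i32(buf, &pos, prepareNum);
  put_i32(buf, &pos, redoNum);
  return pos;
}

/* Decode: old records have no prepare count, so it defaults to zero. */
static void decode(const uint8_t *buf, int32_t *prepareNum, int32_t *redoNum) {
  int    pos = 0;
  int8_t ver = (int8_t)buf[pos++];
  *prepareNum = (ver >= VER2) ? get_i32(buf, &pos) : 0;
  *redoNum    = get_i32(buf, &pos);
}

int main(void) {
  uint8_t buf[64];
  int32_t prepare = 0, redo = 0;
  encode(buf, 2, 5);
  decode(buf, &prepare, &redo);
  printf("prepare:%d redo:%d\n", prepare, redo);
  return 0;
}
```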
@@ -458,7 +379,7 @@ static const char *mndTransStr(ETrnStage stage) {
return "commit";
case TRN_STAGE_COMMIT_ACTION:
return "commitAction";
- case TRN_STAGE_FINISHED:
+ case TRN_STAGE_FINISH:
return "finished";
case TRN_STAGE_PRE_FINISH:
return "pre-finish";
@@ -519,7 +440,11 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
return 0;
}
-static void mndTransDropData(STrans *pTrans) {
+void mndTransDropData(STrans *pTrans) {
+ if (pTrans->prepareActions != NULL) {
+ mndTransDropActions(pTrans->prepareActions);
+ pTrans->prepareActions = NULL;
+ }
if (pTrans->redoActions != NULL) {
mndTransDropActions(pTrans->redoActions);
pTrans->redoActions = NULL;
@@ -549,7 +474,7 @@ static void mndTransDropData(STrans *pTrans) {
(void)taosThreadMutexDestroy(&pTrans->mutex);
}
-static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
+static int32_t mndTransDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
mInfo("trans:%d, perform delete action, row:%p stage:%s callfunc:%d, stopFunc:%d", pTrans->id, pTrans,
mndTransStr(pTrans->stage), callFunc, pTrans->stopFunc);
@@ -586,10 +511,11 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
pOld->id, pOld, mndTransStr(pOld->stage), pOld->createdTime, pNew, mndTransStr(pNew->stage),
pNew->createdTime);
// only occurred on sync timeout
- terrno = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
+ terrno = TSDB_CODE_MND_TRANS_SYNC_TIMEOUT;
return -1;
}
+ mndTransUpdateActions(pOld->prepareActions, pNew->prepareActions);
mndTransUpdateActions(pOld->redoActions, pNew->redoActions);
mndTransUpdateActions(pOld->undoActions, pNew->undoActions);
mndTransUpdateActions(pOld->commitActions, pNew->commitActions);
@@ -607,7 +533,7 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
}
if (pOld->stage == TRN_STAGE_PRE_FINISH) {
- pOld->stage = TRN_STAGE_FINISHED;
+ pOld->stage = TRN_STAGE_FINISH;
mTrace("trans:%d, stage from pre-finish to finished since perform update action", pNew->id);
}
@@ -646,6 +572,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->conflict = conflict;
pTrans->exec = TRN_EXEC_PARALLEL;
pTrans->createdTime = taosGetTimestampMs();
+ pTrans->prepareActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
@@ -728,6 +655,13 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) {
return mndTransAppendAction(pTrans->commitActions, &action);
}
+int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction) {
+ pAction->stage = TRN_STAGE_PREPARE;
+ pAction->actionType = TRANS_ACTION_RAW;
+ pAction->mTraceId = pTrans->mTraceId;
+ return mndTransAppendAction(pTrans->prepareActions, pAction);
+}
+
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) {
pAction->stage = TRN_STAGE_REDO_ACTION;
pAction->actionType = TRANS_ACTION_MSG;
@@ -800,7 +734,7 @@ void mndTransSetParallel(STrans *pTrans) { pTrans->exec = TRN_EXEC_PARALLEL; }
void mndTransSetOper(STrans *pTrans, EOperType oper) { pTrans->oper = oper; }
static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
- SSdbRaw *pRaw = mndTransActionEncode(pTrans);
+ SSdbRaw *pRaw = mndTransEncode(pTrans);
if (pRaw == NULL) {
mError("trans:%d, failed to encode while sync trans since %s", pTrans->id, terrstr());
return -1;
@@ -872,7 +806,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
return conflict;
}
-int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
+int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans) {
if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
if (strlen(pTrans->dbname) == 0 && strlen(pTrans->stbname) == 0) {
terrno = TSDB_CODE_MND_TRANS_CONFLICT;
@@ -891,7 +825,7 @@ int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
}
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
return -1;
}
@@ -922,7 +856,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
pTrans->rpcRsp = NULL;
pTrans->rpcRspLen = 0;
- mndTransExecute(pMnode, pNew, true);
+ mndTransExecute(pMnode, pNew);
mndReleaseTrans(pMnode, pNew);
return 0;
}
@@ -961,7 +895,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
bool sendRsp = false;
int32_t code = pTrans->code;
- if (pTrans->stage == TRN_STAGE_FINISHED) {
+ if (pTrans->stage == TRN_STAGE_FINISH) {
sendRsp = true;
}
@@ -1003,7 +937,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL;
}
if (code == TSDB_CODE_SYN_TIMEOUT) {
- code = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
+ code = TSDB_CODE_MND_TRANS_SYNC_TIMEOUT;
}
if (i != 0 && code == 0) {
@@ -1104,7 +1038,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
mInfo("trans:%d, invalid action, index:%d, code:0x%x", transId, action, pRsp->code);
}
- mndTransExecute(pMnode, pTrans, true);
+ mndTransExecute(pMnode, pTrans);
_OVER:
mndReleaseTrans(pMnode, pTrans);
@@ -1392,8 +1326,25 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
return code;
}
-static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
+bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
+ int32_t code = 0;
+
+ int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions);
+ if (numOfActions == 0) goto _OVER;
+
+ mInfo("trans:%d, execute %d prepare actions.", pTrans->id, numOfActions);
+
+ for (int32_t action = 0; action < numOfActions; ++action) {
+ STransAction *pAction = taosArrayGet(pTrans->prepareActions, action);
+ code = mndTransExecSingleAction(pMnode, pTrans, pAction);
+ if (code != 0) {
+ mError("trans:%d, failed to execute prepare action:%d, numOfActions:%d", pTrans->id, action, numOfActions);
+ return false;
+ }
+ }
+
+_OVER:
pTrans->stage = TRN_STAGE_REDO_ACTION;
mInfo("trans:%d, stage from prepare to redoAction", pTrans->id);
return continueExec;
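
`mndTransPerformPrepareStage` now runs the prepare actions in order and refuses to advance past the first failure; only a fully successful pass moves the transaction on to the redo stage. A compact sketch of that all-or-stay loop, with hypothetical `Action`/`exec_action` types:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  int id;
  int failCode;  /* 0 = this action succeeds */
} Action;

static int exec_action(const Action *a) { return a->failCode; }

/* Returns true only when every prepare action succeeded and the stage may advance. */
static bool perform_prepare_stage(const Action *actions, int num) {
  for (int i = 0; i < num; ++i) {
    if (exec_action(&actions[i]) != 0) {
      fprintf(stderr, "prepare action %d of %d failed\n", i, num);
      return false;  /* stay in the prepare stage; the caller retries later */
    }
  }
  return true;       /* analogous to moving on to TRN_STAGE_REDO_ACTION */
}

int main(void) {
  Action ok[]  = {{1, 0}, {2, 0}};
  Action bad[] = {{1, 0}, {2, -1}};
  printf("all ok   -> advance:%d\n", perform_prepare_stage(ok, 2));
  printf("one fail -> advance:%d\n", perform_prepare_stage(bad, 2));
  return 0;
}
```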
@@ -1476,7 +1427,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
- pTrans->stage = TRN_STAGE_FINISHED; // TRN_STAGE_PRE_FINISH is not necessary
+ pTrans->stage = TRN_STAGE_FINISH; // TRN_STAGE_PRE_FINISH is not necessary
mInfo("trans:%d, stage from commitAction to finished", pTrans->id);
continueExec = true;
} else {
@@ -1528,14 +1479,14 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-static bool mndTransPerfromPreFinishedStage(SMnode *pMnode, STrans *pTrans) {
+static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans) {
if (mndCannotExecuteTransAction(pMnode)) return false;
bool continueExec = true;
int32_t code = mndTransPreFinish(pMnode, pTrans);
if (code == 0) {
- pTrans->stage = TRN_STAGE_FINISHED;
+ pTrans->stage = TRN_STAGE_FINISH;
mInfo("trans:%d, stage from pre-finish to finish", pTrans->id);
continueExec = true;
} else {
@@ -1547,10 +1498,10 @@ static bool mndTransPerfromPreFinishedStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
+static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = false;
- SSdbRaw *pRaw = mndTransActionEncode(pTrans);
+ SSdbRaw *pRaw = mndTransEncode(pTrans);
if (pRaw == NULL) {
mError("trans:%d, failed to encode while finish trans since %s", pTrans->id, terrstr());
return false;
@@ -1567,12 +1518,12 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
+void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) {
bool continueExec = true;
while (continueExec) {
- mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " leader:%d", pTrans->id,
- mndTransStr(pTrans->stage), pTrans->createdTime, isLeader);
+ mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf:%d", pTrans->id,
+ mndTransStr(pTrans->stage), pTrans->createdTime, topHalf);
pTrans->lastExecTime = taosGetTimestampMs();
switch (pTrans->stage) {
case TRN_STAGE_PREPARE:
@@ -1582,7 +1533,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
continueExec = mndTransPerformRedoActionStage(pMnode, pTrans);
break;
case TRN_STAGE_COMMIT:
- if (isLeader) {
+ if (topHalf) {
continueExec = mndTransPerformCommitStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not commit since not leader", pTrans->id);
@@ -1593,7 +1544,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
break;
case TRN_STAGE_ROLLBACK:
- if (isLeader) {
+ if (topHalf) {
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not rollback since not leader", pTrans->id);
@@ -1604,15 +1555,15 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
break;
case TRN_STAGE_PRE_FINISH:
- if (isLeader) {
- continueExec = mndTransPerfromPreFinishedStage(pMnode, pTrans);
+ if (topHalf) {
+ continueExec = mndTransPerformPreFinishStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not pre-finish since not leader", pTrans->id);
continueExec = false;
}
break;
- case TRN_STAGE_FINISHED:
- continueExec = mndTransPerfromFinishedStage(pMnode, pTrans);
+ case TRN_STAGE_FINISH:
+ continueExec = mndTransPerformFinishStage(pMnode, pTrans);
break;
default:
continueExec = false;
@@ -1623,6 +1574,16 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
mndTransSendRpcRsp(pMnode, pTrans);
}
+void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
+ bool topHalf = true;
+ return mndTransExecuteImp(pMnode, pTrans, topHalf);
+}
+
+void mndTransRefresh(SMnode *pMnode, STrans *pTrans) {
+ bool topHalf = false;
+ return mndTransExecuteImp(pMnode, pTrans, topHalf);
+}
+
static int32_t mndProcessTransTimer(SRpcMsg *pReq) {
mTrace("start to process trans timer");
mndTransPullup(pReq->info.node);
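
The `isLeader` flag is replaced by two thin wrappers over one state machine: `mndTransExecute` for the top half (proposer/leader path) and `mndTransRefresh` for the bottom half (apply path). A trivial sketch of that wrapper-over-flag shape, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

/* One state machine, two entry points that only differ in which stages may run. */
static void trans_execute_impl(int transId, bool topHalf) {
  printf("trans:%d, continue to execute, topHalf:%d\n", transId, topHalf);
  /* the stage switch would live here; commit/rollback/pre-finish only when topHalf */
}

static void trans_execute(int transId) { trans_execute_impl(transId, true);  }  /* proposer side */
static void trans_refresh(int transId) { trans_execute_impl(transId, false); }  /* apply side    */

int main(void) {
  trans_execute(12);
  trans_refresh(12);
  return 0;
}
```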
@@ -1649,7 +1610,7 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
pAction->errCode = 0;
}
- mndTransExecute(pMnode, pTrans, true);
+ mndTransExecute(pMnode, pTrans);
return 0;
}
@@ -1707,7 +1668,7 @@ void mndTransPullup(SMnode *pMnode) {
int32_t *pTransId = taosArrayGet(pArray, i);
STrans *pTrans = mndAcquireTrans(pMnode, *pTransId);
if (pTrans != NULL) {
- mndTransExecute(pMnode, pTrans, true);
+ mndTransExecute(pMnode, pTrans);
}
mndReleaseTrans(pMnode, pTrans);
}
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index b7a6378bd8..36e8755a3e 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -28,7 +28,6 @@
#define VGROUP_VER_NUMBER 1
#define VGROUP_RESERVE_SIZE 64
-static SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw);
static int32_t mndVgroupActionInsert(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionDelete(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew);
@@ -483,15 +482,15 @@ static void *mndBuildDisableVnodeWriteReq(SMnode *pMnode, SDbObj *pDb, int32_t v
return pReq;
}
-static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, SVgObj *pVgroup, int32_t dstVgId, int32_t *pContLen) {
+static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, int32_t srcVgId, SVgObj *pVgroup, int32_t *pContLen) {
SAlterVnodeHashRangeReq alterReq = {
- .srcVgId = pVgroup->vgId,
- .dstVgId = dstVgId,
+ .srcVgId = srcVgId,
+ .dstVgId = pVgroup->vgId,
.hashBegin = pVgroup->hashBegin,
.hashEnd = pVgroup->hashEnd,
};
- mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", pVgroup->vgId, dstVgId,
+ mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", srcVgId, pVgroup->vgId,
pVgroup->hashBegin, pVgroup->hashEnd);
int32_t contLen = tSerializeSAlterVnodeHashRangeReq(NULL, 0, &alterReq);
if (contLen < 0) {
@@ -1207,12 +1206,12 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0;
}
-static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, int32_t dstVgId) {
+static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, int32_t srcVgId, SVgObj *pVgroup) {
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t contLen = 0;
- void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, pVgroup, dstVgId, &contLen);
+ void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, srcVgId, pVgroup, &contLen);
if (pReq == NULL) return -1;
action.pCont = pReq;
@@ -1247,6 +1246,21 @@ int32_t mndAddAlterVnodeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb
return 0;
}
+int32_t mndAddPrepareNewVgAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVg) {
+ SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
+ if (pRaw == NULL) goto _err;
+
+ STransAction action = {.pRaw = pRaw, .msgType = TDMT_MND_CREATE_VG};
+ if (mndTransAppendPrepareAction(pTrans, &action) != 0) goto _err;
+ (void)sdbSetRawStatus(pRaw, SDB_STATUS_CREATING);
+ pRaw = NULL;
+ return 0;
+
+_err:
+ sdbFreeRaw(pRaw);
+ return -1;
+}
+
int32_t mndAddAlterVnodeReplicaAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
if (pDnode == NULL) return -1;
@@ -2241,10 +2255,13 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans,
return 0;
}
-static int32_t mndTransCommitVgStatus(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus) {
+typedef int32_t (*FpTransActionCb)(STrans *pTrans, SSdbRaw *pRaw);
+
+static int32_t mndAddVgStatusAction(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus, ETrnStage stage) {
+ FpTransActionCb appendActionCb = (stage == TRN_STAGE_COMMIT_ACTION) ? mndTransAppendCommitlog : mndTransAppendRedolog;
SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
if (pRaw == NULL) goto _err;
- if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _err;
+ if (appendActionCb(pTrans, pRaw) != 0) goto _err;
(void)sdbSetRawStatus(pRaw, vgStatus);
pRaw = NULL;
return 0;
@@ -2253,18 +2270,32 @@ _err:
return -1;
}
+static int32_t mndAddDbStatusAction(STrans *pTrans, SDbObj *pDb, ESdbStatus dbStatus, ETrnStage stage) {
+ FpTransActionCb appendActionCb = (stage == TRN_STAGE_COMMIT_ACTION) ? mndTransAppendCommitlog : mndTransAppendRedolog;
+ SSdbRaw *pRaw = mndDbActionEncode(pDb);
+ if (pRaw == NULL) goto _err;
+ if (appendActionCb(pTrans, pRaw) != 0) goto _err;
+ (void)sdbSetRawStatus(pRaw, dbStatus);
+ pRaw = NULL;
+ return 0;
+_err:
+ sdbFreeRaw(pRaw);
+ return -1;
+}
+
int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
int32_t code = -1;
STrans *pTrans = NULL;
- SSdbRaw *pRaw = NULL;
SDbObj dbObj = {0};
SArray *pArray = mndBuildDnodesArray(pMnode, 0);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "split-vgroup");
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "split-vgroup");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to split vgroup, vgId:%d", pTrans->id, pVgroup->vgId);
+ mndTransSetDbName(pTrans, pDb->name, NULL);
+
SVgObj newVg1 = {0};
memcpy(&newVg1, pVgroup, sizeof(SVgObj));
mInfo("vgId:%d, vgroup info before split, replica:%d hashBegin:%u hashEnd:%u", newVg1.vgId, newVg1.replica,
@@ -2316,32 +2347,25 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
// alter vgId and hash range
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
- if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
+ int32_t srcVgId = newVg1.vgId;
newVg1.vgId = maxVgId;
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg1) != 0) goto _OVER;
+ if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg1) != 0) goto _OVER;
maxVgId++;
- if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg2, maxVgId) != 0) goto _OVER;
+ srcVgId = newVg2.vgId;
newVg2.vgId = maxVgId;
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg2) != 0) goto _OVER;
+ if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg2) != 0) goto _OVER;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
-
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg2) != 0) goto _OVER;
- // adjust vgroup replica
- if (pDb->cfg.replications != newVg1.replica) {
- if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
- } else {
- if (mndTransCommitVgStatus(pTrans, &newVg1, SDB_STATUS_READY) < 0) goto _OVER;
- }
-
- if (pDb->cfg.replications != newVg2.replica) {
- if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
- } else {
- if (mndTransCommitVgStatus(pTrans, &newVg2, SDB_STATUS_READY) < 0) goto _OVER;
- }
-
- if (mndTransCommitVgStatus(pTrans, pVgroup, SDB_STATUS_DROPPED) < 0) goto _OVER;
+ if (mndAddVgStatusAction(pTrans, &newVg1, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+ if (mndAddVgStatusAction(pTrans, &newVg2, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+ if (mndAddVgStatusAction(pTrans, pVgroup, SDB_STATUS_DROPPED, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+ // update db status
memcpy(&dbObj, pDb, sizeof(SDbObj));
if (dbObj.cfg.pRetensions != NULL) {
dbObj.cfg.pRetensions = taosArrayDup(pDb->cfg.pRetensions, NULL);
@@ -2350,11 +2374,27 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
dbObj.vgVersion++;
dbObj.updateTime = taosGetTimestampMs();
dbObj.cfg.numOfVgroups++;
- pRaw = mndDbActionEncode(&dbObj);
- if (pRaw == NULL) goto _OVER;
- if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
- (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- pRaw = NULL;
+ if (mndAddDbStatusAction(pTrans, &dbObj, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+
+ // adjust vgroup replica
+ if (pDb->cfg.replications != newVg1.replica) {
+ if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
+ } else {
+ if (mndAddVgStatusAction(pTrans, &newVg1, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
+ }
+
+ if (pDb->cfg.replications != newVg2.replica) {
+ if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
+ } else {
+ if (mndAddVgStatusAction(pTrans, &newVg2, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
+ }
+
+ if (mndAddVgStatusAction(pTrans, pVgroup, SDB_STATUS_DROPPED, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
+
+ // commit db status
+ dbObj.vgVersion++;
+ dbObj.updateTime = taosGetTimestampMs();
+ if (mndAddDbStatusAction(pTrans, &dbObj, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
@@ -2362,7 +2402,6 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
_OVER:
taosArrayDestroy(pArray);
mndTransDrop(pTrans);
- sdbFreeRaw(pRaw);
taosArrayDestroy(dbObj.cfg.pRetensions);
return code;
}
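
`mndAddVgStatusAction` and `mndAddDbStatusAction` select between the redo log and the commit log through a function pointer derived from the target stage, which lets the split-vgroup transaction stage the same status change both as a redo action and again at commit. A standalone sketch of that dispatch-by-stage idea, with hypothetical `append_redo`/`append_commit` callbacks:

```c
#include <stdio.h>

typedef enum { STAGE_REDO_ACTION, STAGE_COMMIT_ACTION } Stage;
typedef int (*AppendFp)(const char *what);

static int append_redo(const char *what)   { printf("redo log  : %s\n", what); return 0; }
static int append_commit(const char *what) { printf("commit log: %s\n", what); return 0; }

/* One helper serves both stages: the stage argument selects the log to append to. */
static int add_status_action(const char *what, Stage stage) {
  AppendFp append = (stage == STAGE_COMMIT_ACTION) ? append_commit : append_redo;
  return append(what);
}

int main(void) {
  add_status_action("vgroup 3 -> READY",   STAGE_REDO_ACTION);
  add_status_action("vgroup 3 -> READY",   STAGE_COMMIT_ACTION);
  add_status_action("vgroup 2 -> DROPPED", STAGE_COMMIT_ACTION);
  return 0;
}
```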
diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h
index e9a9e425e3..3c96d8a2fd 100644
--- a/source/dnode/mnode/sdb/inc/sdb.h
+++ b/source/dnode/mnode/sdb/inc/sdb.h
@@ -122,6 +122,7 @@ typedef enum {
SDB_STATUS_DROPPING = 2,
SDB_STATUS_DROPPED = 3,
SDB_STATUS_READY = 4,
+ SDB_STATUS_UPDATE = 5,
} ESdbStatus;
typedef enum {
diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c
index f1cee6395b..258b22d8ee 100644
--- a/source/dnode/mnode/sdb/src/sdbHash.c
+++ b/source/dnode/mnode/sdb/src/sdbHash.c
@@ -256,6 +256,7 @@ int32_t sdbWriteWithoutFree(SSdb *pSdb, SSdbRaw *pRaw) {
code = sdbInsertRow(pSdb, hash, pRaw, pRow, keySize);
break;
case SDB_STATUS_READY:
+ case SDB_STATUS_UPDATE:
case SDB_STATUS_DROPPING:
code = sdbUpdateRow(pSdb, hash, pRaw, pRow, keySize);
break;
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 4ba8d6d69f..b35dc71ed9 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -139,6 +139,7 @@ static STqMgmt tqMgmt = {0};
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
+void tqDestroyTqHandle(void* data);
// tqRead
int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);
@@ -161,6 +162,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq);
int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen);
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);
+int32_t tqMetaGetHandle(STQ* pTq, const char* key);
+int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle);
STqOffsetStore* tqOffsetOpen(STQ* pTq);
void tqOffsetClose(STqOffsetStore*);
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index eb169fbdc2..0d0716f2f0 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -879,9 +879,13 @@ static int32_t metaFilterTableByHash(SMeta *pMeta, SArray *uidList) {
SDecoder dc = {0};
tDecoderInit(&dc, pData, nData);
metaDecodeEntry(&dc, &me);
+
if (me.type != TSDB_SUPER_TABLE) {
- int32_t ret = vnodeValidateTableHash(pMeta->pVnode, me.name);
- if (TSDB_CODE_VND_HASH_MISMATCH == ret) {
+ char tbFName[TSDB_TABLE_FNAME_LEN + 1];
+ snprintf(tbFName, sizeof(tbFName), "%s.%s", pMeta->pVnode->config.dbname, me.name);
+ tbFName[TSDB_TABLE_FNAME_LEN] = '\0';
+ int32_t ret = vnodeValidateTableHash(pMeta->pVnode, tbFName);
+ if (ret < 0 && terrno == TSDB_CODE_VND_HASH_MISMATCH) {
taosArrayPush(uidList, &me.uid);
}
}
@@ -910,6 +914,7 @@ int32_t metaTrimTables(SMeta *pMeta) {
goto end;
}
+ metaInfo("vgId:%d, trim %d tables", TD_VID(pMeta->pVnode), (int32_t)taosArrayGetSize(tbUids));
metaDropTables(pMeta, tbUids);
end:
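
The trim path above now hands `vnodeValidateTableHash` the fully qualified `db.table` name instead of the bare table name. A small sketch of composing such a name with a truncation guard; the buffer length and helper name are hypothetical:

```c
#include <stdio.h>
#include <string.h>

#define FNAME_LEN 64  /* hypothetical stand-in for TSDB_TABLE_FNAME_LEN */

/* Build "db.table"; returns -1 when the combined name would not fit. */
static int build_table_fname(char *out, size_t cap, const char *db, const char *tb) {
  int n = snprintf(out, cap, "%s.%s", db, tb);
  if (n < 0 || (size_t)n >= cap) return -1;  /* truncated: refuse rather than hash a partial name */
  return 0;
}

int main(void) {
  char fname[FNAME_LEN + 1];
  if (build_table_fname(fname, sizeof(fname), "power", "d1001") == 0) {
    printf("validate hash for %s\n", fname);
  }
  return 0;
}
```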
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 5e06daeb14..c1196e505c 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -62,7 +62,7 @@ void tqCleanUp() {
}
}
-static void destroyTqHandle(void* data) {
+void tqDestroyTqHandle(void* data) {
STqHandle* pData = (STqHandle*)data;
qDestroyTask(pData->execHandle.task);
@@ -102,7 +102,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
pTq->walLogLastVer = pVnode->pWal->vers.lastVer;
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
- taosHashSetFreeFp(pTq->pHandle, destroyTqHandle);
+ taosHashSetFreeFp(pTq->pHandle, tqDestroyTqHandle);
taosInitRWLatch(&pTq->lock);
pTq->pPushMgr = taosHashInit(64, MurmurHash3_32, false, HASH_NO_LOCK);
@@ -243,8 +243,8 @@ int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId) {
tqDoSendDataRsp(&pHandle->msg->info, &dataRsp, pHandle->epoch, pHandle->consumerId, TMQ_MSG_TYPE__POLL_RSP, sver,
ever);
- char buf1[80] = {0};
- char buf2[80] = {0};
+ char buf1[TSDB_OFFSET_LEN] = {0};
+ char buf2[TSDB_OFFSET_LEN] = {0};
tFormatOffset(buf1, tListLen(buf1), &dataRsp.reqOffset);
tFormatOffset(buf2, tListLen(buf2), &dataRsp.rspOffset);
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s", vgId,
@@ -259,10 +259,10 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq*
tqDoSendDataRsp(&pMsg->info, pRsp, pReq->epoch, pReq->consumerId, type, sver, ever);
- char buf1[80] = {0};
- char buf2[80] = {0};
- tFormatOffset(buf1, 80, &pRsp->reqOffset);
- tFormatOffset(buf2, 80, &pRsp->rspOffset);
+ char buf1[TSDB_OFFSET_LEN] = {0};
+ char buf2[TSDB_OFFSET_LEN] = {0};
+ tFormatOffset(buf1, TSDB_OFFSET_LEN, &pRsp->reqOffset);
+ tFormatOffset(buf2, TSDB_OFFSET_LEN, &pRsp->rspOffset);
tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId,
pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);
@@ -481,8 +481,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
pHandle->epoch = reqEpoch;
}
- char buf[80];
- tFormatOffset(buf, 80, &reqOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64,
consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId);
@@ -559,7 +559,7 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
} else {
dataRsp.rspOffset.version = currentVer; // return current consume offset value
}
- } else if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
+ } else if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) {
dataRsp.rspOffset.version = sver; // not consume yet, set the earliest position
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
dataRsp.rspOffset.version = ever;
@@ -661,13 +661,17 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
return -1;
}
- SVnode* pVnode = pTq->pVnode;
- int32_t vgId = TD_VID(pVnode);
-
- tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pVnode->config.vgId, req.subKey,
+ tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pTq->pVnode->config.vgId, req.subKey,
req.oldConsumerId, req.newConsumerId);
- STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
+ STqHandle* pHandle = NULL;
+ while(1){
+ pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
+ if (pHandle || tqMetaGetHandle(pTq, req.subKey) < 0){
+ break;
+ }
+ }
+
if (pHandle == NULL) {
if (req.oldConsumerId != -1) {
tqError("vgId:%d, build new consumer handle %s for consumer:0x%" PRIx64 ", but old consumerId:0x%" PRIx64,
@@ -678,86 +682,13 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
tqError("vgId:%d, tq invalid re-balance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId);
goto end;
}
-
- STqHandle tqHandle = {0};
- pHandle = &tqHandle;
-
- memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
- pHandle->consumerId = req.newConsumerId;
- pHandle->epoch = -1;
-
- pHandle->execHandle.subType = req.subType;
- pHandle->fetchMeta = req.withMeta;
-
- // TODO version should be assigned and refed during preprocess
- SWalRef* pRef = walRefCommittedVer(pVnode->pWal);
- if (pRef == NULL) {
- ret = -1;
+ STqHandle handle = {0};
+ ret = tqCreateHandle(pTq, &req, &handle);
+ if(ret < 0){
+ tqDestroyTqHandle(&handle);
goto end;
}
-
- int64_t ver = pRef->refVer;
- pHandle->pRef = pRef;
-
- SReadHandle handle = {.vnode = pVnode, .initTableReader = true, .initTqReader = true, .version = ver};
- initStorageAPI(&handle.api);
-
- pHandle->snapshotVer = ver;
-
- if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- pHandle->execHandle.execCol.qmsg = taosStrdup(req.qmsg);
-
- pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, vgId,
- &pHandle->execHandle.numOfCols, req.newConsumerId);
- void* scanner = NULL;
- qExtractStreamScanner(pHandle->execHandle.task, &scanner);
- pHandle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
- } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
- pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
- pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
-
- pHandle->execHandle.execDb.pFilterOutTbUid =
- taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
- buildSnapContext(handle.vnode, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
- (SSnapContext**)(&handle.sContext));
-
- pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
- } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
- pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
- pHandle->execHandle.execTb.suid = req.suid;
- pHandle->execHandle.execTb.qmsg = taosStrdup(req.qmsg);
-
- if (strcmp(pHandle->execHandle.execTb.qmsg, "") != 0) {
- if (nodesStringToNode(pHandle->execHandle.execTb.qmsg, &pHandle->execHandle.execTb.node) != 0) {
- tqError("nodesStringToNode error in sub stable, since %s, vgId:%d, subkey:%s consumer:0x%" PRIx64, terrstr(),
- pVnode->config.vgId, req.subKey, pHandle->consumerId);
- return -1;
- }
- }
-
- buildSnapContext(handle.vnode, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
- (SSnapContext**)(&handle.sContext));
- pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
-
- SArray* tbUidList = NULL;
- ret = qGetTableList(req.suid, pVnode, pHandle->execHandle.execTb.node, &tbUidList, pHandle->execHandle.task);
- if (ret != TDB_CODE_SUCCESS) {
- tqError("qGetTableList error:%d vgId:%d, subkey:%s consumer:0x%" PRIx64, ret, pVnode->config.vgId, req.subKey,
- pHandle->consumerId);
- taosArrayDestroy(tbUidList);
- goto end;
- }
- tqDebug("tq try to get ctb for stb subscribe, vgId:%d, subkey:%s consumer:0x%" PRIx64 " suid:%" PRId64,
- pVnode->config.vgId, req.subKey, pHandle->consumerId, req.suid);
- pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
- tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList, NULL);
- taosArrayDestroy(tbUidList);
- }
-
- taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
- tqDebug("try to persist handle %s consumer:0x%" PRIx64, req.subKey, pHandle->consumerId);
- ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
- goto end;
+ ret = tqMetaSaveHandle(pTq, req.subKey, &handle);
} else {
taosWLockLatch(&pTq->lock);
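
The subscribe handler now retries the in-memory lookup and falls back to the on-disk metastore (via `tqMetaGetHandle`) before concluding that a handle is genuinely missing. A simplified sketch of that look-aside loop, with hypothetical `cache_get`/`store_load` helpers standing in for `taosHashGet` and the tdb lookup:

```c
#include <stdio.h>

/* Hypothetical stand-ins: an in-memory cache probe and an on-disk load that
 * repopulates the cache on success (mirroring taosHashGet + tqMetaGetHandle). */
static const char *cache_get(const char *key)  { (void)key; return NULL; }
static int         store_load(const char *key) { (void)key; return -1; }  /* <0: not on disk either */

static const char *lookup_handle(const char *key) {
  const char *h = NULL;
  while (1) {
    h = cache_get(key);
    if (h != NULL) break;            /* already cached */
    if (store_load(key) < 0) break;  /* not persisted: let the caller create a new handle */
    /* store_load succeeded: it put the handle into the cache, so loop and fetch it */
  }
  return h;
}

int main(void) {
  const char *h = lookup_handle("topic1:cg1:vg2");
  printf("handle %s\n", h ? "found" : "missing, create a new one");
  return 0;
}
```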
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index ba6d7cb501..3b0e6749c2 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -88,9 +88,9 @@ int32_t tqMetaOpen(STQ* pTq) {
return -1;
}
- if (tqMetaRestoreHandle(pTq) < 0) {
- return -1;
- }
+// if (tqMetaRestoreHandle(pTq) < 0) {
+// return -1;
+// }
if (tqMetaRestoreCheckInfo(pTq) < 0) {
return -1;
@@ -274,6 +274,120 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
return 0;
}
+static int buildHandle(STQ* pTq, STqHandle* handle){
+ SVnode* pVnode = pTq->pVnode;
+ int32_t vgId = TD_VID(pVnode);
+
+ handle->pRef = walOpenRef(pVnode->pWal);
+ if (handle->pRef == NULL) {
+ return -1;
+ }
+ walSetRefVer(handle->pRef, handle->snapshotVer);
+
+ SReadHandle reader = {
+ .vnode = pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = handle->snapshotVer,
+ };
+
+ initStorageAPI(&reader.api);
+
+ if (handle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ handle->execHandle.task =
+ qCreateQueueExecTaskInfo(handle->execHandle.execCol.qmsg, &reader, vgId, &handle->execHandle.numOfCols, handle->consumerId);
+ if (handle->execHandle.task == NULL) {
+ tqError("cannot create exec task for %s", handle->subKey);
+ return -1;
+ }
+ void* scanner = NULL;
+ qExtractStreamScanner(handle->execHandle.task, &scanner);
+ if (scanner == NULL) {
+ tqError("cannot extract stream scanner for %s", handle->subKey);
+ return -1;
+ }
+ handle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
+ if (handle->execHandle.pTqReader == NULL) {
+ tqError("cannot extract exec reader for %s", handle->subKey);
+ return -1;
+ }
+ } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
+ handle->execHandle.pTqReader = tqReaderOpen(pVnode);
+
+ buildSnapContext(reader.vnode, reader.version, 0, handle->execHandle.subType, handle->fetchMeta,
+ (SSnapContext**)(&reader.sContext));
+ handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
+ } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
+
+ if(handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) {
+ if (nodesStringToNode(handle->execHandle.execTb.qmsg, &handle->execHandle.execTb.node) != 0) {
+ tqError("nodesStringToNode error in sub stable, since %s", terrstr());
+ return -1;
+ }
+ }
+ buildSnapContext(reader.vnode, reader.version, handle->execHandle.execTb.suid, handle->execHandle.subType,
+ handle->fetchMeta, (SSnapContext**)(&reader.sContext));
+ handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
+
+ SArray* tbUidList = NULL;
+ int ret = qGetTableList(handle->execHandle.execTb.suid, pVnode, handle->execHandle.execTb.node, &tbUidList, handle->execHandle.task);
+ if(ret != TDB_CODE_SUCCESS) {
+ tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle->subKey, handle->consumerId);
+ taosArrayDestroy(tbUidList);
+ return -1;
+ }
+ tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pVnode->config.vgId, handle->execHandle.execTb.suid);
+ handle->execHandle.pTqReader = tqReaderOpen(pVnode);
+ tqReaderSetTbUidList(handle->execHandle.pTqReader, tbUidList, NULL);
+ taosArrayDestroy(tbUidList);
+ }
+ return 0;
+}
+
+static int restoreHandle(STQ* pTq, void* pVal, int vLen, STqHandle* handle){
+ int32_t vgId = TD_VID(pTq->pVnode);
+ SDecoder decoder;
+ tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
+ tDecodeSTqHandle(&decoder, handle);
+ tDecoderClear(&decoder);
+
+ if(buildHandle(pTq, handle) < 0){
+ return -1;
+ }
+ tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
+ return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
+}
+
+int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle){
+ int32_t vgId = TD_VID(pTq->pVnode);
+
+ memcpy(handle->subKey, req->subKey, TSDB_SUBSCRIBE_KEY_LEN);
+ handle->consumerId = req->newConsumerId;
+ handle->epoch = -1;
+
+ handle->execHandle.subType = req->subType;
+ handle->fetchMeta = req->withMeta;
+ if(req->subType == TOPIC_SUB_TYPE__COLUMN){
+ handle->execHandle.execCol.qmsg = taosStrdup(req->qmsg);
+ }else if(req->subType == TOPIC_SUB_TYPE__DB){
+ handle->execHandle.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
+ }else if(req->subType == TOPIC_SUB_TYPE__TABLE){
+ handle->execHandle.execTb.suid = req->suid;
+ handle->execHandle.execTb.qmsg = taosStrdup(req->qmsg);
+ }
+
+ handle->snapshotVer = walGetLastVer(pTq->pVnode->pWal);
+
+ if(buildHandle(pTq, handle) < 0){
+ return -1;
+ }
+ tqDebug("tq create %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
+ return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
+}
+
int32_t tqMetaRestoreHandle(STQ* pTq) {
int code = 0;
TBC* pCur = NULL;
@@ -281,97 +395,40 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
return -1;
}
- int32_t vgId = TD_VID(pTq->pVnode);
void* pKey = NULL;
int kLen = 0;
void* pVal = NULL;
int vLen = 0;
- SDecoder decoder;
tdbTbcMoveToFirst(pCur);
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqHandle handle = {0};
- tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
- tDecodeSTqHandle(&decoder, &handle);
- tDecoderClear(&decoder);
-
- handle.pRef = walOpenRef(pTq->pVnode->pWal);
- if (handle.pRef == NULL) {
- code = -1;
- goto end;
+ code = restoreHandle(pTq, pVal, vLen, &handle);
+ if (code < 0){
+ tqDestroyTqHandle(&handle);
+ break;
}
- walSetRefVer(handle.pRef, handle.snapshotVer);
-
- SReadHandle reader = {
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = handle.snapshotVer
- };
-
- initStorageAPI(&reader.api);
-
- if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- handle.execHandle.task =
- qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, vgId, &handle.execHandle.numOfCols, 0);
- if (handle.execHandle.task == NULL) {
- tqError("cannot create exec task for %s", handle.subKey);
- code = -1;
- goto end;
- }
- void* scanner = NULL;
- qExtractStreamScanner(handle.execHandle.task, &scanner);
- if (scanner == NULL) {
- tqError("cannot extract stream scanner for %s", handle.subKey);
- code = -1;
- goto end;
- }
- handle.execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
- if (handle.execHandle.pTqReader == NULL) {
- tqError("cannot extract exec reader for %s", handle.subKey);
- code = -1;
- goto end;
- }
- } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
- handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
-
- buildSnapContext(reader.vnode, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
- (SSnapContext**)(&reader.sContext));
- handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
- } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
- handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
-
- if(handle.execHandle.execTb.qmsg != NULL && strcmp(handle.execHandle.execTb.qmsg, "") != 0) {
- if (nodesStringToNode(handle.execHandle.execTb.qmsg, &handle.execHandle.execTb.node) != 0) {
- tqError("nodesStringToNode error in sub stable, since %s", terrstr());
- return -1;
- }
- }
- buildSnapContext(reader.vnode, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
- handle.fetchMeta, (SSnapContext**)(&reader.sContext));
- handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
-
- SArray* tbUidList = NULL;
- int ret = qGetTableList(handle.execHandle.execTb.suid, pTq->pVnode, handle.execHandle.execTb.node, &tbUidList, handle.execHandle.task);
- if(ret != TDB_CODE_SUCCESS) {
- tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle.subKey, handle.consumerId);
- taosArrayDestroy(tbUidList);
- goto end;
- }
- tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
- handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
- tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList, NULL);
- taosArrayDestroy(tbUidList);
- }
- tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, vgId);
- taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
}
-end:
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return code;
}
+
+int32_t tqMetaGetHandle(STQ* pTq, const char* key) {
+ void* pVal = NULL;
+ int vLen = 0;
+
+ if (tdbTbGet(pTq->pExecStore, key, (int)strlen(key), &pVal, &vLen) < 0) {
+ return -1;
+ }
+ STqHandle handle = {0};
+ int code = restoreHandle(pTq, pVal, vLen, &handle);
+ if (code < 0){
+ tqDestroyTqHandle(&handle);
+ }
+ tdbFree(pVal);
+ return code;
+}
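
The new restoreHandle/tqCreateHandle pair funnels both paths through the shared buildHandle step: restore decodes the persisted handle first, create fills the fields from the rebalance request, and the reader/exec-task setup is written only once. A minimal stand-alone sketch of that decode-then-build split, using hypothetical Record/Runtime types rather than the tq structures:

    #include <stdio.h>

    /* hypothetical persisted and runtime forms, standing in for STqHandle */
    typedef struct { char key[32]; long ver; } Record;
    typedef struct { Record rec; int ready; } Runtime;

    /* decode a serialized record (here a toy "key:ver" text form) */
    static int decode_record(const char *buf, Record *r) {
      return sscanf(buf, "%31[^:]:%ld", r->key, &r->ver) == 2 ? 0 : -1;
    }

    /* the single shared build step (open readers, create the exec task, ...) */
    static int build_runtime(Runtime *rt) {
      rt->ready = 1;
      return 0;
    }

    /* restore = decode + build */
    static int restore_runtime(const char *buf, Runtime *rt) {
      if (decode_record(buf, &rt->rec) != 0) return -1;
      return build_runtime(rt);
    }

    /* create = fill fields from the request + the same build */
    static int create_runtime(const char *key, long ver, Runtime *rt) {
      snprintf(rt->rec.key, sizeof(rt->rec.key), "%s", key);
      rt->rec.ver = ver;
      return build_runtime(rt);
    }

    int main(void) {
      Runtime a = {0}, b = {0};
      printf("%d %d\n", restore_runtime("topic1:42", &a), create_runtime("topic2", 7, &b));
      return 0;
    }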
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index a34e765e50..8607fd754e 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -99,15 +99,15 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
if (pOffset != NULL) {
*pOffsetVal = pOffset->val;
- char formatBuf[80];
- tFormatOffset(formatBuf, 80, pOffsetVal);
+ char formatBuf[TSDB_OFFSET_LEN];
+ tFormatOffset(formatBuf, TSDB_OFFSET_LEN, pOffsetVal);
tqDebug("tmq poll: consumer:0x%" PRIx64
", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%" PRIx64,
consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
return 0;
} else {
// no poll occurs in this vnode for this topic, let's seek to the right offset value.
- if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
+ if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) {
if (pRequest->useSnapshot) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot",
consumerId, pHandle->subKey, vgId);
@@ -186,8 +186,8 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP, vgId);
end : {
- char buf[80] = {0};
- tFormatOffset(buf, 80, &dataRsp.rspOffset);
+ char buf[TSDB_OFFSET_LEN] = {0};
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64
" code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
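
Both tqUtil.c hunks swap the magic `80` for `TSDB_OFFSET_LEN` and pass the same constant to tFormatOffset, so the buffer size and the formatter's limit can no longer drift apart. A small self-contained illustration of the pattern (the constant name and value below are placeholders, not the TDengine definition):

    #include <stdio.h>

    #define OFFSET_STR_LEN 64  /* placeholder; one definition shared by buffer and formatter */

    /* the formatter receives the same limit the buffer was sized with */
    static void format_offset(char *buf, int len, long long offset) {
      snprintf(buf, (size_t)len, "log:%lld", offset);
    }

    int main(void) {
      char buf[OFFSET_STR_LEN] = {0};
      format_offset(buf, OFFSET_STR_LEN, 123456789LL);
      printf("%s\n", buf);
      return 0;
    }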
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index faa4d2fc57..efe82e1783 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -325,7 +325,7 @@ int vnodeValidateTableHash(SVnode *pVnode, char *tableFName) {
if (hashValue < pVnode->config.hashBegin || hashValue > pVnode->config.hashEnd) {
terrno = TSDB_CODE_VND_HASH_MISMATCH;
- return TSDB_CODE_VND_HASH_MISMATCH;
+ return -1;
}
return 0;
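
The vnodeValidateTableHash change keeps the specific reason in terrno and returns the generic `-1`, matching callers that only test for a negative return. A minimal sketch of that convention, with a plain global standing in for the thread-local terrno:

    #include <stdio.h>

    static int g_last_error = 0;        /* stands in for the thread-local terrno */
    #define ERR_HASH_MISMATCH 0x1001

    static int validate_hash(unsigned hash, unsigned lo, unsigned hi) {
      if (hash < lo || hash > hi) {
        g_last_error = ERR_HASH_MISMATCH;  /* detailed reason kept on the side */
        return -1;                         /* callers only test for a negative return */
      }
      return 0;
    }

    int main(void) {
      if (validate_hash(10, 100, 200) < 0) printf("failed, reason=0x%x\n", g_last_error);
      return 0;
    }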
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 29f1ddc50f..ff551e6534 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -431,7 +431,7 @@ static int32_t vnodeSyncApplyMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsm
return tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
}
-static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
if (pMsg->code == 0) {
return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
}
@@ -451,7 +451,7 @@ static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFs
return 0;
}
-static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
if (pMeta->isWeak == 1) {
return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
}
@@ -463,7 +463,7 @@ static SyncIndex vnodeSyncAppliedIndex(const SSyncFSM *pFSM) {
return atomic_load_64(&pVnode->state.applied);
}
-static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
SVnode *pVnode = pFsm->data;
vTrace("vgId:%d, rollback-cb is excuted, fsm:%p, index:%" PRId64 ", weak:%d, code:%d, state:%d %s, type:%s",
pVnode->config.vgId, pFsm, pMeta->index, pMeta->isWeak, pMeta->code, pMeta->state, syncStr(pMeta->state),
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 18f6e8050b..327bc7da71 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -344,7 +344,7 @@ static int32_t getFuncInfo(SFunctionNode* pFunc) {
return fmGetFuncInfo(pFunc, msg, sizeof(msg));
}
-static SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList) {
+SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList) {
SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
if (NULL == pFunc) {
return NULL;
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 39e288f694..ee2bc7e442 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -953,6 +953,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pQuery);
nodesDestroyList(pStmt->pTags);
nodesDestroyNode(pStmt->pSubtable);
+ tFreeSCMCreateStreamReq(pStmt->pReq);
+ taosMemoryFreeClear(pStmt->pReq);
break;
}
case QUERY_NODE_DROP_STREAM_STMT: // no pointer field
@@ -1052,6 +1054,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_QUERY: {
SQuery* pQuery = (SQuery*)pNode;
nodesDestroyNode(pQuery->pRoot);
+ nodesDestroyNode(pQuery->pPostRoot);
taosMemoryFreeClear(pQuery->pResSchema);
if (NULL != pQuery->pCmdMsg) {
taosMemoryFreeClear(pQuery->pCmdMsg->pMsg);
diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h
index 66aec272d7..d79aa84bb8 100644
--- a/source/libs/parser/inc/parInt.h
+++ b/source/libs/parser/inc/parInt.h
@@ -34,6 +34,7 @@ int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache*
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache* pMetaCache);
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery);
+int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
#ifdef __cplusplus
}
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 801d43e2a4..fdec9cba79 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -384,6 +384,10 @@ static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateS
if (TSDB_CODE_SUCCESS == code) {
code = collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
}
+ if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) {
+ SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
+ code = reserveDbCfgForLastRow(pCxt, pSelect->pFromTable);
+ }
return code;
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 5890ba15f4..43c4a53896 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -53,6 +53,8 @@ typedef struct STranslateContext {
bool createStream;
bool stableQuery;
bool showRewrite;
+ SNode* pPrevRoot;
+ SNode* pPostRoot;
} STranslateContext;
typedef struct SBuildTopicContext {
@@ -276,6 +278,10 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode);
static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode);
static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal);
+static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt);
+static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery);
+static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery);
+static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery);
static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; }
@@ -6763,6 +6769,54 @@ static int32_t translateStreamTargetTable(STranslateContext* pCxt, SCreateStream
return code;
}
+static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery) {
+ SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+ if (NULL == col) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ strcpy(col->tableAlias, pTable);
+ strcpy(col->colName, pMeta->schema[0].name);
+ SNodeList* pParamterList = nodesMakeList();
+ if (NULL == pParamterList) {
+ nodesDestroyNode((SNode *)col);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col);
+ if (code) {
+ nodesDestroyNode((SNode *)col);
+ nodesDestroyList(pParamterList);
+ return code;
+ }
+
+ SNode* pFunc = (SNode*)createFunction("last", pParamterList);
+ if (NULL == pFunc) {
+ nodesDestroyList(pParamterList);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ SNodeList* pProjectionList = nodesMakeList();
+ if (NULL == pProjectionList) {
+ nodesDestroyList(pParamterList);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ code = nodesListStrictAppend(pProjectionList, pFunc);
+ if (code) {
+ nodesDestroyNode(pFunc);
+ nodesDestroyList(pProjectionList);
+ return code;
+ }
+
+ code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery);
+ if (code) {
+ nodesDestroyList(pProjectionList);
+ return code;
+ }
+
+ return code;
+}
+
static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
pCxt->createStream = true;
STableMeta* pMeta = NULL;
@@ -6789,6 +6843,18 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt
getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB);
code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
}
+ if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) {
+ SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable);
+ code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery);
+/*
+ if (TSDB_CODE_SUCCESS == code) {
+ STranslateContext cxt = {0};
+ int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt);
+ code = translateQuery(&cxt, pStmt->pPrevQuery);
+ destroyTranslateContext(&cxt);
+ }
+*/
+ }
taosMemoryFree(pMeta);
return code;
}
@@ -6855,13 +6921,86 @@ static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt*
code = buildCreateStreamReq(pCxt, pStmt, &createReq);
}
if (TSDB_CODE_SUCCESS == code) {
- code = buildCmdMsg(pCxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, &createReq);
+ if (NULL == pStmt->pPrevQuery) {
+ code = buildCmdMsg(pCxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, &createReq);
+ } else {
+ pStmt->pReq = taosMemoryMalloc(sizeof(createReq));
+ if (NULL == pStmt->pReq) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ memcpy(pStmt->pReq, &createReq, sizeof(createReq));
+ memset(&createReq, 0, sizeof(createReq));
+ TSWAP(pCxt->pPrevRoot, pStmt->pPrevQuery);
+ }
+ }
}
tFreeSCMCreateStreamReq(&createReq);
return code;
}
+int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) {
+ return code;
+ }
+ SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
+ if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) {
+ return code;
+ }
+
+ SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow;
+ pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i;
+ pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit;
+ pInterval->offset = (NULL != pWindow->pOffset ? ((SValueNode*)pWindow->pOffset)->datum.i : 0);
+ pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval);
+ pInterval->slidingUnit =
+ (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit);
+ pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision;
+
+ return code;
+}
+
+int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) {
+ SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot;
+ STranslateContext cxt = {0};
+ SInterval interval = {0};
+ int64_t lastTs = 0;
+
+ int32_t code = initTranslateContext(pParseCxt, NULL, &cxt);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildIntervalForCreateStream(pStmt, &interval);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ if (pResRow && pResRow[0]) {
+ lastTs = *(int64_t*)pResRow[0];
+ } else if (interval.interval > 0) {
+ lastTs = convertTimePrecision(taosGetTimestampMs(), TSDB_TIME_PRECISION_MILLI, interval.precision);
+ } else {
+ lastTs = taosGetTimestampMs();
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ if (interval.interval > 0) {
+ pStmt->pReq->lastTs = taosTimeTruncate(lastTs, &interval);
+ } else {
+ pStmt->pReq->lastTs = lastTs;
+ }
+ code = buildCmdMsg(&cxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, pStmt->pReq);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setQuery(&cxt, pQuery);
+ }
+ setRefreshMate(&cxt, pQuery);
+ destroyTranslateContext(&cxt);
+
+ tFreeSCMCreateStreamReq(pStmt->pReq);
+ taosMemoryFreeClear(pStmt->pReq);
+
+ return code;
+}
+
+
static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) {
SMDropStreamReq dropReq = {0};
SName name;
@@ -7542,8 +7681,7 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) {
return pProjections;
}
-static int32_t createSimpleSelectStmt(const char* pDb, const char* pTable, int32_t numOfProjs,
- const char* const pProjCol[], SSelectStmt** pStmt) {
+static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) {
SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT);
if (NULL == pSelect) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -7559,27 +7697,38 @@ static int32_t createSimpleSelectStmt(const char* pDb, const char* pTable, int32
snprintf(pRealTable->table.tableName, sizeof(pRealTable->table.tableName), "%s", pTable);
snprintf(pRealTable->table.tableAlias, sizeof(pRealTable->table.tableAlias), "%s", pTable);
pSelect->pFromTable = (SNode*)pRealTable;
-
- if (numOfProjs >= 0) {
- pSelect->pProjectionList = createProjectCols(numOfProjs, pProjCol);
- if (NULL == pSelect->pProjectionList) {
- nodesDestroyNode((SNode*)pSelect);
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
+ pSelect->pProjectionList = pProjectionList;
*pStmt = pSelect;
return TSDB_CODE_SUCCESS;
}
+
+static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs,
+ const char* const pProjCol[], SSelectStmt** pStmt) {
+ SNodeList* pProjectionList = NULL;
+ if (numOfProjs >= 0) {
+ pProjectionList = createProjectCols(numOfProjs, pProjCol);
+ if (NULL == pProjectionList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+
+ return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt);
+}
+
+static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) {
+ return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt);
+}
+
static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) {
const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET];
- return createSimpleSelectStmt(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt);
+ return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt);
}
static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) {
- return createSimpleSelectStmt(pStmt->dbName, pStmt->tableName, 0, NULL, pOutput);
+ return createSimpleSelectStmtFromCols(pStmt->dbName, pStmt->tableName, 0, NULL, pOutput);
}
static int32_t createOperatorNode(EOperatorType opType, const char* pColName, SNode* pRight, SNode** pOp) {
@@ -7713,7 +7862,7 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis
static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) {
SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot;
SSelectStmt* pSelect = NULL;
- int32_t code = createSimpleSelectStmt(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal,
+ int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal,
-1, NULL, &pSelect);
if (TSDB_CODE_SUCCESS == code) {
code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags);
@@ -9038,6 +9187,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
}
break;
default:
+ pQuery->haveResultSet = false;
pQuery->execMode = QUERY_EXEC_MODE_RPC;
if (NULL != pCxt->pCmdMsg) {
TSWAP(pQuery->pCmdMsg, pCxt->pCmdMsg);
@@ -9072,6 +9222,10 @@ int32_t translate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache* pMe
if (TSDB_CODE_SUCCESS == code) {
code = translateQuery(&cxt, pQuery->pRoot);
}
+ if (TSDB_CODE_SUCCESS == code && (cxt.pPrevRoot || cxt.pPostRoot)) {
+ pQuery->pPrevRoot = cxt.pPrevRoot;
+ pQuery->pPostRoot = cxt.pPostRoot;
+ }
if (TSDB_CODE_SUCCESS == code) {
code = setQuery(&cxt, pQuery);
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 28d116c381..cbddaf8115 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -204,7 +204,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
- if (TSDB_CODE_SUCCESS == code) {
+ if (TSDB_CODE_SUCCESS == code && pCatalogReq) {
code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache);
}
if (TSDB_CODE_SUCCESS == code) {
@@ -221,6 +221,19 @@ int32_t qContinueParseSql(SParseContext* pCxt, struct SCatalogReq* pCatalogReq,
return parseInsertSql(pCxt, &pQuery, pCatalogReq, pMetaData);
}
+int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pResRow) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (nodeType(pQuery->pRoot)) {
+ case QUERY_NODE_CREATE_STREAM_STMT:
+ code = translatePostCreateStream(pCxt, pQuery, pResRow);
+ break;
+ default:
+ break;
+ }
+
+ return code;
+}
+
void qDestroyParseContext(SParseContext* pCxt) {
if (NULL == pCxt) {
return;
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index a4e8bdd87a..f6dfa93ab2 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -885,12 +885,12 @@ TEST_F(ParserInitialCTest, createStream) {
setCreateStreamReq(
"s1", "test",
- "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired 0 fill_history 1 ignore "
+ "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired 0 fill_history 0 ignore "
"update 1 into st3 as select count(*) from t1 interval(10s)",
"st3", 1);
setStreamOptions(STREAM_CREATE_STABLE_TRUE, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND,
- 10 * MILLISECOND_PER_SECOND, 0, 1, 1);
- run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 FILL_HISTORY 1 IGNORE "
+ 10 * MILLISECOND_PER_SECOND, 0, 0, 1);
+ run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 FILL_HISTORY 0 IGNORE "
"UPDATE 1 INTO st3 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
clearCreateStreamReq();
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index 58b8e53478..2fcc8510d4 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -97,6 +97,12 @@ static int32_t setSubplanExecutionNode(SPhysiNode* pNode, int32_t groupId, SDown
return TSDB_CODE_SUCCESS;
}
+int32_t qContinuePlanPostQuery(void *pPostPlan) {
+ //TODO
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t qSetSubplanExecutionNode(SSubplan* subplan, int32_t groupId, SDownstreamSourceNode* pSource) {
planDebug("QID:0x%" PRIx64 " set subplan execution node, groupId:%d", subplan->id.queryId, groupId);
return setSubplanExecutionNode(subplan->pNode, groupId, pSource);
diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h
index a7490ea422..ef4589f1a9 100644
--- a/source/libs/stream/inc/streamBackendRocksdb.h
+++ b/source/libs/stream/inc/streamBackendRocksdb.h
@@ -123,12 +123,17 @@ char* streamDefaultIterKey_rocksdb(void* iter, int32_t* len);
char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len);
// batch func
+int streamStateGetCfIdx(SStreamState* pState, const char* funcName);
void* streamStateCreateBatch();
int32_t streamStateGetBatchSize(void* pBatch);
void streamStateClearBatch(void* pBatch);
void streamStateDestroyBatch(void* pBatch);
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl);
+
+int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
+ void* val, int32_t vlen, int64_t ttl, void* tmpBuf);
+
int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch);
// int32_t streamDefaultIter_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result);
#endif
\ No newline at end of file
diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index cc8634a692..e180f23206 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -82,6 +82,8 @@ const char* compareParKeyName(void* name);
const char* comparePartagKeyName(void* name);
void* streamBackendInit(const char* path) {
+ uint32_t dbMemLimit = nextPow2(tsMaxStreamBackendCache) << 20;
+
qDebug("start to init stream backend at %s", path);
SBackendHandle* pHandle = taosMemoryCalloc(1, sizeof(SBackendHandle));
pHandle->list = tdListNew(sizeof(SCfComparator));
@@ -91,19 +93,22 @@ void* streamBackendInit(const char* path) {
rocksdb_env_t* env = rocksdb_create_default_env(); // rocksdb_envoptions_create();
- rocksdb_cache_t* cache = rocksdb_cache_create_lru(64 << 20);
+ int32_t nBGThread = tsNumOfSnodeStreamThreads <= 2 ? 1 : tsNumOfSnodeStreamThreads / 2;
+ rocksdb_env_set_low_priority_background_threads(env, nBGThread);
+ rocksdb_env_set_high_priority_background_threads(env, nBGThread);
+
+ rocksdb_cache_t* cache = rocksdb_cache_create_lru(dbMemLimit / 2);
rocksdb_options_t* opts = rocksdb_options_create();
rocksdb_options_set_env(opts, env);
rocksdb_options_set_create_if_missing(opts, 1);
rocksdb_options_set_create_missing_column_families(opts, 1);
- rocksdb_options_set_write_buffer_size(opts, 48 << 20);
- rocksdb_options_set_max_total_wal_size(opts, 128 << 20);
+ rocksdb_options_set_max_total_wal_size(opts, dbMemLimit);
rocksdb_options_set_recycle_log_file_num(opts, 6);
- rocksdb_options_set_max_write_buffer_number(opts, 2);
+ rocksdb_options_set_max_write_buffer_number(opts, 3);
rocksdb_options_set_info_log_level(opts, 0);
- uint32_t dbLimit = nextPow2(tsMaxStreamBackendCache);
- rocksdb_options_set_db_write_buffer_size(opts, dbLimit << 20);
+ rocksdb_options_set_db_write_buffer_size(opts, dbMemLimit);
+ rocksdb_options_set_write_buffer_size(opts, dbMemLimit / 2);
pHandle->env = env;
pHandle->dbOpt = opts;
@@ -256,7 +261,6 @@ void streamBackendDelCompare(void* backend, void* arg) {
}
void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); }
static bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len);
-int streamGetInit(SStreamState* pState, const char* funcName);
// |key|-----value------|
// |key|ttl|len|userData|
@@ -603,14 +607,20 @@ typedef struct {
int32_t encodeValueFunc(void* value, int32_t vlen, int64_t ttl, char** dest) {
SStreamValue key = {.unixTimestamp = ttl, .len = vlen, .data = (char*)(value)};
-
- char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
- char* buf = p;
- int32_t len = 0;
- len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
- len += taosEncodeFixedI32((void**)&buf, key.len);
- len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
- *dest = p;
+ int32_t len = 0;
+ if (*dest == NULL) {
+ char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
+ char* buf = p;
+ len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
+ len += taosEncodeFixedI32((void**)&buf, key.len);
+ len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
+ *dest = p;
+ } else {
+ char* buf = *dest;
+ len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
+ len += taosEncodeFixedI32((void**)&buf, key.len);
+ len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
+ }
return len;
}
/*
@@ -760,7 +770,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt);
params[i].tableOpt = tableOpt;
- int idx = streamGetInit(NULL, funcname);
+ int idx = streamStateGetCfIdx(NULL, funcname);
SCfInit* cfPara = &ginitDict[idx];
rocksdb_comparator_t* compare =
@@ -791,7 +801,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
char idstr[128] = {0};
sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId);
- int idx = streamGetInit(NULL, funcname);
+ int idx = streamStateGetCfIdx(NULL, funcname);
RocksdbCfInst* inst = NULL;
RocksdbCfInst** pInst = taosHashGet(handle->cfInst, idstr, strlen(idstr) + 1);
@@ -925,7 +935,7 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
taosThreadRwlockInit(&pState->pTdbState->rwLock, NULL);
SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen};
pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare);
- // rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
+ rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
qInfo("succ to open state %p on backend, %p, 0x%" PRIx64 "-%d", pState, handle, pState->streamId, pState->taskId);
return 0;
}
@@ -1008,7 +1018,7 @@ void streamStateDestroyCompar(void* arg) {
taosMemoryFree(comp->comp);
}
-int streamGetInit(SStreamState* pState, const char* funcName) {
+int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
int idx = -1;
size_t len = strlen(funcName);
for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) {
@@ -1055,7 +1065,7 @@ bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len
}
rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfName, rocksdb_snapshot_t** snapshot,
rocksdb_readoptions_t** readOpt) {
- int idx = streamGetInit(pState, cfName);
+ int idx = streamStateGetCfIdx(pState, cfName);
if (snapshot != NULL) {
*snapshot = (rocksdb_snapshot_t*)rocksdb_create_snapshot(pState->pTdbState->rocksdb);
@@ -1075,7 +1085,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(pState, funcname); \
+ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \
@@ -1106,7 +1116,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(pState, funcname); \
+ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \
@@ -1154,7 +1164,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(pState, funcname); \
+ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
code = -1; \
@@ -2094,7 +2104,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_
void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); }
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl) {
- int i = streamGetInit(pState, cfName);
+ int i = streamStateGetCfIdx(pState, cfName);
if (i < 0) {
qError("streamState failed to put to cf name:%s", cfName);
@@ -2110,6 +2120,21 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_wr
taosMemoryFree(ttlV);
return 0;
}
+int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
+ void* val, int32_t vlen, int64_t ttl, void* tmpBuf) {
+ char buf[128] = {0};
+ int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf);
+ char* ttlV = tmpBuf;
+ int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(val, vlen, ttl, &ttlV);
+
+ rocksdb_column_family_handle_t* pCf = pState->pTdbState->pHandle[ginitDict[cfIdx].idx];
+ rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen);
+
+ if (tmpBuf == NULL) {
+ taosMemoryFree(ttlV);
+ }
+ return 0;
+}
int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch) {
char* err = NULL;
rocksdb_write(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, (rocksdb_writebatch_t*)pBatch, &err);
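
encodeValueFunc now encodes into a caller-supplied scratch buffer when one is passed and allocates only when `*dest` is NULL, saving an allocation per row on the batched write path. A self-contained sketch of that dual-mode contract (the layout and names below are illustrative, not the stream-state API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Encode ttl|len|payload either into *dest (if provided) or into a fresh
     * allocation. Returns the encoded length; caller frees only if it passed NULL. */
    static int encode_value(const void *val, int vlen, long long ttl, char **dest) {
      int need = (int)(sizeof(long long) + sizeof(int)) + vlen;
      char *buf = *dest ? *dest : (char *)malloc((size_t)need);
      if (buf == NULL) return -1;
      memcpy(buf, &ttl, sizeof(ttl));
      memcpy(buf + sizeof(ttl), &vlen, sizeof(vlen));
      memcpy(buf + sizeof(ttl) + sizeof(vlen), val, (size_t)vlen);
      if (*dest == NULL) *dest = buf;   /* hand ownership back only when we allocated */
      return need;
    }

    int main(void) {
      char scratch[64];
      char *p = scratch;                      /* reuse path: no allocation */
      int n1 = encode_value("abc", 3, 42LL, &p);
      char *q = NULL;                         /* allocate path */
      int n2 = encode_value("abc", 3, 42LL, &q);
      printf("%d %d\n", n1, n2);
      free(q);
      return 0;
    }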
diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c
index bfaeca89f6..dc9a1f80bb 100644
--- a/source/libs/stream/src/tstreamFileState.c
+++ b/source/libs/stream/src/tstreamFileState.c
@@ -350,6 +350,11 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
const int32_t BATCH_LIMIT = 256;
SListNode* pNode = NULL;
+ int idx = streamStateGetCfIdx(pFileState->pFileStore, "state");
+
+ int32_t len = pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 1;
+ char* buf = taosMemoryCalloc(1, len);
+
void* batch = streamStateCreateBatch();
while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) {
SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
@@ -360,9 +365,13 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
}
SStateKey sKey = {.key = *((SWinKey*)pPos->pKey), .opNum = ((SStreamState*)pFileState->pFileStore)->number};
- code = streamStatePutBatch(pFileState->pFileStore, "state", batch, &sKey, pPos->pRowBuff, pFileState->rowSize, 0);
+ code = streamStatePutBatchOptimize(pFileState->pFileStore, idx, batch, &sKey, pPos->pRowBuff, pFileState->rowSize,
+ 0, buf);
+ memset(buf, 0, len);
qDebug("===stream===put %" PRId64 " to disc, res %d", sKey.key.ts, code);
}
+ taosMemoryFree(buf);
+
if (streamStateGetBatchSize(batch) > 0) {
code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch);
}
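
flushSnapshot now resolves the column-family index once and reuses a single scratch buffer for every row, clearing it between rows instead of allocating and freeing per put. The shape of that loop, sketched with standard C and stand-in names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void put_row(int cf_idx, char *scratch, int row, int row_size) {
      memset(scratch, 'a' + (row % 26), (size_t)row_size);   /* stands in for encode + batch put */
      printf("cf=%d row=%d first=%c\n", cf_idx, row, scratch[0]);
    }

    int main(void) {
      const int row_size = 32, rows = 4;
      int cf_idx = 3;                                  /* looked up once, outside the loop */
      char *scratch = calloc(1, (size_t)row_size);     /* allocated once, reused per row */
      if (scratch == NULL) return 1;
      for (int i = 0; i < rows; i++) {
        put_row(cf_idx, scratch, i, row_size);
        memset(scratch, 0, (size_t)row_size);          /* clear between rows, no realloc */
      }
      free(scratch);
      return 0;
    }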
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index f64c2a9560..ccf7c3e4a4 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -618,8 +618,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_
return -1;
}
- // not restored, vnode enable
- if (!pSyncNode->restoreFinish && pSyncNode->vgId != 1) {
+ if (!pSyncNode->restoreFinish) {
terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
sNError(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64,
TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex);
diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c
index eb36389f1d..2f1bcfee83 100644
--- a/source/libs/wal/src/walRef.c
+++ b/source/libs/wal/src/walRef.c
@@ -81,26 +81,11 @@ void walRefLastVer(SWal *pWal, SWalRef *pRef) {
wDebug("vgId:%d, wal ref version %" PRId64 " for last", pWal->cfg.vgId, ver);
}
-SWalRef *walRefCommittedVer(SWal *pWal) {
- SWalRef *pRef = walOpenRef(pWal);
- if (pRef == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
- }
+void walRefCommitVer(SWal *pWal, SWalRef *pRef) {
taosThreadMutexLock(&pWal->mutex);
-
int64_t ver = walGetCommittedVer(pWal);
-
- wDebug("vgId:%d, wal ref version %" PRId64 " for committed", pWal->cfg.vgId, ver);
-
pRef->refVer = ver;
- // bsearch in fileSet
- SWalFileInfo tmpInfo;
- tmpInfo.firstVer = ver;
- SWalFileInfo *pRet = taosArraySearch(pWal->fileInfoSet, &tmpInfo, compareWalFileInfo, TD_LE);
- ASSERT(pRet != NULL);
- // pRef->refFile = pRet->firstVer;
taosThreadMutexUnlock(&pWal->mutex);
- return pRef;
+ wDebug("vgId:%d, wal ref version %" PRId64 " for committed", pWal->cfg.vgId, ver);
}
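
walRefCommittedVer, which allocated a ref and searched the file set, becomes walRefCommitVer, which only records the committed version into a ref the caller already owns. The general shift in API shape, sketched with hypothetical types:

    #include <stdio.h>

    typedef struct { long long ref_ver; } Ref;  /* hypothetical stand-in for SWalRef */

    /* old shape: Ref *ref_committed(...) allocated, searched, and returned a ref;
     * new shape: the caller owns the Ref and the function only records the version */
    static void ref_commit_ver(long long committed_ver, Ref *ref) {
      ref->ref_ver = committed_ver;
    }

    int main(void) {
      Ref r = { -1 };
      ref_commit_ver(1024, &r);
      printf("ref_ver=%lld\n", r.ref_ver);
      return 0;
    }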
diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c
index 6c7c5ddb0d..8906391a9a 100644
--- a/source/util/src/tarray.c
+++ b/source/util/src/tarray.c
@@ -476,13 +476,13 @@ int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode) {
return tlen;
}
-void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz) {
+void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver) {
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
*pArray = taosArrayInit(sz, sizeof(void*));
for (int32_t i = 0; i < sz; i++) {
void* data = taosMemoryCalloc(1, dataSz);
- buf = decode(buf, data);
+ buf = decode(buf, data, sver);
taosArrayPush(*pArray, &data);
}
return (void*)buf;
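
taosDecodeArray now forwards a serialization version to the per-element decode callback, so elements written by older layouts can still be read. A stand-alone sketch of a version-aware decoder of that shape (the function-pointer typedef below is hypothetical, not the real FDecode):

    #include <stdio.h>
    #include <string.h>

    typedef const void *(*DecodeFn)(const void *buf, void *out, signed char sver);

    /* element decoder: version 1 stored a 32-bit value, version 2 a 64-bit one */
    static const void *decode_item(const void *buf, void *out, signed char sver) {
      long long v = 0;
      if (sver == 1) { int v32; memcpy(&v32, buf, sizeof(v32)); v = v32; buf = (const char *)buf + sizeof(v32); }
      else           { memcpy(&v, buf, sizeof(v)); buf = (const char *)buf + sizeof(v); }
      *(long long *)out = v;
      return buf;
    }

    static const void *decode_array(const void *buf, long long *out, int n, DecodeFn fn, signed char sver) {
      for (int i = 0; i < n; i++) buf = fn(buf, &out[i], sver);   /* version forwarded per element */
      return buf;
    }

    int main(void) {
      int enc1[2] = { 7, 8 };                 /* a "version 1" encoding: two 32-bit values */
      long long out[2] = {0};
      decode_array(enc1, out, 2, decode_item, 1);
      printf("%lld %lld\n", out[0], out[1]);
      return 0;
    }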
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 7cf95dcdea..a66af6e732 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -275,7 +275,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT, "Conflict transaction
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL, "Transaction commitlog is null")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL, "Unable to establish connection While execute transaction and will continue in the background")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED, "Last Transaction not finished")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error")
// mnode-mq
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index a81209b835..801e6aa1bd 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -616,6 +616,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/precisionUS.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/precisionNS.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py
index 4bfb7d5ba3..aafd365f34 100644
--- a/tests/pytest/tools/taosdumpTest.py
+++ b/tests/pytest/tools/taosdumpTest.py
@@ -92,9 +92,9 @@ class TDTestCase:
else:
tdLog.info("taosdump found: %s" % binPath)
- os.system("%s -y --databases db -o ./taosdumptest/tmp1" % binPath)
+ os.system("%s --databases db -o ./taosdumptest/tmp1" % binPath)
os.system(
- "%s -y --databases db1 -o ./taosdumptest/tmp2" %
+ "%s --databases db1 -o ./taosdumptest/tmp2" %
binPath)
tdSql.execute("drop database db")
@@ -172,7 +172,7 @@ class TDTestCase:
tdSql.query("show stables")
tdSql.checkRows(2)
os.system(
- "%s -y --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" %
+ "%s --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" %
binPath)
tdSql.execute("drop database db12312313231231321312312312_323")
os.system("%s -i ./taosdumptest/tmp1" % binPath)
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index 8a85ce10ed..36e0480e57 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -97,7 +97,7 @@ class TDTestCase:
tdSql.query("show databases")
tdSql.checkRows(2)
- os.system("%s -i ./taosdumptest/tmp -y" % binPath)
+ os.system("%s -i ./taosdumptest/tmp" % binPath)
tdSql.query("show databases")
tdSql.checkRows(3)
@@ -125,13 +125,13 @@ class TDTestCase:
os.system("rm ./taosdumptest/tmp/*.sql")
os.system("rm ./taosdumptest/tmp/*.avro*")
os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
- os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
+ os.system("%s -D test -o ./taosdumptest/tmp" % binPath)
tdSql.execute("drop database test")
tdSql.query("show databases")
tdSql.checkRows(3)
- os.system("%s -i ./taosdumptest/tmp -y" % binPath)
+ os.system("%s -i ./taosdumptest/tmp" % binPath)
tdSql.execute("use test")
tdSql.query("show stables")
diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py
index c40462b8db..2a3990614a 100644
--- a/tests/pytest/tools/taosdumpTestNanoSupport.py
+++ b/tests/pytest/tools/taosdumpTestNanoSupport.py
@@ -134,15 +134,15 @@ class TDTestCase:
# dump all data
os.system(
- "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+ "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
# dump part data with -S -E
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
+ '%s -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
+ '%s -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
tdSql.execute("drop database timedb1")
@@ -200,14 +200,14 @@ class TDTestCase:
self.createdb(precision="us")
os.system(
- "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+ "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
+ '%s -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
+ '%s -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system("%s -i ./taosdumptest/dumptmp1" % binPath)
@@ -269,14 +269,14 @@ class TDTestCase:
self.createdb(precision="ms")
os.system(
- "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+ "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
+ '%s -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
+ '%s -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system("%s -i ./taosdumptest/dumptmp1" % binPath)
diff --git a/tests/system-test/1-insert/precisionNS.py b/tests/system-test/1-insert/precisionNS.py
new file mode 100644
index 0000000000..be8f1e21dc
--- /dev/null
+++ b/tests/system-test/1-insert/precisionNS.py
@@ -0,0 +1,293 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+class TDTestCase:
+
+ # get col value and total max min ...
+ def getColsValue(self, i, j):
+ # c1 value
+ if random.randint(1, 10) == 5:
+ c1 = None
+ else:
+ c1 = 1
+
+ # c2 value
+ if j % 3200 == 0:
+ c2 = 8764231
+ elif random.randint(1, 10) == 5:
+ c2 = None
+ else:
+ c2 = random.randint(-87654297, 98765321)
+
+
+ value = f"({self.ts}, "
+
+ # c1
+ if c1 is None:
+ value += "null,"
+ else:
+ self.c1Cnt += 1
+ value += f"{c1},"
+ # c2
+ if c2 is None:
+ value += "null,"
+ else:
+ value += f"{c2},"
+ # total count
+ self.c2Cnt += 1
+ # max
+ if self.c2Max is None:
+ self.c2Max = c2
+ else:
+ if c2 > self.c2Max:
+ self.c2Max = c2
+ # min
+ if self.c2Min is None:
+ self.c2Min = c2
+ else:
+ if c2 < self.c2Min:
+ self.c2Min = c2
+ # sum
+ if self.c2Sum is None:
+ self.c2Sum = c2
+ else:
+ self.c2Sum += c2
+
+ # c3 same with ts
+ value += f"{self.ts})"
+
+ # move next
+ self.ts += 1
+
+ return value
+
+ # insert data
+ def insertData(self):
+ tdLog.info("insert data ....")
+ sqls = ""
+ for i in range(self.childCnt):
+ # insert child table
+ values = ""
+ pre_insert = f"insert into t{i} values "
+ for j in range(self.childRow):
+ if values == "":
+ values = self.getColsValue(i, j)
+ else:
+ values += "," + self.getColsValue(i, j)
+
+ # batch insert
+ if j % self.batchSize == 0 and values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+ # append last
+ if values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+
+ sql = "flush database db;"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ # insert finished
+ tdLog.info(f"insert data successfully.\n"
+ f" inserted child table = {self.childCnt}\n"
+ f" inserted child rows = {self.childRow}\n"
+ f" total inserted rows = {self.childCnt*self.childRow}\n")
+ return
+
+
+ # prepareEnv
+ def prepareEnv(self):
+ # init
+ self.ts = 1680000000000*1000*1000
+ self.childCnt = 5
+ self.childRow = 10000
+ self.batchSize = 5000
+
+ # total
+ self.c1Cnt = 0
+ self.c2Cnt = 0
+ self.c2Max = None
+ self.c2Min = None
+ self.c2Sum = None
+
+ # create database db
+ sql = f"create database db vgroups 2 precision 'ns' "
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ sql = f"use db"
+ tdSql.execute(sql)
+
+        # create super table st
+ sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # create child table
+ for i in range(self.childCnt):
+ sql = f"create table t{i} using st tags({i}) "
+ tdSql.execute(sql)
+
+ # create stream
+ sql = "create stream ma into sta as select count(ts) from st interval(100b)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # insert data
+ self.insertData()
+
+ # check data correct
+ def checkExpect(self, sql, expectVal):
+ tdSql.query(sql)
+ rowCnt = tdSql.getRows()
+ for i in range(rowCnt):
+ val = tdSql.getData(i,0)
+ if val != expectVal:
+ tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
+ return False
+
+ tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
+ return True
+
+
+
+
+ # check time macro
+ def checkTimeMacro(self):
+ # 2 week
+ val = 2
+ nsval = val*7*24*60*60*1000*1000*1000
+ expectVal = self.childCnt * self.childRow
+ sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+
+ # 20 day
+ val = 20
+ nsval = val*24*60*60*1000*1000*1000
+ uint = "d"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+
+ # 30 hour
+ val = 30
+ nsval = val*60*60*1000*1000*1000
+ uint = "h"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+
+ # 90 minutes
+ val = 90
+ nsval = val*60*1000*1000*1000
+ uint = "m"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 2s
+ val = 2
+ nsval = val*1000*1000*1000
+ uint = "s"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+        # 5a
+ val = 5
+ nsval = val*1000*1000
+ uint = "a"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 300u
+ val = 300
+ nsval = val*1000
+ uint = "u"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 8b
+ val = 8
+ sql = f"select timediff(ts - {val}b, ts1) from st "
+ self.checkExpect(sql, val)
+
+ # init
+ def init(self, conn, logSql, replicaVar=1):
+ seed = time.clock_gettime(time.CLOCK_REALTIME)
+ random.seed(seed)
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ # where
+ def checkWhere(self):
+ cnt = 300
+ start = self.ts - cnt
+ sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
+ self.checkExpect(sql, cnt)
+
+ for i in range(50):
+ cnt = random.randint(1,40000)
+ base = 2000
+ start = self.ts - cnt - base
+ end = self.ts - base
+ sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
+ self.checkExpect(sql, cnt)
+
+ # stream
+ def checkStream(self):
+ allRows = self.childCnt * self.childRow
+ # ensure write data is expected
+ sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
+ self.checkExpect(sql, allRows - 1)
+
+ # stream count is ok
+ sql =f"select count(*) from sta"
+        cnt = int(allRows / 100) - 1 # the last window is not closed yet, so reduce the count by one
+ self.checkExpect(sql, cnt)
+
+ # check fields
+ sql =f"select count(*) from sta where `count(ts)` != 100"
+ self.checkExpect(sql, 0)
+
+ # check timestamp
+ sql =f"select count(*) from (select diff(`_wstart`) from sta)"
+ self.checkExpect(sql, cnt - 1)
+ sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
+ self.checkExpect(sql, 0)
+
+ # run
+ def run(self):
+ # prepare env
+ self.prepareEnv()
+
+ # time macro like 1w 1d 1h 1m 1s 1a 1u 1b
+ self.checkTimeMacro()
+
+ # check where
+ self.checkWhere()
+
+ # check stream
+ self.checkStream()
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/precisionUS.py b/tests/system-test/1-insert/precisionUS.py
new file mode 100644
index 0000000000..1b41d66010
--- /dev/null
+++ b/tests/system-test/1-insert/precisionUS.py
@@ -0,0 +1,287 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+class TDTestCase:
+
+ # get col value and total max min ...
+ def getColsValue(self, i, j):
+ # c1 value
+ if random.randint(1, 10) == 5:
+ c1 = None
+ else:
+ c1 = 1
+
+ # c2 value
+ if j % 3200 == 0:
+ c2 = 8764231
+ elif random.randint(1, 10) == 5:
+ c2 = None
+ else:
+ c2 = random.randint(-87654297, 98765321)
+
+
+ value = f"({self.ts}, "
+
+ # c1
+ if c1 is None:
+ value += "null,"
+ else:
+ self.c1Cnt += 1
+ value += f"{c1},"
+ # c2
+ if c2 is None:
+ value += "null,"
+ else:
+ value += f"{c2},"
+ # total count
+ self.c2Cnt += 1
+ # max
+ if self.c2Max is None:
+ self.c2Max = c2
+ else:
+ if c2 > self.c2Max:
+ self.c2Max = c2
+ # min
+ if self.c2Min is None:
+ self.c2Min = c2
+ else:
+ if c2 < self.c2Min:
+ self.c2Min = c2
+ # sum
+ if self.c2Sum is None:
+ self.c2Sum = c2
+ else:
+ self.c2Sum += c2
+
+ # c3 same with ts
+ value += f"{self.ts})"
+
+ # move next
+ self.ts += 1
+
+ return value
+
+ # insert data
+ def insertData(self):
+ tdLog.info("insert data ....")
+ sqls = ""
+ for i in range(self.childCnt):
+ # insert child table
+ values = ""
+ pre_insert = f"insert into t{i} values "
+ for j in range(self.childRow):
+ if values == "":
+ values = self.getColsValue(i, j)
+ else:
+ values += "," + self.getColsValue(i, j)
+
+ # batch insert
+ if j % self.batchSize == 0 and values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+ # append last
+ if values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+
+ sql = "flush database db;"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ # insert finished
+ tdLog.info(f"insert data successfully.\n"
+ f" inserted child table = {self.childCnt}\n"
+ f" inserted child rows = {self.childRow}\n"
+ f" total inserted rows = {self.childCnt*self.childRow}\n")
+ return
+
+
+ # prepareEnv
+ def prepareEnv(self):
+ # init
+ self.ts = 1680000000000*1000
+ self.childCnt = 5
+ self.childRow = 10000
+ self.batchSize = 5000
+
+ # total
+ self.c1Cnt = 0
+ self.c2Cnt = 0
+ self.c2Max = None
+ self.c2Min = None
+ self.c2Sum = None
+
+ # create database db
+ sql = f"create database db vgroups 2 precision 'us' "
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ sql = f"use db"
+ tdSql.execute(sql)
+
+ # create super talbe st
+ sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # create child table
+ for i in range(self.childCnt):
+ sql = f"create table t{i} using st tags({i}) "
+ tdSql.execute(sql)
+
+ # create stream
+ sql = "create stream ma into sta as select count(ts) from st interval(100u)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # insert data
+ self.insertData()
+
+ # check data correct
+ def checkExpect(self, sql, expectVal):
+ tdSql.query(sql)
+ rowCnt = tdSql.getRows()
+ for i in range(rowCnt):
+ val = tdSql.getData(i,0)
+ if val != expectVal:
+ tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
+ return False
+
+ tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
+ return True
+
+
+ # check time macro
+ def checkTimeMacro(self):
+ # 2 week
+ val = 2
+ usval = val*7*24*60*60*1000*1000
+ expectVal = self.childCnt * self.childRow
+ sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # 20 day
+ val = 20
+ usval = val*24*60*60*1000*1000
+ uint = "d"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # 30 hour
+ val = 30
+ usval = val*60*60*1000*1000
+ uint = "h"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # 90 minutes
+ val = 90
+ usval = val*60*1000*1000
+ uint = "m"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+ # 2s
+ val = 2
+ usval = val*1000*1000
+ uint = "s"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+ # 20a
+ val = 20
+ usval = val*1000
+ uint = "a"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+ # 300u
+ val = 300
+ usval = val*1
+ uint = "u"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # init
+ def init(self, conn, logSql, replicaVar=1):
+ seed = time.clock_gettime(time.CLOCK_REALTIME)
+ random.seed(seed)
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ # where
+ def checkWhere(self):
+ cnt = 300
+ start = self.ts - cnt
+ sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
+ self.checkExpect(sql, cnt)
+
+ for i in range(50):
+ cnt = random.randint(1,40000)
+ base = 2000
+ start = self.ts - cnt - base
+ end = self.ts - base
+ sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
+ self.checkExpect(sql, cnt)
+
+ # stream
+ def checkStream(self):
+ allRows = self.childCnt * self.childRow
+ # ensure write data is expected
+ sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
+ self.checkExpect(sql, allRows - 1)
+
+ # stream count is ok
+ sql =f"select count(*) from sta"
+        cnt = int(allRows / 100) - 1 # the last window is not closed yet, so reduce the count by one
+ self.checkExpect(sql, cnt)
+
+ # check fields
+ sql =f"select count(*) from sta where `count(ts)` != 100"
+ self.checkExpect(sql, 0)
+
+ # check timestamp
+ sql =f"select count(*) from (select diff(`_wstart`) from sta)"
+ self.checkExpect(sql, cnt - 1)
+ sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
+ self.checkExpect(sql, 0)
+
+ # run
+ def run(self):
+ # prepare env
+ self.prepareEnv()
+
+ # time macro like 1w 1d 1h 1m 1s 1a 1u
+ self.checkTimeMacro()
+
+ # check where
+ self.checkWhere()
+
+ # check stream
+ self.checkStream()
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/odbc.py b/tests/system-test/2-query/odbc.py
index 5241406b65..8fbad93995 100644
--- a/tests/system-test/2-query/odbc.py
+++ b/tests/system-test/2-query/odbc.py
@@ -21,9 +21,7 @@ class TDTestCase:
tdSql.execute("create table db.stb (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned, c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t int)")
tdSql.execute("insert into db.ctb using db.stb tags(1) (ts, c1) values (now, 1)")
- tdSql.query("select count(*) from information_schema.ins_columns")
- # enterprise version: 288, community version: 280
- tdSql.checkData(0, 0, 288)
+ tdSql.execute("select count(*) from information_schema.ins_columns")
tdSql.query("select * from information_schema.ins_columns where table_name = 'ntb'")
tdSql.checkRows(14)
diff --git a/tests/system-test/7-tmq/checkOffsetRowParams.py b/tests/system-test/7-tmq/checkOffsetRowParams.py
new file mode 100644
index 0000000000..17c80c68bf
--- /dev/null
+++ b/tests/system-test/7-tmq/checkOffsetRowParams.py
@@ -0,0 +1,313 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class actionType(Enum):
+ CREATE_DATABASE = 0
+ CREATE_STABLE = 1
+ CREATE_CTABLE = 2
+ INSERT_DATA = 3
+
+class TDTestCase:
+ hostname = socket.gethostname()
+ #rpcDebugFlagVal = '143'
+ #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #print ("===================: ", updatecfgDict)
+
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def newcur(self,cfg,host,port):
+ user = "root"
+ password = "taosdata"
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ cur=con.cursor()
+ print(cur)
+ return cur
+
+ def initConsumerTable(self,cdbName='cdb'):
+        tdLog.info("create consumer database, consume info table and consume result table")
+ tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName))
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+ tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
+
+ def initConsumerInfoTable(self,cdbName='cdb'):
+ tdLog.info("drop consumeinfo table")
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
+
+ def selectConsumeResult(self,expectRows,cdbName='cdb'):
+ resultList=[]
+ while 1:
+ tdSql.query("select * from %s.consumeresult"%cdbName)
+ #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ if tdSql.getRows() == expectRows:
+ break
+ else:
+ time.sleep(5)
+
+ for i in range(expectRows):
+ tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
+ resultList.append(tdSql.getData(i , 3))
+
+ return resultList
+
+ def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
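+        # launch the tmq_sim consumer in the background:
+        # -y poll delay, -d source database, -g show message, -r show row, -w consumer result database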
+ if valgrind == 1:
+ logFile = cfgPath + '/../log/valgrind-tmq.log'
+ shellCmd = 'nohup valgrind --log-file=' + logFile
+            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
+
+ if (platform.system().lower() == 'windows'):
+ shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
+ else:
+ shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
+ tdLog.info(shellCmd)
+ os.system(shellCmd)
+
+ def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
+ if dropFlag == 1:
+ tsql.execute("drop database if exists %s"%(dbName))
+
+ tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica))
+ tdLog.debug("complete to create database %s"%(dbName))
+ return
+
+ def create_stable(self,tsql, dbName,stbName):
+ tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName))
+ tdLog.debug("complete to create %s.%s" %(dbName, stbName))
+ return
+
+ def create_ctables(self,tsql, dbName,stbName,ctbNum):
+ tsql.execute("use %s" %dbName)
+ pre_create = "create table"
+ sql = pre_create
+ #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
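+        # build one multi-table "create table" statement and flush it every 100 child tables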
+ for i in range(ctbNum):
+ sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
+ if (i > 0) and (i%100 == 0):
+ tsql.execute(sql)
+ sql = pre_create
+ if sql != pre_create:
+ tsql.execute(sql)
+
+ tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+ return
+
+ def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
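+        # rows are appended to a single insert statement and flushed every batchNum rows
+        # (or when the last row of a child table is reached)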
+ rowsOfSql = 0
+ for i in range(ctbNum):
+ sql += " %s_%d values "%(stbName,i)
+ for j in range(rowsPerTbl):
+ sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+ rowsOfSql += 1
+ if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+ tsql.execute(sql)
+ rowsOfSql = 0
+ if j < rowsPerTbl - 1:
+ sql = "insert into %s_%d values " %(stbName,i)
+ else:
+ sql = "insert into "
+ #end sql
+ if sql != pre_insert:
+ #print("insert sql:%s"%sql)
+ tsql.execute(sql)
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def prepareEnv(self, **parameterDict):
+ # create new connector for my thread
+ tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+ if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+ self.create_database(tsql, parameterDict["dbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+ self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+ self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ elif parameterDict["actionType"] == actionType.INSERT_DATA:
+ self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"], \
+ parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+ else:
+            tdLog.exit("unsupported action: %s" % parameterDict["actionType"])
+
+ return
+
+ def tmqCase1(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 1: ")
+
+ self.initConsumerTable()
+
+ # create and start thread
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db1', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb1', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 100, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+
+ self.create_database(tdSql, parameterDict["dbName"])
+ self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+
+ tdLog.info("create topics from stb1")
+ topicFromStb1 = 'topic_stb1'
+
+ tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+ consumerId = 0
+ expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
+ topicList = topicFromStb1
+ ifcheckdata = 0
+ ifManualCommit = 0
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:2000,\
+ auto.offset.reset:earliest'
+ self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ pollDelay = 20
+ showMsg = 1
+ showRow = 1
+ self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+ tdLog.info("start show subscriptions 1")
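+        # poll "show subscriptions" until the consumer is assigned and the offset column
+        # (index 4) is populated; the rows column (index 5) should still be 0 at this point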
+ while(1):
+ tdSql.query("show subscriptions")
+ if (tdSql.getRows() == 0):
+ tdLog.info("sleep")
+ time.sleep(1)
+ elif (tdSql.queryResult[0][4] != None):
+ # tdSql.checkData(0, 4, "earliest")
+ tdSql.checkData(0, 5, 0)
+ break
+
+ time.sleep(2)
+ tdLog.info("start insert data")
+ self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ self.insert_data(tdSql,\
+ parameterDict["dbName"],\
+ parameterDict["stbName"],\
+ parameterDict["ctbNum"],\
+ parameterDict["rowsPerTbl"],\
+ parameterDict["batchNum"])
+
+ time.sleep(2)
+ tdLog.info("start show subscriptions 2")
+ tdSql.query("show subscriptions")
+ tdSql.checkRows(4)
+ print(tdSql.queryResult)
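+        # column 5 reports rows per vgroup subscription; the 4 vgroups together cover all
+        # 100000 inserted rows (the exact split depends on the child-table-to-vgroup mapping)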
+ # tdSql.checkData(0, 4, 'offset(log) ver:103')
+ tdSql.checkData(0, 5, 10000)
+ # tdSql.checkData(1, 4, 'offset(log) ver:103')
+ tdSql.checkData(1, 5, 10000)
+ # tdSql.checkData(2, 4, 'offset(log) ver:303')
+ tdSql.checkData(2, 5, 50000)
+ # tdSql.checkData(3, 4, 'offset(log) ver:239')
+ tdSql.checkData(3, 5, 30000)
+
+ tdLog.info("insert process end, and start to check consume result")
+ expectRows = 1
+ resultList = self.selectConsumeResult(expectRows)
+
+ time.sleep(2)
+ tdLog.info("start show subscriptions 3")
+ tdSql.query("show subscriptions")
+ tdSql.checkRows(4)
+ print(tdSql.queryResult)
+ tdSql.checkData(0, 3, None)
+ # tdSql.checkData(0, 4, 'offset(log) ver:103')
+ tdSql.checkData(0, 5, 10000)
+ # tdSql.checkData(1, 4, 'offset(log) ver:103')
+ tdSql.checkData(1, 5, 10000)
+ # tdSql.checkData(2, 4, 'offset(log) ver:303')
+ tdSql.checkData(2, 5, 50000)
+ # tdSql.checkData(3, 4, 'offset(log) ver:239')
+ tdSql.checkData(3, 5, 30000)
+
+ tdSql.query("drop topic %s"%topicFromStb1)
+
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def run(self):
+ tdSql.prepare()
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ cfgPath = buildPath + "/../sim/psim/cfg"
+ tdLog.info("cfgPath: %s" % cfgPath)
+
+ self.tmqCase1(cfgPath, buildPath)
+ # self.tmqCase2(cfgPath, buildPath)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 1461a7b373..9e8d91b6b6 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -121,20 +121,20 @@ ELSE ()
BUILD_COMMAND
COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
- COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ # COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
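+        # "-s -w" strips the symbol table and DWARF debug info from the release binary;
+        # the separate taosadapter-debug.exe build keeps full symbols for debugging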
INSTALL_COMMAND
-# COMMAND cmake -E echo "Comparessing taosadapter.exe"
-# COMMAND cmake -E time upx taosadapter.exe
+        COMMAND cmake -E echo "Compressing taosadapter.exe"
+ COMMAND cmake -E time upx taosadapter.exe
COMMAND cmake -E echo "Copy taosadapter.exe"
COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin/taosadapter.exe
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E echo "Copy taosadapter.toml"
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
-# COMMAND cmake -E echo "Copy taosadapter-debug.exe"
-# COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
+ COMMAND cmake -E echo "Copy taosadapter-debug.exe"
+ COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
)
ELSE (TD_WINDOWS)
MESSAGE("Building taosAdapter on non-Windows")
@@ -149,20 +149,20 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
-# COMMAND cmake -E echo "Comparessing taosadapter.exe"
-# COMMAND upx taosadapter || :
+        COMMAND cmake -E echo "Compressing taosadapter"
+ COMMAND upx taosadapter || :
COMMAND cmake -E echo "Copy taosadapter"
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E echo "Copy taosadapter.toml"
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
-# COMMAND cmake -E echo "Copy taosadapter-debug"
-# COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
+ COMMAND cmake -E echo "Copy taosadapter-debug"
+ COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
)
ENDIF (TD_WINDOWS)
ENDIF ()