diff --git a/README-CN.md b/README-CN.md
index 44542747eb..f830404af3 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench
### Ubuntu 18.04 及以上版本 & Debian:
```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
+sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```
#### 为 taos-tools 安装编译需要的软件
@@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
+sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
+sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
#### 在 CentOS 上构建 taosTools 安装依赖软件
@@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
-brew install argp-standalone pkgconfig geos
+brew install argp-standalone pkgconfig
```
### 设置 golang 开发环境
diff --git a/README.md b/README.md
index c81e2d309d..f477a51a1f 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
### Ubuntu 18.04 and above or Debian
```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
+sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```
#### Install build dependencies for taosTools
@@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
```bash
sudo yum install epel-release
sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
+sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```
### CentOS 8/Fedora/Rocky Linux
```bash
-sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
+sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```
#### Install build dependencies for taosTools on CentOS
@@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
### macOS
```
-brew install argp-standalone pkgconfig geos
+brew install argp-standalone pkgconfig
```
### Setup golang environment
diff --git a/cmake/cmake.define b/cmake/cmake.define
index f3caf49da3..4b0adc31a3 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -115,18 +115,6 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
ENDIF ()
- IF (${BUILD_SANITIZER})
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
- MESSAGE(STATUS "Compile with Address Sanitizer!")
- ELSEIF (${BUILD_RELEASE})
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
- ELSE ()
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
- ENDIF ()
-
# disable all assert
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
ADD_DEFINITIONS(-DDISABLE_ASSERT)
@@ -168,4 +156,20 @@ ELSE ()
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
ENDIF()
+ # build mode
+ SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+ SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+
+ IF (${BUILD_SANITIZER})
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
+ MESSAGE(STATUS "Compile with Address Sanitizer!")
+ ELSEIF (${BUILD_RELEASE})
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
+ ELSE ()
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+ ENDIF ()
+
ENDIF ()
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 59986a3b3c..0d02634c7e 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -229,7 +229,10 @@ endif(${BUILD_WITH_LEVELDB})
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if(${BUILD_WITH_ROCKSDB})
if(${TD_LINUX})
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-error=unused-private-field -Wno-error=unused-result")
+ IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+ SET(CMAKE_BUILD_TYPE Release)
+ endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
@@ -253,7 +256,7 @@ if(${BUILD_WITH_ROCKSDB})
endif(${TD_DARWIN})
if(${TD_WINDOWS})
- option(WITH_JNI "" OFF)
+ option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})
if(${TD_WINDOWS})
@@ -265,7 +268,7 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
- option(PORTABLE "" OFF)
+ option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
@@ -274,7 +277,7 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
- option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
+ option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()
@@ -485,6 +488,13 @@ endif(${BUILD_ADDR2LINE})
# geos
if(${BUILD_GEOS})
+ if(${TD_LINUX})
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
+ IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
+ SET(CMAKE_BUILD_TYPE Release)
+ endif()
+ endif(${TD_LINUX})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
target_include_directories(
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index d951923de5..578f38e73d 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
+ def assignment(self):
+ pass
+
+ def seek(self, partition):
+ pass
+
def close(self):
pass
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index db49e5f395..9c5a852c70 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -32,25 +32,22 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela
Native connections are supported on the same platforms as the TDengine client driver.
REST connection supports all platforms that can run Java.
-## Version support
-
-Please refer to [version support list](/reference/connector#version-support)
-
## Recent update logs
-| taos-jdbcdriver version | major changes |
-| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
-| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
-| 3.2.0 | This version has been deprecated |
-| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
-| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment |
-| 3.0.0 | Support for TDengine 3.0 |
-| 2.0.42 | fix wasNull interface return value in WebSocket connection |
-| 2.0.41 | fix decode method of username and password in REST connection |
-| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
-| 2.0.38 | JDBC REST connections add bulk pull function |
-| 2.0.37 | Support json tags |
-| 2.0.36 | Support schemaless writing |
+| taos-jdbcdriver version | major changes | TDengine version |
+| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
+| 3.2.1 | subscription add seek function | 3.0.5.0 or later |
+| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
+| 3.2.0 | This version has been deprecated | - |
+| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
+| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11, you are advised to use other version in the JDK 8 environment | - |
+| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
+| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
+| 2.0.41 | fix decode method of username and password in REST connection | - |
+| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
+| 2.0.38 | JDBC REST connections add bulk pull function | - |
+| 2.0.37 | Support json tags | - |
+| 2.0.36 | Support schemaless writing | - |
**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
@@ -102,6 +99,8 @@ For specific error codes, please refer to.
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
+| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
+| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
| 0x2350 | unknown error | Unknown exception, please return to the developer on github. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
@@ -117,8 +116,8 @@ For specific error codes, please refer to.
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
-| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
-| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
+| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter |
+| 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
@@ -169,7 +168,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
com.taosdata.jdbc
taos-jdbcdriver
- 3.2.1
+ 3.2.2
```
@@ -913,14 +912,15 @@ public class SchemalessWsTest {
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
- Connection connection = DriverManager.getConnection(url);
- init(connection);
+ try(Connection connection = DriverManager.getConnection(url)){
+ init(connection);
- SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
- writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
- writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
- writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
- System.exit(0);
+ try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
+ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
+ writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
+ writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
+ }
+ }
}
private static void init(Connection connection) throws SQLException {
@@ -991,6 +991,17 @@ while(true) {
`poll` obtains one message each time it is run.
+#### Assignment subscription Offset
+
+```
+long position(TopicPartition partition) throws SQLException;
+Map<TopicPartition, Long> position(String topic) throws SQLException;
+Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
+Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+```
+
#### Close subscriptions
```java
diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
index 0088f23006..06d643c6c8 100644
--- a/docs/en/14-reference/03-connector/05-go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.
## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](https://github.com/taosdata/driver-go#remind)
## Supported features
@@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
Commit information.
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
* `func (c *Consumer) Unsubscribe() error`
Unsubscribe.
@@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
Commit information.
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
* `func (c *Consumer) Unsubscribe() error`
Unsubscribe.
@@ -476,7 +494,7 @@ Unsubscribe.
Close consumer.
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### parameter binding via WebSocket
@@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv
Closes the parameter binding.
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
## API Reference
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
index 99c3d2c066..344bd3590e 100644
--- a/docs/en/14-reference/03-connector/06-rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
Native connections are supported on the same platforms as the TDengine client driver.
Websocket connections are supported on all platforms that can run Go.
-## Version support
+## Version history
-Please refer to [version support list](/reference/connector#version-support)
+| connector-rust version | TDengine version | major features |
+| :----------------: | :--------------: | :--------------------------------------------------: |
+| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
+| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
+| v0.7.6 | 3.0.3.0 | Support req_id in query. |
+| v0.6.0 | 3.0.0.0 | Base features. |
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
@@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in
}
```
+Get assignments:
+
+Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+
+Seek offset:
+
+Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
+
Unsubscribe:
```rust
@@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
-For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index b263af8ea6..6bd02644d4 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -453,6 +453,170 @@ As the way to connect introduced above but add `req_id` argument.
+### Subscription
+
+Connector support data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
+
+
+
+
+The `consumer` in the connector contains the subscription api.
+
+#### Create Consumer
+
+The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
+
+```python
+from taos.tmq import Consumer
+
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
+#### Subscribe topics
+
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume
+
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+
+```python
+while True:
+ res = consumer.poll(1)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
+```
+
+#### assignment
+
+The `assignment` function is used to get the assignment of the topic.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Seek
+
+The `seek` function is used to reset the assignment of the topic.
+
+```python
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
+```
+
+#### After consuming data
+
+You should unsubscribe to the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### Tmq subscription example
+
+```python
+{{#include docs/examples/python/tmq_example.py}}
+```
+
+#### assignment and seek example
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+```
+
+
+
+
+
+In addition to native connections, the connector also supports subscriptions via websockets.
+
+#### Create Consumer
+
+The syntax for creating a consumer is "consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
+
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+```
+
+#### subscribe topics
+
+The `subscribe` function is used to subscribe to a list of topics.
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### Consume
+
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+
+```python
+while True:
+ res = consumer.poll(timeout=1.0)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ for block in res:
+ for row in block:
+ print(row)
+```
+
+#### assignment
+
+The `assignment` function is used to get the assignment of the topic.
+
+```python
+assignments = consumer.assignment()
+```
+
+#### Seek
+
+The `seek` function is used to reset the assignment of the topic.
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
+```
+
+#### After consuming data
+
+You should unsubscribe to the topics and close the consumer after consuming.
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### Subscription example
+
+```python
+{{#include docs/examples/python/tmq_websocket_example.py}}
+```
+
+#### Assignment and seek example
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+```
+
+
+
+
### Schemaless Insert
Connector support schemaless insert.
@@ -507,7 +671,8 @@ Insert with req_id argument
| Example program links | Example program content |
| ------------------------------------------------------------------------------------------------------------- | ------------------- ---- |
-| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
+| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | bind_row.py
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
index 7348add4bd..6d5547e7a9 100644
--- a/docs/en/14-reference/06-taosdump.md
+++ b/docs/en/14-reference/06-taosdump.md
@@ -79,8 +79,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump table schemas.
- -y, --answer-yes Input yes for prompt. It will skip data file
- checking!
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
and lzma.
-S, --start-time=START_TIME Start time to dump. Either epoch or
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
old mode 100644
new mode 100755
index 52ded6208a..bfc5aabe7b
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -5,7 +5,7 @@ description: This document describes the configuration parameters for the TDengi
## Configuration File on Server Side
-On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
+On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos` on Linux system, it's located under `C:\TDengine` on Windows system. The location of configuration file can be specified by using `-c` parameter on the CLI of `taosd`. For example, on Linux system the configuration file can be put under `/home/user` and used like below
```
taosd -c /home/user
@@ -365,6 +365,16 @@ The charset that takes effect is UTF-8.
| Unit | GB |
| Default Value | 2.0 |
+### metaCacheMaxSize
+
+| Attribute | Description |
+| ------------- | ------------------------------------------------------------------------------------------------- |
+| Applicable | Client Only |
+| Meaning | Maximum meta cache size in single client process |
+| Unit | MB |
+| Default Value | -1 (No limitation) |
+
+
## Cluster Parameters
### supportVnodes
@@ -433,6 +443,26 @@ The charset that takes effect is UTF-8.
| Default Value | 0 |
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
+### slowLogThreshold
+
+| Attribute | Description |
+| ------------- | -------------------------------------------------------------------------------------------------------- |
+| Applicable | Client only |
+| Meaning | When an operation execution time exceeds this threshold, the operation will be logged in slow log file |
+| Unit | second |
+| Default Value | 3 |
+| Note | All slow operations will be logged in file "taosSlowLog" in the log directory |
+
+### slowLogScope
+
+| Attribute | Description |
+| --------------- | ----------------------------------------------------------------------- |
+| Applicable | Client only |
+| Meaning | Slow log type to be logged |
+| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE |
+| Default Value | ALL |
+| Note | All slow operations will be logged by default, one option could be set |
+
### debugFlag
| Attribute | Description |
diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
index f09ebb274c..d40efc702c 100644
--- a/docs/en/20-third-party/11-kafka.md
+++ b/docs/en/20-third-party/11-kafka.md
@@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se

-## What is Confluent?
-
-[Confluent](https://www.confluent.io/) adds many extensions to Kafka. include:
-
-1. Schema Registry
-2. REST Proxy
-3. Non-Java Clients
-4. Many packaged Kafka Connect plugins
-5. GUI for managing and monitoring Kafka - Confluent Control Center
-
-Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-
-
-Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
-
## Prerequisites
1. Linux operating system
2. Java 8 and Maven installed
-3. Git is installed
+3. Git, curl, and vi are installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
-## Install Confluent
-
-Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
+## Install Kafka
Execute in any directory:
-````
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
-````
+```shell
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
+```
-Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
+Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
```
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
-After the installation is complete, you can enter `confluent version` for simple verification:
-
-```
-# confluent version
-confluent - Confluent CLI
-
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```
-
## Install TDengine Connector plugin
### Install from source code
-```
+```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path.
+The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.
-### Install with confluent-hub
+### Add configuration file
-[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
-**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
+add kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
-## Start Confluent
-
-```
-confluent local services start
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
```
-:::note
-Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
-:::
+## Start Kafka Services
-:::tip
-If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
+Use the command below to start all services:
-```title="Console output log" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
-To clear data, execute `rm -rf /tmp/confluent.106668`.
-:::
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
-### Check Confluent Services Status
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
-Use command bellow to check the status of all service:
-
-```
-confluent local services status
-```
-
-The expected output is:
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
```
### Check Successfully Loaded Plugin
After Kafka Connect was completely started, you can use bellow command to check if our plugins are installed successfully:
-```
-confluent local services connect plugin list
+
+```shell
+curl http://localhost:8083/connectors
```
-The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow:
+The output is as below:
+```txt
+[]
```
-Available Connect Plugins:
-[
- {
- "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
- "type": "sink",
- "version": "1.0.0"
- },
- {
- "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
- "type": "source",
- "version": "1.0.0"
- },
-......
-```
-
-If not, please check the log file of Kafka Connect. To view the log file path, please execute:
-
-```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
-
-Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
## The use of TDengine Sink Connector
@@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
-### Add configuration file
+### Add Sink Connector configuration file
-```
+```shell
mkdir ~/test
cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
```
-sink-demo.properties' content is following:
+The content of sink-demo.json is as follows:
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="sink-demo.json"
+{
+ "name": "TDengineSinkConnector",
+ "config": {
+ "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "tasks.max": "1",
+ "topics": "meters",
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.user": "root",
+ "connection.password": "taosdata",
+ "connection.database": "power",
+ "db.schemaless": "line",
+ "data.precision": "ns",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": 1
+ }
+}
```
Key configuration instructions:
-1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
-2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
+1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
+2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
-### Create Connector instance
+### Create Sink Connector instance
-````
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+````shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
````
If the above command is executed successfully, the output is as follows:
@@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
"tasks.max": "1",
"topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
- "name": "TDengineSinkConnector"
+ "name": "TDengineSinkConnector",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": "1"
},
"tasks": [],
"type": "sink"
@@ -257,8 +181,8 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
Use kafka-console-producer to write test data to the topic `meters`.
-```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+```shell
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```
:::note
@@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
Use the TDengine CLI to verify that the sync was successful.
-```
+```sql
taos> use power;
Database changed.
taos> select * from meters;
- ts | current | voltage | phase | groupid | location |
+ _ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -291,31 +215,39 @@ The role of the TDengine Source Connector is to push all the data of a specific
TDengine Source Connector will convert the data in TDengine data table into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then write to Kafka.
-The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
+The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.
-### Add configuration file
+### Add Source Connector configuration file
-```
-vi source-demo.properties
+```shell
+vi source-demo.json
```
Input following content:
-```ini title="source-demo.properties"
-name=TDengineSourceConnector
-connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
-tasks.max=1
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.username=root
-connection.password=taosdata
-connection.database=test
-connection.attempts=3
-connection.backoff.ms=5000
-topic.prefix=tdengine-source-
-poll.interval.ms=1000
-fetch.max.rows=100
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="source-demo.json"
+{
+ "name":"TDengineSourceConnector",
+ "config":{
+ "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+ "tasks.max": 1,
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.username": "root",
+ "connection.password": "taosdata",
+ "connection.database": "test",
+ "connection.attempts": 3,
+ "connection.backoff.ms": 5000,
+ "topic.prefix": "tdengine",
+ "topic.delimiter": "-",
+ "poll.interval.ms": 1000,
+ "fetch.max.rows": 100,
+ "topic.per.stable": true,
+ "topic.ignore.db": false,
+ "out.format": "line",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter"
+ }
+}
```
### Prepare test data
@@ -340,40 +272,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
Use TDengine CLI to execute SQL script
-```
+```shell
taos -f prepare-source-data.sql
```
### Create Connector instance
-````
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
-````
+```shell
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
+```
### View topic data
-Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
+Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-test-meters. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
-````
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+````shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
````
output:
-````
+```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......
-````
+```
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
-````
+```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
-````
+```
Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
@@ -383,16 +315,16 @@ After testing, use the unload command to stop the loaded connector.
View currently active connectors:
-````
-confluent local services connect connector status
-````
+```shell
+curl http://localhost:8083/connectors
+```
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
-````
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
-````
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
+```
## Configuration reference
@@ -427,22 +359,20 @@ The following configuration items apply to TDengine Sink Connector and TDengine
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
-6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
-7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `--`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `-`.
-
-
+6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 0, this means to get all the data to the latest time.
+7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is json. The default is line.
+8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix><topic.delimiter><connection.database>`.
+9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix><topic.delimiter><stable.name>`, false indicates that the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`, and the default is false. Does not take effect when `topic.per.stable` is set to false.
+10. `topic.delimiter`: topic name delimiter, default is `-`.
## Other notes
-1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
-2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
+1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
## Feedback
-https://github.com/taosdata/kafka-connect-tdengine/issues
+<https://github.com/taosdata/kafka-connect-tdengine/issues>
## Reference
-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1. For more information, see <https://kafka.apache.org/documentation/>
diff --git a/docs/examples/python/tmq_assignment_example.py b/docs/examples/python/tmq_assignment_example.py
new file mode 100644
index 0000000000..a07347a9b9
--- /dev/null
+++ b/docs/examples/python/tmq_assignment_example.py
@@ -0,0 +1,58 @@
+import taos
+from taos.tmq import Consumer
+import taosws
+
+
+def prepare():
+ conn = taos.connect()
+ conn.execute("drop topic if exists tmq_assignment_demo_topic")
+ conn.execute("drop database if exists tmq_assignment_demo_db")
+ conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+ conn.select_db("tmq_assignment_demo_db")
+ conn.execute(
+ "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+ conn.execute(
+ "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+
+def taos_get_assignment_and_seek_demo():
+ prepare()
+ consumer = Consumer(
+ {
+ "group.id": "0",
+ # should disable snapshot,
+ # otherwise it will cause invalid params error
+ "experimental.snapshot.enable": "false",
+ }
+ )
+ consumer.subscribe(["tmq_assignment_demo_topic"])
+
+ # get topic assignment
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment)
+
+ # poll
+ consumer.poll(1)
+ consumer.poll(1)
+
+ # get topic assignment again
+    after_poll_assignments = consumer.assignment()
+    for assignment in after_poll_assignments:
+ print(assignment)
+
+ # seek to the beginning
+ for assignment in assignments:
+ consumer.seek(assignment)
+
+ # now the assignment should be the same as before poll
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment)
+
+
+if __name__ == '__main__':
+    taos_get_assignment_and_seek_demo()
diff --git a/docs/examples/python/tmq_websocket_assgnment_example.py b/docs/examples/python/tmq_websocket_assgnment_example.py
new file mode 100644
index 0000000000..0f8e4a2804
--- /dev/null
+++ b/docs/examples/python/tmq_websocket_assgnment_example.py
@@ -0,0 +1,57 @@
+import taos
+import taosws
+
+
+def prepare():
+ conn = taos.connect()
+ conn.execute("drop topic if exists tmq_assignment_demo_topic")
+ conn.execute("drop database if exists tmq_assignment_demo_db")
+ conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
+ conn.select_db("tmq_assignment_demo_db")
+ conn.execute(
+ "create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
+ conn.execute(
+ "create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
+ conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
+
+
+def taosws_get_assignment_and_seek_demo():
+ prepare()
+ consumer = taosws.Consumer(conf={
+ "td.connect.websocket.scheme": "ws",
+ # should disable snapshot,
+ # otherwise it will cause invalid params error
+ "experimental.snapshot.enable": "false",
+ "group.id": "0",
+ })
+ consumer.subscribe(["tmq_assignment_demo_topic"])
+
+ # get topic assignment
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment.to_string())
+
+ # poll
+ consumer.poll(1)
+ consumer.poll(1)
+
+ # get topic assignment again
+ after_poll_assignments = consumer.assignment()
+ for assignment in after_poll_assignments:
+ print(assignment.to_string())
+
+ # seek to the beginning
+ for assignment in assignments:
+ for a in assignment.assignments():
+ consumer.seek(assignment.topic(), a.vg_id(), a.offset())
+
+ # now the assignment should be the same as before poll
+ assignments = consumer.assignment()
+ for assignment in assignments:
+ print(assignment.to_string())
+
+
+if __name__ == '__main__':
+ taosws_get_assignment_and_seek_demo()
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index 1cd0076ba5..bab6377c7e 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -299,7 +299,7 @@ SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-对表 `d10` 按 10 每秒进行平均值、最大值和最小值聚合统计:
+对表 `d10` 按每 10 秒进行平均值、最大值和最小值聚合统计:
```sql
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index bfea926f53..a87a1f64f8 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
+ def assignment(self):
+ pass
+
+ def seek(self, partition):
+ pass
+
def close(self):
pass
diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx
index 46800226d7..1588159b57 100644
--- a/docs/zh/08-connector/14-java.mdx
+++ b/docs/zh/08-connector/14-java.mdx
@@ -32,25 +32,22 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
REST 连接支持所有能运行 Java 的平台。
-## 版本支持
+## 版本历史
-请参考[版本支持列表](../#版本支持)
-
-## 最近更新记录
-
-| taos-jdbcdriver 版本 | 主要变化 |
-| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: |
-| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 |
-| 3.2.0 | 存在连接问题,不推荐使用 |
-| 3.1.0 | WebSocket 连接支持订阅功能 |
-| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 |
-| 3.0.0 | 支持 TDengine 3.0 |
-| 2.0.42 | 修在 WebSocket 连接中 wasNull 接口返回值 |
-| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 |
-| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
-| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
-| 2.0.37 | 增加对 json tag 支持 |
-| 2.0.36 | 增加对 schemaless 写入支持 |
+| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
+| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
+| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
+| 3.2.1 | 新增功能:WebSocket 连接支持 schemaless 与 prepareStatement 写入。变更:consumer poll 返回结果集为 ConsumerRecord,可通过 value() 获取指定结果集数据。 | 3.0.3.0 及更高版本 |
+| 3.2.0 | 存在连接问题,不推荐使用 | - |
+| 3.1.0 | WebSocket 连接支持订阅功能 | - |
+| 3.0.1 - 3.0.4 | 修复一些情况下结果集数据解析错误的问题。3.0.1 在 JDK 11 环境编译,JDK 8 环境下建议使用其他版本 | - |
+| 3.0.0 | 支持 TDengine 3.0 | 3.0.0.0 及更高版本 |
+| 2.0.42 | 修在 WebSocket 连接中 wasNull 接口返回值 | - |
+| 2.0.41 | 修正 REST 连接中用户名和密码转码方式 | - |
+| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 | - |
+| 2.0.38 | JDBC REST 连接增加批量拉取功能 | - |
+| 2.0.37 | 增加对 json tag 支持 | - |
+| 2.0.36 | 增加对 schemaless 写入支持 | - |
**注**:REST 连接中增加 `batchfetch` 参数并设置为 true,将开启 WebSocket 连接。
@@ -80,45 +77,47 @@ JDBC 连接器可能报错的错误码包括 4 种:
具体的错误码请参考:
-| Error Code | Description | Suggested Actions |
-| ---------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
-| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
-| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
-| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
-| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
-| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
-| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
-| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
-| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
-| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
-| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
-| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
-| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
-| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
-| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
-| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
-| 0x2317 | | REST 连接中使用了错误的请求类型。 |
-| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
-| 0x2319 | user is required | 创建连接时缺少用户名信息 |
-| 0x231a | password is required | 创建连接时缺少密码信息 |
-| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
-| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
-| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
-| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
-| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
-| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
-| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
-| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
-| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
-| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
-| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
-| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
-| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
-| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
-| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
-| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
-| - | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
-| - | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
+| Error Code | Description | Suggested Actions |
+| ---------- | --------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
+| 0x2301 | connection already closed | 连接已经关闭,检查连接情况,或重新创建连接去执行相关指令。 |
+| 0x2302 | this operation is NOT supported currently! | 当前使用接口不支持,可以更换其他连接方式。 |
+| 0x2303 | invalid variables | 参数不合法,请检查相应接口规范,调整参数类型及大小。 |
+| 0x2304 | statement is closed | statement 已经关闭,请检查 statement 是否关闭后再次使用,或是连接是否正常。 |
+| 0x2305 | resultSet is closed | resultSet 结果集已经释放,请检查 resultSet 是否释放后再次使用。 |
+| 0x2306 | Batch is empty! | prepareStatement 添加参数后再执行 executeBatch。 |
+| 0x2307 | Can not issue data manipulation statements with executeQuery() | 更新操作应该使用 executeUpdate(),而不是 executeQuery()。 |
+| 0x2308 | Can not issue SELECT via executeUpdate() | 查询操作应该使用 executeQuery(),而不是 executeUpdate()。 |
+| 0x230d | parameter index out of range | 参数越界,请检查参数的合理范围。 |
+| 0x230e | connection already closed | 连接已经关闭,请检查 Connection 是否关闭后再次使用,或是连接是否正常。 |
+| 0x230f | unknown sql type in tdengine | 请检查 TDengine 支持的 Data Type 类型。 |
+| 0x2310 | can't register JDBC-JNI driver | 不能注册 JNI 驱动,请检查 url 是否填写正确。 |
+| 0x2312 | url is not set | 请检查 REST 连接 url 是否填写正确。 |
+| 0x2314 | numeric value out of range | 请检查获取结果集中数值类型是否使用了正确的接口。 |
+| 0x2315 | unknown taos type in tdengine | 在 TDengine 数据类型与 JDBC 数据类型转换时,是否指定了正确的 TDengine 数据类型。 |
+| 0x2317 | | REST 连接中使用了错误的请求类型。 |
+| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
+| 0x2319 | user is required | 创建连接时缺少用户名信息 |
+| 0x231a | password is required | 创建连接时缺少密码信息 |
+| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
+| 0x231d | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
+| 0x231e | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
+| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
+| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
+| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
+| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
+| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
+| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
+| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
+| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
+| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
+| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
+| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
+| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
+| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
+| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
+| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
+| 0x2379 | seek offset must not be a negative number | seek 接口参数不能为负值,请使用正确的参数 |
+| 0x237a | vGroup not found in result set | VGroup 没有分配给当前 consumer,由于 Rebalance 机制导致 Consumer 与 VGroup 不是绑定的关系 |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
@@ -169,7 +168,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
com.taosdata.jdbc
taos-jdbcdriver
- 3.2.1
+ 3.2.2
```
@@ -916,14 +915,15 @@ public class SchemalessWsTest {
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
- Connection connection = DriverManager.getConnection(url);
- init(connection);
+ try(Connection connection = DriverManager.getConnection(url)){
+ init(connection);
- SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
- writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
- writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
- writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
- System.exit(0);
+ try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
+ writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
+ writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
+ writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
+ }
+ }
}
private static void init(Connection connection) throws SQLException {
@@ -994,6 +994,17 @@ while(true) {
`poll` 每次调用获取一个消息。
+#### 指定订阅 Offset
+
+```
+long position(TopicPartition partition) throws SQLException;
+Map<TopicPartition, Long> position(String topic) throws SQLException;
+Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
+Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+```
+
#### 关闭订阅
```java
diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx
index fd6df992b5..d431be35cb 100644
--- a/docs/zh/08-connector/20-go.mdx
+++ b/docs/zh/08-connector/20-go.mdx
@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
## 版本支持
-请参考[版本支持列表](../#版本支持)
+请参考[版本支持列表](https://github.com/taosdata/driver-go#remind)
## 支持的功能特性
@@ -383,6 +383,15 @@ func main() {
提交消息。
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+ 获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
+
+ 按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
* `func (c *Consumer) Close() error`
关闭连接。
@@ -468,11 +477,20 @@ func main() {
提交消息。
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+ 获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
+
+ 按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
* `func (c *Consumer) Close() error`
关闭连接。
-完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### 通过 WebSocket 进行参数绑定
@@ -520,7 +538,7 @@ func main() {
结束参数绑定。
-完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
## API 参考
diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx
index 41a429b026..c23228c8cf 100644
--- a/docs/zh/08-connector/26-rust.mdx
+++ b/docs/zh/08-connector/26-rust.mdx
@@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
Websocket 连接支持所有能运行 Rust 的平台。
-## 版本支持
+## 版本历史
-请参考[版本支持列表](../#版本支持)
+| Rust 连接器版本 | TDengine 版本 | 主要功能 |
+| :----------------: | :--------------: | :--------------------------------------------------: |
+| v0.8.10 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
+| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
+| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
+| v0.6.0 | 3.0.0.0 | 基础功能。 |
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
@@ -502,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur
}
```
+获取消费进度:
+
+版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+
+按照指定的进度消费:
+
+版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
+
停止订阅:
```rust
@@ -516,7 +537,7 @@ consumer.unsubscribe().await;
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
- `auto.commit.interval.ms`: 自动标记的时间间隔。
-完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。
diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx
index 1037d66f17..10fb2238ee 100644
--- a/docs/zh/08-connector/30-python.mdx
+++ b/docs/zh/08-connector/30-python.mdx
@@ -456,27 +456,169 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
### 数据订阅
-连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅](../../develop/tmq/)。
+连接器支持数据订阅功能,数据订阅功能请参考 [数据订阅文档](../../develop/tmq/)。
-`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API,相关 API 定义请参考 [数据订阅文档](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api)。
+`Consumer` 提供了 Python 连接器订阅 TMQ 数据的 API。
+
+#### 创建 Consumer
+
+创建 Consumer 语法为 `consumer = Consumer(configs)`,参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
+
+```python
+from taos.tmq import Consumer
+
+consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
+```
+
+#### 订阅 topics
+
+Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### 消费数据
+
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+
+```python
+while True:
+ res = consumer.poll(1)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ val = res.value()
+
+ for block in val:
+ print(block.fetchall())
+```
+
+#### 获取消费进度
+
+Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+
+```python
+assignments = consumer.assignment()
+```
+
+#### 重置消费进度
+
+Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置,方法参数类型为 TopicPartition。
+
+```python
+tp = TopicPartition(topic='topic1', partition=0, offset=0)
+consumer.seek(tp)
+```
+
+#### 结束消费
+
+消费结束后,应当取消订阅,并关闭 Consumer。
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### tmq 订阅示例代码
```python
{{#include docs/examples/python/tmq_example.py}}
```
+#### 获取和重置消费进度示例代码
+
+```python
+{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
+```
+
-除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据。
+除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据,使用 websocket 方式订阅 TMQ 数据需要安装 `taos-ws-py`。
+
+taosws `Consumer` API 提供了基于 Websocket 订阅 TMQ 数据的 API。
+
+#### 创建 Consumer
+
+创建 Consumer 语法为 `consumer = Consumer(conf=configs)`,使用时需要指定 `td.connect.websocket.scheme` 参数值为 "ws",参数定义请参考 [数据订阅文档](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer)。
+
+```python
+import taosws
+
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+```
+
+#### 订阅 topics
+
+Consumer API 的 `subscribe` 方法用于订阅 topics,consumer 支持同时订阅多个 topic。
+
+```python
+consumer.subscribe(['topic1', 'topic2'])
+```
+
+#### 消费数据
+
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+
+```python
+while True:
+ res = consumer.poll(timeout=1.0)
+ if not res:
+ continue
+ err = res.error()
+ if err is not None:
+ raise err
+ for block in res:
+ for row in block:
+ print(row)
+```
+
+#### 获取消费进度
+
+Consumer API 的 `assignment` 方法用于获取 Consumer 订阅的所有 topic 的消费进度,返回结果类型为 TopicPartition 列表。
+
+```python
+assignments = consumer.assignment()
+```
+
+#### 重置消费进度
+
+Consumer API 的 `seek` 方法用于重置 Consumer 的消费进度到指定位置。
+
+```python
+consumer.seek(topic='topic1', partition=0, offset=0)
+```
+
+#### 结束消费
+
+消费结束后,应当取消订阅,并关闭 Consumer。
+
+```python
+consumer.unsubscribe()
+consumer.close()
+```
+
+#### tmq 订阅示例代码
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
+连接器提供了 `assignment` 接口,用于获取 topic assignment 的功能,可以查询订阅的 topic 的消费进度,并提供 `seek` 接口,用于重置 topic 的消费进度。
+
+#### 获取和重置消费进度示例代码
+
+```python
+{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
+```
+
diff --git a/docs/zh/14-reference/06-taosdump.md b/docs/zh/14-reference/06-taosdump.md
index 8ff1287c3e..12122edd32 100644
--- a/docs/zh/14-reference/06-taosdump.md
+++ b/docs/zh/14-reference/06-taosdump.md
@@ -82,8 +82,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump tables' schema.
- -y, --answer-yes Input yes for prompt. It will skip data file
- checking!
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
and lzma.
-S, --start-time=START_TIME Start time to dump. Either epoch or
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
old mode 100644
new mode 100755
index 68f44d1e65..51748b68c4
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -5,7 +5,7 @@ description: "TDengine 客户端和服务配置列表"
## 为服务端指定配置文件
-TDengine 系统后台服务由 taosd 提供,可以在配置文件 taos.cfg 里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos 目录,可以通过 taosd 命令行执行参数 -c 指定配置文件目录。比如,指定配置文件位于`/home/user` 这个目录:
+TDengine 系统后台服务由 taosd 提供,可以在配置文件 taos.cfg 里修改配置参数,以满足不同场景的需求。在 Linux 系统上,配置文件的缺省位置在 `/etc/taos` 目录,在 Windows 系统上缺省位置在 `C:\TDengine` 。可以通过 taosd 命令行执行参数 -c 指定配置文件所在目录。比如,在 Linux 系统上可以指定配置文件位于 `/home/user` 这个目录:
```
taosd -c /home/user
@@ -384,6 +384,15 @@ charset 的有效值是 UTF-8。
| 单位 | GB |
| 缺省值 | 2.0 |
+### metaCacheMaxSize
+
+| 属性 | 说明 |
+| -------- | ---------------------------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | 指定单个客户端元数据缓存大小的最大值 |
+| 单位 | MB |
+| 缺省值 | -1 (无限制) |
+
## 集群相关
### supportVnodes
@@ -452,6 +461,26 @@ charset 的有效值是 UTF-8。
| 缺省值 | 0 |
| 补充说明 | 大于 0 时,日志文件会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳。 |
+### slowLogThreshold
+
+| 属性 | 说明 |
+| -------- | ------------------------------------------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | 指定慢查询门限值,大于等于门限值认为是慢查询 |
+| 单位 | 秒 |
+| 缺省值 | 3 |
+| 补充说明 | 每个客户端中所有慢查询会被记录在日志目录下的taosSlowLog文件中 |
+
+### slowLogScope
+
+| 属性 | 说明 |
+| -------- | --------------------------------------------------------------|
+| 适用范围 | 仅客户端适用 |
+| 含义 | 指定启动记录哪些类型的慢查询 |
+| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE |
+| 缺省值 | ALL |
+| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 |
+
### debugFlag
| 属性 | 说明 |
diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md
index 97e78c2fde..76e546c345 100644
--- a/docs/zh/20-third-party/11-kafka.md
+++ b/docs/zh/20-third-party/11-kafka.md
@@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送

-## 什么是 Confluent?
-
-[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
-
-1. Schema Registry
-2. REST 代理
-3. 非 Java 客户端
-4. 很多打包好的 Kafka Connect 插件
-5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
-
-这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
-
-
-Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
-
## 前置条件
运行本教程中示例的前提条件。
1. Linux 操作系统
2. 已安装 Java 8 和 Maven
-3. 已安装 Git
+3. 已安装 Git、curl、vi
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
-## 安装 Confluent
-
-Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。
+## 安装 Kafka
在任意目录下执行:
-```
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
+```shell
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
```
-然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。
+然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。
```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
```
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
-安装完成之后,可以输入`confluent version`做简单验证:
-
-```
-# confluent version
-confluent - Confluent CLI
-
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```
-
## 安装 TDengine Connector 插件
-### 从源码安装
+### 编译插件
-```
+```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。
+以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`。
-### 用 confluent-hub 安装
+### 配置插件
-[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。
-**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。
+将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中
-## 启动 Confluent
-
-```
-confluent local services start
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
```
-:::note
-一定要先安装插件再启动 Confluent, 否则加载插件会失败。
-:::
+## 启动 Kafka
-:::tip
-若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 :
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
-```title="控制台输出日志" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
+
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
```
-清空数据可执行 `rm -rf /tmp/confluent.106668`。
-:::
-
-### 验证各个组件是否启动成功
+### 验证 kafka Connect 是否启动成功
输入命令:
-```
-confluent local services status
+```shell
+curl http://localhost:8083/connectors
```
如果各组件都启动成功,会得到如下输出:
+```txt
+[]
```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
-```
-
-### 验证插件是否安装成功
-
-在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
-
-```
-confluent local services connect plugin list
-```
-
-如果成功安装,会输出如下:
-
-```txt {4,9}
-Available Connect Plugins:
-[
- {
- "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
- "type": "sink",
- "version": "1.0.0"
- },
- {
- "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
- "type": "source",
- "version": "1.0.0"
- },
-......
-```
-
-如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
-```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。
-
-与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
-
## TDengine Sink Connector 的使用
@@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
-### 添加配置文件
+### 添加 Sink Connector 配置文件
-```
+```shell
mkdir ~/test
cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
```
-sink-demo.properties 内容如下:
+sink-demo.json 内容如下:
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="sink-demo.json"
+{
+ "name": "TDengineSinkConnector",
+ "config": {
+ "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "tasks.max": "1",
+ "topics": "meters",
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.user": "root",
+ "connection.password": "taosdata",
+ "connection.database": "power",
+ "db.schemaless": "line",
+ "data.precision": "ns",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": 1
+ }
+}
```
关键配置说明:
-1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。
-2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。
+1. `"topics": "meters"` 和 `"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。
+2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。
-### 创建 Connector 实例
+### 创建 Sink Connector 实例
-```
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+```shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```
若以上命令执行成功,则有如下输出:
@@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
"tasks.max": "1",
"topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
- "name": "TDengineSinkConnector"
+ "name": "TDengineSinkConnector",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": "1",
},
"tasks": [],
"type": "sink"
@@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
使用 kafka-console-producer 向主题 meters 添加测试数据。
-```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+```shell
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```
:::note
@@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
使用 TDengine CLI 验证同步是否成功。
-```
+```sql
taos> use power;
Database changed.
taos> select * from meters;
- ts | current | voltage | phase | groupid | location |
+ _ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -295,31 +214,39 @@ TDengine Source Connector 的作用是将 TDengine 某个数据库某一时刻
TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [InfluxDB Line 协议格式](/develop/insert-data/influxdb-line/) 或 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json), 然后写入 Kafka。
-下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
+下面的示例程序同步数据库 test 中的数据到主题 tdengine-test-meters。
-### 添加配置文件
+### 添加 Source Connector 配置文件
-```
-vi source-demo.properties
+```shell
+vi source-demo.json
```
输入以下内容:
-```ini title="source-demo.properties"
-name=TDengineSourceConnector
-connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
-tasks.max=1
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.username=root
-connection.password=taosdata
-connection.database=test
-connection.attempts=3
-connection.backoff.ms=5000
-topic.prefix=tdengine-source-
-poll.interval.ms=1000
-fetch.max.rows=100
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="source-demo.json"
+{
+ "name":"TDengineSourceConnector",
+ "config":{
+ "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+ "tasks.max": 1,
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.username": "root",
+ "connection.password": "taosdata",
+ "connection.database": "test",
+ "connection.attempts": 3,
+ "connection.backoff.ms": 5000,
+ "topic.prefix": "tdengine",
+ "topic.delimiter": "-",
+ "poll.interval.ms": 1000,
+ "fetch.max.rows": 100,
+ "topic.per.stable": true,
+ "topic.ignore.db": false,
+ "out.format": "line",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter"
+ }
+}
```
### 准备测试数据
@@ -344,27 +271,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
使用 TDengine CLI, 执行 SQL 文件。
-```
+```shell
taos -f prepare-source-data.sql
```
-### 创建 Connector 实例
+### 创建 Source Connector 实例
-```
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
+```shell
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```
### 查看 topic 数据
-使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
+使用 kafka-console-consumer 命令行工具监控主题 tdengine-test-meters 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
-```
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+```shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
```
输出:
-```
+```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
@@ -373,7 +300,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据:
-```
+```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
@@ -387,15 +314,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
查看当前活跃的 connector:
-```
-confluent local services connect connector status
+```shell
+curl http://localhost:8083/connectors
```
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
-```
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
```
## 配置参考
@@ -437,20 +364,20 @@ confluent local services connect connector unload TDengineSourceConnector
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始。
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
-6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000.
-7. `topic.per.stable`: 如果设置为true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `--`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `-`
+6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 0,即获取到当前最新时间的所有数据。
+7. `out.format` : 结果集输出格式。`line` 表示输出格式为 InfluxDB Line 协议格式,`json` 表示输出格式是 json。默认为 line。
+8. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix><topic.delimiter><connection.database>`
+9. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 `<topic.prefix><topic.delimiter><stable.name>`,false 表示规则为 `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`,默认 false。此配置项在 `topic.per.stable` 设置为 false 时不生效。
+10. `topic.delimiter`: topic 名称分割符,默认为 `-`。
## 其他说明
-1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
-2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
+1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>。
## 问题反馈
-无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
+无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:<https://github.com/taosdata/kafka-connect-tdengine/issues>。
## 参考
-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1. <https://kafka.apache.org/documentation/>
diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
index 5bc2340308..aeb75cc3a2 100644
--- a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
+++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java
@@ -51,27 +51,27 @@ public class JdbcDemo {
private void createDatabase() {
String sql = "create database if not exists " + dbName;
- exuete(sql);
+ execute(sql);
}
private void useDatabase() {
String sql = "use " + dbName;
- exuete(sql);
+ execute(sql);
}
private void dropTable() {
final String sql = "drop table if exists " + dbName + "." + tbName + "";
- exuete(sql);
+ execute(sql);
}
private void createTable() {
final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)";
- exuete(sql);
+ execute(sql);
}
private void insert() {
final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)";
- exuete(sql);
+ execute(sql);
}
private void select() {
@@ -120,7 +120,7 @@ public class JdbcDemo {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
- private void exuete(String sql) {
+ private void execute(String sql) {
long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
boolean execute = statement.execute(sql);
diff --git a/examples/JDBC/consumer-demo/pom.xml b/examples/JDBC/consumer-demo/pom.xml
index aa3cb154e5..6199efb76e 100644
--- a/examples/JDBC/consumer-demo/pom.xml
+++ b/examples/JDBC/consumer-demo/pom.xml
@@ -22,7 +22,7 @@
com.google.guava
guava
- 30.1.1-jre
+ 32.0.0-jre
diff --git a/examples/JDBC/springbootdemo/src/main/resources/application.properties b/examples/JDBC/springbootdemo/src/main/resources/application.properties
index bf21047395..c523952fb6 100644
--- a/examples/JDBC/springbootdemo/src/main/resources/application.properties
+++ b/examples/JDBC/springbootdemo/src/main/resources/application.properties
@@ -5,7 +5,7 @@
#spring.datasource.password=taosdata
# datasource config - JDBC-RESTful
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
-spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
+spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
spring.datasource.username=root
spring.datasource.password=taosdata
spring.datasource.druid.initial-size=5
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index e14c4e60d9..07fc2fd71b 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -42,27 +42,27 @@ IF (TD_LINUX)
)
target_link_libraries(tmq
- taos_static
+ taos
)
target_link_libraries(stream_demo
- taos_static
+ taos
)
target_link_libraries(schemaless
- taos_static
+ taos
)
target_link_libraries(prepare
- taos_static
+ taos
)
target_link_libraries(demo
- taos_static
+ taos
)
target_link_libraries(asyncdemo
- taos_static
+ taos
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index 53fc07c3f3..6cb7d88523 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -248,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
+int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 01281a6dc7..cd423bf4c9 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -82,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
// mnode
extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention;
+extern int8_t tsGrant;
extern bool tsMndSkipGrant;
// monitor
@@ -198,6 +199,7 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite);
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosApplyLocalCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
+int8_t taosGranted();
#ifdef __cplusplus
}
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index d78e771fcf..7c5182d76c 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -1975,6 +1975,7 @@ typedef struct {
SArray* fillNullCols; // array of SColLocation
int64_t deleteMark;
int8_t igUpdate;
+ int64_t lastTs;
} SCMCreateStreamReq;
typedef struct {
@@ -2033,6 +2034,11 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
char clientId[256];
SArray* topicNames; // SArray
+
+ int8_t withTbName;
+ int8_t autoCommit;
+ int32_t autoCommitInterval;
+ int8_t resetOffsetCfg;
} SCMSubscribeReq;
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
@@ -2047,6 +2053,12 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
for (int32_t i = 0; i < topicNum; i++) {
tlen += taosEncodeString(buf, (char*)taosArrayGetP(pReq->topicNames, i));
}
+
+ tlen += taosEncodeFixedI8(buf, pReq->withTbName);
+ tlen += taosEncodeFixedI8(buf, pReq->autoCommit);
+ tlen += taosEncodeFixedI32(buf, pReq->autoCommitInterval);
+ tlen += taosEncodeFixedI8(buf, pReq->resetOffsetCfg);
+
return tlen;
}
@@ -2064,6 +2076,11 @@ static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq
buf = taosDecodeString(buf, &name);
taosArrayPush(pReq->topicNames, &name);
}
+
+ buf = taosDecodeFixedI8(buf, &pReq->withTbName);
+ buf = taosDecodeFixedI8(buf, &pReq->autoCommit);
+ buf = taosDecodeFixedI32(buf, &pReq->autoCommitInterval);
+ buf = taosDecodeFixedI8(buf, &pReq->resetOffsetCfg);
return buf;
}
@@ -2455,15 +2472,6 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
} SMqAskEpReq;
-typedef struct {
- int64_t consumerId;
- int32_t epoch;
-} SMqHbReq;
-
-typedef struct {
- int8_t reserved;
-} SMqHbRsp;
-
typedef struct {
int32_t key;
int32_t valueLen;
@@ -2487,6 +2495,7 @@ typedef struct {
int64_t stime; // timestamp precision ms
int64_t reqRid;
bool stableQuery;
+ bool isSubQuery;
char fqdn[TSDB_FQDN_LEN];
int32_t subPlanNum;
SArray* subDesc; // SArray
@@ -2891,7 +2900,7 @@ int32_t tDecodeSMqCMCommitOffsetReq(SDecoder* decoder, SMqCMCommitOffsetReq* pRe
// tqOffset
enum {
TMQ_OFFSET__RESET_NONE = -3,
- TMQ_OFFSET__RESET_EARLIEAST = -2,
+ TMQ_OFFSET__RESET_EARLIEST = -2,
TMQ_OFFSET__RESET_LATEST = -1,
TMQ_OFFSET__LOG = 1,
TMQ_OFFSET__SNAPSHOT_DATA = 2,
@@ -3354,6 +3363,28 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
taosArrayDestroyEx(pRsp->topics, (FDelete)tDeleteMqSubTopicEp);
}
+typedef struct {
+ int32_t vgId;
+ STqOffsetVal offset;
+ int64_t rows;
+}OffsetRows;
+
+typedef struct{
+ char topicName[TSDB_TOPIC_FNAME_LEN];
+ SArray* offsetRows;
+}TopicOffsetRows;
+
+typedef struct {
+ int64_t consumerId;
+ int32_t epoch;
+ SArray* topics;
+} SMqHbReq;
+
+typedef struct {
+ int8_t reserved;
+} SMqHbRsp;
+
+
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
@@ -3478,10 +3509,8 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
-int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
-int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
-int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
-int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
+int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
+
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 1f2d597496..2cf8eacdac 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -150,7 +150,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
- TD_DEF_MSG_TYPE(TDMT_MND_UNUSED2, "unused2", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_MND_CREATE_VG, "create-vg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_TIMER, "tmq-tmr", SMTimerReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
@@ -310,6 +310,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_ADD_CHECKINFO, "vnode-tmq-add-checkinfo", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_DEL_CHECKINFO, "vnode-del-checkinfo", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_CONSUME, "vnode-tmq-consume", SMqPollReq, SMqDataBlkRsp)
+ TD_DEF_MSG_TYPE(TDMT_VND_TMQ_CONSUME_PUSH, "vnode-tmq-consume-push", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL)
diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h
index 9300deeb9a..7a7a13b285 100644
--- a/include/libs/catalog/catalog.h
+++ b/include/libs/catalog/catalog.h
@@ -87,6 +87,7 @@ typedef struct SCatalogReq {
bool dNodeRequired; // valid dnode
bool svrVerRequired;
bool forceUpdate;
+ bool cloned;
} SCatalogReq;
typedef struct SMetaRes {
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index e015f4182e..c92ce254a8 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -163,6 +163,7 @@ typedef struct {
int64_t checkPointId;
int32_t taskId;
int64_t streamId;
+ int64_t streamBackendRid;
} SStreamState;
typedef struct SFunctionStateStore {
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index f0c9cffd0f..55af50e0bc 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -233,6 +233,7 @@ bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType);
+SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index c8ce9634f5..3a36601b11 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -425,16 +425,18 @@ typedef struct SStreamOptions {
} SStreamOptions;
typedef struct SCreateStreamStmt {
- ENodeType type;
- char streamName[TSDB_TABLE_NAME_LEN];
- char targetDbName[TSDB_DB_NAME_LEN];
- char targetTabName[TSDB_TABLE_NAME_LEN];
- bool ignoreExists;
- SStreamOptions* pOptions;
- SNode* pQuery;
- SNodeList* pTags;
- SNode* pSubtable;
- SNodeList* pCols;
+ ENodeType type;
+ char streamName[TSDB_TABLE_NAME_LEN];
+ char targetDbName[TSDB_DB_NAME_LEN];
+ char targetTabName[TSDB_TABLE_NAME_LEN];
+ bool ignoreExists;
+ SStreamOptions* pOptions;
+ SNode* pQuery;
+ SNode* pPrevQuery;
+ SNodeList* pTags;
+ SNode* pSubtable;
+ SNodeList* pCols;
+ SCMCreateStreamReq* pReq;
} SCreateStreamStmt;
typedef struct SDropStreamStmt {
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 02459ed951..f44b622cc0 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -617,6 +617,7 @@ typedef struct SQueryPlan {
int32_t numOfSubplans;
SNodeList* pSubplans; // Element is SNodeListNode. The execution level of subplan, starting from 0.
SExplainInfo explainInfo;
+ void* pPostPlan;
} SQueryPlan;
const char* dataOrderStr(EDataOrderLevel order);
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 12890571f9..f570698395 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -441,7 +441,9 @@ typedef struct SQuery {
EQueryExecStage execStage;
EQueryExecMode execMode;
bool haveResultSet;
+ SNode* pPrevRoot;
SNode* pRoot;
+ SNode* pPostRoot;
int32_t numOfResCols;
SSchema* pResSchema;
int8_t precision;
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 94fb6824d2..f253b47e50 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -74,6 +74,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
const struct SMetaData* pMetaData, SQuery* pQuery);
int32_t qContinueParseSql(SParseContext* pCxt, struct SCatalogReq* pCatalogReq, const struct SMetaData* pMetaData,
SQuery* pQuery);
+int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pResRow);
void qDestroyParseContext(SParseContext* pCxt);
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index 41c0e98084..1b523c0323 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -52,6 +52,7 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
// @groupId id of a group of datasource subplans of this @pSubplan
// @pSource one execution location of this group of datasource subplans
int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstreamSourceNode* pSource);
+int32_t qContinuePlanPostQuery(void *pPostPlan);
void qClearSubplanExecutionNode(SSubplan* pSubplan);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 8316e6ef50..73c88fae8d 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -327,6 +327,7 @@ struct SStreamTask {
int64_t checkpointingId;
int32_t checkpointAlignCnt;
struct SStreamMeta* pMeta;
+ SSHashObj* pNameMap;
};
// meta
@@ -344,7 +345,6 @@ typedef struct SStreamMeta {
SRWLatch lock;
int32_t walScanCounter;
void* streamBackend;
- int32_t streamBackendId;
int64_t streamBackendRid;
SHashObj* pTaskBackendUnique;
} SStreamMeta;
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index e86a4f9690..2a0a4b0f63 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -154,14 +154,14 @@ typedef struct SSnapshotMeta {
typedef struct SSyncFSM {
void* data;
- int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+ int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
SyncIndex (*FpAppliedIndexCb)(const struct SSyncFSM* pFsm);
- int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
- void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+ int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
+ void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
void (*FpRestoreFinishCb)(const struct SSyncFSM* pFsm, const SyncIndex commitIdx);
- void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SReConfigCbMeta* pMeta);
- void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
+ void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SReConfigCbMeta* pMeta);
+ void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
bool (*FpApplyQueueEmptyCb)(const struct SSyncFSM* pFsm);
int32_t (*FpApplyQueueItems)(const struct SSyncFSM* pFsm);
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index 1aa08ff802..47230bc95c 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -214,7 +214,7 @@ int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
void walRefFirstVer(SWal *, SWalRef *);
void walRefLastVer(SWal *, SWalRef *);
-SWalRef *walRefCommittedVer(SWal *);
+void walRefCommitVer(SWal *, SWalRef *);
SWalRef *walOpenRef(SWal *);
void walCloseRef(SWal *pWal, int64_t refId);
diff --git a/include/os/osMemory.h b/include/os/osMemory.h
index 18cd0d9cc6..683d10e926 100644
--- a/include/os/osMemory.h
+++ b/include/os/osMemory.h
@@ -22,21 +22,20 @@ extern "C" {
// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following sectio
-// #if !defined(WINDOWS)
+#if !defined(WINDOWS)
-// #ifndef ALLOW_FORBID_FUNC
-// #define malloc MALLOC_FUNC_TAOS_FORBID
-// #define calloc CALLOC_FUNC_TAOS_FORBID
-// #define realloc REALLOC_FUNC_TAOS_FORBID
-// #define free FREE_FUNC_TAOS_FORBID
-// #ifdef strdup
-// #undef strdup
-// #define strdup STRDUP_FUNC_TAOS_FORBID
-// #endif
-// #endif // ifndef ALLOW_FORBID_FUNC
-// #endif // if !defined(WINDOWS)
+#ifndef ALLOW_FORBID_FUNC
+#define malloc MALLOC_FUNC_TAOS_FORBID
+#define calloc CALLOC_FUNC_TAOS_FORBID
+#define realloc REALLOC_FUNC_TAOS_FORBID
+#define free FREE_FUNC_TAOS_FORBID
+#ifdef strdup
+#undef strdup
+#define strdup STRDUP_FUNC_TAOS_FORBID
+#endif
+#endif // ifndef ALLOW_FORBID_FUNC
+#endif // if !defined(WINDOWS)
-// // #define taosMemoryFree malloc
// #define taosMemoryMalloc malloc
// #define taosMemoryCalloc calloc
// #define taosMemoryRealloc realloc
diff --git a/include/util/talgo.h b/include/util/talgo.h
index f9d51c4b5b..7c92c0fe87 100644
--- a/include/util/talgo.h
+++ b/include/util/talgo.h
@@ -31,7 +31,7 @@ typedef void *(*__array_item_dup_fn_t)(void *);
typedef void (*FDelete)(void *);
typedef int32_t (*FEncode)(void **buf, const void *dst);
-typedef void *(*FDecode)(const void *buf, void *dst);
+typedef void *(*FDecode)(const void *buf, void *dst, int8_t sver);
#define TD_EQ 0x1
#define TD_GT 0x2
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 9e5229870e..889ee41a29 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -345,7 +345,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_TRANS_CLOG_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x03D4)
#define TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL TAOS_DEF_ERROR_CODE(0, 0x03D5)
#define TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x03D6) //internal
-#define TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7)
+#define TSDB_CODE_MND_TRANS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7)
#define TSDB_CODE_MND_TRANS_UNKNOW_ERROR TAOS_DEF_ERROR_CODE(0, 0x03DF)
// mnode-mq
diff --git a/include/util/tarray.h b/include/util/tarray.h
index 4bf24b46b9..a93c695370 100644
--- a/include/util/tarray.h
+++ b/include/util/tarray.h
@@ -244,7 +244,7 @@ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t
void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param);
int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode);
-void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz);
+void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver);
#ifdef __cplusplus
}
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 37eeb87fdd..69b012ecea 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -195,6 +195,7 @@ typedef enum ELogicConditionType {
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
+#define TSDB_OFFSET_LEN 64 // it is a null-terminated string
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 65
diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py
index 2edeeb6dbb..96e2378fb3 100755
--- a/packaging/checkPackageRuning.py
+++ b/packaging/checkPackageRuning.py
@@ -42,8 +42,8 @@ else:
# os.system("rm -rf /var/lib/taos/*")
# os.system("systemctl restart taosd ")
-# wait a moment ,at least 5 seconds
-time.sleep(5)
+# wait a moment, at least 10 seconds
+time.sleep(10)
# prepare data by taosBenchmark
diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst
index d6558d5b3b..904a946e20 100644
--- a/packaging/deb/DEBIAN/preinst
+++ b/packaging/deb/DEBIAN/preinst
@@ -80,5 +80,4 @@ fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
-[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 8f8d472867..0d63115a04 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -40,7 +40,6 @@ else
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
- [ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
${csudo}rm -f ${log_link_dir} || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 024c69deb1..07819159c4 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -31,7 +31,6 @@ cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
wslibfile="libtaosws.so"
-rocksdblib="librocksdb.so.8"
# create install dir
install_home_path="/usr/local/taos"
@@ -95,7 +94,6 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
-[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||:
[ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
@@ -126,12 +124,12 @@ if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
fi
- if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
- cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
- fi
- if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
- cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
- fi
+ # if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
+ # cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
+ # fi
+ # if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
+ # cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
+ # fi
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
fi
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 2b056c376a..846d17e7f6 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -45,7 +45,6 @@ echo buildroot: %{buildroot}
libfile="libtaos.so.%{_version}"
wslibfile="libtaosws.so"
-rocksdblib="librocksdb.so.8"
# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
@@ -93,7 +92,6 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
-[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||:
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
@@ -125,12 +123,12 @@ if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib
ln -sf libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib/libjemalloc.so
fi
- if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
- cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib
- fi
- if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
- cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib
- fi
+# if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
+# cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib
+# fi
+# if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
+# cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib
+# fi
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{homepath}/jemalloc/lib/pkgconfig
fi
@@ -176,7 +174,6 @@ fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f %{homepath}/driver/libtaos* || :
-${csudo}rm -f %{homepath}/driver/librocksdb* || :
#Scripts executed after installation
%post
@@ -222,7 +219,6 @@ if [ $1 -eq 0 ];then
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
- ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 9aa019f218..f311714f3d 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -250,30 +250,18 @@ function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
- ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
- ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
- ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
- ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
-
- ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
- ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
-
-
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
- ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || :
- ${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || :
-
[ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi
@@ -327,13 +315,13 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
- if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
- ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
- fi
- if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
- ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
- fi
- if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ # if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ # fi
+ # if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ # fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
diff --git a/packaging/tools/install_client.sh b/packaging/tools/install_client.sh
index 53b9c80f10..8b845ca8f4 100755
--- a/packaging/tools/install_client.sh
+++ b/packaging/tools/install_client.sh
@@ -214,13 +214,13 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
- if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
- ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
- fi
- if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
- ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
- fi
- if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ # if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ # fi
+ # if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ # fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 98c5245cd3..c5c70e0aa2 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -241,10 +241,10 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so > /dev/null 2>&1
${csudo}/usr/bin/install -c -d /usr/local/lib
- [ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
- ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
- [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
- ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
+ # [ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
+ # ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
+ # [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
+ # ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc \
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index c4b074f903..cd59294fe7 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -118,12 +118,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
- if [ -f ${build_dir}/lib/libjemalloc.a ]; then
- cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
- fi
- if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
- cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
- fi
+ # if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ # cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ # fi
+ # if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ # cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ # fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index ab45c684c4..6c389502b7 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -111,11 +111,9 @@ fi
if [ "$osType" == "Darwin" ]; then
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
wslib_files="${build_dir}/lib/libtaosws.dylib"
- rocksdb_lib_files="${build_dir}/lib/librocksdb.dylib.8.1.1"
else
lib_files="${build_dir}/lib/libtaos.so.${version}"
wslib_files="${build_dir}/lib/libtaosws.so"
- rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1"
fi
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
@@ -219,12 +217,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
fi
- if [ -f ${build_dir}/lib/libjemalloc.a ]; then
- cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
- fi
- if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
- cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
- fi
+ # if [ -f ${build_dir}/lib/libjemalloc.a ]; then
+ # cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
+ # fi
+ # if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
+ # cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
+ # fi
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
fi
@@ -338,7 +336,6 @@ fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
-[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || :
# Copy connector
if [ "$verMode" == "cluster" ]; then
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index 10de87966f..e79a10c9e9 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -169,13 +169,13 @@ function install_jemalloc() {
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo}/usr/bin/install -c -d /usr/local/lib
- if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
- ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
- fi
- if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
- ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
- fi
- if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ # if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
+ # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
+ # fi
+ # if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
+ # ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
+ # fi
+ if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
fi
@@ -202,19 +202,10 @@ function install_lib() {
log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
${csudo}rm -f ${lib_link_dir}/libtaos* || :
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
-
- #rocksdb
- [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
- [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
-
- #rocksdb
- [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
- [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
[ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
[ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
- ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
@@ -223,7 +214,6 @@ function install_lib() {
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
- ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
fi
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index a17b29983c..be2c26c309 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -142,11 +142,9 @@ function clean_local_bin() {
function clean_lib() {
# Remove link
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
- ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
- ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
[ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 18891bb932..fa444779f3 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -227,6 +227,12 @@ typedef struct {
STaosxRsp rsp;
} SMqTaosxRspObj;
+typedef struct SReqRelInfo {
+ uint64_t userRefId;
+ uint64_t prevRefId;
+ uint64_t nextRefId;
+} SReqRelInfo;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -250,10 +256,14 @@ typedef struct SRequestObj {
bool validateOnly; // todo refactor
bool killed;
bool inRetry;
+ bool isSubReq;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
int64_t allocatorRefId;
SQuery* pQuery;
+ void* pPostPlan;
+ SReqRelInfo relation;
+ void* pWrapper;
} SRequestObj;
typedef struct SSyncQueryParam {
@@ -279,6 +289,7 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly,
int64_t reqid);
+void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param);
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
@@ -368,6 +379,7 @@ typedef struct SSqlCallbackWrapper {
SParseContext* pParseCtx;
SCatalogReq* pCatalogReq;
SRequestObj* pRequest;
+ void* pPlanInfo;
} SSqlCallbackWrapper;
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res);
@@ -382,6 +394,12 @@ int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
void continueInsertFromCsv(SSqlCallbackWrapper* pWrapper, SRequestObj* pRequest);
void destorySqlCallbackWrapper(SSqlCallbackWrapper* pWrapper);
+void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code);
+void restartAsyncQuery(SRequestObj *pRequest, int32_t code);
+int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest);
+int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce);
+void returnToUser(SRequestObj* pRequest);
+void stopAllQueries(SRequestObj *pRequest);
#ifdef __cplusplus
}
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 045642c2c2..c64bbfbdb6 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -358,6 +358,49 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri
int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); }
+
+void destroySubRequests(SRequestObj *pRequest) {
+ int32_t reqIdx = -1;
+ SRequestObj *pReqList[16] = {NULL};
+ uint64_t tmpRefId = 0;
+
+ if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) {
+ return;
+ }
+
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ tmpRefId = pTmp->relation.prevRefId;
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ pReqList[++reqIdx] = pTmp;
+ releaseRequest(tmpRefId);
+ } else {
+ tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self,
+ tmpRefId, pTmp->requestId);
+ break;
+ }
+ }
+
+ for (int32_t i = reqIdx; i >= 0; i--) {
+ removeRequest(pReqList[i]->self);
+ }
+
+ tmpRefId = pRequest->relation.nextRefId;
+ while (tmpRefId) {
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ tmpRefId = pTmp->relation.nextRefId;
+ removeRequest(pTmp->self);
+ releaseRequest(pTmp->self);
+ } else {
+ tscError("0x%" PRIx64 " is not there", tmpRefId);
+ break;
+ }
+ }
+}
+
+
void doDestroyRequest(void *p) {
if (NULL == p) {
return;
@@ -368,10 +411,14 @@ void doDestroyRequest(void *p) {
uint64_t reqId = pRequest->requestId;
tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, pRequest);
+ destroySubRequests(pRequest);
+
taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
schedulerFreeJob(&pRequest->body.queryJob, 0);
+ destorySqlCallbackWrapper(pRequest->pWrapper);
+
taosMemoryFreeClear(pRequest->msgBuf);
taosMemoryFreeClear(pRequest->pDb);
@@ -412,6 +459,63 @@ void destroyRequest(SRequestObj *pRequest) {
removeRequest(pRequest->self);
}
+void taosStopQueryImpl(SRequestObj *pRequest) {
+ pRequest->killed = true;
+
+ // It is not a query, no need to stop.
+ if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
+ tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
+ return;
+ }
+
+ schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
+ tscDebug("request %" PRIx64 " killed", pRequest->requestId);
+}
+
+void stopAllQueries(SRequestObj *pRequest) {
+ int32_t reqIdx = -1;
+ SRequestObj *pReqList[16] = {NULL};
+ uint64_t tmpRefId = 0;
+
+ if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) {
+ return;
+ }
+
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ tmpRefId = pTmp->relation.prevRefId;
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ pReqList[++reqIdx] = pTmp;
+ releaseRequest(tmpRefId);
+ } else {
+ tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self,
+ tmpRefId, pTmp->requestId);
+ break;
+ }
+ }
+
+ for (int32_t i = reqIdx; i >= 0; i--) {
+ taosStopQueryImpl(pReqList[i]);
+ }
+
+ taosStopQueryImpl(pRequest);
+
+ tmpRefId = pRequest->relation.nextRefId;
+ while (tmpRefId) {
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ tmpRefId = pTmp->relation.nextRefId;
+ taosStopQueryImpl(pTmp);
+ releaseRequest(pTmp->self);
+ } else {
+ tscError("0x%" PRIx64 " is not there", tmpRefId);
+ break;
+ }
+ }
+}
+
+
void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }
static void *tscCrashReportThreadFp(void *param) {
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 2dddfec2bd..cbfa48b322 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -464,6 +464,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
desc.useconds = now - pRequest->metric.start;
desc.reqRid = pRequest->self;
desc.stableQuery = pRequest->stableQuery;
+ desc.isSubQuery = pRequest->isSubReq;
taosGetFqdn(desc.fqdn);
desc.subPlanNum = pRequest->body.subplanNum;
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 5963e419e1..2a73156e8a 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -237,6 +237,17 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
return TSDB_CODE_SUCCESS;
}
+int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest) {
+ int32_t code = buildRequest(pRequest->pTscObj->id, sql, strlen(sql), pRequest, pRequest->validateOnly, pNewRequest, 0);
+ if (TSDB_CODE_SUCCESS == code) {
+ pRequest->relation.prevRefId = (*pNewRequest)->self;
+ (*pNewRequest)->relation.nextRefId = pRequest->self;
+ (*pNewRequest)->relation.userRefId = pRequest->self;
+ (*pNewRequest)->isSubReq = true;
+ }
+ return code;
+}
+
int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb) {
STscObj* pTscObj = pRequest->pTscObj;
@@ -878,6 +889,81 @@ static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
+void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) {
+ SSqlCallbackWrapper* pWrapper = pRequest->pWrapper;
+ int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ int64_t analyseStart = taosGetTimestampUs();
+ code = qContinueParsePostQuery(pWrapper->pParseCtx, pRequest->pQuery, (void**)row);
+ pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = qContinuePlanPostQuery(pRequest->pPostPlan);
+ }
+ nodesReleaseAllocator(pWrapper->pParseCtx->allocatorId);
+
+ handleQueryAnslyseRes(pWrapper, NULL, code);
+}
+
+void returnToUser(SRequestObj* pRequest) {
+ if (pRequest->relation.userRefId == pRequest->self || 0 == pRequest->relation.userRefId) {
+ // return to client
+ pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
+ return;
+ }
+
+ SRequestObj* pUserReq = acquireRequest(pRequest->relation.userRefId);
+ if (pUserReq) {
+ pUserReq->code = pRequest->code;
+ // return to client
+ pUserReq->body.queryFp(pUserReq->body.param, pUserReq, pUserReq->code);
+ releaseRequest(pRequest->relation.userRefId);
+ return;
+ } else {
+ tscError("0x%" PRIx64 ", user ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+ pRequest->relation.userRefId, pRequest->requestId);
+ }
+}
+
+void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
+ SRequestObj* pRequest = (SRequestObj*)res;
+ if (pRequest->code) {
+ returnToUser(pRequest);
+ return;
+ }
+
+ TAOS_ROW row = NULL;
+ if (rowNum > 0) {
+ row = taos_fetch_row(res); // for single row only now
+ }
+
+ SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
+ if (pNextReq) {
+ continuePostSubQuery(pNextReq, row);
+ releaseRequest(pRequest->relation.nextRefId);
+ } else {
+ tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+ pRequest->relation.nextRefId, pRequest->requestId);
+ }
+}
+
+void handlePostSubQuery(SSqlCallbackWrapper* pWrapper) {
+ SRequestObj* pRequest = pWrapper->pRequest;
+ if (TD_RES_QUERY(pRequest)) {
+ taosAsyncFetchImpl(pRequest, postSubQueryFetchCb, pWrapper);
+ return;
+ }
+
+ SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
+ if (pNextReq) {
+ continuePostSubQuery(pNextReq, NULL);
+ releaseRequest(pRequest->relation.nextRefId);
+ } else {
+ tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
+ pRequest->relation.nextRefId, pRequest->requestId);
+ }
+}
+
// todo refacto the error code mgmt
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SSqlCallbackWrapper* pWrapper = param;
@@ -912,12 +998,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->retry, pRequest->requestId);
- pRequest->prevCode = code;
- schedulerFreeJob(&pRequest->body.queryJob, 0);
- qDestroyQuery(pRequest->pQuery);
- pRequest->pQuery = NULL;
- destorySqlCallbackWrapper(pWrapper);
- doAsyncQuery(pRequest, true);
+ restartAsyncQuery(pRequest, code);
return;
}
@@ -938,10 +1019,15 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
return;
}
- destorySqlCallbackWrapper(pWrapper);
+ if (pRequest->relation.nextRefId) {
+ handlePostSubQuery(pWrapper);
+ } else {
+ destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
- // return to client
- pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ // return to client
+ pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ }
}
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res) {
@@ -1049,6 +1135,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
pRequest->requestId);
} else {
pRequest->body.subplanNum = pDag->numOfSubplans;
+ TSWAP(pRequest->pPostPlan, pDag->pPostPlan);
}
pRequest->metric.execStart = taosGetTimestampUs();
@@ -1084,6 +1171,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
if (TSDB_CODE_SUCCESS != code) {
pRequest->code = terrno;
}
@@ -1103,6 +1191,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.execMode = pQuery->execMode;
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
}
if (pQuery->pRoot && !pRequest->inRetry) {
@@ -2402,3 +2491,90 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
return pRequest;
}
+
+
+static void fetchCallback(void *pResult, void *param, int32_t code) {
+ SRequestObj *pRequest = (SRequestObj *)param;
+
+ SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
+
+ tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
+ tstrerror(code), pRequest->requestId);
+
+ pResultInfo->pData = pResult;
+ pResultInfo->numOfRows = 0;
+
+ if (code != TSDB_CODE_SUCCESS) {
+ pRequest->code = code;
+ taosMemoryFreeClear(pResultInfo->pData);
+ pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
+ return;
+ }
+
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ taosMemoryFreeClear(pResultInfo->pData);
+ pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
+ return;
+ }
+
+ pRequest->code =
+ setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
+ if (pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ pRequest->code = code;
+ tscError("0x%" PRIx64 " fetch results failed, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
+ pRequest->requestId);
+ } else {
+ tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
+ pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
+ pRequest->requestId);
+
+ STscObj *pTscObj = pRequest->pTscObj;
+ SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ }
+
+ pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
+}
+
+void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param) {
+ pRequest->body.fetchFp = fp;
+ pRequest->body.param = param;
+
+ SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
+
+ // this query has no results or error exists, return directly
+ if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
+ pResultInfo->numOfRows = 0;
+ pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
+ return;
+ }
+
+ // all data has returned to App already, no need to try again
+ if (pResultInfo->completed) {
+ // it is a local executed query, no need to do async fetch
+ if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
+ if (pResultInfo->localResultFetched) {
+ pResultInfo->numOfRows = 0;
+ pResultInfo->current = 0;
+ } else {
+ pResultInfo->localResultFetched = true;
+ }
+ } else {
+ pResultInfo->numOfRows = 0;
+ }
+
+ pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
+ return;
+ }
+
+ SSchedulerReq req = {
+ .syncReq = false,
+ .fetchFp = fetchCallback,
+ .cbParam = pRequest,
+ };
+
+ schedulerFetchRows(pRequest->body.queryJob, &req);
+}
+
+
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 63a4e5d2e5..7573fd5968 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -563,22 +563,13 @@ int taos_select_db(TAOS *taos, const char *db) {
return code;
}
+
void taos_stop_query(TAOS_RES *res) {
if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res) || TD_RES_TMQ_METADATA(res)) {
return;
}
- SRequestObj *pRequest = (SRequestObj *)res;
- pRequest->killed = true;
-
- // It is not a query, no need to stop.
- if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
- tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
- return;
- }
-
- schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
- tscDebug("request %" PRIx64 " killed", pRequest->requestId);
+ stopAllQueries((SRequestObj*)res);
}
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
@@ -774,8 +765,13 @@ static void destoryCatalogReq(SCatalogReq *pCatalogReq) {
taosArrayDestroy(pCatalogReq->pDbVgroup);
taosArrayDestroy(pCatalogReq->pDbCfg);
taosArrayDestroy(pCatalogReq->pDbInfo);
- taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
- taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
+ if (pCatalogReq->cloned) {
+ taosArrayDestroy(pCatalogReq->pTableMeta);
+ taosArrayDestroy(pCatalogReq->pTableHash);
+ } else {
+ taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
+ taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
+ }
taosArrayDestroy(pCatalogReq->pUdf);
taosArrayDestroy(pCatalogReq->pIndex);
taosArrayDestroy(pCatalogReq->pUser);
@@ -794,26 +790,108 @@ void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) {
taosMemoryFree(pWrapper);
}
+void destroyCtxInRequest(SRequestObj* pRequest) {
+ schedulerFreeJob(&pRequest->body.queryJob, 0);
+ qDestroyQuery(pRequest->pQuery);
+ pRequest->pQuery = NULL;
+ destorySqlCallbackWrapper(pRequest->pWrapper);
+ pRequest->pWrapper = NULL;
+}
+
+
static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t code) {
SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)param;
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
- int64_t analyseStart = taosGetTimestampUs();
- pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
- if (code == TSDB_CODE_SUCCESS) {
+ int64_t analyseStart = taosGetTimestampUs();
+ pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
+
+ if (TSDB_CODE_SUCCESS == code) {
code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
+ }
+
+ pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
+
+ handleQueryAnslyseRes(pWrapper, pResultMeta, code);
+}
+
+int32_t cloneCatalogReq(SCatalogReq* * ppTarget, SCatalogReq* pSrc) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SCatalogReq* pTarget = taosMemoryCalloc(1, sizeof(SCatalogReq));
+ if (pTarget == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ pTarget->pDbVgroup = taosArrayDup(pSrc->pDbVgroup, NULL);
+ pTarget->pDbCfg = taosArrayDup(pSrc->pDbCfg, NULL);
+ pTarget->pDbInfo = taosArrayDup(pSrc->pDbInfo, NULL);
+ pTarget->pTableMeta = taosArrayDup(pSrc->pTableMeta, NULL);
+ pTarget->pTableHash = taosArrayDup(pSrc->pTableHash, NULL);
+ pTarget->pUdf = taosArrayDup(pSrc->pUdf, NULL);
+ pTarget->pIndex = taosArrayDup(pSrc->pIndex, NULL);
+ pTarget->pUser = taosArrayDup(pSrc->pUser, NULL);
+ pTarget->pTableIndex = taosArrayDup(pSrc->pTableIndex, NULL);
+ pTarget->pTableCfg = taosArrayDup(pSrc->pTableCfg, NULL);
+ pTarget->pTableTag = taosArrayDup(pSrc->pTableTag, NULL);
+ pTarget->qNodeRequired = pSrc->qNodeRequired;
+ pTarget->dNodeRequired = pSrc->dNodeRequired;
+ pTarget->svrVerRequired = pSrc->svrVerRequired;
+ pTarget->forceUpdate = pSrc->forceUpdate;
+ pTarget->cloned = true;
+
+ *ppTarget = pTarget;
+ }
+
+ return code;
+}
+
+
+void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, SNode* pRoot) {
+ SRequestObj* pNewRequest = NULL;
+ SSqlCallbackWrapper* pNewWrapper = NULL;
+ int32_t code = buildPreviousRequest(pWrapper->pRequest, pWrapper->pRequest->sqlstr, &pNewRequest);
+ if (code) {
+ handleQueryAnslyseRes(pWrapper, pResultMeta, code);
+ return;
+ }
+
+ pNewRequest->pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pNewRequest->pQuery) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ pNewRequest->pQuery->pRoot = pRoot;
+ pRoot = NULL;
+ pNewRequest->pQuery->execStage = QUERY_EXEC_STAGE_ANALYSE;
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = prepareAndParseSqlSyntax(&pNewWrapper, pNewRequest, false);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = cloneCatalogReq(&pNewWrapper->pCatalogReq, pWrapper->pCatalogReq);
+ }
+ doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
+ nodesDestroyNode(pRoot);
+}
+
+void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code) {
+ SRequestObj *pRequest = pWrapper->pRequest;
+ SQuery *pQuery = pRequest->pQuery;
+
+ if (code == TSDB_CODE_SUCCESS && pQuery->pPrevRoot) {
+ SNode* prevRoot = pQuery->pPrevRoot;
+ pQuery->pPrevRoot = NULL;
+ handleSubQueryFromAnalyse(pWrapper, pResultMeta, prevRoot);
+ return;
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
pRequest->stableQuery = pQuery->stableQuery;
if (pQuery->pRoot) {
pRequest->stmtType = pQuery->pRoot->type;
}
- }
- pRequest->metric.analyseCostUs = taosGetTimestampUs() - analyseStart;
-
- if (code == TSDB_CODE_SUCCESS) {
if (pQuery->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);
setResPrecision(&pRequest->body.resInfo, pQuery->precision);
@@ -826,14 +904,14 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper);
} else {
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
- pRequest->prevCode = code;
- doAsyncQuery(pRequest, true);
+ restartAsyncQuery(pRequest, code);
return;
}
@@ -841,7 +919,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
tscError("0x%" PRIx64 " error occurs, code:%s, return to user app, reqId:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->requestId);
pRequest->code = code;
- pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ returnToUser(pRequest);
}
}
@@ -904,6 +982,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -920,6 +999,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -967,27 +1047,16 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
return TSDB_CODE_SUCCESS;
}
-void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
+int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce) {
+ int32_t code = TSDB_CODE_SUCCESS;
STscObj *pTscObj = pRequest->pTscObj;
- SSqlCallbackWrapper *pWrapper = NULL;
- int32_t code = TSDB_CODE_SUCCESS;
-
- if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
- code = pRequest->prevCode;
- terrno = code;
- pRequest->code = code;
- tscDebug("call sync query cb with code: %s", tstrerror(code));
- pRequest->body.queryFp(pRequest->body.param, pRequest, code);
- return;
- }
-
- if (TSDB_CODE_SUCCESS == code) {
- pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
- if (pWrapper == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- } else {
- pWrapper->pRequest = pRequest;
- }
+ SSqlCallbackWrapper *pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
+ if (pWrapper == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ pWrapper->pRequest = pRequest;
+ pRequest->pWrapper = pWrapper;
+ *ppWrapper = pWrapper;
}
if (TSDB_CODE_SUCCESS == code) {
@@ -999,7 +1068,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pWrapper->pParseCtx->pCatalog);
}
- if (TSDB_CODE_SUCCESS == code) {
+ if (TSDB_CODE_SUCCESS == code && NULL == pRequest->pQuery) {
int64_t syntaxStart = taosGetTimestampUs();
pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq));
@@ -1014,6 +1083,27 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
pRequest->metric.parseCostUs += taosGetTimestampUs() - syntaxStart;
}
+ return code;
+}
+
+
+void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
+ SSqlCallbackWrapper *pWrapper = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
+ code = pRequest->prevCode;
+ terrno = code;
+ pRequest->code = code;
+ tscDebug("call sync query cb with code: %s", tstrerror(code));
+ pRequest->body.queryFp(pRequest->body.param, pRequest, code);
+ return;
+ }
+
+ if (TSDB_CODE_SUCCESS == code) {
+ code = prepareAndParseSqlSyntax(&pWrapper, pRequest, updateMetaForce);
+ }
+
if (TSDB_CODE_SUCCESS == code) {
pRequest->stmtType = pRequest->pQuery->pRoot->type;
code = phaseAsyncQuery(pWrapper);
@@ -1023,6 +1113,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
+ pRequest->pWrapper = NULL;
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
@@ -1040,48 +1131,57 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
}
-static void fetchCallback(void *pResult, void *param, int32_t code) {
- SRequestObj *pRequest = (SRequestObj *)param;
-
- SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
-
- tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
- tstrerror(code), pRequest->requestId);
-
- pResultInfo->pData = pResult;
- pResultInfo->numOfRows = 0;
-
- if (code != TSDB_CODE_SUCCESS) {
- pRequest->code = code;
- taosMemoryFreeClear(pResultInfo->pData);
- pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
- return;
+void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
+ int32_t reqIdx = 0;
+ SRequestObj *pReqList[16] = {NULL};
+ SRequestObj *pUserReq = NULL;
+ pReqList[0] = pRequest;
+ uint64_t tmpRefId = 0;
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ tmpRefId = pTmp->relation.prevRefId;
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ pReqList[++reqIdx] = pTmp;
+ releaseRequest(tmpRefId);
+ } else {
+ tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self,
+ tmpRefId, pTmp->requestId);
+ break;
+ }
}
- if (pRequest->code != TSDB_CODE_SUCCESS) {
- taosMemoryFreeClear(pResultInfo->pData);
- pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
- return;
+ tmpRefId = pRequest->relation.nextRefId;
+ while (tmpRefId) {
+ pTmp = acquireRequest(tmpRefId);
+ if (pTmp) {
+ tmpRefId = pTmp->relation.nextRefId;
+ removeRequest(pTmp->self);
+ releaseRequest(pTmp->self);
+ } else {
+ tscError("0x%" PRIx64 " is not there", tmpRefId);
+ break;
+ }
}
- pRequest->code =
- setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
- if (pRequest->code != TSDB_CODE_SUCCESS) {
- pResultInfo->numOfRows = 0;
- pRequest->code = code;
- tscError("0x%" PRIx64 " fetch results failed, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
- pRequest->requestId);
+ for (int32_t i = reqIdx; i >= 0; i--) {
+ destroyCtxInRequest(pReqList[i]);
+ if (pReqList[i]->relation.userRefId == pReqList[i]->self || 0 == pReqList[i]->relation.userRefId) {
+ pUserReq = pReqList[i];
+ } else {
+ removeRequest(pReqList[i]->self);
+ }
+ }
+
+ if (pUserReq) {
+ pUserReq->prevCode = code;
+ memset(&pUserReq->relation, 0, sizeof(pUserReq->relation));
} else {
- tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
- pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
- pRequest->requestId);
-
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ tscError("user req is missing");
+ return;
}
- pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
+ doAsyncQuery(pUserReq, true);
}
void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
@@ -1095,43 +1195,8 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}
SRequestObj *pRequest = res;
- pRequest->body.fetchFp = fp;
- pRequest->body.param = param;
- SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
-
- // this query has no results or error exists, return directly
- if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
- pResultInfo->numOfRows = 0;
- pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
- return;
- }
-
- // all data has returned to App already, no need to try again
- if (pResultInfo->completed) {
- // it is a local executed query, no need to do async fetch
- if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
- if (pResultInfo->localResultFetched) {
- pResultInfo->numOfRows = 0;
- pResultInfo->current = 0;
- } else {
- pResultInfo->localResultFetched = true;
- }
- } else {
- pResultInfo->numOfRows = 0;
- }
-
- pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
- return;
- }
-
- SSchedulerReq req = {
- .syncReq = false,
- .fetchFp = fetchCallback,
- .cbParam = pRequest,
- };
-
- schedulerFetchRows(pRequest->body.queryJob, &req);
+ taosAsyncFetchImpl(pRequest, fp, param);
}
void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index 6d53f2b4c5..d6fdb29b59 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -77,6 +77,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
+ tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
goto End;
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index bea237d09e..503120fe85 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -1553,17 +1553,8 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
}
}
- char cTmp = 0; // for print tmp if is raw
- if (info->isRawLine) {
- cTmp = tmp[len];
- tmp[len] = '\0';
- }
-
uDebug("SML:0x%" PRIx64 " smlParseLine israw:%d, numLines:%d, protocol:%d, len:%d, sql:%s", info->id,
- info->isRawLine, numLines, info->protocol, len, tmp);
- if (info->isRawLine) {
- tmp[len] = cTmp;
- }
+ info->isRawLine, numLines, info->protocol, len, info->isRawLine ? "rawdata" : tmp);
if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
if (info->dataFormat) {
@@ -1584,8 +1575,7 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE;
}
if (code != TSDB_CODE_SUCCESS) {
- tmp[len] = '\0';
- uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp);
+ uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, info->isRawLine ? "rawdata" : tmp);
return code;
}
if (info->reRun) {
@@ -1756,9 +1746,8 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
request->code = code;
info->cost.endTime = taosGetTimestampUs();
info->cost.code = code;
- if (code == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || code == TSDB_CODE_SDB_OBJ_CREATING ||
- code == TSDB_CODE_PAR_VALUE_TOO_LONG || code == TSDB_CODE_MND_TRANS_CONFLICT ||
- code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ if (NEED_CLIENT_HANDLE_ERROR(code) || code == TSDB_CODE_SDB_OBJ_CREATING ||
+ code == TSDB_CODE_PAR_VALUE_TOO_LONG || code == TSDB_CODE_MND_TRANS_CONFLICT) {
if (cnt++ >= 10) {
uInfo("SML:%" PRIx64 " retry:%d/10 end code:%d, msg:%s", info->id, cnt, code, tstrerror(code));
break;
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index e1b2b9c48b..e7927cd0ae 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -82,7 +82,7 @@ struct tmq_t {
int8_t useSnapshot;
int8_t autoCommit;
int32_t autoCommitInterval;
- int32_t resetOffsetCfg;
+ int8_t resetOffsetCfg;
uint64_t consumerId;
bool hbBgEnable;
tmq_commit_cb* commitCb;
@@ -99,6 +99,7 @@ struct tmq_t {
// poll info
int64_t pollCnt;
int64_t totalRows;
+ bool needReportOffsetRows;
// timer
tmr_h hbLiveTimer;
@@ -264,7 +265,7 @@ tmq_conf_t* tmq_conf_new() {
conf->withTbName = false;
conf->autoCommit = true;
conf->autoCommitInterval = DEFAULT_AUTO_COMMIT_INTERVAL;
- conf->resetOffset = TMQ_OFFSET__RESET_EARLIEAST;
+ conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
conf->hbBgEnable = true;
return conf;
@@ -318,7 +319,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
conf->resetOffset = TMQ_OFFSET__RESET_NONE;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "earliest") == 0) {
- conf->resetOffset = TMQ_OFFSET__RESET_EARLIEAST;
+ conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "latest") == 0) {
conf->resetOffset = TMQ_OFFSET__RESET_LATEST;
@@ -357,7 +358,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}
- if (strcasecmp(key, "enable.heartbeat.background") == 0) {
+// if (strcasecmp(key, "enable.heartbeat.background") == 0) {
// if (strcasecmp(value, "true") == 0) {
// conf->hbBgEnable = true;
// return TMQ_CONF_OK;
@@ -365,10 +366,10 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
// conf->hbBgEnable = false;
// return TMQ_CONF_OK;
// } else {
- tscError("the default value of enable.heartbeat.background is true, can not be seted");
- return TMQ_CONF_INVALID;
+// tscError("the default value of enable.heartbeat.background is true, can not be seted");
+// return TMQ_CONF_INVALID;
// }
- }
+// }
if (strcasecmp(key, "td.connect.ip") == 0) {
conf->ip = taosStrdup(value);
@@ -422,30 +423,30 @@ char** tmq_list_to_c_array(const tmq_list_t* list) {
return container->pData;
}
-static SMqClientVg* foundClientVg(SArray* pTopicList, const char* pName, int32_t vgId, int32_t* index,
- int32_t* numOfVgroups) {
- int32_t numOfTopics = taosArrayGetSize(pTopicList);
- *index = -1;
- *numOfVgroups = 0;
-
- for (int32_t i = 0; i < numOfTopics; ++i) {
- SMqClientTopic* pTopic = taosArrayGet(pTopicList, i);
- if (strcmp(pTopic->topicName, pName) != 0) {
- continue;
- }
-
- *numOfVgroups = taosArrayGetSize(pTopic->vgs);
- for (int32_t j = 0; j < (*numOfVgroups); ++j) {
- SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, j);
- if (pClientVg->vgId == vgId) {
- *index = j;
- return pClientVg;
- }
- }
- }
-
- return NULL;
-}
+//static SMqClientVg* foundClientVg(SArray* pTopicList, const char* pName, int32_t vgId, int32_t* index,
+// int32_t* numOfVgroups) {
+// int32_t numOfTopics = taosArrayGetSize(pTopicList);
+// *index = -1;
+// *numOfVgroups = 0;
+//
+// for (int32_t i = 0; i < numOfTopics; ++i) {
+// SMqClientTopic* pTopic = taosArrayGet(pTopicList, i);
+// if (strcmp(pTopic->topicName, pName) != 0) {
+// continue;
+// }
+//
+// *numOfVgroups = taosArrayGetSize(pTopic->vgs);
+// for (int32_t j = 0; j < (*numOfVgroups); ++j) {
+// SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, j);
+// if (pClientVg->vgId == vgId) {
+// *index = j;
+// return pClientVg;
+// }
+// }
+// }
+//
+// return NULL;
+//}
// Two problems do not need to be addressed here
// 1. update to of epset. the response of poll request will automatically handle this problem
@@ -567,12 +568,12 @@ static int32_t doSendCommitMsg(tmq_t* tmq, SMqClientVg* pVg, const char* pTopicN
atomic_add_fetch_32(&pParamSet->totalRspNum, 1);
SEp* pEp = GET_ACTIVE_EP(&pVg->epSet);
- char offsetBuf[80] = {0};
+ char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffset->offset.val);
- char commitBuf[80] = {0};
+ char commitBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(commitBuf, tListLen(commitBuf), &pVg->offsetInfo.committedOffset);
- tscDebug("consumer:0x%" PRIx64 " topic:%s on vgId:%d send offset:%s prev:%s, ep:%s:%d, ordinal:%d/%d, req:0x%" PRIx64,
+ tscInfo("consumer:0x%" PRIx64 " topic:%s on vgId:%d send offset:%s prev:%s, ep:%s:%d, ordinal:%d/%d, req:0x%" PRIx64,
tmq->consumerId, pOffset->offset.subKey, pVg->vgId, offsetBuf, commitBuf, pEp->fqdn, pEp->port, index + 1,
totalVgroups, pMsgSendInfo->requestId);
@@ -796,6 +797,27 @@ void tmqSendHbReq(void* param, void* tmrId) {
SMqHbReq req = {0};
req.consumerId = tmq->consumerId;
req.epoch = tmq->epoch;
+ if(tmq->needReportOffsetRows){
+ req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows));
+ for(int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++){
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs);
+ TopicOffsetRows* data = taosArrayReserve(req.topics, 1);
+ strcpy(data->topicName, pTopic->topicName);
+ data->offsetRows = taosArrayInit(numOfVgroups, sizeof(OffsetRows));
+ for(int j = 0; j < numOfVgroups; j++){
+ SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
+ OffsetRows* offRows = taosArrayReserve(data->offsetRows, 1);
+ offRows->vgId = pVg->vgId;
+ offRows->rows = pVg->numOfRows;
+ offRows->offset = pVg->offsetInfo.committedOffset;
+ char buf[TSDB_OFFSET_LEN] = {0};
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &offRows->offset);
+ tscInfo("report offset: vgId:%d, offset:%s, rows:%"PRId64, offRows->vgId, buf, offRows->rows);
+ }
+ }
+ tmq->needReportOffsetRows = false;
+ }
int32_t tlen = tSerializeSMqHbReq(NULL, 0, &req);
if (tlen < 0) {
@@ -835,13 +857,14 @@ void tmqSendHbReq(void* param, void* tmrId) {
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
OVER:
+ tDeatroySMqHbReq(&req);
taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
taosReleaseRef(tmqMgmt.rsetId, refId);
}
static void defaultCommitCbFn(tmq_t* pTmq, int32_t code, void* param) {
if (code != 0) {
- tscDebug("consumer:0x%" PRIx64 ", failed to commit offset, code:%s", pTmq->consumerId, tstrerror(code));
+ tscError("consumer:0x%" PRIx64 ", failed to commit offset, code:%s", pTmq->consumerId, tstrerror(code));
}
}
@@ -969,6 +992,14 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
+ if (tmq->autoCommit) {
+ int32_t rsp = tmq_commit_sync(tmq, NULL);
+ if (rsp != 0) {
+ return rsp;
+ }
+ }
+ taosSsleep(2); // sleep 2s for hb to send offset and rows to server
+
int32_t rsp;
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
@@ -1063,6 +1094,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->status = TMQ_CONSUMER_STATUS__INIT;
pTmq->pollCnt = 0;
pTmq->epoch = 0;
+ pTmq->needReportOffsetRows = true;
// set conf
strcpy(pTmq->clientId, conf->clientId);
@@ -1107,7 +1139,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
- char buf[80] = {0};
+ char buf[TSDB_OFFSET_LEN] = {0};
STqOffsetVal offset = {.type = pTmq->resetOffsetCfg};
tFormatOffset(buf, tListLen(buf), &offset);
tscInfo("consumer:0x%" PRIx64 " is setup, refId:%" PRId64
@@ -1123,7 +1155,7 @@ _failed:
}
int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
- const int32_t MAX_RETRY_COUNT = 120 * 60; // let's wait for 2 mins at most
+ const int32_t MAX_RETRY_COUNT = 120 * 2; // let's wait for 2 mins at most
const SArray* container = &topic_list->container;
int32_t sz = taosArrayGetSize(container);
void* buf = NULL;
@@ -1131,7 +1163,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
SCMSubscribeReq req = {0};
int32_t code = 0;
- tscDebug("consumer:0x%" PRIx64 " cgroup:%s, subscribe %d topics", tmq->consumerId, tmq->groupId, sz);
+ tscInfo("consumer:0x%" PRIx64 " cgroup:%s, subscribe %d topics", tmq->consumerId, tmq->groupId, sz);
req.consumerId = tmq->consumerId;
tstrncpy(req.clientId, tmq->clientId, 256);
@@ -1143,6 +1175,11 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
goto FAIL;
}
+ req.withTbName = tmq->withTbName;
+ req.autoCommit = tmq->autoCommit;
+ req.autoCommitInterval = tmq->autoCommitInterval;
+ req.resetOffsetCfg = tmq->resetOffsetCfg;
+
for (int32_t i = 0; i < sz; i++) {
char* topic = taosArrayGetP(container, i);
@@ -1154,7 +1191,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
}
tNameExtractFullName(&name, topicFName);
- tscDebug("consumer:0x%" PRIx64 " subscribe topic:%s", tmq->consumerId, topicFName);
+ tscInfo("consumer:0x%" PRIx64 " subscribe topic:%s", tmq->consumerId, topicFName);
taosArrayPush(req.topicNames, &topicFName);
}
@@ -1215,7 +1252,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
goto FAIL;
}
- tscDebug("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
+ tscInfo("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
taosMsleep(500);
}
@@ -1375,8 +1412,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- char buf[80];
- tFormatOffset(buf, 80, &pRspWrapper->dataRsp.rspOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pRspWrapper->dataRsp.rspOffset);
tscDebug("consumer:0x%" PRIx64 " recv poll rsp, vgId:%d, req ver:%" PRId64 ", rsp:%s type %d, reqId:0x%" PRIx64,
tmq->consumerId, vgId, pRspWrapper->dataRsp.reqOffset.version, buf, rspType, requestId);
} else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
@@ -1442,7 +1479,7 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
tstrncpy(pTopic->topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(pTopic->db, pTopicEp->db, TSDB_DB_FNAME_LEN);
- tscDebug("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d", tmq->consumerId, pTopic->topicName, vgNumGet);
+ tscInfo("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d", tmq->consumerId, pTopic->topicName, vgNumGet);
pTopic->vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg));
for (int32_t j = 0; j < vgNumGet; j++) {
@@ -1495,7 +1532,7 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
- tscDebug("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d",
+ tscInfo("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d",
tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur);
if (epoch <= tmq->epoch) {
return false;
@@ -1518,14 +1555,14 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
if (pTopicCur->vgs) {
int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
- tscDebug("consumer:0x%" PRIx64 ", current vg num: %d", tmq->consumerId, vgNumCur);
+ tscInfo("consumer:0x%" PRIx64 ", current vg num: %d", tmq->consumerId, vgNumCur);
for (int32_t j = 0; j < vgNumCur; j++) {
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
makeTopicVgroupKey(vgKey, pTopicCur->topicName, pVgCur->vgId);
- char buf[80];
- tFormatOffset(buf, 80, &pVgCur->offsetInfo.currentOffset);
- tscDebug("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pVgCur->offsetInfo.currentOffset);
+ tscInfo("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
vgKey, buf);
SVgroupSaveInfo info = {.offset = pVgCur->offsetInfo.currentOffset, .numOfRows = pVgCur->numOfRows};
@@ -1555,7 +1592,7 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
atomic_store_8(&tmq->status, flag);
atomic_store_32(&tmq->epoch, epoch);
- tscDebug("consumer:0x%" PRIx64 " update topic info completed", tmq->consumerId);
+ tscInfo("consumer:0x%" PRIx64 " update topic info completed", tmq->consumerId);
return set;
}
@@ -1591,7 +1628,7 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
SMqRspHead* head = pMsg->pData;
int32_t epoch = atomic_load_32(&tmq->epoch);
if (head->epoch <= epoch) {
- tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep",
+ tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep",
tmq->consumerId, head->epoch, epoch);
if (tmq->status == TMQ_CONSUMER_STATUS__RECOVER) {
@@ -1603,7 +1640,7 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
}
} else {
- tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId,
+ tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId,
head->epoch, epoch);
}
@@ -1673,7 +1710,7 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg,
return pRspObj;
}
-SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg, int64_t* numOfRows) {
SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
pRspObj->resType = RES_TYPE__TMQ_METADATA;
tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
@@ -1688,6 +1725,13 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
}
+ // extract the rows in this data packet
+ for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
+ int64_t rows = htobe64(pRetrieve->numOfRows);
+ pVg->numOfRows += rows;
+ (*numOfRows) += rows;
+ }
return pRspObj;
}
@@ -1745,7 +1789,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
sendInfo->msgType = TDMT_VND_TMQ_CONSUME;
int64_t transporterId = 0;
- char offsetFormatBuf[80];
+ char offsetFormatBuf[TSDB_OFFSET_LEN];
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId,
@@ -1882,8 +1926,8 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pVg->offsetInfo.walVerEnd = pDataRsp->head.walever;
pVg->receivedInfoFromVnode = true;
- char buf[80];
- tFormatOffset(buf, 80, &pDataRsp->rspOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset);
if (pDataRsp->blockNum == 0) {
tscDebug("consumer:0x%" PRIx64 " empty block received, vgId:%d, offset:%s, vg total:%" PRId64
" total:%" PRId64 " reqId:0x%" PRIx64,
@@ -1985,13 +2029,13 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
if (pollRspWrapper->taosxRsp.createTableNum == 0) {
pRsp = tmqBuildRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
} else {
- pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
+ pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
}
tmq->totalRows += numOfRows;
- char buf[80];
- tFormatOffset(buf, 80, &pVg->offsetInfo.currentOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
", vg total:%" PRId64 " total:%" PRId64 " reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows,
@@ -2024,12 +2068,12 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
void* rspObj;
int64_t startTime = taosGetTimestampMs();
- tscDebug("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
+ tscInfo("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
timeout);
// in no topic status, delayed task also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
- tscDebug("consumer:0x%" PRIx64 " poll return since consumer is init", tmq->consumerId);
+ tscInfo("consumer:0x%" PRIx64 " poll return since consumer is init", tmq->consumerId);
taosMsleep(500); // sleep for a while
return NULL;
}
@@ -2041,7 +2085,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
return NULL;
}
- tscDebug("consumer:0x%" PRIx64 " not ready, retry:%d/40 in 500ms", tmq->consumerId, retryCnt);
+ tscInfo("consumer:0x%" PRIx64 " not ready, retry:%d/40 in 500ms", tmq->consumerId, retryCnt);
taosMsleep(500);
}
}
@@ -2050,7 +2094,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
tmqHandleAllDelayedTask(tmq);
if (tmqPollImpl(tmq, timeout) < 0) {
- tscDebug("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId);
+ tscError("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId);
}
rspObj = tmqHandleAllRsp(tmq, timeout, false);
@@ -2058,7 +2102,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
tscDebug("consumer:0x%" PRIx64 " return rsp %p", tmq->consumerId, rspObj);
return (TAOS_RES*)rspObj;
} else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
- tscDebug("consumer:0x%" PRIx64 " return null since no committed offset", tmq->consumerId);
+ tscInfo("consumer:0x%" PRIx64 " return null since no committed offset", tmq->consumerId);
return NULL;
}
@@ -2066,7 +2110,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
int64_t currentTime = taosGetTimestampMs();
int64_t elapsedTime = currentTime - startTime;
if (elapsedTime > timeout) {
- tscDebug("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
+ tscInfo("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
tmq->consumerId, tmq->epoch, startTime, currentTime);
return NULL;
}
@@ -2099,7 +2143,7 @@ static void displayConsumeStatistics(const tmq_t* pTmq) {
}
int32_t tmq_consumer_close(tmq_t* tmq) {
- tscDebug("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status);
+ tscInfo("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status);
displayConsumeStatistics(tmq);
if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
@@ -2110,6 +2154,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
return rsp;
}
}
+ taosSsleep(2); // sleep 2s for hb to send offset and rows to server
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
@@ -2125,7 +2170,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
tmq_list_destroy(lst);
} else {
- tscWarn("consumer:0x%" PRIx64 " not in ready state, close it directly", tmq->consumerId);
+ tscInfo("consumer:0x%" PRIx64 " not in ready state, close it directly", tmq->consumerId);
}
taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
@@ -2388,7 +2433,7 @@ void asyncAskEp(tmq_t* pTmq, __tmq_askep_fn_t askEpFn, void* param) {
sendInfo->msgType = TDMT_MND_TMQ_ASK_EP;
SEpSet epSet = getEpSet_s(&pTmq->pTscObj->pAppInfo->mgmtEp);
- tscDebug("consumer:0x%" PRIx64 " ask ep from mnode, reqId:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
+ tscInfo("consumer:0x%" PRIx64 " ask ep from mnode, reqId:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
int64_t transporterId = 0;
asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
@@ -2411,6 +2456,7 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
// if no more waiting rsp
pParamSet->callbackFn(tmq, pParamSet->code, pParamSet->userParam);
taosMemoryFree(pParamSet);
+ tmq->needReportOffsetRows = true;
taosReleaseRef(tmqMgmt.rsetId, refId);
return 0;
@@ -2608,10 +2654,10 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
sendInfo->msgType = TDMT_VND_TMQ_VG_WALINFO;
int64_t transporterId = 0;
- char offsetFormatBuf[80];
+ char offsetFormatBuf[TSDB_OFFSET_LEN];
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pClientVg->offsetInfo.currentOffset);
- tscDebug("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
+ tscInfo("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
tmq->consumerId, pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo);
}
@@ -2645,10 +2691,10 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
pOffsetInfo->currentOffset.type = TMQ_OFFSET__LOG;
- char offsetBuf[80] = {0};
+ char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffsetInfo->currentOffset);
- tscDebug("vgId:%d offset is update to:%s", p->vgId, offsetBuf);
+ tscInfo("vgId:%d offset is update to:%s", p->vgId, offsetBuf);
pOffsetInfo->walVerBegin = p->begin;
pOffsetInfo->walVerEnd = p->end;
@@ -2727,7 +2773,7 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
SMqRspObj rspObj = {.resType = RES_TYPE__TMQ, .vgId = pVg->vgId};
tstrncpy(rspObj.topic, tname, tListLen(rspObj.topic));
- tscDebug("consumer:0x%" PRIx64 " seek to %" PRId64 " on vgId:%d", tmq->consumerId, offset, pVg->vgId);
+ tscInfo("consumer:0x%" PRIx64 " seek to %" PRId64 " on vgId:%d", tmq->consumerId, offset, pVg->vgId);
SSyncCommitInfo* pInfo = taosMemoryMalloc(sizeof(SSyncCommitInfo));
if (pInfo == NULL) {
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index db0cc78de6..7cd33955c1 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -280,7 +280,7 @@ static const SSysDbTableSchema topicSchema[] = {
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "schema", .bytes = TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "schema", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "meta", .bytes = 4 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "type", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};
@@ -291,6 +291,8 @@ static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "offset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysDbTableSchema vnodesSchema[] = {
@@ -359,6 +361,7 @@ static const SSysDbTableSchema consumerSchema[] = {
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "parameters", .bytes = 64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};
static const SSysDbTableSchema offsetSchema[] = {
@@ -381,6 +384,7 @@ static const SSysDbTableSchema querySchema[] = {
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
+ {.name = "sub_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
{.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 24e978b0ea..79b730721e 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1590,18 +1590,35 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int
int32_t nRows = payloadSize / rowSize;
ASSERT(nRows >= 1);
- // the true value must be less than the value of nRows
- int32_t additional = 0;
+ int32_t numVarCols = 0;
+ int32_t numFixCols = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
- additional += nRows * sizeof(int32_t);
+ ++numVarCols;
} else {
- additional += BitmapLen(nRows);
+ ++numFixCols;
}
}
- int32_t newRows = (payloadSize - additional) / rowSize;
+ // find the data payload whose size is greater than payloadSize
+ int result = -1;
+ int start = 1;
+ int end = nRows;
+ while (start <= end) {
+ int mid = start + (end - start) / 2;
+ //data size + var data type columns offset + fixed data type columns bitmap len
+ int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
+ if (midSize > payloadSize) {
+ result = mid;
+ end = mid - 1;
+ } else {
+ start = mid + 1;
+ }
+ }
+
+ int32_t newRows = (result != -1) ? result - 1 : nRows;
+ // the true value must be less than the value of nRows
ASSERT(newRows <= nRows && newRows >= 1);
return newRows;
@@ -2465,19 +2482,31 @@ _end:
}
char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
- if (stbFullName[0] == 0) {
+ char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
+ if (!pBuf) {
return NULL;
}
+ int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf);
+ if (code != TSDB_CODE_SUCCESS) {
+ taosMemoryFree(pBuf);
+ return NULL;
+ }
+ return pBuf;
+}
+
+int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) {
+ if (stbFullName[0] == 0) {
+ return TSDB_CODE_FAILED;
+ }
SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
if (tags == NULL) {
- return NULL;
+ return TSDB_CODE_FAILED;
}
- void* cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (cname == NULL) {
taosArrayDestroy(tags);
- return NULL;
+ return TSDB_CODE_FAILED;
}
SSmlKv pTag = {.key = "group_id",
@@ -2499,9 +2528,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
taosArrayDestroy(tags);
if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {
- return NULL;
+ return TSDB_CODE_FAILED;
}
- return rname.ctbShortName;
+ return TSDB_CODE_SUCCESS;
}
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index a79351d5cc..22a0a77d6a 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -14,6 +14,7 @@
*/
#define _DEFAULT_SOURCE
+#include "os.h"
#include "tglobal.h"
#include "tconfig.h"
#include "tgrant.h"
@@ -73,6 +74,7 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000;
// mnode
int64_t tsMndSdbWriteDelta = 200;
int64_t tsMndLogRetention = 2000;
+int8_t tsGrant = 1;
bool tsMndSkipGrant = false;
// monitor
@@ -1525,3 +1527,5 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
+
+int8_t taosGranted() { return atomic_load_8(&tsGrant); }
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index ac035e0a2b..4cc6b34ca2 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -224,6 +224,7 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
if (tEncodeI64(pEncoder, desc->stime) < 0) return -1;
if (tEncodeI64(pEncoder, desc->reqRid) < 0) return -1;
if (tEncodeI8(pEncoder, desc->stableQuery) < 0) return -1;
+ if (tEncodeI8(pEncoder, desc->isSubQuery) < 0) return -1;
if (tEncodeCStr(pEncoder, desc->fqdn) < 0) return -1;
if (tEncodeI32(pEncoder, desc->subPlanNum) < 0) return -1;
@@ -291,6 +292,7 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
if (tDecodeI64(pDecoder, &desc.stime) < 0) return -1;
if (tDecodeI64(pDecoder, &desc.reqRid) < 0) return -1;
if (tDecodeI8(pDecoder, (int8_t *)&desc.stableQuery) < 0) return -1;
+ if (tDecodeI8(pDecoder, (int8_t *)&desc.isSubQuery) < 0) return -1;
if (tDecodeCStrTo(pDecoder, desc.fqdn) < 0) return -1;
if (tDecodeI32(pDecoder, &desc.subPlanNum) < 0) return -1;
@@ -5338,6 +5340,15 @@ int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
return 0;
}
+int32_t tDeatroySMqHbReq(SMqHbReq* pReq){
+ for(int i = 0; i < taosArrayGetSize(pReq->topics); i++){
+ TopicOffsetRows* vgs = taosArrayGet(pReq->topics, i);
+ if(vgs) taosArrayDestroy(vgs->offsetRows);
+ }
+ taosArrayDestroy(pReq->topics);
+ return 0;
+}
+
int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -5346,6 +5357,21 @@ int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->epoch) < 0) return -1;
+ int32_t sz = taosArrayGetSize(pReq->topics);
+ if (tEncodeI32(&encoder, sz) < 0) return -1;
+ for (int32_t i = 0; i < sz; ++i) {
+ TopicOffsetRows* vgs = (TopicOffsetRows*)taosArrayGet(pReq->topics, i);
+ if (tEncodeCStr(&encoder, vgs->topicName) < 0) return -1;
+ int32_t szVgs = taosArrayGetSize(vgs->offsetRows);
+ if (tEncodeI32(&encoder, szVgs) < 0) return -1;
+ for (int32_t j = 0; j < szVgs; ++j) {
+ OffsetRows *offRows = taosArrayGet(vgs->offsetRows, j);
+ if (tEncodeI32(&encoder, offRows->vgId) < 0) return -1;
+ if (tEncodeI64(&encoder, offRows->rows) < 0) return -1;
+ if (tEncodeSTqOffsetVal(&encoder, &offRows->offset) < 0) return -1;
+ }
+ }
+
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -5362,7 +5388,28 @@ int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tDecodeI64(&decoder, &pReq->consumerId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->epoch) < 0) return -1;
-
+ int32_t sz = 0;
+ if (tDecodeI32(&decoder, &sz) < 0) return -1;
+ if(sz > 0){
+ pReq->topics = taosArrayInit(sz, sizeof(TopicOffsetRows));
+ if (NULL == pReq->topics) return -1;
+ for (int32_t i = 0; i < sz; ++i) {
+ TopicOffsetRows* data = taosArrayReserve(pReq->topics, 1);
+ tDecodeCStrTo(&decoder, data->topicName);
+ int32_t szVgs = 0;
+ if (tDecodeI32(&decoder, &szVgs) < 0) return -1;
+ if(szVgs > 0){
+ data->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
+ if (NULL == data->offsetRows) return -1;
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows* offRows = taosArrayReserve(data->offsetRows, 1);
+ if (tDecodeI32(&decoder, &offRows->vgId) < 0) return -1;
+ if (tDecodeI64(&decoder, &offRows->rows) < 0) return -1;
+ if (tDecodeSTqOffsetVal(&decoder, &offRows->offset) < 0) return -1;
+ }
+ }
+ }
+ }
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -6122,6 +6169,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
}
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1;
+ if (tEncodeI64(&encoder, pReq->lastTs) < 0) return -1;
tEndEncode(&encoder);
@@ -6207,6 +6255,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1;
+ if (tDecodeI64(&decoder, &pReq->lastTs) < 0) return -1;
tEndDecode(&decoder);
@@ -6273,6 +6322,9 @@ int32_t tDeserializeSMRecoverStreamReq(void *buf, int32_t bufLen, SMRecoverStrea
}
void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
+ if (NULL == pReq) {
+ return;
+ }
taosArrayDestroy(pReq->pTags);
taosMemoryFreeClear(pReq->sql);
taosMemoryFreeClear(pReq->ast);
@@ -7086,15 +7138,15 @@ int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
if (pVal->type == TMQ_OFFSET__RESET_NONE) {
- snprintf(buf, maxLen, "offset(reset to none)");
- } else if (pVal->type == TMQ_OFFSET__RESET_EARLIEAST) {
- snprintf(buf, maxLen, "offset(reset to earlieast)");
+ snprintf(buf, maxLen, "none");
+ } else if (pVal->type == TMQ_OFFSET__RESET_EARLIEST) {
+ snprintf(buf, maxLen, "earliest");
} else if (pVal->type == TMQ_OFFSET__RESET_LATEST) {
- snprintf(buf, maxLen, "offset(reset to latest)");
+ snprintf(buf, maxLen, "latest");
} else if (pVal->type == TMQ_OFFSET__LOG) {
- snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
+ snprintf(buf, maxLen, "log:%" PRId64, pVal->version);
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
- snprintf(buf, maxLen, "offset(snapshot) uid:%" PRId64 " ts:%" PRId64, pVal->uid, pVal->ts);
+ snprintf(buf, maxLen, "snapshot:%" PRId64 "|%" PRId64, pVal->uid, pVal->ts);
} else {
return TSDB_CODE_INVALID_PARA;
}
@@ -7112,7 +7164,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
return pLeft->uid == pRight->uid;
} else {
ASSERT(0);
- /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
+ /*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEST ||*/
/*pLeft->type == TMQ_OFFSET__RESET_LATEST);*/
/*return true;*/
}
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index 7f8f6a48fa..d975eb1cd1 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -163,7 +163,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_TOPIC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_SUBSCRIBE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_ASK_EP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_HB, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_HB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_CGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_CGROUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_TRANS, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 814a155cfb..513cadc814 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -265,6 +265,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
+#if 0
if (pMgmt->pTfs) {
if (tfsDirExistAt(pMgmt->pTfs, path, (SDiskID){0})) {
terrno = TSDB_CODE_VND_DIR_ALREADY_EXIST;
@@ -278,8 +279,9 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
}
+#endif
-if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
+ if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
tFreeSCreateVnodeReq(&req);
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
code = terrno;
@@ -712,6 +714,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_ADD_CHECKINFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_DEL_CHECKINFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_CONSUME, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_CONSUME_PUSH, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_VG_WALINFO, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index 7ecf2fd234..ea46b70693 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -23,10 +23,6 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
SEpSet epSet = {0};
dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
- if (epSet.numOfEps == 1) {
- return;
- }
-
const int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->pCont = rpcMallocCont(contLen);
if (pMsg->pCont == NULL) {
diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h
index 98ef8cd95b..85057e5916 100644
--- a/source/dnode/mgmt/node_util/inc/dmUtil.h
+++ b/source/dnode/mgmt/node_util/inc/dmUtil.h
@@ -105,6 +105,7 @@ typedef struct {
SHashObj *dnodeHash;
TdThreadRwlock lock;
SMsgCb msgCb;
+ bool validMnodeEps;
} SDnodeData;
typedef struct {
diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c
index 45cc4bb711..1564a09035 100644
--- a/source/dnode/mgmt/node_util/src/dmEps.c
+++ b/source/dnode/mgmt/node_util/src/dmEps.c
@@ -288,6 +288,8 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
taosHashPut(pData->dnodeHash, &pDnodeEp->id, sizeof(int32_t), pDnodeEp, sizeof(SDnodeEp));
}
+ pData->validMnodeEps = true;
+
dmPrintEps(pData);
}
@@ -348,6 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) {
}
void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) {
+ if(!pData->validMnodeEps) return;
dmGetMnodeEpSet(pData, pEpSet);
dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse);
for (int32_t i = 0; i < pEpSet->numOfEps; ++i) {
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 82b714e6eb..44bd8f74c8 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -108,7 +108,7 @@ typedef enum {
TRN_STAGE_UNDO_ACTION = 3,
TRN_STAGE_COMMIT = 4,
TRN_STAGE_COMMIT_ACTION = 5,
- TRN_STAGE_FINISHED = 6,
+ TRN_STAGE_FINISH = 6,
TRN_STAGE_PRE_FINISH = 7
} ETrnStage;
@@ -157,6 +157,7 @@ typedef struct {
void* rpcRsp;
int32_t rpcRspLen;
int32_t redoActionPos;
+ SArray* prepareActions;
SArray* redoActions;
SArray* undoActions;
SArray* commitActions;
@@ -550,33 +551,39 @@ typedef struct {
int64_t upTime;
int64_t subscribeTime;
int64_t rebalanceTime;
+
+ int8_t withTbName;
+ int8_t autoCommit;
+ int32_t autoCommitInterval;
+ int32_t resetOffsetCfg;
} SMqConsumerObj;
SMqConsumerObj* tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_LEN]);
void tDeleteSMqConsumerObj(SMqConsumerObj* pConsumer);
int32_t tEncodeSMqConsumerObj(void** buf, const SMqConsumerObj* pConsumer);
-void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer);
+void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer, int8_t sver);
typedef struct {
int32_t vgId;
- char* qmsg; // SubPlanToString
+// char* qmsg; // SubPlanToString
SEpSet epSet;
} SMqVgEp;
SMqVgEp* tCloneSMqVgEp(const SMqVgEp* pVgEp);
void tDeleteSMqVgEp(SMqVgEp* pVgEp);
int32_t tEncodeSMqVgEp(void** buf, const SMqVgEp* pVgEp);
-void* tDecodeSMqVgEp(const void* buf, SMqVgEp* pVgEp);
+void* tDecodeSMqVgEp(const void* buf, SMqVgEp* pVgEp, int8_t sver);
typedef struct {
int64_t consumerId; // -1 for unassigned
SArray* vgs; // SArray
+ SArray* offsetRows; // SArray
} SMqConsumerEp;
-SMqConsumerEp* tCloneSMqConsumerEp(const SMqConsumerEp* pEp);
-void tDeleteSMqConsumerEp(void* pEp);
+//SMqConsumerEp* tCloneSMqConsumerEp(const SMqConsumerEp* pEp);
+//void tDeleteSMqConsumerEp(void* pEp);
int32_t tEncodeSMqConsumerEp(void** buf, const SMqConsumerEp* pEp);
-void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp);
+void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp, int8_t sver);
typedef struct {
char key[TSDB_SUBSCRIBE_KEY_LEN];
@@ -588,14 +595,16 @@ typedef struct {
int64_t stbUid;
SHashObj* consumerHash; // consumerId -> SMqConsumerEp
SArray* unassignedVgs; // SArray
+ SArray* offsetRows;
char dbName[TSDB_DB_FNAME_LEN];
+ char* qmsg; // SubPlanToString
} SMqSubscribeObj;
SMqSubscribeObj* tNewSubscribeObj(const char key[TSDB_SUBSCRIBE_KEY_LEN]);
SMqSubscribeObj* tCloneSubscribeObj(const SMqSubscribeObj* pSub);
void tDeleteSubscribeObj(SMqSubscribeObj* pSub);
int32_t tEncodeSubscribeObj(void** buf, const SMqSubscribeObj* pSub);
-void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub);
+void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub, int8_t sver);
typedef struct {
int32_t epoch;
@@ -687,12 +696,12 @@ int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj, int32_t sver);
void tFreeStreamObj(SStreamObj* pObj);
-typedef struct {
- char streamName[TSDB_STREAM_FNAME_LEN];
- int64_t uid;
- int64_t streamUid;
- SArray* childInfo; // SArray
-} SStreamCheckpointObj;
+//typedef struct {
+// char streamName[TSDB_STREAM_FNAME_LEN];
+// int64_t uid;
+// int64_t streamUid;
+// SArray* childInfo; // SArray
+//} SStreamCheckpointObj;
#ifdef __cplusplus
}
diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h
index 03434573c4..625546aa55 100644
--- a/source/dnode/mnode/impl/inc/mndTrans.h
+++ b/source/dnode/mnode/impl/inc/mndTrans.h
@@ -70,6 +70,7 @@ int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendNullLog(STrans *pTrans);
+int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
@@ -78,15 +79,23 @@ void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbnam
void mndTransSetSerial(STrans *pTrans);
void mndTransSetParallel(STrans *pTrans);
void mndTransSetOper(STrans *pTrans, EOperType oper);
-int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans);
-
+int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans);
+static int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
+ return mndTransCheckConflict(pMnode, pTrans);
+}
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
int32_t mndTransProcessRsp(SRpcMsg *pRsp);
void mndTransPullup(SMnode *pMnode);
int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans);
-void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader);
+void mndTransExecute(SMnode *pMnode, STrans *pTrans);
+void mndTransRefresh(SMnode *pMnode, STrans *pTrans);
int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, const char *dbname);
+SSdbRaw *mndTransEncode(STrans *pTrans);
+SSdbRow *mndTransDecode(SSdbRaw *pRaw);
+void mndTransDropData(STrans *pTrans);
+
+bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/inc/mndVgroup.h b/source/dnode/mnode/impl/inc/mndVgroup.h
index 0cd1228f25..7c2f8b5b65 100644
--- a/source/dnode/mnode/impl/inc/mndVgroup.h
+++ b/source/dnode/mnode/impl/inc/mndVgroup.h
@@ -27,6 +27,7 @@ void mndCleanupVgroup(SMnode *pMnode);
SVgObj *mndAcquireVgroup(SMnode *pMnode, int32_t vgId);
void mndReleaseVgroup(SMnode *pMnode, SVgObj *pVgroup);
SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup);
+SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw);
SEpSet mndGetVgroupEpset(SMnode *pMnode, const SVgObj *pVgroup);
int32_t mndGetVnodesNum(SMnode *pMnode, int32_t dnodeId);
void mndSortVnodeGid(SVgObj *pVgroup);
@@ -36,6 +37,7 @@ int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup);
SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId);
int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups);
+int32_t mndAddPrepareNewVgAction(SMnode *, STrans *pTrans, SVgObj *pVg);
int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid);
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 117c1082a5..4dded61ce3 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -23,7 +23,7 @@
#include "tcompare.h"
#include "tname.h"
-#define MND_CONSUMER_VER_NUMBER 1
+#define MND_CONSUMER_VER_NUMBER 2
#define MND_CONSUMER_RESERVE_SIZE 64
#define MND_CONSUMER_LOST_HB_CNT 6
@@ -391,12 +391,13 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
}
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
+ int32_t code = 0;
SMnode *pMnode = pMsg->info.node;
SMqHbReq req = {0};
- if (tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
+ if ((code = tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
+ goto end;
}
int64_t consumerId = req.consumerId;
@@ -404,7 +405,8 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
if (pConsumer == NULL) {
mError("consumer:0x%" PRIx64 " not exist", consumerId);
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
- return -1;
+ code = -1;
+ goto end;
}
atomic_store_32(&pConsumer->hbStatus, 0);
@@ -424,9 +426,28 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg);
}
+ for(int i = 0; i < taosArrayGetSize(req.topics); i++){
+ TopicOffsetRows* data = taosArrayGet(req.topics, i);
+ mDebug("heartbeat report offset rows.%s:%s", pConsumer->cgroup, data->topicName);
+
+ SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName);
+ taosWLockLatch(&pSub->lock);
+ SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &consumerId, sizeof(int64_t));
+ if(pConsumerEp){
+ taosArrayDestroy(pConsumerEp->offsetRows);
+ pConsumerEp->offsetRows = data->offsetRows;
+ data->offsetRows = NULL;
+ }
+ taosWUnLockLatch(&pSub->lock);
+
+ mndReleaseSubscribe(pMnode, pSub);
+ }
+
mndReleaseConsumer(pMnode, pConsumer);
- return 0;
+end:
+ tDeatroySMqHbReq(&req);
+ return code;
}
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
@@ -644,7 +665,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
SCMSubscribeReq subscribe = {0};
tDeserializeSCMSubscribeReq(msgStr, &subscribe);
- uint64_t consumerId = subscribe.consumerId;
+ int64_t consumerId = subscribe.consumerId;
char *cgroup = subscribe.cgroup;
SMqConsumerObj *pExistedConsumer = NULL;
SMqConsumerObj *pConsumerNew = NULL;
@@ -675,6 +696,11 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup);
tstrncpy(pConsumerNew->clientId, subscribe.clientId, tListLen(pConsumerNew->clientId));
+ pConsumerNew->withTbName = subscribe.withTbName;
+ pConsumerNew->autoCommit = subscribe.autoCommit;
+ pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval;
+ pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg;
+
// set the update type
pConsumerNew->updateType = CONSUMER_UPDATE__REBALANCE;
taosArrayDestroy(pConsumerNew->assignedTopics);
@@ -822,7 +848,7 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw) {
goto CM_DECODE_OVER;
}
- if (sver != MND_CONSUMER_VER_NUMBER) {
+ if (sver < 1 || sver > MND_CONSUMER_VER_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto CM_DECODE_OVER;
}
@@ -849,7 +875,7 @@ SSdbRow *mndConsumerActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, buf, len, CM_DECODE_OVER);
SDB_GET_RESERVE(pRaw, dataPos, MND_CONSUMER_RESERVE_SIZE, CM_DECODE_OVER);
- if (tDecodeSMqConsumerObj(buf, pConsumer) == NULL) {
+ if (tDecodeSMqConsumerObj(buf, pConsumer, sver) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY; // TODO set correct error code
goto CM_DECODE_OVER;
}
@@ -1159,6 +1185,17 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->rebalanceTime, pConsumer->rebalanceTime == 0);
+ char buf[TSDB_OFFSET_LEN] = {0};
+ STqOffsetVal pVal = {.type = pConsumer->resetOffsetCfg};
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal);
+
+ char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0};
+ sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%d,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf);
+ varDataSetLen(parasStr, strlen(varDataVal(parasStr)));
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)parasStr, false);
+
numOfRows++;
}
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 1a6b401918..47619f89ce 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -414,6 +414,13 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
}
+static int32_t mndSetPrepareNewVgActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
+ for (int32_t v = 0; v < pDb->cfg.numOfVgroups; ++v) {
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, (pVgroups + v)) != 0) return -1;
+ }
+ return 0;
+}
+
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
if (pDbRaw == NULL) return -1;
@@ -424,7 +431,7 @@ static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroups + v);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
- if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_UPDATE) != 0) return -1;
}
return 0;
@@ -589,9 +596,10 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
mInfo("trans:%d, used to create db:%s", pTrans->id, pCreate->db);
mndTransSetDbName(pTrans, dbObj.name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
+ if (mndSetPrepareNewVgActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups, pNewUserDuped) != 0) goto _OVER;
@@ -832,7 +840,7 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p
int32_t code = -1;
mndTransSetDbName(pTrans, pOld->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
@@ -1129,7 +1137,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
mInfo("trans:%d start to drop db:%s", pTrans->id, pDb->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
goto _OVER;
}
diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c
index 6dab018236..09c4053f93 100644
--- a/source/dnode/mnode/impl/src/mndDef.c
+++ b/source/dnode/mnode/impl/src/mndDef.c
@@ -187,14 +187,14 @@ SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
if (pVgEpNew == NULL) return NULL;
pVgEpNew->vgId = pVgEp->vgId;
- pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
+// pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
pVgEpNew->epSet = pVgEp->epSet;
return pVgEpNew;
}
void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
if (pVgEp) {
- taosMemoryFreeClear(pVgEp->qmsg);
+// taosMemoryFreeClear(pVgEp->qmsg);
taosMemoryFree(pVgEp);
}
}
@@ -202,14 +202,18 @@ void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
- tlen += taosEncodeString(buf, pVgEp->qmsg);
+// tlen += taosEncodeString(buf, pVgEp->qmsg);
tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
return tlen;
}
-void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp) {
+void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) {
buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
- buf = taosDecodeString(buf, &pVgEp->qmsg);
+ if(sver == 1){
+ uint64_t size = 0;
+ buf = taosDecodeVariantU64(buf, &size);
+ buf = POINTER_SHIFT(buf, size);
+ }
buf = taosDecodeSEpSet(buf, &pVgEp->epSet);
return (void *)buf;
}
@@ -321,10 +325,14 @@ int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) {
tlen += taosEncodeFixedI32(buf, 0);
}
+ tlen += taosEncodeFixedI8(buf, pConsumer->withTbName);
+ tlen += taosEncodeFixedI8(buf, pConsumer->autoCommit);
+ tlen += taosEncodeFixedI32(buf, pConsumer->autoCommitInterval);
+ tlen += taosEncodeFixedI32(buf, pConsumer->resetOffsetCfg);
return tlen;
}
-void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer) {
+void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t sver) {
int32_t sz;
buf = taosDecodeFixedI64(buf, &pConsumer->consumerId);
buf = taosDecodeStringTo(buf, pConsumer->clientId);
@@ -375,50 +383,94 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer) {
taosArrayPush(pConsumer->assignedTopics, &topic);
}
+ if(sver > 1){
+ buf = taosDecodeFixedI8(buf, &pConsumer->withTbName);
+ buf = taosDecodeFixedI8(buf, &pConsumer->autoCommit);
+ buf = taosDecodeFixedI32(buf, &pConsumer->autoCommitInterval);
+ buf = taosDecodeFixedI32(buf, &pConsumer->resetOffsetCfg);
+ }
return (void *)buf;
}
-SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
- SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
- if (pConsumerEpNew == NULL) return NULL;
- pConsumerEpNew->consumerId = pConsumerEpOld->consumerId;
- pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, (__array_item_dup_fn_t)tCloneSMqVgEp);
- return pConsumerEpNew;
-}
-
-void tDeleteSMqConsumerEp(void *data) {
- SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data;
- taosArrayDestroyP(pConsumerEp->vgs, (FDelete)tDeleteSMqVgEp);
-}
+//SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
+// SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp));
+// if (pConsumerEpNew == NULL) return NULL;
+// pConsumerEpNew->consumerId = pConsumerEpOld->consumerId;
+// pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, NULL);
+// return pConsumerEpNew;
+//}
+//
+//void tDeleteSMqConsumerEp(void *data) {
+// SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data;
+// taosArrayDestroy(pConsumerEp->vgs);
+//}
int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pConsumerEp->consumerId);
tlen += taosEncodeArray(buf, pConsumerEp->vgs, (FEncode)tEncodeSMqVgEp);
-#if 0
- int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
- tlen += taosEncodeFixedI32(buf, sz);
- for (int32_t i = 0; i < sz; i++) {
- SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
- tlen += tEncodeSMqVgEp(buf, pVgEp);
+ int32_t szVgs = taosArrayGetSize(pConsumerEp->offsetRows);
+ tlen += taosEncodeFixedI32(buf, szVgs);
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows *offRows = taosArrayGet(pConsumerEp->offsetRows, j);
+ tlen += taosEncodeFixedI32(buf, offRows->vgId);
+ tlen += taosEncodeFixedI64(buf, offRows->rows);
+ tlen += taosEncodeFixedI8(buf, offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.uid);
+ tlen += taosEncodeFixedI64(buf, offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.version);
+ } else {
+ // do nothing
+ }
}
-#endif
+//#if 0
+// int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
+// tlen += taosEncodeFixedI32(buf, sz);
+// for (int32_t i = 0; i < sz; i++) {
+// SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i);
+// tlen += tEncodeSMqVgEp(buf, pVgEp);
+// }
+//#endif
return tlen;
}
-void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) {
+void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t sver) {
buf = taosDecodeFixedI64(buf, &pConsumerEp->consumerId);
- buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp));
-#if 0
- int32_t sz;
- buf = taosDecodeFixedI32(buf, &sz);
- pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *));
- for (int32_t i = 0; i < sz; i++) {
- SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
- buf = tDecodeSMqVgEp(buf, pVgEp);
- taosArrayPush(pConsumerEp->vgs, &pVgEp);
+ buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
+ if (sver > 1){
+ int32_t szVgs = 0;
+ buf = taosDecodeFixedI32(buf, &szVgs);
+ if(szVgs > 0){
+ pConsumerEp->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
+ if (NULL == pConsumerEp->offsetRows) return NULL;
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows* offRows = taosArrayReserve(pConsumerEp->offsetRows, 1);
+ buf = taosDecodeFixedI32(buf, &offRows->vgId);
+ buf = taosDecodeFixedI64(buf, &offRows->rows);
+ buf = taosDecodeFixedI8(buf, &offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.uid);
+ buf = taosDecodeFixedI64(buf, &offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.version);
+ } else {
+ // do nothing
+ }
+ }
+ }
}
-#endif
+//#if 0
+// int32_t sz;
+// buf = taosDecodeFixedI32(buf, &sz);
+// pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *));
+// for (int32_t i = 0; i < sz; i++) {
+// SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
+// buf = tDecodeSMqVgEp(buf, pVgEp);
+// taosArrayPush(pConsumerEp->vgs, &pVgEp);
+// }
+//#endif
return (void *)buf;
}
@@ -468,7 +520,9 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {
taosHashPut(pSubNew->consumerHash, &newEp.consumerId, sizeof(int64_t), &newEp, sizeof(SMqConsumerEp));
}
pSubNew->unassignedVgs = taosArrayDup(pSub->unassignedVgs, (__array_item_dup_fn_t)tCloneSMqVgEp);
+ pSubNew->offsetRows = taosArrayDup(pSub->offsetRows, NULL);
memcpy(pSubNew->dbName, pSub->dbName, TSDB_DB_FNAME_LEN);
+ pSubNew->qmsg = taosStrdup(pSub->qmsg);
return pSubNew;
}
@@ -479,9 +533,12 @@ void tDeleteSubscribeObj(SMqSubscribeObj *pSub) {
if (pIter == NULL) break;
SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
taosArrayDestroyP(pConsumerEp->vgs, (FDelete)tDeleteSMqVgEp);
+ taosArrayDestroy(pConsumerEp->offsetRows);
}
taosHashCleanup(pSub->consumerHash);
taosArrayDestroyP(pSub->unassignedVgs, (FDelete)tDeleteSMqVgEp);
+ taosMemoryFreeClear(pSub->qmsg);
+ taosArrayDestroy(pSub->offsetRows);
}
int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
@@ -508,10 +565,28 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
if (cnt != sz) return -1;
tlen += taosEncodeArray(buf, pSub->unassignedVgs, (FEncode)tEncodeSMqVgEp);
tlen += taosEncodeString(buf, pSub->dbName);
+
+ int32_t szVgs = taosArrayGetSize(pSub->offsetRows);
+ tlen += taosEncodeFixedI32(buf, szVgs);
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows *offRows = taosArrayGet(pSub->offsetRows, j);
+ tlen += taosEncodeFixedI32(buf, offRows->vgId);
+ tlen += taosEncodeFixedI64(buf, offRows->rows);
+ tlen += taosEncodeFixedI8(buf, offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.uid);
+ tlen += taosEncodeFixedI64(buf, offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ tlen += taosEncodeFixedI64(buf, offRows->offset.version);
+ } else {
+ // do nothing
+ }
+ }
+ tlen += taosEncodeString(buf, pSub->qmsg);
return tlen;
}
-void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
+void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) {
//
buf = taosDecodeStringTo(buf, pSub->key);
buf = taosDecodeFixedI64(buf, &pSub->dbUid);
@@ -526,74 +601,98 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
pSub->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
for (int32_t i = 0; i < sz; i++) {
SMqConsumerEp consumerEp = {0};
- buf = tDecodeSMqConsumerEp(buf, &consumerEp);
+ buf = tDecodeSMqConsumerEp(buf, &consumerEp, sver);
taosHashPut(pSub->consumerHash, &consumerEp.consumerId, sizeof(int64_t), &consumerEp, sizeof(SMqConsumerEp));
}
- buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp));
+ buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
buf = taosDecodeStringTo(buf, pSub->dbName);
+
+ if (sver > 1){
+ int32_t szVgs = 0;
+ buf = taosDecodeFixedI32(buf, &szVgs);
+ if(szVgs > 0){
+ pSub->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
+ if (NULL == pSub->offsetRows) return NULL;
+ for (int32_t j= 0; j < szVgs; ++j) {
+ OffsetRows* offRows = taosArrayReserve(pSub->offsetRows, 1);
+ buf = taosDecodeFixedI32(buf, &offRows->vgId);
+ buf = taosDecodeFixedI64(buf, &offRows->rows);
+ buf = taosDecodeFixedI8(buf, &offRows->offset.type);
+ if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.uid);
+ buf = taosDecodeFixedI64(buf, &offRows->offset.ts);
+ } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
+ buf = taosDecodeFixedI64(buf, &offRows->offset.version);
+ } else {
+ // do nothing
+ }
+ }
+ }
+ buf = taosDecodeString(buf, &pSub->qmsg);
+ }
return (void *)buf;
}
-SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
- SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
- if (pEntryNew == NULL) return NULL;
- pEntryNew->epoch = pEntry->epoch;
- pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
- return pEntryNew;
-}
+//SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
+// SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
+// if (pEntryNew == NULL) return NULL;
+// pEntryNew->epoch = pEntry->epoch;
+// pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
+// return pEntryNew;
+//}
+//
+//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
+// taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
+//}
-void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
- taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
-}
+//int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
+// int32_t tlen = 0;
+// tlen += taosEncodeFixedI32(buf, pEntry->epoch);
+// tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
+// return tlen;
+//}
+//
+//void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
+// buf = taosDecodeFixedI32(buf, &pEntry->epoch);
+// buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
+// return (void *)buf;
+//}
-int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI32(buf, pEntry->epoch);
- tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
- return tlen;
-}
+//SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
+// SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
+// if (pLogNew == NULL) return pLogNew;
+// memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
+// pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
+// return pLogNew;
+//}
+//
+//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
+// taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
+//}
-void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
- buf = taosDecodeFixedI32(buf, &pEntry->epoch);
- buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
- return (void *)buf;
-}
-
-SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
- SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
- if (pLogNew == NULL) return pLogNew;
- memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
- pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
- return pLogNew;
-}
-
-void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
- taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
-}
-
-int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
- int32_t tlen = 0;
- tlen += taosEncodeString(buf, pLog->key);
- tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
- return tlen;
-}
-
-void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
- buf = taosDecodeStringTo(buf, pLog->key);
- buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
- return (void *)buf;
-}
-
-int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
- int32_t tlen = 0;
- tlen += taosEncodeString(buf, pOffset->key);
- tlen += taosEncodeFixedI64(buf, pOffset->offset);
- return tlen;
-}
-
-void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
- buf = taosDecodeStringTo(buf, pOffset->key);
- buf = taosDecodeFixedI64(buf, &pOffset->offset);
- return buf;
-}
+//int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
+// int32_t tlen = 0;
+// tlen += taosEncodeString(buf, pLog->key);
+// tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
+// return tlen;
+//}
+//
+//void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
+// buf = taosDecodeStringTo(buf, pLog->key);
+// buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
+// return (void *)buf;
+//}
+//
+//int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
+// int32_t tlen = 0;
+// tlen += taosEncodeString(buf, pOffset->key);
+// tlen += taosEncodeFixedI64(buf, pOffset->offset);
+// return tlen;
+//}
+//
+//void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
+// buf = taosDecodeStringTo(buf, pOffset->key);
+// buf = taosDecodeFixedI64(buf, &pOffset->offset);
+// return buf;
+//}
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 73dbb243a1..bb92bfb4c7 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -632,7 +632,7 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC
pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq, "create-dnode");
if (pTrans == NULL) goto _OVER;
mInfo("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
pRaw = mndDnodeActionEncode(&dnodeObj);
if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
@@ -889,7 +889,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to drop dnode:%d, force:%d", pTrans->id, pDnode->id, force);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
pRaw = mndDnodeActionEncode(pDnode);
if (pRaw == NULL) goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c
index 83172acf64..2d2637b8ce 100644
--- a/source/dnode/mnode/impl/src/mndIndex.c
+++ b/source/dnode/mnode/impl/src/mndIndex.c
@@ -645,7 +645,7 @@ int32_t mndAddIndexImpl(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pSt
// mInfo("trans:%d, used to add index to stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
@@ -721,7 +721,7 @@ static int32_t mndDropIdx(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SIdxObj *p
mInfo("trans:%d, used to drop idx:%s", pTrans->id, pIdx->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
if (mndSetDropIdxRedoLogs(pMnode, pTrans, pIdx) != 0) goto _OVER;
@@ -860,4 +860,4 @@ int32_t mndDropIdxsByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
}
return 0;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index d0b10a5768..91fe1257d2 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -578,7 +578,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
SMnodeObj mnodeObj = {0};
mnodeObj.id = pDnode->id;
@@ -732,7 +732,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj, false) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index a1d815189c..06bb46772a 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -227,6 +227,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
}
if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) {
+ mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, version);
terrno = code;
goto _OVER;
}
@@ -834,6 +835,9 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->stableQuery, false);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->isSubQuery, false);
+
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pQuery->subPlanNum, false);
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 64082536da..9a611fe46a 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -570,25 +570,21 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
mDebug("init subscription %s for topic:%s assign vgId:%d", pSub->key, pTopic->name, pVgEp->vgId);
- if (pSubplan) {
- int32_t msgLen;
-
- pSubplan->execNode.epSet = pVgEp->epSet;
- pSubplan->execNode.nodeId = pVgEp->vgId;
-
- if (qSubPlanToString(pSubplan, &pVgEp->qmsg, &msgLen) < 0) {
- sdbRelease(pSdb, pVgroup);
- qDestroyQueryPlan(pPlan);
- terrno = TSDB_CODE_QRY_INVALID_INPUT;
- return -1;
- }
- } else {
- pVgEp->qmsg = taosStrdup("");
- }
-
sdbRelease(pSdb, pVgroup);
}
+ if (pSubplan) {
+ int32_t msgLen;
+
+ if (qSubPlanToString(pSubplan, &pSub->qmsg, &msgLen) < 0) {
+ qDestroyQueryPlan(pPlan);
+ terrno = TSDB_CODE_QRY_INVALID_INPUT;
+ return -1;
+ }
+ } else {
+ pSub->qmsg = taosStrdup("");
+ }
+
qDestroyQueryPlan(pPlan);
return 0;
}
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 42ad9e24d5..c337d85b68 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -388,7 +388,7 @@ static int32_t mndSetCreateSmaVgroupRedoLogs(SMnode *pMnode, STrans *pTrans, SVg
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) return -1;
- if (sdbSetRawStatus(pVgRaw, SDB_STATUS_CREATING) != 0) return -1;
+ if (sdbSetRawStatus(pVgRaw, SDB_STATUS_UPDATE) != 0) return -1;
return 0;
}
@@ -622,11 +622,11 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-sma");
if (pTrans == NULL) goto _OVER;
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to create sma:%s stream:%s", pTrans->id, pCreate->name, streamObj.name);
-
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
@@ -845,7 +845,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
mInfo("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetSerial(pTrans);
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index d58b9fd4bf..1ed42a46da 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -874,7 +874,7 @@ _OVER:
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) return -1;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) return -1;
if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1;
@@ -1968,7 +1968,7 @@ static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbOb
mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (needRsp) {
void *pCont = NULL;
@@ -1998,7 +1998,7 @@ static int32_t mndAlterStbAndUpdateTagIdxImp(SMnode *pMnode, SRpcMsg *pReq, SDbO
mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (needRsp) {
void *pCont = NULL;
@@ -2242,7 +2242,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p
mInfo("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
@@ -3159,8 +3159,14 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
SSdb *pSdb = pMnode->pSdb;
SStbObj *pStb = NULL;
- int32_t numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
- mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db);
+
+ int32_t numOfRows = 0;
+ if (!pShow->sysDbRsp) {
+ numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
+ mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db);
+ pShow->sysDbRsp = true;
+ }
+
SDbObj *pDb = NULL;
if (strlen(pShow->db) > 0) {
pDb = mndAcquireDb(pMnode, pShow->db);
@@ -3298,7 +3304,7 @@ static int32_t mndCheckIndexReq(SCreateTagIndexReq *pReq) {
mInfo("trans:%d, used to add index to stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
if (mndSetAlterStbRedoLogs(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
if (mndSetAlterStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 39a1fa223f..60678f1a34 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -735,7 +735,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name);
mndTransSetDbName(pTrans, createStreamReq.sourceDB, streamObj.targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndTransDrop(pTrans);
goto _OVER;
}
@@ -890,7 +890,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "stream-checkpoint");
if (pTrans == NULL) return -1;
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndReleaseStream(pMnode, pStream);
mndTransDrop(pTrans);
return -1;
@@ -1001,7 +1001,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to drop stream:%s", pTrans->id, dropReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
@@ -1369,7 +1369,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
@@ -1477,7 +1477,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);
mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
sdbRelease(pMnode->pSdb, pStream);
mndTransDrop(pTrans);
return -1;
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 74421afa33..61691a30d5 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -24,7 +24,7 @@
#include "tcompare.h"
#include "tname.h"
-#define MND_SUBSCRIBE_VER_NUMBER 1
+#define MND_SUBSCRIBE_VER_NUMBER 2
#define MND_SUBSCRIBE_RESERVE_SIZE 64
#define MND_SUBSCRIBE_REBALANCE_CNT 3
@@ -99,13 +99,23 @@ static SMqSubscribeObj *mndCreateSubscription(SMnode *pMnode, const SMqTopicObj
return pSub;
}
-static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscribeObj *pSub,
- const SMqRebOutputVg *pRebVg) {
+static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj *pSub,
+ const SMqRebOutputVg *pRebVg, SSubplan* pPlan) {
SMqRebVgReq req = {0};
req.oldConsumerId = pRebVg->oldConsumerId;
req.newConsumerId = pRebVg->newConsumerId;
req.vgId = pRebVg->pVgEp->vgId;
- req.qmsg = pRebVg->pVgEp->qmsg;
+ if(pPlan){
+ pPlan->execNode.epSet = pRebVg->pVgEp->epSet;
+ pPlan->execNode.nodeId = pRebVg->pVgEp->vgId;
+ int32_t msgLen;
+ if (qSubPlanToString(pPlan, &req.qmsg, &msgLen) < 0) {
+ terrno = TSDB_CODE_QRY_INVALID_INPUT;
+ return -1;
+ }
+ }else{
+ req.qmsg = taosStrdup("");
+ }
req.subType = pSub->subType;
req.withMeta = pSub->withMeta;
req.suid = pSub->stbUid;
@@ -115,6 +125,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
int32_t ret = 0;
tEncodeSize(tEncodeSMqRebVgReq, &req, tlen, ret);
if (ret < 0) {
+ taosMemoryFree(req.qmsg);
return -1;
}
@@ -122,6 +133,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
void *buf = taosMemoryMalloc(tlen);
if (buf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(req.qmsg);
return -1;
}
@@ -135,17 +147,19 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
if (tEncodeSMqRebVgReq(&encoder, &req) < 0) {
taosMemoryFreeClear(buf);
tEncoderClear(&encoder);
+ taosMemoryFree(req.qmsg);
return -1;
}
tEncoderClear(&encoder);
*pBuf = buf;
*pLen = tlen;
+ taosMemoryFree(req.qmsg);
return 0;
}
-static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SMqSubscribeObj *pSub,
- const SMqRebOutputVg *pRebVg) {
+static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub,
+ const SMqRebOutputVg *pRebVg, SSubplan* pPlan) {
// if (pRebVg->oldConsumerId == pRebVg->newConsumerId) {
// terrno = TSDB_CODE_MND_INVALID_SUB_OPTION;
// return -1;
@@ -153,7 +167,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM
void *buf;
int32_t tlen;
- if (mndBuildSubChangeReq(&buf, &tlen, pSub, pRebVg) < 0) {
+ if (mndBuildSubChangeReq(&buf, &tlen, pSub, pRebVg, pPlan) < 0) {
return -1;
}
@@ -255,7 +269,7 @@ static void doAddNewConsumers(SMqRebOutputObj *pOutput, const SMqRebInputObj *pI
for (int32_t i = 0; i < numOfNewConsumers; i++) {
int64_t consumerId = *(int64_t *)taosArrayGet(pInput->pRebInfo->newConsumers, i);
- SMqConsumerEp newConsumerEp;
+ SMqConsumerEp newConsumerEp = {0};
newConsumerEp.consumerId = consumerId;
newConsumerEp.vgs = taosArrayInit(0, sizeof(void *));
@@ -449,8 +463,44 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
SMqRebOutputVg* pRebOutput = (SMqRebOutputVg *)pRemovedIter;
taosArrayPush(pOutput->rebVgs, pRebOutput);
- if(taosHashGetSize(pOutput->pSub->consumerHash) == 0){ // if all consumer is removed, put all vg into unassigned
- taosArrayPush(pOutput->pSub->unassignedVgs, &pRebOutput->pVgEp);
+ if(taosHashGetSize(pOutput->pSub->consumerHash) == 0){ // if all consumer is removed
+ taosArrayPush(pOutput->pSub->unassignedVgs, &pRebOutput->pVgEp); // put all vg into unassigned
+ }
+ }
+
+ if(taosHashGetSize(pOutput->pSub->consumerHash) == 0) { // if all consumer is removed
+ SMqSubscribeObj *pSub = mndAcquireSubscribeByKey(pMnode, pInput->pRebInfo->key); // put all offset rows
+ if (pSub) {
+ taosRLockLatch(&pSub->lock);
+ bool init = false;
+ if (pOutput->pSub->offsetRows == NULL) {
+ pOutput->pSub->offsetRows = taosArrayInit(4, sizeof(OffsetRows));
+ init = true;
+ }
+ pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pSub->consumerHash, pIter);
+ if (pIter == NULL) break;
+ SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
+ if (init) {
+ taosArrayAddAll(pOutput->pSub->offsetRows, pConsumerEp->offsetRows);
+// mDebug("pSub->offsetRows is init");
+ } else {
+ for (int j = 0; j < taosArrayGetSize(pConsumerEp->offsetRows); j++) {
+ OffsetRows *d1 = taosArrayGet(pConsumerEp->offsetRows, j);
+ for (int i = 0; i < taosArrayGetSize(pOutput->pSub->offsetRows); i++) {
+ OffsetRows *d2 = taosArrayGet(pOutput->pSub->offsetRows, i);
+ if (d1->vgId == d2->vgId) {
+ d2->rows += d1->rows;
+ d2->offset = d1->offset;
+// mDebug("pSub->offsetRows add vgId:%d, after:%"PRId64", before:%"PRId64, d2->vgId, d2->rows, d1->rows);
+ }
+ }
+ }
+ }
+ }
+ taosRUnLockLatch(&pSub->lock);
+ mndReleaseSubscribe(pMnode, pSub);
}
}
@@ -483,14 +533,25 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
}
static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) {
+ struct SSubplan* pPlan = NULL;
+ if(strcmp(pOutput->pSub->qmsg, "") != 0){
+ int32_t code = qStringToSubplan(pOutput->pSub->qmsg, &pPlan);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ return -1;
+ }
+ }
+
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg, "tmq-reb");
if (pTrans == NULL) {
+ nodesDestroyNode((SNode*)pPlan);
return -1;
}
mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndTransDrop(pTrans);
+ nodesDestroyNode((SNode*)pPlan);
return -1;
}
@@ -500,11 +561,13 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
int32_t vgNum = taosArrayGetSize(rebVgs);
for (int32_t i = 0; i < vgNum; i++) {
SMqRebOutputVg *pRebVg = taosArrayGet(rebVgs, i);
- if (mndPersistSubChangeVgReq(pMnode, pTrans, pOutput->pSub, pRebVg) < 0) {
+ if (mndPersistSubChangeVgReq(pMnode, pTrans, pOutput->pSub, pRebVg, pPlan) < 0) {
mndTransDrop(pTrans);
+ nodesDestroyNode((SNode*)pPlan);
return -1;
}
}
+ nodesDestroyNode((SNode*)pPlan);
// 2. redo log: subscribe and vg assignment
// subscribe
@@ -809,7 +872,7 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) {
int8_t sver = 0;
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto SUB_DECODE_OVER;
- if (sver != MND_SUBSCRIBE_VER_NUMBER) {
+ if (sver > MND_SUBSCRIBE_VER_NUMBER || sver < 1) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto SUB_DECODE_OVER;
}
@@ -828,7 +891,7 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, buf, tlen, SUB_DECODE_OVER);
SDB_GET_RESERVE(pRaw, dataPos, MND_SUBSCRIBE_RESERVE_SIZE, SUB_DECODE_OVER);
- if (tDecodeSubscribeObj(buf, pSub) == NULL) {
+ if (tDecodeSubscribeObj(buf, pSub, sver) == NULL) {
goto SUB_DECODE_OVER;
}
@@ -890,6 +953,10 @@ static int32_t mndSubActionUpdate(SSdb *pSdb, SMqSubscribeObj *pOldSub, SMqSubsc
pOldSub->unassignedVgs = pNewSub->unassignedVgs;
pNewSub->unassignedVgs = tmp1;
+ SArray *tmp2 = pOldSub->offsetRows;
+ pOldSub->offsetRows = pNewSub->offsetRows;
+ pNewSub->offsetRows = tmp2;
+
taosWUnLockLatch(&pOldSub->lock);
return 0;
}
@@ -1028,6 +1095,61 @@ END:
return code;
}
+static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t consumerId, const char* topic, const char* cgroup, SArray* vgs, SArray *offsetRows){
+ int32_t sz = taosArrayGetSize(vgs);
+ for (int32_t j = 0; j < sz; j++) {
+ SMqVgEp *pVgEp = taosArrayGetP(vgs, j);
+
+ SColumnInfoData *pColInfo;
+ int32_t cols = 0;
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)topic, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)cgroup, false);
+
+ // vg id
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)&pVgEp->vgId, false);
+
+ // consumer id
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)&consumerId, consumerId == -1);
+
+ mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
+ consumerId, varDataVal(cgroup), pVgEp->vgId);
+
+ // offset
+ OffsetRows *data = NULL;
+ for(int i = 0; i < taosArrayGetSize(offsetRows); i++){
+ OffsetRows *tmp = taosArrayGet(offsetRows, i);
+ if(tmp->vgId != pVgEp->vgId){
+ continue;
+ }
+ data = tmp;
+ }
+ if(data){
+ // vg id
+ char buf[TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0};
+ tFormatOffset(varDataVal(buf), TSDB_OFFSET_LEN, &data->offset);
+ varDataSetLen(buf, strlen(varDataVal(buf)));
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)buf, false);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetVal(pColInfo, *numOfRows, (const char *)&data->rows, false);
+ }else{
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetNULL(pColInfo, *numOfRows);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataSetNULL(pColInfo, *numOfRows);
+ mError("mnd show subscriptions: do not find vgId:%d in offsetRows", pVgEp->vgId);
+ }
+ (*numOfRows)++;
+ }
+ return 0;
+}
+
int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
@@ -1048,6 +1170,13 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock
blockDataEnsureCapacity(pBlock, numOfRows + pSub->vgNum);
}
+ // topic and cgroup
+ char topic[TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
+ mndSplitSubscribeKey(pSub->key, varDataVal(topic), varDataVal(cgroup), false);
+ varDataSetLen(topic, strlen(varDataVal(topic)));
+ varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
+
SMqConsumerEp *pConsumerEp = NULL;
void *pIter = NULL;
while (1) {
@@ -1055,97 +1184,11 @@ int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock
if (pIter == NULL) break;
pConsumerEp = (SMqConsumerEp *)pIter;
- int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
- for (int32_t j = 0; j < sz; j++) {
- SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j);
-
- SColumnInfoData *pColInfo;
- int32_t cols = 0;
-
- // topic and cgroup
- char topic[TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
- char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
- mndSplitSubscribeKey(pSub->key, varDataVal(topic), varDataVal(cgroup), false);
- varDataSetLen(topic, strlen(varDataVal(topic)));
- varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)topic, false);
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)cgroup, false);
-
- // vg id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pVgEp->vgId, false);
-
- // consumer id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumerEp->consumerId, false);
-
- mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
- pConsumerEp->consumerId, varDataVal(cgroup), pVgEp->vgId);
-
- // offset
-#if 0
- // subscribe time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->subscribeTime, false);
-
- // rebalance time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->rebalanceTime, pConsumer->rebalanceTime == 0);
-#endif
-
- numOfRows++;
- }
+ buildResult(pBlock, &numOfRows, pConsumerEp->consumerId, topic, cgroup, pConsumerEp->vgs, pConsumerEp->offsetRows);
}
// do not show for cleared subscription
- int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
- for (int32_t i = 0; i < sz; i++) {
- SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
-
- SColumnInfoData *pColInfo;
- int32_t cols = 0;
-
- // topic and cgroup
- char topic[TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
- char cgroup[TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE] = {0};
- mndSplitSubscribeKey(pSub->key, varDataVal(topic), varDataVal(cgroup), false);
- varDataSetLen(topic, strlen(varDataVal(topic)));
- varDataSetLen(cgroup, strlen(varDataVal(cgroup)));
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)topic, false);
-
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)cgroup, false);
-
- // vg id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pVgEp->vgId, false);
-
- // consumer id
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, NULL, true);
-
- mDebug("mnd show subscriptions(unassigned): topic %s, cgroup %s vgid %d", varDataVal(topic), varDataVal(cgroup),
- pVgEp->vgId);
-
- // offset
-#if 0
- // subscribe time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->subscribeTime, false);
-
- // rebalance time
- pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataSetVal(pColInfo, numOfRows, (const char *)&pSub->rebalanceTime, pConsumer->rebalanceTime == 0);
-#endif
-
- numOfRows++;
- }
+ buildResult(pBlock, &numOfRows, -1, topic, cgroup, pSub->unassignedVgs, pSub->offsetRows);
pBlock->info.rows = numOfRows;
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index 0a6df02f5f..68bfe09b5e 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -17,6 +17,7 @@
#include "mndSync.h"
#include "mndCluster.h"
#include "mndTrans.h"
+#include "mndVgroup.h"
static int32_t mndSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
if (pMsg == NULL || pMsg->pCont == NULL) {
@@ -73,76 +74,200 @@ static int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
return code;
}
-int32_t mndProcessWriteMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
- SMnode *pMnode = pFsm->data;
+static int32_t mndTransValidatePrepareAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
+ SSdbRow *pRow = NULL;
+ int32_t code = -1;
+
+ if (pAction->msgType == TDMT_MND_CREATE_VG) {
+ pRow = mndVgroupActionDecode(pAction->pRaw);
+ if (pRow == NULL) goto _OUT;
+
+ SVgObj *pVgroup = sdbGetRowObj(pRow);
+ if (pVgroup == NULL) goto _OUT;
+
+ int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
+ if (maxVgId > pVgroup->vgId) {
+ mError("trans:%d, failed to satisfy vgroup id %d of prepare action. maxVgId:%d", pTrans->id, pVgroup->vgId,
+ maxVgId);
+ goto _OUT;
+ }
+ }
+
+ code = 0;
+_OUT:
+ taosMemoryFreeClear(pRow);
+ return code;
+}
+
+static int32_t mndTransValidatePrepareStage(SMnode *pMnode, STrans *pTrans) {
+ int32_t code = -1;
+ int32_t action = 0;
+
+ int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions);
+ if (numOfActions == 0) {
+ code = 0;
+ goto _OUT;
+ }
+
+ mInfo("trans:%d, validate %d prepare actions.", pTrans->id, numOfActions);
+
+ for (action = 0; action < numOfActions; ++action) {
+ STransAction *pAction = taosArrayGet(pTrans->prepareActions, action);
+
+ if (pAction->actionType != TRANS_ACTION_RAW) {
+ mError("trans:%d, prepare action:%d of unexpected type:%d", pTrans->id, action, pAction->actionType);
+ goto _OUT;
+ }
+
+ code = mndTransValidatePrepareAction(pMnode, pTrans, pAction);
+ if (code != 0) {
+ mError("trans:%d, failed to validate prepare action: %d, numOfActions:%d", pTrans->id, action, numOfActions);
+ goto _OUT;
+ }
+ }
+
+ code = 0;
+_OUT:
+ return code;
+}
+
+static int32_t mndTransValidateImp(SMnode *pMnode, STrans *pTrans) {
+ if (pTrans->stage == TRN_STAGE_PREPARE) {
+ if (mndTransCheckConflict(pMnode, pTrans) < 0) {
+ mError("trans:%d, failed to validate trans conflicts.", pTrans->id);
+ return -1;
+ }
+
+ return mndTransValidatePrepareStage(pMnode, pTrans);
+ }
+ return 0;
+}
+
+static int32_t mndTransValidate(SMnode *pMnode, SSdbRaw *pRaw) {
+ STrans *pTrans = NULL;
+ int32_t code = -1;
+
+ SSdbRow *pRow = mndTransDecode(pRaw);
+ if (pRow == NULL) goto _OUT;
+
+ pTrans = sdbGetRowObj(pRow);
+ if (pTrans == NULL) goto _OUT;
+
+ code = mndTransValidateImp(pMnode, pTrans);
+
+_OUT:
+ if (pTrans) mndTransDropData(pTrans);
+ if (pRow) taosMemoryFreeClear(pRow);
+ if (code) terrno = (terrno ? terrno : TSDB_CODE_MND_TRANS_CONFLICT);
+ return code;
+}
+
+int32_t mndProcessWriteMsg(SMnode *pMnode, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
+ terrno = TSDB_CODE_SUCCESS;
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
SSdbRaw *pRaw = pMsg->pCont;
-
+ STrans *pTrans = NULL;
+ int32_t code = -1;
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
+
+ if (transId <= 0) {
+ mError("trans:%d, invalid commit msg, cache transId:%d seq:%" PRId64, transId, pMgmt->transId, pMgmt->transSeq);
+ terrno = TSDB_CODE_INVALID_MSG;
+ goto _OUT;
+ }
+
mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
" role:%s raw:%p sec:%d seq:%" PRId64,
transId, pMgmt->transId, pMeta->code, pMeta->index, pMeta->term, pMeta->lastConfigIndex, syncStr(pMeta->state),
pRaw, pMgmt->transSec, pMgmt->transSeq);
- if (pMeta->code == 0) {
- int32_t code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
- if (code != 0) {
- mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
- return 0;
- }
- sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
+ code = mndTransValidate(pMnode, pRaw);
+ if (code != 0) {
+ mError("trans:%d, failed to validate requested trans since %s", transId, terrstr());
+ code = 0;
+ pMeta->code = terrno;
+ goto _OUT;
}
- taosThreadMutexLock(&pMgmt->lock);
- pMgmt->errCode = pMeta->code;
-
- if (transId <= 0) {
- taosThreadMutexUnlock(&pMgmt->lock);
- mError("trans:%d, invalid commit msg, cache transId:%d seq:%" PRId64, transId, pMgmt->transId, pMgmt->transSeq);
- } else if (transId == pMgmt->transId) {
- if (pMgmt->errCode != 0) {
- mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
- } else {
- mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, transId, pMgmt->transSeq);
- }
- pMgmt->transId = 0;
- pMgmt->transSec = 0;
- pMgmt->transSeq = 0;
- tsem_post(&pMgmt->syncSem);
- taosThreadMutexUnlock(&pMgmt->lock);
- } else {
- taosThreadMutexUnlock(&pMgmt->lock);
- STrans *pTrans = mndAcquireTrans(pMnode, transId);
- if (pTrans != NULL) {
- mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
- transId, pTrans->createdTime, pMgmt->transId);
- mndTransExecute(pMnode, pTrans, false);
- mndReleaseTrans(pMnode, pTrans);
- } else {
- mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
- }
+ code = sdbWriteWithoutFree(pMnode->pSdb, pRaw);
+ if (code != 0) {
+ mError("trans:%d, failed to write to sdb since %s", transId, terrstr());
+ code = 0;
+ pMeta->code = terrno;
+ goto _OUT;
}
+ pTrans = mndAcquireTrans(pMnode, transId);
+ if (pTrans == NULL) {
+ mError("trans:%d, not found while execute in mnode since %s", transId, terrstr());
+ goto _OUT;
+ }
+
+ if (pTrans->stage == TRN_STAGE_PREPARE) {
+ bool continueExec = mndTransPerformPrepareStage(pMnode, pTrans);
+ if (!continueExec) goto _OUT;
+ }
+
+ if (pTrans->id != pMgmt->transId) {
+ mInfo("trans:%d, execute in mnode which not leader or sync timeout, createTime:%" PRId64 " saved trans:%d",
+ pTrans->id, pTrans->createdTime, pMgmt->transId);
+ mndTransRefresh(pMnode, pTrans);
+ }
+
+ sdbSetApplyInfo(pMnode->pSdb, pMeta->index, pMeta->term, pMeta->lastConfigIndex);
sdbWriteFile(pMnode->pSdb, tsMndSdbWriteDelta);
+ code = 0;
+
+_OUT:
+ if (pTrans) mndReleaseTrans(pMnode, pTrans);
+ return code;
+}
+
+static int32_t mndPostMgmtCode(SMnode *pMnode, int32_t code) {
+ SSyncMgmt *pMgmt = &pMnode->syncMgmt;
+ taosThreadMutexLock(&pMgmt->lock);
+ if (pMgmt->transId == 0) {
+ goto _OUT;
+ }
+
+ pMgmt->transId = 0;
+ pMgmt->transSec = 0;
+ pMgmt->transSeq = 0;
+ pMgmt->errCode = code;
+ tsem_post(&pMgmt->syncSem);
+
+ if (pMgmt->errCode != 0) {
+ mError("trans:%d, failed to propose since %s, post sem", pMgmt->transId, tstrerror(pMgmt->errCode));
+ } else {
+ mInfo("trans:%d, is proposed and post sem, seq:%" PRId64, pMgmt->transId, pMgmt->transSeq);
+ }
+
+_OUT:
+ taosThreadMutexUnlock(&pMgmt->lock);
return 0;
}
-int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
- int32_t code = 0;
+int32_t mndSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
+ SMnode *pMnode = pFsm->data;
+ int32_t code = pMsg->code;
+ if (code != 0) {
+ goto _OUT;
+ }
+
pMsg->info.conn.applyIndex = pMeta->index;
pMsg->info.conn.applyTerm = pMeta->term;
+ pMeta->code = 0;
- if (pMsg->code == 0) {
- SMnode *pMnode = pFsm->data;
- atomic_store_64(&pMnode->applied, pMsg->info.conn.applyIndex);
- }
+ atomic_store_64(&pMnode->applied, pMsg->info.conn.applyIndex);
if (!syncUtilUserCommit(pMsg->msgType)) {
- goto _out;
+ goto _OUT;
}
- code = mndProcessWriteMsg(pFsm, pMsg, pMeta);
-_out:
+ code = mndProcessWriteMsg(pMnode, pMsg, pMeta);
+
+_OUT:
+ mndPostMgmtCode(pMnode, code ? code : pMeta->code);
rpcFreeCont(pMsg->pCont);
pMsg->pCont = NULL;
return code;
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index f1ee7bca3b..d3cb19231e 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -753,7 +753,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
}
mndTransSetDbName(pTrans, pTopic->db, NULL);
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
mndReleaseTopic(pMnode, pTopic);
mndTransDrop(pTrans);
return -1;
@@ -912,12 +912,14 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pTopic->createTime, false);
- char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
+ char *sql = taosMemoryMalloc(strlen(pTopic->sql) + VARSTR_HEADER_SIZE);
STR_TO_VARSTR(sql, pTopic->sql);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);
+ taosMemoryFree(sql);
+
char *schemaJson = taosMemoryMalloc(TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE);
if(pTopic->subType == TOPIC_SUB_TYPE__COLUMN){
schemaToJson(pTopic->schema.pSchema, pTopic->schema.nCols, schemaJson);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index cfb5bef9d0..7ebaf6dda5 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -23,28 +23,25 @@
#include "mndSync.h"
#include "mndUser.h"
-#define TRANS_VER_NUMBER 1
+#define TRANS_VER1_NUMBER 1
+#define TRANS_VER2_NUMBER 2
#define TRANS_ARRAY_SIZE 8
#define TRANS_RESERVE_SIZE 48
-static SSdbRaw *mndTransActionEncode(STrans *pTrans);
-static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw);
static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans);
static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *OldTrans, STrans *pOld);
-static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc);
+static int32_t mndTransDelete(SSdb *pSdb, STrans *pTrans, bool callFunc);
static int32_t mndTransAppendLog(SArray *pArray, SSdbRaw *pRaw);
static int32_t mndTransAppendAction(SArray *pArray, STransAction *pAction);
static void mndTransDropLogs(SArray *pArray);
static void mndTransDropActions(SArray *pArray);
-static void mndTransDropData(STrans *pTrans);
static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pArray);
static int32_t mndTransExecuteRedoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoLogs(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans);
static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans);
-static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRedoLogStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformUndoLogStage(SMnode *pMnode, STrans *pTrans);
@@ -52,7 +49,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans);
static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans);
-static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans);
+static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans);
static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsLeader(pMnode); }
static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
@@ -67,11 +64,11 @@ int32_t mndInitTrans(SMnode *pMnode) {
SSdbTable table = {
.sdbType = SDB_TRANS,
.keyType = SDB_KEY_INT32,
- .encodeFp = (SdbEncodeFp)mndTransActionEncode,
- .decodeFp = (SdbDecodeFp)mndTransActionDecode,
+ .encodeFp = (SdbEncodeFp)mndTransEncode,
+ .decodeFp = (SdbDecodeFp)mndTransDecode,
.insertFp = (SdbInsertFp)mndTransActionInsert,
.updateFp = (SdbUpdateFp)mndTransActionUpdate,
- .deleteFp = (SdbDeleteFp)mndTransActionDelete,
+ .deleteFp = (SdbDeleteFp)mndTransDelete,
};
mndSetMsgHandle(pMnode, TDMT_MND_TRANS_TIMER, mndProcessTransTimer);
@@ -103,15 +100,55 @@ static int32_t mndTransGetActionsSize(SArray *pArray) {
return rawDataLen;
}
-static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+
+static int32_t mndTransEncodeAction(SSdbRaw *pRaw, int32_t *offset, SArray *pActions, int32_t actionsNum) {
+ int32_t dataPos = *offset;
+ int8_t unused = 0;
+ int32_t ret = -1;
+
+ for (int32_t i = 0; i < actionsNum; ++i) {
+ STransAction *pAction = taosArrayGet(pActions, i);
+ SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
+ if (pAction->actionType == TRANS_ACTION_RAW) {
+ int32_t len = sdbGetRawTotalSize(pAction->pRaw);
+ SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, len, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
+ } else if (pAction->actionType == TRANS_ACTION_MSG) {
+ SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
+ SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
+ } else {
+ // nothing
+ }
+ }
+ ret = 0;
+
+_OVER:
+ *offset = dataPos;
+ return ret;
+}
+
+SSdbRaw *mndTransEncode(STrans *pTrans) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ int8_t sver = taosArrayGetSize(pTrans->prepareActions) ? TRANS_VER2_NUMBER : TRANS_VER1_NUMBER;
int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE + pTrans->paramLen;
+ rawDataLen += mndTransGetActionsSize(pTrans->prepareActions);
rawDataLen += mndTransGetActionsSize(pTrans->redoActions);
rawDataLen += mndTransGetActionsSize(pTrans->undoActions);
rawDataLen += mndTransGetActionsSize(pTrans->commitActions);
- SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, TRANS_VER_NUMBER, rawDataLen);
+ SSdbRaw *pRaw = sdbAllocRaw(SDB_TRANS, sver, rawDataLen);
if (pRaw == NULL) {
mError("trans:%d, failed to alloc raw since %s", pTrans->id, terrstr());
return NULL;
@@ -131,91 +168,22 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER)
+ int32_t prepareActionNum = taosArrayGetSize(pTrans->prepareActions);
int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);
int32_t undoActionNum = taosArrayGetSize(pTrans->undoActions);
int32_t commitActionNum = taosArrayGetSize(pTrans->commitActions);
+
+ if (sver > TRANS_VER1_NUMBER) {
+ SDB_SET_INT32(pRaw, dataPos, prepareActionNum, _OVER)
+ }
SDB_SET_INT32(pRaw, dataPos, redoActionNum, _OVER)
SDB_SET_INT32(pRaw, dataPos, undoActionNum, _OVER)
SDB_SET_INT32(pRaw, dataPos, commitActionNum, _OVER)
- int8_t unused = 0;
- for (int32_t i = 0; i < redoActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->redoActions, i);
- SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
- if (pAction->actionType == TRANS_ACTION_RAW) {
- int32_t len = sdbGetRawTotalSize(pAction->pRaw);
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
- } else if (pAction->actionType == TRANS_ACTION_MSG) {
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
- } else {
- // nothing
- }
- }
-
- for (int32_t i = 0; i < undoActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->undoActions, i);
- SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
- if (pAction->actionType == TRANS_ACTION_RAW) {
- int32_t len = sdbGetRawTotalSize(pAction->pRaw);
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
- } else if (pAction->actionType == TRANS_ACTION_MSG) {
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
- } else {
- // nothing
- }
- }
-
- for (int32_t i = 0; i < commitActionNum; ++i) {
- STransAction *pAction = taosArrayGet(pTrans->commitActions, i);
- SDB_SET_INT32(pRaw, dataPos, pAction->id, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->errCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->acceptableCode, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->retryCode, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->actionType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->stage, _OVER)
- SDB_SET_INT8(pRaw, dataPos, pAction->reserved, _OVER)
- if (pAction->actionType == TRANS_ACTION_RAW) {
- int32_t len = sdbGetRawTotalSize(pAction->pRaw);
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->rawWritten*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, len, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, (void *)pAction->pRaw, len, _OVER)
- } else if (pAction->actionType == TRANS_ACTION_MSG) {
- SDB_SET_BINARY(pRaw, dataPos, (void *)&pAction->epSet, sizeof(SEpSet), _OVER)
- SDB_SET_INT16(pRaw, dataPos, pAction->msgType, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgSent*/, _OVER)
- SDB_SET_INT8(pRaw, dataPos, unused /*pAction->msgReceived*/, _OVER)
- SDB_SET_INT32(pRaw, dataPos, pAction->contLen, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pAction->pCont, pAction->contLen, _OVER)
- } else {
- // nothing
- }
- }
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->prepareActions, prepareActionNum) < 0) goto _OVER;
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->redoActions, redoActionNum) < 0) goto _OVER;
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->undoActions, undoActionNum) < 0) goto _OVER;
+ if (mndTransEncodeAction(pRaw, &dataPos, pTrans->commitActions, commitActionNum) < 0) goto _OVER;
SDB_SET_INT32(pRaw, dataPos, pTrans->startFunc, _OVER)
SDB_SET_INT32(pRaw, dataPos, pTrans->stopFunc, _OVER)
@@ -242,23 +210,76 @@ _OVER:
return pRaw;
}
-static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+static int32_t mndTransDecodeAction(SSdbRaw *pRaw, int32_t *offset, SArray *pActions, int32_t actionNum) {
+ STransAction action = {0};
+ int32_t dataPos = *offset;
+ int8_t unused = 0;
+ int8_t stage = 0;
+ int8_t actionType = 0;
+ int32_t dataLen = 0;
+ int32_t ret = -1;
+
+ for (int32_t i = 0; i < actionNum; ++i) {
+ memset(&action, 0, sizeof(action));
+ SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
+ action.actionType = actionType;
+ SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
+ action.stage = stage;
+ SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
+ if (action.actionType == TRANS_ACTION_RAW) {
+ SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
+ action.pRaw = taosMemoryMalloc(dataLen);
+ if (action.pRaw == NULL) goto _OVER;
+ mTrace("raw:%p, is created", action.pRaw);
+ SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
+ if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
+ action.pRaw = NULL;
+ } else if (action.actionType == TRANS_ACTION_MSG) {
+ SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
+ tmsgUpdateDnodeEpSet(&action.epSet);
+ SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
+ SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
+ action.pCont = taosMemoryMalloc(action.contLen);
+ if (action.pCont == NULL) goto _OVER;
+ SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
+ if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
+ action.pCont = NULL;
+ } else {
+ if (taosArrayPush(pActions, &action) == NULL) goto _OVER;
+ }
+ }
+ ret = 0;
+
+_OVER:
+ *offset = dataPos;
+ taosMemoryFreeClear(action.pCont);
+ return ret;
+}
+
+SSdbRow *mndTransDecode(SSdbRaw *pRaw) {
+ terrno = TSDB_CODE_INVALID_MSG;
SSdbRow *pRow = NULL;
STrans *pTrans = NULL;
char *pData = NULL;
int32_t dataLen = 0;
int8_t sver = 0;
+ int32_t prepareActionNum = 0;
int32_t redoActionNum = 0;
int32_t undoActionNum = 0;
int32_t commitActionNum = 0;
int32_t dataPos = 0;
- STransAction action = {0};
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto _OVER;
- if (sver != TRANS_VER_NUMBER) {
+ if (sver != TRANS_VER1_NUMBER && sver != TRANS_VER2_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto _OVER;
}
@@ -294,127 +315,28 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER)
+
+ if (sver > TRANS_VER1_NUMBER) {
+ SDB_GET_INT32(pRaw, dataPos, &prepareActionNum, _OVER)
+ }
SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER)
SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER)
SDB_GET_INT32(pRaw, dataPos, &commitActionNum, _OVER)
+ pTrans->prepareActions = taosArrayInit(prepareActionNum, sizeof(STransAction));
pTrans->redoActions = taosArrayInit(redoActionNum, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(undoActionNum, sizeof(STransAction));
pTrans->commitActions = taosArrayInit(commitActionNum, sizeof(STransAction));
+ if (pTrans->prepareActions == NULL) goto _OVER;
if (pTrans->redoActions == NULL) goto _OVER;
if (pTrans->undoActions == NULL) goto _OVER;
if (pTrans->commitActions == NULL) goto _OVER;
- int8_t unused = 0;
- for (int32_t i = 0; i < redoActionNum; ++i) {
- memset(&action, 0, sizeof(action));
- SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
- action.actionType = actionType;
- SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
- action.stage = stage;
- SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
- if (action.actionType == TRANS_ACTION_RAW) {
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- action.pRaw = taosMemoryMalloc(dataLen);
- if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", action.pRaw);
- SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- action.pRaw = NULL;
- } else if (action.actionType == TRANS_ACTION_MSG) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- tmsgUpdateDnodeEpSet(&action.epSet);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
- } else {
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- }
- }
-
- for (int32_t i = 0; i < undoActionNum; ++i) {
- memset(&action, 0, sizeof(action));
- SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
- action.actionType = actionType;
- SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
- action.stage = stage;
- SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
- if (action.actionType == TRANS_ACTION_RAW) {
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- action.pRaw = taosMemoryMalloc(dataLen);
- if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", action.pRaw);
- SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- action.pRaw = NULL;
- } else if (action.actionType == TRANS_ACTION_MSG) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
- } else {
- if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
- }
- }
-
- for (int32_t i = 0; i < commitActionNum; ++i) {
- memset(&action, 0, sizeof(action));
- SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.retryCode, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &actionType, _OVER)
- action.actionType = actionType;
- SDB_GET_INT8(pRaw, dataPos, &stage, _OVER)
- action.stage = stage;
- SDB_GET_INT8(pRaw, dataPos, &action.reserved, _OVER)
- if (action.actionType) {
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.rawWritten*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
- action.pRaw = taosMemoryMalloc(dataLen);
- if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", action.pRaw);
- SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
- if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
- action.pRaw = NULL;
- } else if (action.actionType == TRANS_ACTION_MSG) {
- SDB_GET_BINARY(pRaw, dataPos, (void *)&action.epSet, sizeof(SEpSet), _OVER);
- SDB_GET_INT16(pRaw, dataPos, &action.msgType, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgSent*/, _OVER)
- SDB_GET_INT8(pRaw, dataPos, &unused /*&action.msgReceived*/, _OVER)
- SDB_GET_INT32(pRaw, dataPos, &action.contLen, _OVER)
- action.pCont = taosMemoryMalloc(action.contLen);
- if (action.pCont == NULL) goto _OVER;
- SDB_GET_BINARY(pRaw, dataPos, action.pCont, action.contLen, _OVER);
- if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
- action.pCont = NULL;
- } else {
- if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
- }
- }
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->prepareActions, prepareActionNum) < 0) goto _OVER;
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->redoActions, redoActionNum) < 0) goto _OVER;
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->undoActions, undoActionNum) < 0) goto _OVER;
+ if (mndTransDecodeAction(pRaw, &dataPos, pTrans->commitActions, commitActionNum) < 0) goto _OVER;
SDB_GET_INT32(pRaw, dataPos, &pTrans->startFunc, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pTrans->stopFunc, _OVER)
@@ -434,7 +356,6 @@ _OVER:
mError("trans:%d, failed to parse from raw:%p since %s", pTrans->id, pRaw, terrstr());
mndTransDropData(pTrans);
taosMemoryFreeClear(pRow);
- taosMemoryFreeClear(action.pCont);
return NULL;
}
@@ -458,7 +379,7 @@ static const char *mndTransStr(ETrnStage stage) {
return "commit";
case TRN_STAGE_COMMIT_ACTION:
return "commitAction";
- case TRN_STAGE_FINISHED:
+ case TRN_STAGE_FINISH:
return "finished";
case TRN_STAGE_PRE_FINISH:
return "pre-finish";
@@ -519,7 +440,11 @@ static int32_t mndTransActionInsert(SSdb *pSdb, STrans *pTrans) {
return 0;
}
-static void mndTransDropData(STrans *pTrans) {
+void mndTransDropData(STrans *pTrans) {
+ if (pTrans->prepareActions != NULL) {
+ mndTransDropActions(pTrans->prepareActions);
+ pTrans->prepareActions = NULL;
+ }
if (pTrans->redoActions != NULL) {
mndTransDropActions(pTrans->redoActions);
pTrans->redoActions = NULL;
@@ -549,7 +474,7 @@ static void mndTransDropData(STrans *pTrans) {
(void)taosThreadMutexDestroy(&pTrans->mutex);
}
-static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
+static int32_t mndTransDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
mInfo("trans:%d, perform delete action, row:%p stage:%s callfunc:%d, stopFunc:%d", pTrans->id, pTrans,
mndTransStr(pTrans->stage), callFunc, pTrans->stopFunc);
@@ -586,10 +511,11 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
pOld->id, pOld, mndTransStr(pOld->stage), pOld->createdTime, pNew, mndTransStr(pNew->stage),
pNew->createdTime);
// only occured while sync timeout
- terrno = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
+ terrno = TSDB_CODE_MND_TRANS_SYNC_TIMEOUT;
return -1;
}
+ mndTransUpdateActions(pOld->prepareActions, pNew->prepareActions);
mndTransUpdateActions(pOld->redoActions, pNew->redoActions);
mndTransUpdateActions(pOld->undoActions, pNew->undoActions);
mndTransUpdateActions(pOld->commitActions, pNew->commitActions);
@@ -607,7 +533,7 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) {
}
if (pOld->stage == TRN_STAGE_PRE_FINISH) {
- pOld->stage = TRN_STAGE_FINISHED;
+ pOld->stage = TRN_STAGE_FINISH;
mTrace("trans:%d, stage from pre-finish to finished since perform update action", pNew->id);
}
@@ -646,6 +572,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->conflict = conflict;
pTrans->exec = TRN_EXEC_PARALLEL;
pTrans->createdTime = taosGetTimestampMs();
+ pTrans->prepareActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->commitActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
@@ -728,6 +655,13 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) {
return mndTransAppendAction(pTrans->commitActions, &action);
}
+int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction) {
+ pAction->stage = TRN_STAGE_PREPARE;
+ pAction->actionType = TRANS_ACTION_RAW;
+ pAction->mTraceId = pTrans->mTraceId;
+ return mndTransAppendAction(pTrans->prepareActions, pAction);
+}
+
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) {
pAction->stage = TRN_STAGE_REDO_ACTION;
pAction->actionType = TRANS_ACTION_MSG;
@@ -800,7 +734,7 @@ void mndTransSetParallel(STrans *pTrans) { pTrans->exec = TRN_EXEC_PARALLEL; }
void mndTransSetOper(STrans *pTrans, EOperType oper) { pTrans->oper = oper; }
static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
- SSdbRaw *pRaw = mndTransActionEncode(pTrans);
+ SSdbRaw *pRaw = mndTransEncode(pTrans);
if (pRaw == NULL) {
mError("trans:%d, failed to encode while sync trans since %s", pTrans->id, terrstr());
return -1;
@@ -872,7 +806,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
return conflict;
}
-int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
+int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans) {
if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
if (strlen(pTrans->dbname) == 0 && strlen(pTrans->stbname) == 0) {
terrno = TSDB_CODE_MND_TRANS_CONFLICT;
@@ -891,7 +825,7 @@ int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans) {
}
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
- if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
+ if (mndTransCheckConflict(pMnode, pTrans) != 0) {
return -1;
}
@@ -922,7 +856,7 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
pTrans->rpcRsp = NULL;
pTrans->rpcRspLen = 0;
- mndTransExecute(pMnode, pNew, true);
+ mndTransExecute(pMnode, pNew);
mndReleaseTrans(pMnode, pNew);
return 0;
}
@@ -961,7 +895,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
bool sendRsp = false;
int32_t code = pTrans->code;
- if (pTrans->stage == TRN_STAGE_FINISHED) {
+ if (pTrans->stage == TRN_STAGE_FINISH) {
sendRsp = true;
}
@@ -1003,7 +937,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL;
}
if (code == TSDB_CODE_SYN_TIMEOUT) {
- code = TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT;
+ code = TSDB_CODE_MND_TRANS_SYNC_TIMEOUT;
}
if (i != 0 && code == 0) {
@@ -1104,7 +1038,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
mInfo("trans:%d, invalid action, index:%d, code:0x%x", transId, action, pRsp->code);
}
- mndTransExecute(pMnode, pTrans, true);
+ mndTransExecute(pMnode, pTrans);
_OVER:
mndReleaseTrans(pMnode, pTrans);
@@ -1392,8 +1326,25 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
return code;
}
-static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
+bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
+ int32_t code = 0;
+
+ int32_t numOfActions = taosArrayGetSize(pTrans->prepareActions);
+ if (numOfActions == 0) goto _OVER;
+
+ mInfo("trans:%d, execute %d prepare actions.", pTrans->id, numOfActions);
+
+ for (int32_t action = 0; action < numOfActions; ++action) {
+ STransAction *pAction = taosArrayGet(pTrans->prepareActions, action);
+ code = mndTransExecSingleAction(pMnode, pTrans, pAction);
+ if (code != 0) {
+ mError("trans:%d, failed to execute prepare action:%d, numOfActions:%d", pTrans->id, action, numOfActions);
+ return false;
+ }
+ }
+
+_OVER:
pTrans->stage = TRN_STAGE_REDO_ACTION;
mInfo("trans:%d, stage from prepare to redoAction", pTrans->id);
return continueExec;
@@ -1476,7 +1427,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
- pTrans->stage = TRN_STAGE_FINISHED; // TRN_STAGE_PRE_FINISH is not necessary
+ pTrans->stage = TRN_STAGE_FINISH; // TRN_STAGE_PRE_FINISH is not necessary
mInfo("trans:%d, stage from commitAction to finished", pTrans->id);
continueExec = true;
} else {
@@ -1528,14 +1479,14 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-static bool mndTransPerfromPreFinishedStage(SMnode *pMnode, STrans *pTrans) {
+static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans) {
if (mndCannotExecuteTransAction(pMnode)) return false;
bool continueExec = true;
int32_t code = mndTransPreFinish(pMnode, pTrans);
if (code == 0) {
- pTrans->stage = TRN_STAGE_FINISHED;
+ pTrans->stage = TRN_STAGE_FINISH;
mInfo("trans:%d, stage from pre-finish to finish", pTrans->id);
continueExec = true;
} else {
@@ -1547,10 +1498,10 @@ static bool mndTransPerfromPreFinishedStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
+static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = false;
- SSdbRaw *pRaw = mndTransActionEncode(pTrans);
+ SSdbRaw *pRaw = mndTransEncode(pTrans);
if (pRaw == NULL) {
mError("trans:%d, failed to encode while finish trans since %s", pTrans->id, terrstr());
return false;
@@ -1567,12 +1518,12 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
return continueExec;
}
-void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
+void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) {
bool continueExec = true;
while (continueExec) {
- mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " leader:%d", pTrans->id,
- mndTransStr(pTrans->stage), pTrans->createdTime, isLeader);
+ mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf:%d", pTrans->id,
+ mndTransStr(pTrans->stage), pTrans->createdTime, topHalf);
pTrans->lastExecTime = taosGetTimestampMs();
switch (pTrans->stage) {
case TRN_STAGE_PREPARE:
@@ -1582,7 +1533,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
continueExec = mndTransPerformRedoActionStage(pMnode, pTrans);
break;
case TRN_STAGE_COMMIT:
- if (isLeader) {
+ if (topHalf) {
continueExec = mndTransPerformCommitStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not commit since not leader", pTrans->id);
@@ -1593,7 +1544,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
continueExec = mndTransPerformCommitActionStage(pMnode, pTrans);
break;
case TRN_STAGE_ROLLBACK:
- if (isLeader) {
+ if (topHalf) {
continueExec = mndTransPerformRollbackStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not rollback since not leader", pTrans->id);
@@ -1604,15 +1555,15 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
continueExec = mndTransPerformUndoActionStage(pMnode, pTrans);
break;
case TRN_STAGE_PRE_FINISH:
- if (isLeader) {
- continueExec = mndTransPerfromPreFinishedStage(pMnode, pTrans);
+ if (topHalf) {
+ continueExec = mndTransPerformPreFinishStage(pMnode, pTrans);
} else {
mInfo("trans:%d, can not pre-finish since not leader", pTrans->id);
continueExec = false;
}
break;
- case TRN_STAGE_FINISHED:
- continueExec = mndTransPerfromFinishedStage(pMnode, pTrans);
+ case TRN_STAGE_FINISH:
+ continueExec = mndTransPerformFinishStage(pMnode, pTrans);
break;
default:
continueExec = false;
@@ -1623,6 +1574,16 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans, bool isLeader) {
mndTransSendRpcRsp(pMnode, pTrans);
}
+void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
+ bool topHalf = true;
+ return mndTransExecuteImp(pMnode, pTrans, topHalf);
+}
+
+void mndTransRefresh(SMnode *pMnode, STrans *pTrans) {
+ bool topHalf = false;
+ return mndTransExecuteImp(pMnode, pTrans, topHalf);
+}
+
static int32_t mndProcessTransTimer(SRpcMsg *pReq) {
mTrace("start to process trans timer");
mndTransPullup(pReq->info.node);
@@ -1649,7 +1610,7 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
pAction->errCode = 0;
}
- mndTransExecute(pMnode, pTrans, true);
+ mndTransExecute(pMnode, pTrans);
return 0;
}
@@ -1707,7 +1668,7 @@ void mndTransPullup(SMnode *pMnode) {
int32_t *pTransId = taosArrayGet(pArray, i);
STrans *pTrans = mndAcquireTrans(pMnode, *pTransId);
if (pTrans != NULL) {
- mndTransExecute(pMnode, pTrans, true);
+ mndTransExecute(pMnode, pTrans);
}
mndReleaseTrans(pMnode, pTrans);
}
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index b7a6378bd8..36e8755a3e 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -28,7 +28,6 @@
#define VGROUP_VER_NUMBER 1
#define VGROUP_RESERVE_SIZE 64
-static SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw);
static int32_t mndVgroupActionInsert(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionDelete(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew);
@@ -483,15 +482,15 @@ static void *mndBuildDisableVnodeWriteReq(SMnode *pMnode, SDbObj *pDb, int32_t v
return pReq;
}
-static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, SVgObj *pVgroup, int32_t dstVgId, int32_t *pContLen) {
+static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, int32_t srcVgId, SVgObj *pVgroup, int32_t *pContLen) {
SAlterVnodeHashRangeReq alterReq = {
- .srcVgId = pVgroup->vgId,
- .dstVgId = dstVgId,
+ .srcVgId = srcVgId,
+ .dstVgId = pVgroup->vgId,
.hashBegin = pVgroup->hashBegin,
.hashEnd = pVgroup->hashEnd,
};
- mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", pVgroup->vgId, dstVgId,
+ mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", srcVgId, pVgroup->vgId,
pVgroup->hashBegin, pVgroup->hashEnd);
int32_t contLen = tSerializeSAlterVnodeHashRangeReq(NULL, 0, &alterReq);
if (contLen < 0) {
@@ -1207,12 +1206,12 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0;
}
-static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, int32_t dstVgId) {
+static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, int32_t srcVgId, SVgObj *pVgroup) {
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t contLen = 0;
- void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, pVgroup, dstVgId, &contLen);
+ void *pReq = mndBuildAlterVnodeHashRangeReq(pMnode, srcVgId, pVgroup, &contLen);
if (pReq == NULL) return -1;
action.pCont = pReq;
@@ -1247,6 +1246,21 @@ int32_t mndAddAlterVnodeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb
return 0;
}
+int32_t mndAddPrepareNewVgAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVg) {
+ SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
+ if (pRaw == NULL) goto _err;
+
+ STransAction action = {.pRaw = pRaw, .msgType = TDMT_MND_CREATE_VG};
+ if (mndTransAppendPrepareAction(pTrans, &action) != 0) goto _err;
+ (void)sdbSetRawStatus(pRaw, SDB_STATUS_CREATING);
+ pRaw = NULL;
+ return 0;
+
+_err:
+ sdbFreeRaw(pRaw);
+ return -1;
+}
+
int32_t mndAddAlterVnodeReplicaAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
if (pDnode == NULL) return -1;
@@ -2241,10 +2255,13 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans,
return 0;
}
-static int32_t mndTransCommitVgStatus(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus) {
+typedef int32_t (*FpTransActionCb)(STrans *pTrans, SSdbRaw *pRaw);
+
+static int32_t mndAddVgStatusAction(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus, ETrnStage stage) {
+ FpTransActionCb appendActionCb = (stage == TRN_STAGE_COMMIT_ACTION) ? mndTransAppendCommitlog : mndTransAppendRedolog;
SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
if (pRaw == NULL) goto _err;
- if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _err;
+ if (appendActionCb(pTrans, pRaw) != 0) goto _err;
(void)sdbSetRawStatus(pRaw, vgStatus);
pRaw = NULL;
return 0;
@@ -2253,18 +2270,32 @@ _err:
return -1;
}
+static int32_t mndAddDbStatusAction(STrans *pTrans, SDbObj *pDb, ESdbStatus dbStatus, ETrnStage stage) {
+ FpTransActionCb appendActionCb = (stage == TRN_STAGE_COMMIT_ACTION) ? mndTransAppendCommitlog : mndTransAppendRedolog;
+ SSdbRaw *pRaw = mndDbActionEncode(pDb);
+ if (pRaw == NULL) goto _err;
+ if (appendActionCb(pTrans, pRaw) != 0) goto _err;
+ (void)sdbSetRawStatus(pRaw, dbStatus);
+ pRaw = NULL;
+ return 0;
+_err:
+ sdbFreeRaw(pRaw);
+ return -1;
+}
+
int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
int32_t code = -1;
STrans *pTrans = NULL;
- SSdbRaw *pRaw = NULL;
SDbObj dbObj = {0};
SArray *pArray = mndBuildDnodesArray(pMnode, 0);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "split-vgroup");
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "split-vgroup");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to split vgroup, vgId:%d", pTrans->id, pVgroup->vgId);
+ mndTransSetDbName(pTrans, pDb->name, NULL);
+
SVgObj newVg1 = {0};
memcpy(&newVg1, pVgroup, sizeof(SVgObj));
mInfo("vgId:%d, vgroup info before split, replica:%d hashBegin:%u hashEnd:%u", newVg1.vgId, newVg1.replica,
@@ -2316,32 +2347,25 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
// alter vgId and hash range
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
- if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
+ int32_t srcVgId = newVg1.vgId;
newVg1.vgId = maxVgId;
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg1) != 0) goto _OVER;
+ if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg1) != 0) goto _OVER;
maxVgId++;
- if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg2, maxVgId) != 0) goto _OVER;
+ srcVgId = newVg2.vgId;
newVg2.vgId = maxVgId;
+ if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg2) != 0) goto _OVER;
+ if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg2) != 0) goto _OVER;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
-
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg2) != 0) goto _OVER;
- // adjust vgroup replica
- if (pDb->cfg.replications != newVg1.replica) {
- if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
- } else {
- if (mndTransCommitVgStatus(pTrans, &newVg1, SDB_STATUS_READY) < 0) goto _OVER;
- }
-
- if (pDb->cfg.replications != newVg2.replica) {
- if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
- } else {
- if (mndTransCommitVgStatus(pTrans, &newVg2, SDB_STATUS_READY) < 0) goto _OVER;
- }
-
- if (mndTransCommitVgStatus(pTrans, pVgroup, SDB_STATUS_DROPPED) < 0) goto _OVER;
+ if (mndAddVgStatusAction(pTrans, &newVg1, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+ if (mndAddVgStatusAction(pTrans, &newVg2, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+ if (mndAddVgStatusAction(pTrans, pVgroup, SDB_STATUS_DROPPED, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+ // update db status
memcpy(&dbObj, pDb, sizeof(SDbObj));
if (dbObj.cfg.pRetensions != NULL) {
dbObj.cfg.pRetensions = taosArrayDup(pDb->cfg.pRetensions, NULL);
@@ -2350,11 +2374,27 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
dbObj.vgVersion++;
dbObj.updateTime = taosGetTimestampMs();
dbObj.cfg.numOfVgroups++;
- pRaw = mndDbActionEncode(&dbObj);
- if (pRaw == NULL) goto _OVER;
- if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
- (void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- pRaw = NULL;
+ if (mndAddDbStatusAction(pTrans, &dbObj, SDB_STATUS_READY, TRN_STAGE_REDO_ACTION) < 0) goto _OVER;
+
+ // adjust vgroup replica
+ if (pDb->cfg.replications != newVg1.replica) {
+ if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
+ } else {
+ if (mndAddVgStatusAction(pTrans, &newVg1, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
+ }
+
+ if (pDb->cfg.replications != newVg2.replica) {
+ if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
+ } else {
+ if (mndAddVgStatusAction(pTrans, &newVg2, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
+ }
+
+ if (mndAddVgStatusAction(pTrans, pVgroup, SDB_STATUS_DROPPED, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
+
+ // commit db status
+ dbObj.vgVersion++;
+ dbObj.updateTime = taosGetTimestampMs();
+ if (mndAddDbStatusAction(pTrans, &dbObj, SDB_STATUS_READY, TRN_STAGE_COMMIT_ACTION) < 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
@@ -2362,7 +2402,6 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
_OVER:
taosArrayDestroy(pArray);
mndTransDrop(pTrans);
- sdbFreeRaw(pRaw);
taosArrayDestroy(dbObj.cfg.pRetensions);
return code;
}
diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h
index e9a9e425e3..3c96d8a2fd 100644
--- a/source/dnode/mnode/sdb/inc/sdb.h
+++ b/source/dnode/mnode/sdb/inc/sdb.h
@@ -122,6 +122,7 @@ typedef enum {
SDB_STATUS_DROPPING = 2,
SDB_STATUS_DROPPED = 3,
SDB_STATUS_READY = 4,
+ SDB_STATUS_UPDATE = 5,
} ESdbStatus;
typedef enum {
diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c
index f1cee6395b..258b22d8ee 100644
--- a/source/dnode/mnode/sdb/src/sdbHash.c
+++ b/source/dnode/mnode/sdb/src/sdbHash.c
@@ -256,6 +256,7 @@ int32_t sdbWriteWithoutFree(SSdb *pSdb, SSdbRaw *pRaw) {
code = sdbInsertRow(pSdb, hash, pRaw, pRow, keySize);
break;
case SDB_STATUS_READY:
+ case SDB_STATUS_UPDATE:
case SDB_STATUS_DROPPING:
code = sdbUpdateRow(pSdb, hash, pRaw, pRow, keySize);
break;
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index b18cb8e282..b7bfc57cd5 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -103,7 +103,7 @@ target_link_libraries(
# PUBLIC bdb
# PUBLIC scalar
- PUBLIC rocksdb-shared
+ PUBLIC rocksdb
PUBLIC transport
PUBLIC stream
PUBLIC index
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 4ba8d6d69f..b35dc71ed9 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -139,6 +139,7 @@ static STqMgmt tqMgmt = {0};
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
+void tqDestroyTqHandle(void* data);
// tqRead
int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);
@@ -161,6 +162,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq);
int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen);
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);
+int32_t tqMetaGetHandle(STQ* pTq, const char* key);
+int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle);
STqOffsetStore* tqOffsetOpen(STQ* pTq);
void tqOffsetClose(STqOffsetStore*);
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 2811fc35b0..d3998285f4 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -229,6 +229,7 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
+int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg);
// tq-stream
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index eb169fbdc2..0d0716f2f0 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -879,9 +879,13 @@ static int32_t metaFilterTableByHash(SMeta *pMeta, SArray *uidList) {
SDecoder dc = {0};
tDecoderInit(&dc, pData, nData);
metaDecodeEntry(&dc, &me);
+
if (me.type != TSDB_SUPER_TABLE) {
- int32_t ret = vnodeValidateTableHash(pMeta->pVnode, me.name);
- if (TSDB_CODE_VND_HASH_MISMATCH == ret) {
+ char tbFName[TSDB_TABLE_FNAME_LEN + 1];
+ snprintf(tbFName, sizeof(tbFName), "%s.%s", pMeta->pVnode->config.dbname, me.name);
+ tbFName[TSDB_TABLE_FNAME_LEN] = '\0';
+ int32_t ret = vnodeValidateTableHash(pMeta->pVnode, tbFName);
+ if (ret < 0 && terrno == TSDB_CODE_VND_HASH_MISMATCH) {
taosArrayPush(uidList, &me.uid);
}
}
@@ -910,6 +914,7 @@ int32_t metaTrimTables(SMeta *pMeta) {
goto end;
}
+ metaInfo("vgId:%d, trim %ld tables", TD_VID(pMeta->pVnode), taosArrayGetSize(tbUids));
metaDropTables(pMeta, tbUids);
end:
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index aa6bbbe9df..de750aaa39 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -62,7 +62,7 @@ void tqCleanUp() {
}
}
-static void destroyTqHandle(void* data) {
+void tqDestroyTqHandle(void* data) {
STqHandle* pData = (STqHandle*)data;
qDestroyTask(pData->execHandle.task);
@@ -102,7 +102,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
pTq->walLogLastVer = pVnode->pWal->vers.lastVer;
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
- taosHashSetFreeFp(pTq->pHandle, destroyTqHandle);
+ taosHashSetFreeFp(pTq->pHandle, tqDestroyTqHandle);
taosInitRWLatch(&pTq->lock);
pTq->pPushMgr = taosHashInit(64, MurmurHash3_32, false, HASH_NO_LOCK);
@@ -243,8 +243,8 @@ int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId) {
tqDoSendDataRsp(&pHandle->msg->info, &dataRsp, pHandle->epoch, pHandle->consumerId, TMQ_MSG_TYPE__POLL_RSP, sver,
ever);
- char buf1[80] = {0};
- char buf2[80] = {0};
+ char buf1[TSDB_OFFSET_LEN] = {0};
+ char buf2[TSDB_OFFSET_LEN] = {0};
tFormatOffset(buf1, tListLen(buf1), &dataRsp.reqOffset);
tFormatOffset(buf2, tListLen(buf2), &dataRsp.rspOffset);
tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s", vgId,
@@ -259,12 +259,12 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq*
tqDoSendDataRsp(&pMsg->info, pRsp, pReq->epoch, pReq->consumerId, type, sver, ever);
- char buf1[80] = {0};
- char buf2[80] = {0};
- tFormatOffset(buf1, 80, &pRsp->reqOffset);
- tFormatOffset(buf2, 80, &pRsp->rspOffset);
+ char buf1[TSDB_OFFSET_LEN] = {0};
+ char buf2[TSDB_OFFSET_LEN] = {0};
+ tFormatOffset(buf1, TSDB_OFFSET_LEN, &pRsp->reqOffset);
+ tFormatOffset(buf2, TSDB_OFFSET_LEN, &pRsp->rspOffset);
- tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId,
+ tqDebug("tmq poll vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId,
pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);
return 0;
@@ -421,6 +421,35 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
return 0;
}
+int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) {
+ int32_t vgId = TD_VID(pTq->pVnode);
+ taosWLockLatch(&pTq->lock);
+ if (taosHashGetSize(pTq->pPushMgr) > 0) {
+ void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
+
+ while (pIter) {
+ STqHandle* pHandle = *(STqHandle**)pIter;
+ tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
+
+ if (ASSERT(pHandle->msg != NULL)) {
+ tqError("pHandle->msg should not be null");
+ break;
+ }else{
+ SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
+ tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
+ taosMemoryFree(pHandle->msg);
+ pHandle->msg = NULL;
+ }
+
+ pIter = taosHashIterate(pTq->pPushMgr, pIter);
+ }
+
+ taosHashClear(pTq->pPushMgr);
+ }
+ taosWUnLockLatch(&pTq->lock);
+ return 0;
+}
+
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq req = {0};
int code = 0;
@@ -481,8 +510,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
pHandle->epoch = reqEpoch;
}
- char buf[80];
- tFormatOffset(buf, 80, &reqOffset);
+ char buf[TSDB_OFFSET_LEN];
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &reqOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64,
consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId);
@@ -559,7 +588,7 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
} else {
dataRsp.rspOffset.version = currentVer; // return current consume offset value
}
- } else if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
+ } else if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) {
dataRsp.rspOffset.version = sver; // not consume yet, set the earliest position
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
dataRsp.rspOffset.version = ever;
@@ -661,13 +690,17 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
return -1;
}
- SVnode* pVnode = pTq->pVnode;
- int32_t vgId = TD_VID(pVnode);
-
- tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pVnode->config.vgId, req.subKey,
+ tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pTq->pVnode->config.vgId, req.subKey,
req.oldConsumerId, req.newConsumerId);
- STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
+ STqHandle* pHandle = NULL;
+ while(1){
+ pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
+ if (pHandle || tqMetaGetHandle(pTq, req.subKey) < 0){
+ break;
+ }
+ }
+
if (pHandle == NULL) {
if (req.oldConsumerId != -1) {
tqError("vgId:%d, build new consumer handle %s for consumer:0x%" PRIx64 ", but old consumerId:0x%" PRIx64,
@@ -678,86 +711,13 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
tqError("vgId:%d, tq invalid re-balance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId);
goto end;
}
-
- STqHandle tqHandle = {0};
- pHandle = &tqHandle;
-
- memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
- pHandle->consumerId = req.newConsumerId;
- pHandle->epoch = -1;
-
- pHandle->execHandle.subType = req.subType;
- pHandle->fetchMeta = req.withMeta;
-
- // TODO version should be assigned and refed during preprocess
- SWalRef* pRef = walRefCommittedVer(pVnode->pWal);
- if (pRef == NULL) {
- ret = -1;
+ STqHandle handle = {0};
+ ret = tqCreateHandle(pTq, &req, &handle);
+ if(ret < 0){
+ tqDestroyTqHandle(&handle);
goto end;
}
-
- int64_t ver = pRef->refVer;
- pHandle->pRef = pRef;
-
- SReadHandle handle = {.vnode = pVnode, .initTableReader = true, .initTqReader = true, .version = ver};
- initStorageAPI(&handle.api);
-
- pHandle->snapshotVer = ver;
-
- if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- pHandle->execHandle.execCol.qmsg = taosStrdup(req.qmsg);
-
- pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, vgId,
- &pHandle->execHandle.numOfCols, req.newConsumerId);
- void* scanner = NULL;
- qExtractStreamScanner(pHandle->execHandle.task, &scanner);
- pHandle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
- } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
- pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
- pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
-
- pHandle->execHandle.execDb.pFilterOutTbUid =
- taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
- buildSnapContext(handle.vnode, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
- (SSnapContext**)(&handle.sContext));
-
- pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
- } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
- pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
- pHandle->execHandle.execTb.suid = req.suid;
- pHandle->execHandle.execTb.qmsg = taosStrdup(req.qmsg);
-
- if (strcmp(pHandle->execHandle.execTb.qmsg, "") != 0) {
- if (nodesStringToNode(pHandle->execHandle.execTb.qmsg, &pHandle->execHandle.execTb.node) != 0) {
- tqError("nodesStringToNode error in sub stable, since %s, vgId:%d, subkey:%s consumer:0x%" PRIx64, terrstr(),
- pVnode->config.vgId, req.subKey, pHandle->consumerId);
- return -1;
- }
- }
-
- buildSnapContext(handle.vnode, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
- (SSnapContext**)(&handle.sContext));
- pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
-
- SArray* tbUidList = NULL;
- ret = qGetTableList(req.suid, pVnode, pHandle->execHandle.execTb.node, &tbUidList, pHandle->execHandle.task);
- if (ret != TDB_CODE_SUCCESS) {
- tqError("qGetTableList error:%d vgId:%d, subkey:%s consumer:0x%" PRIx64, ret, pVnode->config.vgId, req.subKey,
- pHandle->consumerId);
- taosArrayDestroy(tbUidList);
- goto end;
- }
- tqDebug("tq try to get ctb for stb subscribe, vgId:%d, subkey:%s consumer:0x%" PRIx64 " suid:%" PRId64,
- pVnode->config.vgId, req.subKey, pHandle->consumerId, req.suid);
- pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
- tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList, NULL);
- taosArrayDestroy(tbUidList);
- }
-
- taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
- tqDebug("try to persist handle %s consumer:0x%" PRIx64, req.subKey, pHandle->consumerId);
- ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
- goto end;
+ ret = tqMetaSaveHandle(pTq, req.subKey, &handle);
} else {
taosWLockLatch(&pTq->lock);
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index ba6d7cb501..3b0e6749c2 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -88,9 +88,9 @@ int32_t tqMetaOpen(STQ* pTq) {
return -1;
}
- if (tqMetaRestoreHandle(pTq) < 0) {
- return -1;
- }
+// if (tqMetaRestoreHandle(pTq) < 0) {
+// return -1;
+// }
if (tqMetaRestoreCheckInfo(pTq) < 0) {
return -1;
@@ -274,6 +274,120 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
return 0;
}
+static int buildHandle(STQ* pTq, STqHandle* handle){
+ SVnode* pVnode = pTq->pVnode;
+ int32_t vgId = TD_VID(pVnode);
+
+ handle->pRef = walOpenRef(pVnode->pWal);
+ if (handle->pRef == NULL) {
+ return -1;
+ }
+ walSetRefVer(handle->pRef, handle->snapshotVer);
+
+ SReadHandle reader = {
+ .vnode = pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = handle->snapshotVer,
+ };
+
+ initStorageAPI(&reader.api);
+
+ if (handle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ handle->execHandle.task =
+ qCreateQueueExecTaskInfo(handle->execHandle.execCol.qmsg, &reader, vgId, &handle->execHandle.numOfCols, handle->consumerId);
+ if (handle->execHandle.task == NULL) {
+ tqError("cannot create exec task for %s", handle->subKey);
+ return -1;
+ }
+ void* scanner = NULL;
+ qExtractStreamScanner(handle->execHandle.task, &scanner);
+ if (scanner == NULL) {
+ tqError("cannot extract stream scanner for %s", handle->subKey);
+ return -1;
+ }
+ handle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
+ if (handle->execHandle.pTqReader == NULL) {
+ tqError("cannot extract exec reader for %s", handle->subKey);
+ return -1;
+ }
+ } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
+ handle->execHandle.pTqReader = tqReaderOpen(pVnode);
+
+ buildSnapContext(reader.vnode, reader.version, 0, handle->execHandle.subType, handle->fetchMeta,
+ (SSnapContext**)(&reader.sContext));
+ handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
+ } else if (handle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
+
+ if(handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) {
+ if (nodesStringToNode(handle->execHandle.execTb.qmsg, &handle->execHandle.execTb.node) != 0) {
+ tqError("nodesStringToNode error in sub stable, since %s", terrstr());
+ return -1;
+ }
+ }
+ buildSnapContext(reader.vnode, reader.version, handle->execHandle.execTb.suid, handle->execHandle.subType,
+ handle->fetchMeta, (SSnapContext**)(&reader.sContext));
+ handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
+
+ SArray* tbUidList = NULL;
+ int ret = qGetTableList(handle->execHandle.execTb.suid, pVnode, handle->execHandle.execTb.node, &tbUidList, handle->execHandle.task);
+ if(ret != TDB_CODE_SUCCESS) {
+ tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle->subKey, handle->consumerId);
+ taosArrayDestroy(tbUidList);
+ return -1;
+ }
+ tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pVnode->config.vgId, handle->execHandle.execTb.suid);
+ handle->execHandle.pTqReader = tqReaderOpen(pVnode);
+ tqReaderSetTbUidList(handle->execHandle.pTqReader, tbUidList, NULL);
+ taosArrayDestroy(tbUidList);
+ }
+ return 0;
+}
+
+static int restoreHandle(STQ* pTq, void* pVal, int vLen, STqHandle* handle){
+ int32_t vgId = TD_VID(pTq->pVnode);
+ SDecoder decoder;
+ tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
+ tDecodeSTqHandle(&decoder, handle);
+ tDecoderClear(&decoder);
+
+ if(buildHandle(pTq, handle) < 0){
+ return -1;
+ }
+ tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
+ return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
+}
+
+int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle){
+ int32_t vgId = TD_VID(pTq->pVnode);
+
+ memcpy(handle->subKey, req->subKey, TSDB_SUBSCRIBE_KEY_LEN);
+ handle->consumerId = req->newConsumerId;
+ handle->epoch = -1;
+
+ handle->execHandle.subType = req->subType;
+ handle->fetchMeta = req->withMeta;
+ if(req->subType == TOPIC_SUB_TYPE__COLUMN){
+ handle->execHandle.execCol.qmsg = taosStrdup(req->qmsg);
+ }else if(req->subType == TOPIC_SUB_TYPE__DB){
+ handle->execHandle.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
+ }else if(req->subType == TOPIC_SUB_TYPE__TABLE){
+ handle->execHandle.execTb.suid = req->suid;
+ handle->execHandle.execTb.qmsg = taosStrdup(req->qmsg);
+ }
+
+ handle->snapshotVer = walGetLastVer(pTq->pVnode->pWal);
+
+ if(buildHandle(pTq, handle) < 0){
+ return -1;
+ }
+ tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
+ return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
+}
+
int32_t tqMetaRestoreHandle(STQ* pTq) {
int code = 0;
TBC* pCur = NULL;
@@ -281,97 +395,40 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
return -1;
}
- int32_t vgId = TD_VID(pTq->pVnode);
void* pKey = NULL;
int kLen = 0;
void* pVal = NULL;
int vLen = 0;
- SDecoder decoder;
tdbTbcMoveToFirst(pCur);
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqHandle handle = {0};
- tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
- tDecodeSTqHandle(&decoder, &handle);
- tDecoderClear(&decoder);
-
- handle.pRef = walOpenRef(pTq->pVnode->pWal);
- if (handle.pRef == NULL) {
- code = -1;
- goto end;
+ code = restoreHandle(pTq, pVal, vLen, &handle);
+ if (code < 0){
+ tqDestroyTqHandle(&handle);
+ break;
}
- walSetRefVer(handle.pRef, handle.snapshotVer);
-
- SReadHandle reader = {
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = handle.snapshotVer
- };
-
- initStorageAPI(&reader.api);
-
- if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- handle.execHandle.task =
- qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, vgId, &handle.execHandle.numOfCols, 0);
- if (handle.execHandle.task == NULL) {
- tqError("cannot create exec task for %s", handle.subKey);
- code = -1;
- goto end;
- }
- void* scanner = NULL;
- qExtractStreamScanner(handle.execHandle.task, &scanner);
- if (scanner == NULL) {
- tqError("cannot extract stream scanner for %s", handle.subKey);
- code = -1;
- goto end;
- }
- handle.execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
- if (handle.execHandle.pTqReader == NULL) {
- tqError("cannot extract exec reader for %s", handle.subKey);
- code = -1;
- goto end;
- }
- } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
- handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
-
- buildSnapContext(reader.vnode, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
- (SSnapContext**)(&reader.sContext));
- handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
- } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
- handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
-
- if(handle.execHandle.execTb.qmsg != NULL && strcmp(handle.execHandle.execTb.qmsg, "") != 0) {
- if (nodesStringToNode(handle.execHandle.execTb.qmsg, &handle.execHandle.execTb.node) != 0) {
- tqError("nodesStringToNode error in sub stable, since %s", terrstr());
- return -1;
- }
- }
- buildSnapContext(reader.vnode, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
- handle.fetchMeta, (SSnapContext**)(&reader.sContext));
- handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
-
- SArray* tbUidList = NULL;
- int ret = qGetTableList(handle.execHandle.execTb.suid, pTq->pVnode, handle.execHandle.execTb.node, &tbUidList, handle.execHandle.task);
- if(ret != TDB_CODE_SUCCESS) {
- tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle.subKey, handle.consumerId);
- taosArrayDestroy(tbUidList);
- goto end;
- }
- tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
- handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
- tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList, NULL);
- taosArrayDestroy(tbUidList);
- }
- tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, vgId);
- taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
}
-end:
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return code;
}
+
+int32_t tqMetaGetHandle(STQ* pTq, const char* key) {
+ void* pVal = NULL;
+ int vLen = 0;
+
+ if (tdbTbGet(pTq->pExecStore, key, (int)strlen(key), &pVal, &vLen) < 0) {
+ return -1;
+ }
+ STqHandle handle = {0};
+ int code = restoreHandle(pTq, pVal, vLen, &handle);
+ if (code < 0){
+ tqDestroyTqHandle(&handle);
+ }
+ tdbFree(pVal);
+ return code;
+}
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index 4c2e19dcfb..4048ebe3f9 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -17,35 +17,16 @@
#include "vnd.h"
int32_t tqProcessSubmitReqForSubscribe(STQ* pTq) {
- int32_t vgId = TD_VID(pTq->pVnode);
-
- taosWLockLatch(&pTq->lock);
-
- if (taosHashGetSize(pTq->pPushMgr) > 0) {
- void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
-
- while (pIter) {
- STqHandle* pHandle = *(STqHandle**)pIter;
- tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
-
- if (ASSERT(pHandle->msg != NULL)) {
- tqError("pHandle->msg should not be null");
- break;
- }else{
- SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};
- tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
- taosMemoryFree(pHandle->msg);
- pHandle->msg = NULL;
- }
-
- pIter = taosHashIterate(pTq->pPushMgr, pIter);
- }
-
- taosHashClear(pTq->pPushMgr);
+ if (taosHashGetSize(pTq->pPushMgr) <= 0) {
+ return 0;
}
-
- // unlock
- taosWUnLockLatch(&pTq->lock);
+ SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME_PUSH};
+ msg.pCont = rpcMallocCont(sizeof(SMsgHead));
+ msg.contLen = sizeof(SMsgHead);
+ SMsgHead *pHead = msg.pCont;
+ pHead->vgId = TD_VID(pTq->pVnode);
+ pHead->contLen = msg.contLen;
+ tmsgPutToQueue(&pTq->pVnode->msgCb, QUERY_QUEUE, &msg);
return 0;
}
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index db1b5ed902..9349c6eb0d 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -298,10 +298,8 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
if (res == TSDB_CODE_SUCCESS) {
memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName));
} else {
- char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId);
- memcpy(ctbName, tmp, strlen(tmp));
- memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp));
- taosMemoryFree(tmp);
+ buildCtbNameByGroupIdImpl(stbFullName, pDataBlock->info.id.groupId, ctbName);
+ memcpy(pTableSinkInfo->tbName, ctbName, strlen(ctbName));
tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode),
pDataBlock->info.id.groupId);
}
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index a34e765e50..a301d82c30 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -99,15 +99,15 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
if (pOffset != NULL) {
*pOffsetVal = pOffset->val;
- char formatBuf[80];
- tFormatOffset(formatBuf, 80, pOffsetVal);
+ char formatBuf[TSDB_OFFSET_LEN];
+ tFormatOffset(formatBuf, TSDB_OFFSET_LEN, pOffsetVal);
tqDebug("tmq poll: consumer:0x%" PRIx64
", subkey %s, vgId:%d, existed offset found, offset reset to %s and continue. reqId:0x%" PRIx64,
consumerId, pHandle->subKey, vgId, formatBuf, pRequest->reqId);
return 0;
} else {
// no poll occurs in this vnode for this topic, let's seek to the right offset value.
- if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
+ if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) {
if (pRequest->useSnapshot) {
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot",
consumerId, pHandle->subKey, vgId);
@@ -168,7 +168,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
- if(code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
+ if (code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
goto end;
}
@@ -176,25 +176,28 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST && dataRsp.blockNum == 0) {
// lock
taosWLockLatch(&pTq->lock);
- code = tqRegisterPushHandle(pTq, pHandle, pMsg);
- taosWUnLockLatch(&pTq->lock);
- tDeleteMqDataRsp(&dataRsp);
- return code;
+ int64_t ver = walGetCommittedVer(pTq->pVnode->pWal);
+ if (pOffset->version >= ver ||
+ dataRsp.rspOffset.version >= ver) { // check if there are data again to avoid lost data
+ code = tqRegisterPushHandle(pTq, pHandle, pMsg);
+ taosWUnLockLatch(&pTq->lock);
+ goto end;
+ } else {
+ taosWUnLockLatch(&pTq->lock);
+ }
}
- // NOTE: this pHandle->consumerId may have been changed already.
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP, vgId);
end : {
- char buf[80] = {0};
- tFormatOffset(buf, 80, &dataRsp.rspOffset);
+ char buf[TSDB_OFFSET_LEN] = {0};
+ tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64
" code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
tDeleteMqDataRsp(&dataRsp);
-}
-
return code;
+ }
}
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
@@ -428,4 +431,4 @@ int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp*
tmsgSendRsp(&rsp);
return 0;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index c659c8f4a2..4ec66f82a6 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -691,6 +691,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
.colVal = COL_VAL_NONE(idxKey->key.cid, pr->pSchema->columns[slotIds[i]].type)};
if (!pLastCol) {
pLastCol = &noneCol;
+ reallocVarData(&pLastCol->colVal);
}
taosArraySet(pLastArray, idxKey->idx, pLastCol);
@@ -2848,14 +2849,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
- if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
+ if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+ if (pColVal->value.nData > 0) {
+ memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+ }
}
if (!COL_VAL_IS_VALUE(pColVal)) {
@@ -3016,14 +3019,16 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
- if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
+ if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+ if (pColVal->value.nData > 0) {
+ memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
+ }
}
/*if (COL_VAL_IS_NONE(pColVal)) {
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index faa4d2fc57..efe82e1783 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -325,7 +325,7 @@ int vnodeValidateTableHash(SVnode *pVnode, char *tableFName) {
if (hashValue < pVnode->config.hashBegin || hashValue > pVnode->config.hashEnd) {
terrno = TSDB_CODE_VND_HASH_MISMATCH;
- return TSDB_CODE_VND_HASH_MISMATCH;
+ return -1;
}
return 0;
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index b5e7c6875b..0655a46388 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -129,6 +129,12 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, STfs *p
return 0;
}
+static int32_t vnodeVgroupIdLen(int32_t vgId) {
+ char tmp[TSDB_FILENAME_LEN];
+ sprintf(tmp, "%d", vgId);
+ return strlen(tmp);
+}
+
int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t srcVgId, int32_t dstVgId, STfs *pTfs) {
int32_t ret = tfsRename(pTfs, srcPath, dstPath);
if (ret != 0) return ret;
@@ -154,8 +160,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr
int32_t tsdbFileVgId = atoi(tsdbFilePrefixPos + 6);
if (tsdbFileVgId == srcVgId) {
- char *tsdbFileSurfixPos = strstr(tsdbFilePrefixPos, "f");
- if (tsdbFileSurfixPos == NULL) continue;
+ char *tsdbFileSurfixPos = tsdbFilePrefixPos + 6 + vnodeVgroupIdLen(srcVgId);
tsdbFilePrefixPos[6] = 0;
snprintf(newRname, TSDB_FILENAME_LEN, "%s%d%s", oldRname, dstVgId, tsdbFileSurfixPos);
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index c3fb5e5ad4..c2e577848b 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -238,6 +238,10 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) {
tEndDecode(pCoder);
_exit:
+ if (code) {
+ vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
+ pMsg->msgType);
+ }
tDecoderClear(pCoder);
return code;
}
@@ -297,7 +301,7 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
_exit:
if (code) {
- vError("vgId%d failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
+ vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
}
return code;
@@ -505,7 +509,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
- if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME) && !syncIsReadyForRead(pVnode->sync)) {
+ if ((pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_VND_TMQ_CONSUME || pMsg->msgType == TDMT_VND_TMQ_CONSUME_PUSH) && !syncIsReadyForRead(pVnode->sync)) {
vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
return 0;
}
@@ -526,6 +530,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_VND_TMQ_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
+ case TDMT_VND_TMQ_CONSUME_PUSH:
+ return tqProcessPollPush(pVnode->pTq, pMsg);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_APP_ERROR;
@@ -559,8 +565,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetTableCfg(pVnode, pMsg, true);
case TDMT_VND_BATCH_META:
return vnodeGetBatchMeta(pVnode, pMsg);
- case TDMT_VND_TMQ_CONSUME:
- return tqProcessPollReq(pVnode->pTq, pMsg);
+// case TDMT_VND_TMQ_CONSUME:
+// return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_VND_TMQ_VG_WALINFO:
return tqProcessVgWalInfoReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 29f1ddc50f..ff551e6534 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -431,7 +431,7 @@ static int32_t vnodeSyncApplyMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsm
return tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
}
-static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
if (pMsg->code == 0) {
return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
}
@@ -451,7 +451,7 @@ static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFs
return 0;
}
-static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static int32_t vnodeSyncPreCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
if (pMeta->isWeak == 1) {
return vnodeSyncApplyMsg(pFsm, pMsg, pMeta);
}
@@ -463,7 +463,7 @@ static SyncIndex vnodeSyncAppliedIndex(const SSyncFSM *pFSM) {
return atomic_load_64(&pVnode->state.applied);
}
-static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsmCbMeta *pMeta) {
+static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
SVnode *pVnode = pFsm->data;
vTrace("vgId:%d, rollback-cb is excuted, fsm:%p, index:%" PRId64 ", weak:%d, code:%d, state:%d %s, type:%s",
pVnode->config.vgId, pFsm, pMeta->index, pMeta->isWeak, pMeta->code, pMeta->state, syncStr(pMeta->state),
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 695bd4eb38..5746ea2340 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -950,8 +950,8 @@ int32_t ctgCloneMetaOutput(STableMetaOutput* output, STableMetaOutput** pOutput)
int32_t ctgGenerateVgList(SCatalog* pCtg, SHashObj* vgHash, SArray** pList);
void ctgFreeJob(void* job);
void ctgFreeHandleImpl(SCatalog* pCtg);
-int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup);
-int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
+int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup);
+int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
char* dbFName, SArray* pNames, bool update);
int32_t ctgGetVgIdsFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, char* dbFName, const char* pTbs[], int32_t tbNum,
int32_t* vgId);
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index 03df240929..f736e9be98 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -568,7 +568,7 @@ int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName*
return TSDB_CODE_SUCCESS;
}
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pConn ? &pConn->mgmtEps : NULL, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
_return:
@@ -629,7 +629,7 @@ int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInf
return TSDB_CODE_SUCCESS;
}
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, pVgroup));
ctgRUnlockVgInfo(dbCache);
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 2b78b8dd13..562343c9c7 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -1112,7 +1112,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;
SVgroupInfo vgInfo = {0};
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo));
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@@ -1132,7 +1132,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@@ -1282,7 +1282,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;
SVgroupInfo vgInfo = {0};
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo));
ctgTaskDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@@ -1302,7 +1302,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));
ctgTaskDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@@ -1501,7 +1501,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@@ -1536,7 +1536,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;
STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx);
- CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
+ CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@@ -1799,7 +1799,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq* tReq, int32_t flag, SName* pName, int
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (dbCache) {
SVgroupInfo vgInfo = {0};
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
@@ -1948,7 +1948,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask* pTask) {
if (NULL == pTask->res) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
@@ -1996,7 +1996,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
tReq.pTask = pTask;
tReq.msgIdx = -1;
CTG_ERR_JRET(
- ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
+ ctgGetVgInfosFromHashValue(pCtg, &pConn->mgmtEps, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
@@ -2375,7 +2375,7 @@ int32_t ctgGetTbCfgCb(SCtgTask* pTask) {
SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res;
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo));
}
CTG_RET(ctgLaunchGetTbCfgTask(pTask));
@@ -2395,7 +2395,7 @@ int32_t ctgGetTbTagCb(SCtgTask* pTask) {
if (NULL == pCtx->pVgInfo) {
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo));
}
CTG_RET(ctgLaunchGetTbTagTask(pTask));
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index ef4040e22b..c856211635 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -2989,7 +2989,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg
}
*pVgroup = taosMemoryCalloc(1, sizeof(SVgroupInfo));
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, *pVgroup));
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, *pVgroup));
_return:
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index e623b7969d..e7abbc5ead 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -969,7 +969,7 @@ int32_t ctgHashValueComp(void const* lp, void const* rp) {
return 0;
}
-int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) {
+int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) {
int32_t code = 0;
CTG_ERR_RET(ctgMakeVgArray(dbInfo));
@@ -977,6 +977,14 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName
char db[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, db);
+ if (IS_SYS_DBNAME(pTableName->dbname)) {
+ pVgroup->vgId = MNODE_HANDLE;
+ if (pMgmtEps) {
+ memcpy(&pVgroup->epSet, pMgmtEps, sizeof(pVgroup->epSet));
+ }
+ return TSDB_CODE_SUCCESS;
+ }
+
if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", db, vgNum);
CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED);
@@ -1020,23 +1028,53 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName
CTG_RET(code);
}
-int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
+int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
char* dbFName, SArray* pNames, bool update) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SMetaRes res = {0};
+ SVgroupInfo* vgInfo = NULL;
CTG_ERR_RET(ctgMakeVgArray(dbInfo));
+ int32_t tbNum = taosArrayGetSize(pNames);
+
+ char* pSep = strchr(dbFName, '.');
+ if (pSep && IS_SYS_DBNAME(pSep + 1)) {
+ SVgroupInfo mgmtInfo = {0};
+ mgmtInfo.vgId = MNODE_HANDLE;
+ if (pMgmgEpSet) {
+ memcpy(&mgmtInfo.epSet, pMgmgEpSet, sizeof(mgmtInfo.epSet));
+ }
+ for (int32_t i = 0; i < tbNum; ++i) {
+ vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));
+ if (NULL == vgInfo) {
+ CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
+ }
+
+ memcpy(vgInfo, &mgmtInfo, sizeof(mgmtInfo));
+
+ ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
+ vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);
+
+ if (update) {
+ SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, tReq->msgIdx);
+ SMetaRes* pRes = taosArrayGet(pCtx->pResList, pFetch->resIdx + i);
+ pRes->pRes = vgInfo;
+ } else {
+ res.pRes = vgInfo;
+ taosArrayPush(pCtx->pResList, &res);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+ }
+
int32_t vgNum = taosArrayGetSize(dbInfo->vgArray);
if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
- SVgroupInfo* vgInfo = NULL;
- int32_t tbNum = taosArrayGetSize(pNames);
-
if (1 == vgNum) {
for (int32_t i = 0; i < tbNum; ++i) {
vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));
diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c
index b34c6f4b82..a32f482007 100644
--- a/source/libs/executor/src/aggregateoperator.c
+++ b/source/libs/executor/src/aggregateoperator.c
@@ -461,8 +461,12 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
uint32_t defaultPgsz = 0;
uint32_t defaultBufsz = 0;
- getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
-
+ code = getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
+ if (code) {
+ qError("failed to get buff page size, rowSize:%d", pAggSup->resultRowSize);
+ return code;
+ }
+
if (!osTempSpaceAvailable()) {
code = TSDB_CODE_NO_DISKSPACE;
qError("Init stream agg supporter failed since %s, key:%s, tempDir:%s", terrstr(code), pKey, tsTempDir);
diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c
index b5dea6d94d..f7dddf6b29 100644
--- a/source/libs/executor/src/eventwindowoperator.c
+++ b/source/libs/executor/src/eventwindowoperator.c
@@ -174,6 +174,7 @@ void destroyEWindowOperatorInfo(void* param) {
colDataDestroy(&pInfo->twAggSup.timeWindowData);
cleanupAggSup(&pInfo->aggSup);
+ cleanupExprSupp(&pInfo->scalarSup);
taosMemoryFreeClear(param);
}
diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c
index fbc0512a26..42b8a9d31c 100644
--- a/source/libs/executor/src/executorInt.c
+++ b/source/libs/executor/src/executorInt.c
@@ -922,8 +922,13 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) {
*defaultPgsz = 4096;
+ uint32_t last = *defaultPgsz;
while (*defaultPgsz < rowSize * 4) {
*defaultPgsz <<= 1u;
+ if (*defaultPgsz < last) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+ last = *defaultPgsz;
}
// The default buffer for each operator in query is 10MB.
@@ -932,6 +937,9 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
*defaultBufsz = 4096 * 2560;
if ((*defaultBufsz) <= (*defaultPgsz)) {
(*defaultBufsz) = (*defaultPgsz) * 4;
+ if (*defaultBufsz < ((int64_t)(*defaultPgsz)) * 4) {
+ return TSDB_CODE_INVALID_PARA;
+ }
}
return 0;
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 7aac639027..e3292bb063 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -647,6 +647,8 @@ uint64_t calcGroupId(char* pData, int32_t len) {
// NOTE: only extract the initial 8 bytes of the final MD5 digest
uint64_t id = 0;
memcpy(&id, context.digest, sizeof(uint64_t));
+ if (0 == id)
+ memcpy(&id, context.digest + 8, sizeof(uint64_t));
return id;
}
@@ -869,7 +871,12 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
uint32_t defaultBufsz = 0;
pInfo->binfo.pRes = createDataBlockFromDescNode(pPartNode->node.pOutputDataBlockDesc);
- getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
+ int32_t code = getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ pTaskInfo->code = code;
+ goto _error;
+ }
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_DISKSPACE;
@@ -878,7 +885,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}
- int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
+ code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 442f8162ed..73143fdba7 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -319,6 +319,11 @@ void destroyMergeJoinOperator(void* param) {
}
nodesDestroyNode(pJoinOperator->pCondAfterMerge);
+ taosArrayDestroy(pJoinOperator->rowCtx.leftCreatedBlocks);
+ taosArrayDestroy(pJoinOperator->rowCtx.rightCreatedBlocks);
+ taosArrayDestroy(pJoinOperator->rowCtx.leftRowLocations);
+ taosArrayDestroy(pJoinOperator->rowCtx.rightRowLocations);
+
pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);
taosMemoryFreeClear(param);
}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index dde6f7c0e8..e7de826d4b 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -213,6 +213,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
} else {
if (limitReached && (pLimitInfo->slimit.limit >= 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
setOperatorCompleted(pOperator);
+ } else if (limitReached && groupId == 0) {
+ setOperatorCompleted(pOperator);
}
}
diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c
index 416925a311..25cb94f7e1 100644
--- a/source/libs/executor/src/timesliceoperator.c
+++ b/source/libs/executor/src/timesliceoperator.c
@@ -259,7 +259,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// output the result
- bool hasInterp = true;
+ int32_t fillColIndex = 0;
+ bool hasInterp = true;
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
@@ -309,7 +310,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
case TSDB_FILL_SET_VALUE:
case TSDB_FILL_SET_VALUE_F: {
- SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
+ SVariant* pVar = &pSliceInfo->pFillColInfo[fillColIndex].fillVal;
if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = 0;
@@ -344,6 +345,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
colDataSetVal(pDst, rows, (char*)&v, false);
}
+
+ ++fillColIndex;
break;
}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index d6429fd121..2676e097f9 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -2345,7 +2345,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
}
while (1) {
bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
- if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
+ if ((pInfo->ignoreExpiredData && isClosed && !IS_FINAL_OP(pInfo)) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
if (startPos < 0) {
break;
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 783597df67..3033441aad 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -101,7 +101,11 @@ static int32_t sortComparCleanup(SMsortComparParam* cmpParam) {
for (int32_t i = 0; i < cmpParam->numOfSources; ++i) {
SSortSource* pSource = cmpParam->pSources[i];
blockDataDestroy(pSource->src.pBlock);
+ if (pSource->pageIdList) {
+ taosArrayDestroy(pSource->pageIdList);
+ }
taosMemoryFreeClear(pSource);
+ cmpParam->pSources[i] = NULL;
}
cmpParam->numOfSources = 0;
@@ -123,9 +127,11 @@ void tsortClearOrderdSource(SArray* pOrderedSource, int64_t *fetchUs, int64_t *f
// release pageIdList
if ((*pSource)->pageIdList) {
taosArrayDestroy((*pSource)->pageIdList);
+ (*pSource)->pageIdList = NULL;
}
if ((*pSource)->param && !(*pSource)->onlyRef) {
taosMemoryFree((*pSource)->param);
+ (*pSource)->param = NULL;
}
if (!(*pSource)->onlyRef && (*pSource)->src.pBlock) {
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 1265c64c8c..4365cd8b95 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -2660,7 +2660,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
} else {
pDiffInfo->ignoreNegative = false;
}
- pDiffInfo->includeNull = false;
+ pDiffInfo->includeNull = true;
pDiffInfo->firstOutput = false;
return true;
}
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 18f6e8050b..327bc7da71 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -344,7 +344,7 @@ static int32_t getFuncInfo(SFunctionNode* pFunc) {
return fmGetFuncInfo(pFunc, msg, sizeof(msg));
}
-static SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList) {
+SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList) {
SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
if (NULL == pFunc) {
return NULL;
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 39e288f694..15232b95b6 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -953,6 +953,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pQuery);
nodesDestroyList(pStmt->pTags);
nodesDestroyNode(pStmt->pSubtable);
+ tFreeSCMCreateStreamReq(pStmt->pReq);
+ taosMemoryFreeClear(pStmt->pReq);
break;
}
case QUERY_NODE_DROP_STREAM_STMT: // no pointer field
@@ -1052,6 +1054,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_QUERY: {
SQuery* pQuery = (SQuery*)pNode;
nodesDestroyNode(pQuery->pRoot);
+ nodesDestroyNode(pQuery->pPostRoot);
taosMemoryFreeClear(pQuery->pResSchema);
if (NULL != pQuery->pCmdMsg) {
taosMemoryFreeClear(pQuery->pCmdMsg->pMsg);
@@ -1953,9 +1956,9 @@ static uint32_t funcNodeHash(const char* pKey, uint32_t len) {
}
static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len) {
- if (0 != strcmp((*(const SExprNode**)pLeft)->aliasName, (*(const SExprNode**)pRight)->aliasName)) {
- return 1;
- }
+ // if (0 != strcmp((*(const SExprNode**)pLeft)->aliasName, (*(const SExprNode**)pRight)->aliasName)) {
+ // return 1;
+ // }
return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 0 : 1;
}
diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h
index 4fecb1cd33..ff394467f6 100644
--- a/source/libs/parser/inc/parAst.h
+++ b/source/libs/parser/inc/parAst.h
@@ -210,7 +210,7 @@ SNode* createCreateTopicStmtUseDb(SAstCreateContext* pCxt, bool ignoreExists, ST
SNode* createCreateTopicStmtUseTable(SAstCreateContext* pCxt, bool ignoreExists, SToken* pTopicName, SNode* pRealTable,
bool withMeta, SNode* pWhere);
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pTopicName);
-SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, SToken* pTopicName);
+SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pCGroupId, SToken* pTopicName);
SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue);
SNode* createDefaultExplainOptions(SAstCreateContext* pCxt);
SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal);
diff --git a/source/libs/parser/inc/parInt.h b/source/libs/parser/inc/parInt.h
index 66aec272d7..d79aa84bb8 100644
--- a/source/libs/parser/inc/parInt.h
+++ b/source/libs/parser/inc/parInt.h
@@ -34,6 +34,7 @@ int32_t authenticate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache*
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache* pMetaCache);
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
int32_t calculateConstant(SParseContext* pParseCxt, SQuery* pQuery);
+int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow);
#ifdef __cplusplus
}
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index e5a0fc3d76..e08153c341 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -210,6 +210,15 @@ static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) {
return true;
}
+static bool checkCGroupName(SAstCreateContext* pCxt, SToken* pCGroup) {
+ trimEscape(pCGroup);
+ if (pCGroup->n >= TSDB_CGROUP_LEN) {
+ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pCGroup->z);
+ return false;
+ }
+ return true;
+}
+
static bool checkStreamName(SAstCreateContext* pCxt, SToken* pStreamName) {
trimEscape(pStreamName);
if (pStreamName->n >= TSDB_STREAM_NAME_LEN) {
@@ -1751,12 +1760,15 @@ SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken
return (SNode*)pStmt;
}
-SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId,
+SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pCGroupId,
SToken* pTopicName) {
CHECK_PARSER_STATUS(pCxt);
if (!checkTopicName(pCxt, pTopicName)) {
return NULL;
}
+ if (!checkCGroupName(pCxt, pCGroupId)) {
+ return NULL;
+ }
SDropCGroupStmt* pStmt = (SDropCGroupStmt*)nodesMakeNode(QUERY_NODE_DROP_CGROUP_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->ignoreNotExists = ignoreNotExists;
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 801d43e2a4..fdec9cba79 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -384,6 +384,10 @@ static int32_t collectMetaKeyFromCreateStream(SCollectMetaKeyCxt* pCxt, SCreateS
if (TSDB_CODE_SUCCESS == code) {
code = collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
}
+ if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) {
+ SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
+ code = reserveDbCfgForLastRow(pCxt, pSelect->pFromTable);
+ }
return code;
}
diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c
index 01b62a9051..49d27f6083 100644
--- a/source/libs/parser/src/parCalcConst.c
+++ b/source/libs/parser/src/parCalcConst.c
@@ -311,6 +311,9 @@ static int32_t calcConstDelete(SCalcConstContext* pCxt, SDeleteStmt* pDelete) {
if (TSDB_CODE_SUCCESS == code) {
code = calcConstStmtCondition(pCxt, &pDelete->pWhere, &pDelete->deleteZeroRows);
}
+ if (code == TSDB_CODE_SUCCESS && pDelete->timeRange.skey > pDelete->timeRange.ekey) {
+ pDelete->deleteZeroRows = true;
+ }
return code;
}
@@ -465,6 +468,9 @@ static bool isEmptyResultQuery(SNode* pStmt) {
}
break;
}
+ case QUERY_NODE_DELETE_STMT:
+ isEmptyResult = ((SDeleteStmt*)pStmt)->deleteZeroRows;
+ break;
default:
break;
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 598ad798ce..bad9cc5b12 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -53,6 +53,8 @@ typedef struct STranslateContext {
bool createStream;
bool stableQuery;
bool showRewrite;
+ SNode* pPrevRoot;
+ SNode* pPostRoot;
} STranslateContext;
typedef struct SBuildTopicContext {
@@ -276,6 +278,10 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode);
static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode);
static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal);
+static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt);
+static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery);
+static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery);
+static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery);
static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; }
@@ -707,6 +713,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
+static bool isInterpFunc(const SNode* pNode) {
+ return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpFunc(((SFunctionNode*)pNode)->funcId));
+}
+
static bool isInterpPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
@@ -1374,13 +1384,33 @@ static bool isCountStar(SFunctionNode* pFunc) {
return (QUERY_NODE_COLUMN == nodeType(pPara) && 0 == strcmp(((SColumnNode*)pPara)->colName, "*"));
}
+static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+ if (NULL == pVal) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pVal->node.resType.type = TSDB_DATA_TYPE_INT;
+ pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes;
+ const int32_t val = 1;
+ nodesSetValueNodeValue(pVal, (void*)&val);
+ pVal->translate = true;
+ nodesListErase(pCount->pParameterList, nodesListGetCell(pCount->pParameterList, 0));
+ code = nodesListAppend(pCount->pParameterList, (SNode*)pVal);
+ return code;
+}
+
// count(*) is rewritten as count(ts) for scannning optimization
static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount) {
SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pCount->pParameterList, 0);
STableNode* pTable = NULL;
int32_t code = findTable(pCxt, ('\0' == pCol->tableAlias[0] ? NULL : pCol->tableAlias), &pTable);
- if (TSDB_CODE_SUCCESS == code && QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
- setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
+ setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol);
+ } else {
+ code = rewriteCountStarAsCount1(pCxt, pCount);
+ }
}
return code;
}
@@ -3030,7 +3060,7 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
}
static EDealRes needFillImpl(SNode* pNode, void* pContext) {
- if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+ if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -3045,16 +3075,16 @@ static bool needFill(SNode* pNode) {
static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList* pValues, int32_t index) {
SListCell* pCell = nodesListGetCell(pValues, index);
- if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType)) {
+ if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType) && (QUERY_NODE_VALUE == nodeType(pCell->pNode))) {
return TSDB_CODE_SUCCESS;
}
- SNode* pCaseFunc = NULL;
- int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCaseFunc);
+ SNode* pCastFunc = NULL;
+ int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCastFunc);
if (TSDB_CODE_SUCCESS == code) {
- code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
+ code = scalarCalculateConstants(pCastFunc, &pCell->pNode);
}
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
- code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant");
+ code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant");
} else if (TSDB_CODE_SUCCESS != code) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
}
@@ -3078,6 +3108,7 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
if (TSDB_CODE_SUCCESS != code) {
return code;
}
+
++fillNo;
}
}
@@ -3562,6 +3593,9 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList);
+ }
return code;
}
@@ -4965,6 +4999,7 @@ static int32_t buildTableForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) {
}
snprintf(pTable->table.dbName, sizeof(pTable->table.dbName), "%s", pInfo->pDbName);
snprintf(pTable->table.tableName, sizeof(pTable->table.tableName), "%s", pInfo->pTableName);
+ snprintf(pTable->table.tableAlias, sizeof(pTable->table.tableAlias), "%s", pInfo->pTableName);
TSWAP(pTable->pMeta, pInfo->pRollupTableMeta);
*pOutput = (SNode*)pTable;
return TSDB_CODE_SUCCESS;
@@ -6755,6 +6790,54 @@ static int32_t translateStreamTargetTable(STranslateContext* pCxt, SCreateStream
return code;
}
+static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery) {
+ SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+ if (NULL == col) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ strcpy(col->tableAlias, pTable);
+ strcpy(col->colName, pMeta->schema[0].name);
+ SNodeList* pParamterList = nodesMakeList();
+ if (NULL == pParamterList) {
+ nodesDestroyNode((SNode *)col);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col);
+ if (code) {
+ nodesDestroyNode((SNode *)col);
+ nodesDestroyList(pParamterList);
+ return code;
+ }
+
+ SNode* pFunc = (SNode*)createFunction("last", pParamterList);
+ if (NULL == pFunc) {
+ nodesDestroyList(pParamterList);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ SNodeList* pProjectionList = nodesMakeList();
+ if (NULL == pProjectionList) {
+ nodesDestroyList(pParamterList);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ code = nodesListStrictAppend(pProjectionList, pFunc);
+ if (code) {
+ nodesDestroyNode(pFunc);
+ nodesDestroyList(pProjectionList);
+ return code;
+ }
+
+ code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery);
+ if (code) {
+ nodesDestroyList(pProjectionList);
+ return code;
+ }
+
+ return code;
+}
+
static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
pCxt->createStream = true;
STableMeta* pMeta = NULL;
@@ -6781,6 +6864,18 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt
getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB);
code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
}
+ if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) {
+ SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable);
+ code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery);
+/*
+ if (TSDB_CODE_SUCCESS == code) {
+ STranslateContext cxt = {0};
+ int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt);
+ code = translateQuery(&cxt, pStmt->pPrevQuery);
+ destroyTranslateContext(&cxt);
+ }
+*/
+ }
taosMemoryFree(pMeta);
return code;
}
@@ -6847,13 +6942,86 @@ static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt*
code = buildCreateStreamReq(pCxt, pStmt, &createReq);
}
if (TSDB_CODE_SUCCESS == code) {
- code = buildCmdMsg(pCxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, &createReq);
+ if (NULL == pStmt->pPrevQuery) {
+ code = buildCmdMsg(pCxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, &createReq);
+ } else {
+ pStmt->pReq = taosMemoryMalloc(sizeof(createReq));
+ if (NULL == pStmt->pReq) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ memcpy(pStmt->pReq, &createReq, sizeof(createReq));
+ memset(&createReq, 0, sizeof(createReq));
+ TSWAP(pCxt->pPrevRoot, pStmt->pPrevQuery);
+ }
+ }
}
tFreeSCMCreateStreamReq(&createReq);
return code;
}
+int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery)) {
+ return code;
+ }
+ SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
+ if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) {
+ return code;
+ }
+
+ SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow;
+ pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i;
+ pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit;
+ pInterval->offset = (NULL != pWindow->pOffset ? ((SValueNode*)pWindow->pOffset)->datum.i : 0);
+ pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval);
+ pInterval->slidingUnit =
+ (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit);
+ pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision;
+
+ return code;
+}
+
+int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) {
+ SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot;
+ STranslateContext cxt = {0};
+ SInterval interval = {0};
+ int64_t lastTs = 0;
+
+ int32_t code = initTranslateContext(pParseCxt, NULL, &cxt);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = buildIntervalForCreateStream(pStmt, &interval);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ if (pResRow && pResRow[0]) {
+ lastTs = *(int64_t*)pResRow[0];
+ } else if (interval.interval > 0) {
+ lastTs = convertTimePrecision(taosGetTimestampMs(), TSDB_TIME_PRECISION_MILLI, interval.precision);
+ } else {
+ lastTs = taosGetTimestampMs();
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ if (interval.interval > 0) {
+ pStmt->pReq->lastTs = taosTimeTruncate(lastTs, &interval);
+ } else {
+ pStmt->pReq->lastTs = lastTs;
+ }
+ code = buildCmdMsg(&cxt, TDMT_MND_CREATE_STREAM, (FSerializeFunc)tSerializeSCMCreateStreamReq, pStmt->pReq);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setQuery(&cxt, pQuery);
+ }
+ setRefreshMate(&cxt, pQuery);
+ destroyTranslateContext(&cxt);
+
+ tFreeSCMCreateStreamReq(pStmt->pReq);
+ taosMemoryFreeClear(pStmt->pReq);
+
+ return code;
+}
+
+
static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) {
SMDropStreamReq dropReq = {0};
SName name;
@@ -7534,8 +7702,7 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) {
return pProjections;
}
-static int32_t createSimpleSelectStmt(const char* pDb, const char* pTable, int32_t numOfProjs,
- const char* const pProjCol[], SSelectStmt** pStmt) {
+static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) {
SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT);
if (NULL == pSelect) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -7551,27 +7718,38 @@ static int32_t createSimpleSelectStmt(const char* pDb, const char* pTable, int32
snprintf(pRealTable->table.tableName, sizeof(pRealTable->table.tableName), "%s", pTable);
snprintf(pRealTable->table.tableAlias, sizeof(pRealTable->table.tableAlias), "%s", pTable);
pSelect->pFromTable = (SNode*)pRealTable;
-
- if (numOfProjs >= 0) {
- pSelect->pProjectionList = createProjectCols(numOfProjs, pProjCol);
- if (NULL == pSelect->pProjectionList) {
- nodesDestroyNode((SNode*)pSelect);
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
+ pSelect->pProjectionList = pProjectionList;
*pStmt = pSelect;
return TSDB_CODE_SUCCESS;
}
+
+static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs,
+ const char* const pProjCol[], SSelectStmt** pStmt) {
+ SNodeList* pProjectionList = NULL;
+ if (numOfProjs >= 0) {
+ pProjectionList = createProjectCols(numOfProjs, pProjCol);
+ if (NULL == pProjectionList) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+
+ return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt);
+}
+
+static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) {
+ return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt);
+}
+
static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) {
const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET];
- return createSimpleSelectStmt(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt);
+ return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt);
}
static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) {
- return createSimpleSelectStmt(pStmt->dbName, pStmt->tableName, 0, NULL, pOutput);
+ return createSimpleSelectStmtFromCols(pStmt->dbName, pStmt->tableName, 0, NULL, pOutput);
}
static int32_t createOperatorNode(EOperatorType opType, const char* pColName, SNode* pRight, SNode** pOp) {
@@ -7705,7 +7883,7 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis
static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) {
SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot;
SSelectStmt* pSelect = NULL;
- int32_t code = createSimpleSelectStmt(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal,
+ int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal,
-1, NULL, &pSelect);
if (TSDB_CODE_SUCCESS == code) {
code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags);
@@ -9030,6 +9208,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
}
break;
default:
+ pQuery->haveResultSet = false;
pQuery->execMode = QUERY_EXEC_MODE_RPC;
if (NULL != pCxt->pCmdMsg) {
TSWAP(pQuery->pCmdMsg, pCxt->pCmdMsg);
@@ -9064,6 +9243,10 @@ int32_t translate(SParseContext* pParseCxt, SQuery* pQuery, SParseMetaCache* pMe
if (TSDB_CODE_SUCCESS == code) {
code = translateQuery(&cxt, pQuery->pRoot);
}
+ if (TSDB_CODE_SUCCESS == code && (cxt.pPrevRoot || cxt.pPostRoot)) {
+ pQuery->pPrevRoot = cxt.pPrevRoot;
+ pQuery->pPostRoot = cxt.pPostRoot;
+ }
if (TSDB_CODE_SUCCESS == code) {
code = setQuery(&cxt, pQuery);
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 28d116c381..cbddaf8115 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -204,7 +204,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
- if (TSDB_CODE_SUCCESS == code) {
+ if (TSDB_CODE_SUCCESS == code && pCatalogReq) {
code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache);
}
if (TSDB_CODE_SUCCESS == code) {
@@ -221,6 +221,19 @@ int32_t qContinueParseSql(SParseContext* pCxt, struct SCatalogReq* pCatalogReq,
return parseInsertSql(pCxt, &pQuery, pCatalogReq, pMetaData);
}
+int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pResRow) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (nodeType(pQuery->pRoot)) {
+ case QUERY_NODE_CREATE_STREAM_STMT:
+ code = translatePostCreateStream(pCxt, pQuery, pResRow);
+ break;
+ default:
+ break;
+ }
+
+ return code;
+}
+
void qDestroyParseContext(SParseContext* pCxt) {
if (NULL == pCxt) {
return;
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index a4e8bdd87a..f6dfa93ab2 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -885,12 +885,12 @@ TEST_F(ParserInitialCTest, createStream) {
setCreateStreamReq(
"s1", "test",
- "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired 0 fill_history 1 ignore "
+ "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired 0 fill_history 0 ignore "
"update 1 into st3 as select count(*) from t1 interval(10s)",
"st3", 1);
setStreamOptions(STREAM_CREATE_STABLE_TRUE, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND,
- 10 * MILLISECOND_PER_SECOND, 0, 1, 1);
- run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 FILL_HISTORY 1 IGNORE "
+ 10 * MILLISECOND_PER_SECOND, 0, 0, 1);
+ run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 FILL_HISTORY 0 IGNORE "
"UPDATE 1 INTO st3 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
clearCreateStreamReq();
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index 58b8e53478..2fcc8510d4 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -97,6 +97,12 @@ static int32_t setSubplanExecutionNode(SPhysiNode* pNode, int32_t groupId, SDown
return TSDB_CODE_SUCCESS;
}
+int32_t qContinuePlanPostQuery(void *pPostPlan) {
+ //TODO
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t qSetSubplanExecutionNode(SSubplan* subplan, int32_t groupId, SDownstreamSourceNode* pSource) {
planDebug("QID:0x%" PRIx64 " set subplan execution node, groupId:%d", subplan->id.queryId, groupId);
return setSubplanExecutionNode(subplan->pNode, groupId, pSource);
diff --git a/source/libs/qworker/CMakeLists.txt b/source/libs/qworker/CMakeLists.txt
index 8ba8b79ab8..7a984cd000 100644
--- a/source/libs/qworker/CMakeLists.txt
+++ b/source/libs/qworker/CMakeLists.txt
@@ -7,15 +7,9 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
-IF (TD_GRANT)
- TARGET_LINK_LIBRARIES(qworker
- PRIVATE os util transport nodes planner qcom executor index grant
- )
-ELSE ()
- TARGET_LINK_LIBRARIES(qworker
- PRIVATE os util transport nodes planner qcom executor index
- )
-ENDIF()
+TARGET_LINK_LIBRARIES(qworker
+ PRIVATE os util transport nodes planner qcom executor index
+ )
if(${BUILD_TEST})
ADD_SUBDIRECTORY(test)
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index 231e597724..508e957e26 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -366,7 +366,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
- if (chkGrant && (!TEST_SHOW_REWRITE_MASK(msg.msgMask)) && (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS)) {
+ if (chkGrant && (!TEST_SHOW_REWRITE_MASK(msg.msgMask)) && !taosGranted()) {
QW_ELOG("query failed cause of grant expired, msgMask:%d", msg.msgMask);
tFreeSSubQueryMsg(&msg);
QW_ERR_RET(TSDB_CODE_GRANT_EXPIRED);
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index 9db0495081..09e550b6dc 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -824,6 +824,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
break;
}
QW_UNLOCK(QW_WRITE, &ctx->lock);
+ queryStop = false;
} while (true);
input.code = code;
diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt
index fa6c709c8f..d1ef7fe3c1 100644
--- a/source/libs/stream/CMakeLists.txt
+++ b/source/libs/stream/CMakeLists.txt
@@ -11,7 +11,7 @@ if(${BUILD_WITH_ROCKSDB})
IF (TD_LINUX)
target_link_libraries(
stream
- PUBLIC rocksdb-shared tdb
+ PUBLIC rocksdb tdb
PRIVATE os util transport qcom executor wal index
)
ELSE()
diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h
index 1cbd7b042c..da4e442f1a 100644
--- a/source/libs/stream/inc/streamBackendRocksdb.h
+++ b/source/libs/stream/inc/streamBackendRocksdb.h
@@ -122,12 +122,17 @@ char* streamDefaultIterKey_rocksdb(void* iter, int32_t* len);
char* streamDefaultIterVal_rocksdb(void* iter, int32_t* len);
// batch func
+int streamStateGetCfIdx(SStreamState* pState, const char* funcName);
void* streamStateCreateBatch();
int32_t streamStateGetBatchSize(void* pBatch);
void streamStateClearBatch(void* pBatch);
void streamStateDestroyBatch(void* pBatch);
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl);
+
+int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
+ void* val, int32_t vlen, int64_t ttl, void* tmpBuf);
+
int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch);
// int32_t streamDefaultIter_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result);
#endif
\ No newline at end of file
diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h
index 2c1956998a..c7ee308b61 100644
--- a/source/libs/stream/inc/streamInc.h
+++ b/source/libs/stream/inc/streamInc.h
@@ -36,8 +36,9 @@ static SStreamGlobalEnv streamEnv;
int32_t streamDispatchStreamBlock(SStreamTask* pTask);
SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);
-SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes);
-void destroyStreamDataBlock(SStreamDataBlock* pBlock);
+SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize,
+ SArray* pRes);
+void destroyStreamDataBlock(SStreamDataBlock* pBlock);
int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data);
@@ -53,6 +54,8 @@ int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecov
SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);
+extern int32_t streamBackendId;
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index b3995f020b..cebe4e8204 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -16,7 +16,9 @@
#include "streamBackendRocksdb.h"
#include "executor.h"
#include "query.h"
+#include "streamInc.h"
#include "tcommon.h"
+#include "tref.h"
typedef struct SCompactFilteFactory {
void* status;
@@ -79,8 +81,10 @@ const char* compareParKeyName(void* name);
const char* comparePartagKeyName(void* name);
void* streamBackendInit(const char* path) {
- qDebug("init stream backend");
- SBackendHandle* pHandle = calloc(1, sizeof(SBackendHandle));
+ uint32_t dbMemLimit = nextPow2(tsMaxStreamBackendCache) << 20;
+
+ qDebug("start to init stream backend at %s", path);
+ SBackendHandle* pHandle = taosMemoryCalloc(1, sizeof(SBackendHandle));
pHandle->list = tdListNew(sizeof(SCfComparator));
taosThreadMutexInit(&pHandle->mutex, NULL);
taosThreadMutexInit(&pHandle->cfMutex, NULL);
@@ -88,19 +92,22 @@ void* streamBackendInit(const char* path) {
rocksdb_env_t* env = rocksdb_create_default_env(); // rocksdb_envoptions_create();
- rocksdb_cache_t* cache = rocksdb_cache_create_lru(64 << 20);
+ int32_t nBGThread = tsNumOfSnodeStreamThreads <= 2 ? 1 : tsNumOfSnodeStreamThreads / 2;
+ rocksdb_env_set_low_priority_background_threads(env, nBGThread);
+ rocksdb_env_set_high_priority_background_threads(env, nBGThread);
+
+ rocksdb_cache_t* cache = rocksdb_cache_create_lru(dbMemLimit / 2);
rocksdb_options_t* opts = rocksdb_options_create();
rocksdb_options_set_env(opts, env);
rocksdb_options_set_create_if_missing(opts, 1);
rocksdb_options_set_create_missing_column_families(opts, 1);
- rocksdb_options_set_write_buffer_size(opts, 48 << 20);
- rocksdb_options_set_max_total_wal_size(opts, 128 << 20);
+ rocksdb_options_set_max_total_wal_size(opts, dbMemLimit);
rocksdb_options_set_recycle_log_file_num(opts, 6);
- rocksdb_options_set_max_write_buffer_number(opts, 2);
+ rocksdb_options_set_max_write_buffer_number(opts, 3);
rocksdb_options_set_info_log_level(opts, 0);
- uint32_t dbLimit = nextPow2(tsMaxStreamBackendCache);
- rocksdb_options_set_db_write_buffer_size(opts, dbLimit << 20);
+ rocksdb_options_set_db_write_buffer_size(opts, dbMemLimit);
+ rocksdb_options_set_write_buffer_size(opts, dbMemLimit / 2);
pHandle->env = env;
pHandle->dbOpt = opts;
@@ -119,6 +126,7 @@ void* streamBackendInit(const char* path) {
if (err != NULL) {
qError("failed to open rocksdb, path:%s, reason:%s", path, err);
taosMemoryFreeClear(err);
+ goto _EXIT;
}
} else {
/*
@@ -129,6 +137,7 @@ void* streamBackendInit(const char* path) {
if (cfs != NULL) {
rocksdb_list_column_families_destroy(cfs, nCf);
}
+ qDebug("succ to init stream backend at %s, backend:%p", path, pHandle);
return (void*)pHandle;
_EXIT:
@@ -140,7 +149,8 @@ _EXIT:
taosHashCleanup(pHandle->cfInst);
rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory);
tdListFree(pHandle->list);
- free(pHandle);
+ taosMemoryFree(pHandle);
+ qDebug("failed to init stream backend at %s", path);
return NULL;
}
void streamBackendCleanup(void* arg) {
@@ -168,19 +178,20 @@ void streamBackendCleanup(void* arg) {
rocksdb_env_destroy(pHandle->env);
rocksdb_cache_destroy(pHandle->cache);
- taosThreadMutexDestroy(&pHandle->mutex);
SListNode* head = tdListPopHead(pHandle->list);
while (head != NULL) {
streamStateDestroyCompar(head->data);
taosMemoryFree(head);
head = tdListPopHead(pHandle->list);
}
- // rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory);
+
tdListFree(pHandle->list);
+ taosThreadMutexDestroy(&pHandle->mutex);
+
taosThreadMutexDestroy(&pHandle->cfMutex);
taosMemoryFree(pHandle);
-
+ qDebug("destroy stream backend backend:%p", pHandle);
return;
}
SListNode* streamBackendAddCompare(void* backend, void* arg) {
@@ -204,7 +215,6 @@ void streamBackendDelCompare(void* backend, void* arg) {
}
void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); }
static bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len);
-int streamGetInit(SStreamState* pState, const char* funcName);
// |key|-----value------|
// |key|ttl|len|userData|
@@ -551,14 +561,20 @@ typedef struct {
int32_t encodeValueFunc(void* value, int32_t vlen, int64_t ttl, char** dest) {
SStreamValue key = {.unixTimestamp = ttl, .len = vlen, .data = (char*)(value)};
-
- char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
- char* buf = p;
- int32_t len = 0;
- len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
- len += taosEncodeFixedI32((void**)&buf, key.len);
- len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
- *dest = p;
+ int32_t len = 0;
+ if (*dest == NULL) {
+ char* p = taosMemoryCalloc(1, sizeof(int64_t) + sizeof(int32_t) + key.len);
+ char* buf = p;
+ len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
+ len += taosEncodeFixedI32((void**)&buf, key.len);
+ len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
+ *dest = p;
+ } else {
+ char* buf = *dest;
+ len += taosEncodeFixedI64((void**)&buf, key.unixTimestamp);
+ len += taosEncodeFixedI32((void**)&buf, key.len);
+ len += taosEncodeBinary((void**)&buf, (char*)value, vlen);
+ }
return len;
}
/*
@@ -707,7 +723,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt);
params[i].tableOpt = tableOpt;
- int idx = streamGetInit(NULL, funcname);
+ int idx = streamStateGetCfIdx(NULL, funcname);
SCfInit* cfPara = &ginitDict[idx];
rocksdb_comparator_t* compare =
@@ -738,7 +754,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
char idstr[128] = {0};
sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId);
- int idx = streamGetInit(NULL, funcname);
+ int idx = streamStateGetCfIdx(NULL, funcname);
RocksdbCfInst* inst = NULL;
RocksdbCfInst** pInst = taosHashGet(handle->cfInst, idstr, strlen(idstr) + 1);
@@ -803,7 +819,8 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
return 0;
}
int streamStateOpenBackend(void* backend, SStreamState* pState) {
- qInfo("start to open backend, %p 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId);
+ qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId);
+ taosAcquireRef(streamBackendId, pState->streamBackendRid);
SBackendHandle* handle = backend;
sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-%d", pState->streamId, pState->taskId);
@@ -865,8 +882,8 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
taosThreadRwlockInit(&pState->pTdbState->rwLock, NULL);
SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen};
pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare);
- // rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
- qInfo("succ to open backend, %p, 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId);
+ rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
+ qInfo("succ to open state %p on backend, %p, 0x%" PRIx64 "-%d", pState, handle, pState->streamId, pState->taskId);
return 0;
}
@@ -882,8 +899,8 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
taosThreadMutexUnlock(&pHandle->cfMutex);
char* status[] = {"close", "drop"};
- qInfo("start to %s backend, %p, 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pState->streamId,
- pState->taskId);
+ qInfo("start to close %s state %p on backend %p 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pHandle,
+ pState->streamId, pState->taskId);
if (pState->pTdbState->rocksdb == NULL) {
return;
}
@@ -938,6 +955,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
taosThreadRwlockDestroy(&pState->pTdbState->rwLock);
pState->pTdbState->rocksdb = NULL;
+ taosReleaseRef(streamBackendId, pState->streamBackendRid);
}
void streamStateDestroyCompar(void* arg) {
SCfComparator* comp = (SCfComparator*)arg;
@@ -947,7 +965,7 @@ void streamStateDestroyCompar(void* arg) {
taosMemoryFree(comp->comp);
}
-int streamGetInit(SStreamState* pState, const char* funcName) {
+int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
int idx = -1;
size_t len = strlen(funcName);
for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) {
@@ -994,7 +1012,7 @@ bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len
}
rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfName, rocksdb_snapshot_t** snapshot,
rocksdb_readoptions_t** readOpt) {
- int idx = streamGetInit(pState, cfName);
+ int idx = streamStateGetCfIdx(pState, cfName);
if (snapshot != NULL) {
*snapshot = (rocksdb_snapshot_t*)rocksdb_create_snapshot(pState->pTdbState->rocksdb);
@@ -1014,7 +1032,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(pState, funcname); \
+ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \
@@ -1045,7 +1063,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(pState, funcname); \
+ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \
@@ -1093,7 +1111,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(pState, funcname); \
+ int i = streamStateGetCfIdx(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
code = -1; \
@@ -2033,7 +2051,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_
void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); }
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl) {
- int i = streamGetInit(pState, cfName);
+ int i = streamStateGetCfIdx(pState, cfName);
if (i < 0) {
qError("streamState failed to put to cf name:%s", cfName);
@@ -2049,6 +2067,21 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_wr
taosMemoryFree(ttlV);
return 0;
}
+int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb_writebatch_t* pBatch, void* key,
+ void* val, int32_t vlen, int64_t ttl, void* tmpBuf) {
+ char buf[128] = {0};
+ int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf);
+ char* ttlV = tmpBuf;
+ int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(val, vlen, ttl, &ttlV);
+
+ rocksdb_column_family_handle_t* pCf = pState->pTdbState->pHandle[ginitDict[cfIdx].idx];
+ rocksdb_writebatch_put_cf((rocksdb_writebatch_t*)pBatch, pCf, buf, (size_t)klen, ttlV, (size_t)ttlVLen);
+
+ if (tmpBuf == NULL) {
+ taosMemoryFree(ttlV);
+ }
+ return 0;
+}
int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch) {
char* err = NULL;
rocksdb_write(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, (rocksdb_writebatch_t*)pBatch, &err);
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index 9cb0a56644..922a1f5345 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -15,6 +15,13 @@
#include "streamInc.h"
+#define MAX_BLOCK_NAME_NUM 1024
+
+typedef struct SBlockName {
+ uint32_t hashValue;
+ char parTbName[TSDB_TABLE_NAME_LEN];
+} SBlockName;
+
int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@@ -331,26 +338,46 @@ FAIL:
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
int64_t groupId) {
- char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
- if (ctbName == NULL) {
- return -1;
- }
-
- if (pDataBlock->info.parTbName[0]) {
- snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
- } else {
- char* ctbShortName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId);
- snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, ctbShortName);
- taosMemoryFree(ctbShortName);
- }
-
+ uint32_t hashValue = 0;
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+ if (pTask->pNameMap == NULL) {
+ pTask->pNameMap = tSimpleHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+ }
- /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
- SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
- uint32_t hashValue =
- taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
- taosMemoryFree(ctbName);
+ void* pVal = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t));
+ if (pVal) {
+ SBlockName* pBln = (SBlockName*)pVal;
+ hashValue = pBln->hashValue;
+ if (!pDataBlock->info.parTbName[0]) {
+ memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName));
+ }
+ } else {
+ char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
+ if (ctbName == NULL) {
+ return -1;
+ }
+
+ if (pDataBlock->info.parTbName[0]) {
+ snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+ } else {
+ buildCtbNameByGroupIdImpl(pTask->shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName);
+ snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+ }
+
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+
+ /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
+ SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
+ hashValue =
+ taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
+ taosMemoryFree(ctbName);
+ SBlockName bln = {0};
+ bln.hashValue = hashValue;
+ memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName));
+ if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) {
+ tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName));
+ }
+ }
bool found = false;
// TODO: optimize search
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 8c26052fdb..5c31b1dd60 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -20,7 +20,7 @@
#include "ttimer.h"
static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT;
-static int32_t streamBackendId = 0;
+int32_t streamBackendId = 0;
static void streamMetaEnvInit() { streamBackendId = taosOpenRef(20, streamBackendCleanup); }
void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); }
@@ -79,7 +79,6 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
pMeta->vgId = vgId;
pMeta->ahandle = ahandle;
pMeta->expandFunc = expandFunc;
- pMeta->streamBackendId = streamBackendId;
memset(streamPath, 0, len);
sprintf(streamPath, "%s/%s", pMeta->path, "state");
@@ -90,6 +89,9 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
}
pMeta->streamBackend = streamBackendInit(streamPath);
+ if (pMeta->streamBackend == NULL) {
+ goto _err;
+ }
pMeta->streamBackendRid = taosAddRef(streamBackendId, pMeta->streamBackend);
taosMemoryFree(streamPath);
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index 71a21ac150..967c7733c9 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -106,7 +106,7 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
}
SStreamTask* pStreamTask = pTask;
- char statePath[1024];
+ char statePath[1024];
if (!specPath) {
sprintf(statePath, "%s/%d", path, pStreamTask->id.taskId);
} else {
@@ -119,10 +119,10 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
#ifdef USE_ROCKSDB
SStreamMeta* pMeta = pStreamTask->pMeta;
- taosAcquireRef(pMeta->streamBackendId, pMeta->streamBackendRid);
+ pState->streamBackendRid = pMeta->streamBackendRid;
int code = streamStateOpenBackend(pMeta->streamBackend, pState);
if (code == -1) {
- taosReleaseRef(pMeta->streamBackendId, pMeta->streamBackendRid);
+ taosReleaseRef(streamBackendId, pMeta->streamBackendRid);
taosMemoryFree(pState);
pState = NULL;
}
@@ -222,9 +222,7 @@ _err:
void streamStateClose(SStreamState* pState, bool remove) {
SStreamTask* pTask = pState->pTdbState->pOwner;
#ifdef USE_ROCKSDB
- // streamStateCloseBackend(pState);
streamStateDestroy(pState, remove);
- taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid);
#else
tdbCommit(pState->pTdbState->db, pState->pTdbState->txn);
tdbPostCommit(pState->pTdbState->db, pState->pTdbState->txn);
@@ -278,10 +276,10 @@ int32_t streamStateCommit(SStreamState* pState) {
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
#ifdef USE_ROCKSDB
- void* pVal = NULL;
- int32_t len = 0;
- int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len);
- char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
+ void* pVal = NULL;
+ int32_t len = 0;
+ int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len);
+ char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
memcpy(buf + len - rowSize, value, vLen);
return code;
@@ -291,10 +289,10 @@ int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void*
}
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen) {
#ifdef USE_ROCKSDB
- void* pVal = NULL;
- int32_t len = 0;
- int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len);
- char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
+ void* pVal = NULL;
+ int32_t len = 0;
+ int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len);
+ char* buf = ((SRowBuffPos*)pVal)->pRowBuff;
uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState);
*ppVal = buf + len - rowSize;
return code;
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index a0caffd41f..284d1ecab6 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -224,5 +224,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
taosMemoryFree((void*)pTask->id.idStr);
}
+ if (pTask->pNameMap) {
+ tSimpleHashCleanup(pTask->pNameMap);
+ }
+
taosMemoryFree(pTask);
}
diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c
index bc84509728..dc9a1f80bb 100644
--- a/source/libs/stream/src/tstreamFileState.c
+++ b/source/libs/stream/src/tstreamFileState.c
@@ -350,6 +350,11 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
const int32_t BATCH_LIMIT = 256;
SListNode* pNode = NULL;
+ int idx = streamStateGetCfIdx(pFileState->pFileStore, "state");
+
+ int32_t len = pFileState->rowSize + sizeof(uint64_t) + sizeof(int32_t) + 1;
+ char* buf = taosMemoryCalloc(1, len);
+
void* batch = streamStateCreateBatch();
while ((pNode = tdListNext(&iter)) != NULL && code == TSDB_CODE_SUCCESS) {
SRowBuffPos* pPos = *(SRowBuffPos**)pNode->data;
@@ -360,9 +365,13 @@ int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot,
}
SStateKey sKey = {.key = *((SWinKey*)pPos->pKey), .opNum = ((SStreamState*)pFileState->pFileStore)->number};
- code = streamStatePutBatch(pFileState->pFileStore, "state", batch, &sKey, pPos->pRowBuff, pFileState->rowSize, 0);
+ code = streamStatePutBatchOptimize(pFileState->pFileStore, idx, batch, &sKey, pPos->pRowBuff, pFileState->rowSize,
+ 0, buf);
+ memset(buf, 0, len);
qDebug("===stream===put %" PRId64 " to disc, res %d", sKey.key.ts, code);
}
+ taosMemoryFree(buf);
+
if (streamStateGetBatchSize(batch) > 0) {
code = streamStatePutBatch_rocksdb(pFileState->pFileStore, batch);
}
@@ -419,7 +428,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
if (code != 0 || len == 0 || val == NULL) {
return TSDB_CODE_FAILED;
}
- memcpy(val, buf, len);
+ memcpy(buf, val, len);
buf[len] = 0;
maxCheckPointId = atol((char*)buf);
taosMemoryFree(val);
@@ -433,7 +442,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
if (code != 0) {
return TSDB_CODE_FAILED;
}
- memcpy(val, buf, len);
+ memcpy(buf, val, len);
buf[len] = 0;
taosMemoryFree(val);
diff --git a/source/libs/stream/test/CMakeLists.txt b/source/libs/stream/test/CMakeLists.txt
index 049bfbbb3a..629b04ae51 100644
--- a/source/libs/stream/test/CMakeLists.txt
+++ b/source/libs/stream/test/CMakeLists.txt
@@ -8,20 +8,9 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
# bloomFilterTest
ADD_EXECUTABLE(streamUpdateTest "tstreamUpdateTest.cpp")
-#TARGET_LINK_LIBRARIES(
-# streamUpdateTest
-# PUBLIC os util common gtest gtest_main stream executor
-#)
-
-IF (TD_GRANT)
- TARGET_LINK_LIBRARIES(streamUpdateTest
- PUBLIC os util common gtest gtest_main stream executor index grant
- )
-ELSE ()
- TARGET_LINK_LIBRARIES(streamUpdateTest
- PUBLIC os util common gtest gtest_main stream executor index
- )
-ENDIF()
+TARGET_LINK_LIBRARIES(streamUpdateTest
+ PUBLIC os util common gtest gtest_main stream executor index
+ )
TARGET_INCLUDE_DIRECTORIES(
streamUpdateTest
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index f64c2a9560..ccf7c3e4a4 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -618,8 +618,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_
return -1;
}
- // not restored, vnode enable
- if (!pSyncNode->restoreFinish && pSyncNode->vgId != 1) {
+ if (!pSyncNode->restoreFinish) {
terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
sNError(pSyncNode, "failed to sync propose since not ready, type:%s, last:%" PRId64 ", cmt:%" PRId64,
TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex);
diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c
index eb36389f1d..2f1bcfee83 100644
--- a/source/libs/wal/src/walRef.c
+++ b/source/libs/wal/src/walRef.c
@@ -81,26 +81,11 @@ void walRefLastVer(SWal *pWal, SWalRef *pRef) {
wDebug("vgId:%d, wal ref version %" PRId64 " for last", pWal->cfg.vgId, ver);
}
-SWalRef *walRefCommittedVer(SWal *pWal) {
- SWalRef *pRef = walOpenRef(pWal);
- if (pRef == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
- }
+void walRefCommitVer(SWal *pWal, SWalRef *pRef) {
taosThreadMutexLock(&pWal->mutex);
-
int64_t ver = walGetCommittedVer(pWal);
-
- wDebug("vgId:%d, wal ref version %" PRId64 " for committed", pWal->cfg.vgId, ver);
-
pRef->refVer = ver;
- // bsearch in fileSet
- SWalFileInfo tmpInfo;
- tmpInfo.firstVer = ver;
- SWalFileInfo *pRet = taosArraySearch(pWal->fileInfoSet, &tmpInfo, compareWalFileInfo, TD_LE);
- ASSERT(pRet != NULL);
- // pRef->refFile = pRet->firstVer;
taosThreadMutexUnlock(&pWal->mutex);
- return pRef;
+ wDebug("vgId:%d, wal ref version %" PRId64 " for committed", pWal->cfg.vgId, ver);
}
diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt
index db066a82b6..46e28b529d 100644
--- a/source/os/CMakeLists.txt
+++ b/source/os/CMakeLists.txt
@@ -64,7 +64,7 @@ else()
endif()
IF (JEMALLOC_ENABLED)
- target_link_libraries(os PUBLIC -ljemalloc)
+ target_link_libraries(os PUBLIC -L${CMAKE_BINARY_DIR}/build/lib -ljemalloc)
ENDIF ()
if(${BUILD_TEST})
diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c
index 6c7c5ddb0d..8906391a9a 100644
--- a/source/util/src/tarray.c
+++ b/source/util/src/tarray.c
@@ -476,13 +476,13 @@ int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode) {
return tlen;
}
-void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz) {
+void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver) {
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
*pArray = taosArrayInit(sz, sizeof(void*));
for (int32_t i = 0; i < sz; i++) {
void* data = taosMemoryCalloc(1, dataSz);
- buf = decode(buf, data);
+ buf = decode(buf, data, sver);
taosArrayPush(*pArray, &data);
}
return (void*)buf;
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 7cf95dcdea..a66af6e732 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -275,7 +275,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CONFLICT, "Conflict transaction
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_CLOG_IS_NULL, "Transaction commitlog is null")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL, "Unable to establish connection While execute transaction and will continue in the background")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED, "Last Transaction not finished")
-TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_SYNC_TIMEOUT, "Sync timeout While execute transaction and will continue in the background")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_UNKNOW_ERROR, "Unknown transaction error")
// mnode-mq
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index a81209b835..cd47cb2d16 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -449,6 +449,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb2.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb3.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/delete_stable.py
@@ -616,6 +617,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/precisionUS.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/precisionNS.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
@@ -821,6 +824,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tagFilter.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/projectionDesc.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3398.py -N 3 -n 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3405.py -N 3 -n 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3423.py -N 3 -n 3
,,n,system-test,python3 ./test.py -f 2-query/queryQnode.py
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode1mnode.py
@@ -930,7 +935,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 2
-,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 2
+#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 2
@@ -1025,7 +1030,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/csum.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 3
-,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3
+#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 3
@@ -1121,7 +1126,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 4
-,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 4
+#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 4
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 4
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py -Q 4
diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py
index 4bfb7d5ba3..aafd365f34 100644
--- a/tests/pytest/tools/taosdumpTest.py
+++ b/tests/pytest/tools/taosdumpTest.py
@@ -92,9 +92,9 @@ class TDTestCase:
else:
tdLog.info("taosdump found: %s" % binPath)
- os.system("%s -y --databases db -o ./taosdumptest/tmp1" % binPath)
+ os.system("%s --databases db -o ./taosdumptest/tmp1" % binPath)
os.system(
- "%s -y --databases db1 -o ./taosdumptest/tmp2" %
+ "%s --databases db1 -o ./taosdumptest/tmp2" %
binPath)
tdSql.execute("drop database db")
@@ -172,7 +172,7 @@ class TDTestCase:
tdSql.query("show stables")
tdSql.checkRows(2)
os.system(
- "%s -y --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" %
+ "%s --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" %
binPath)
tdSql.execute("drop database db12312313231231321312312312_323")
os.system("%s -i ./taosdumptest/tmp1" % binPath)
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index 8a85ce10ed..36e0480e57 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -97,7 +97,7 @@ class TDTestCase:
tdSql.query("show databases")
tdSql.checkRows(2)
- os.system("%s -i ./taosdumptest/tmp -y" % binPath)
+ os.system("%s -i ./taosdumptest/tmp" % binPath)
tdSql.query("show databases")
tdSql.checkRows(3)
@@ -125,13 +125,13 @@ class TDTestCase:
os.system("rm ./taosdumptest/tmp/*.sql")
os.system("rm ./taosdumptest/tmp/*.avro*")
os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
- os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
+ os.system("%s -D test -o ./taosdumptest/tmp" % binPath)
tdSql.execute("drop database test")
tdSql.query("show databases")
tdSql.checkRows(3)
- os.system("%s -i ./taosdumptest/tmp -y" % binPath)
+ os.system("%s -i ./taosdumptest/tmp" % binPath)
tdSql.execute("use test")
tdSql.query("show stables")
diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py
index c40462b8db..2a3990614a 100644
--- a/tests/pytest/tools/taosdumpTestNanoSupport.py
+++ b/tests/pytest/tools/taosdumpTestNanoSupport.py
@@ -134,15 +134,15 @@ class TDTestCase:
# dump all data
os.system(
- "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+ "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
# dump part data with -S -E
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
+ '%s -g --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
+ '%s -g --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
tdSql.execute("drop database timedb1")
@@ -200,14 +200,14 @@ class TDTestCase:
self.createdb(precision="us")
os.system(
- "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+ "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
+ '%s -g --databases timedb1 -S 1625068810000000 -E 1625068860000000 -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
+ '%s -g --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system("%s -i ./taosdumptest/dumptmp1" % binPath)
@@ -269,14 +269,14 @@ class TDTestCase:
self.createdb(precision="ms")
os.system(
- "%s -y -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
+ "%s -g --databases timedb1 -o ./taosdumptest/dumptmp1" %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
+ '%s -g --databases timedb1 -S 1625068810000 -E 1625068860000 -o ./taosdumptest/dumptmp2 ' %
binPath)
os.system(
- '%s -y -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
+ '%s -g --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' %
binPath)
os.system("%s -i ./taosdumptest/dumptmp1" % binPath)
diff --git a/tests/script/tsim/parser/projection_limit_offset.sim b/tests/script/tsim/parser/projection_limit_offset.sim
index 2d99b0a296..cab46a93d3 100644
--- a/tests/script/tsim/parser/projection_limit_offset.sim
+++ b/tests/script/tsim/parser/projection_limit_offset.sim
@@ -380,10 +380,10 @@ if $row != 8 then
endi
sql select diff(k) from tm0
-if $row != 3 then
+if $row != 4 then
return -1
endi
-if $data20 != -1 then
+if $data20 != NULL then
return -1
endi
diff --git a/tests/script/tsim/query/delete_and_query.sim b/tests/script/tsim/query/delete_and_query.sim
new file mode 100644
index 0000000000..3004ababa1
--- /dev/null
+++ b/tests/script/tsim/query/delete_and_query.sim
@@ -0,0 +1,25 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql create database if not exists test
+sql use test
+sql create table t1 (ts timestamp, c2 int)
+sql insert into t1 values(now, 1)
+
+sql delete from t1 where ts is null
+sql delete from t1 where ts < now
+sql select ts from t1 order by ts asc
+
+print ----------rows: $rows
+if $rows != 0 then
+ return -1
+endi
+
+sql select ts from t1 order by ts desc
+print ----------rows: $rows
+if $rows != 0 then
+ return -1
+endi
+
diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim
index 849aeb2ac5..f49a8e0a7d 100644
--- a/tests/script/tsim/query/sys_tbname.sim
+++ b/tests/script/tsim/query/sys_tbname.sim
@@ -131,4 +131,8 @@ print $rows
if $rows != 9 then
return -1
endi
+
+print =========================== td-24781
+sql select DISTINCT (`precision`) from `information_schema`.`ins_databases` PARTITION BY `precision`
+
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/unionall_as_table.sim b/tests/script/tsim/query/unionall_as_table.sim
index 4d8f990718..f8145d4e97 100644
--- a/tests/script/tsim/query/unionall_as_table.sim
+++ b/tests/script/tsim/query/unionall_as_table.sim
@@ -42,4 +42,12 @@ endi
if $data00 != 4 then
return -1
endi
+
+sql create table ctcount(ts timestamp, f int);
+sql insert into ctcount(ts) values(now)(now+1s);
+sql select count(*) from (select f from ctcount);
+print $data00
+if $data00 != 2 then
+ return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim
index 242231e408..60f769d2ae 100644
--- a/tests/script/tsim/sma/tsmaCreateInsertQuery.sim
+++ b/tests/script/tsim/sma/tsmaCreateInsertQuery.sim
@@ -340,6 +340,80 @@ if $data05 != 30.000000000 then
return -1
endi
+print =============== select with _wstart/order by _wstart from stb from file in designated vgroup
+sql select _wstart, _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m) order by _wstart;
+print $data00 $data01 $data02 $data03 $data04
+if $rows != 1 then
+ print rows $rows != 1
+ return -1
+endi
+
+if $data02 != -13 then
+ print data02 $data02 != -13
+ return -1
+endi
+
+if $data03 != 20.00000 then
+ print data03 $data03 != 20.00000
+ return -1
+endi
+
+if $data04 != 20 then
+ print data04 $data04 != 20
+ return -1
+endi
+
+print =============== select without _wstart/with order by _wstart from stb from file in designated vgroup
+sql select _wend, min(c1),max(c2),max(c1) from stb interval(5m,10s) sliding(5m) order by _wstart;
+print $data00 $data01 $data02 $data03
+if $rows != 1 then
+ print rows $rows != 1
+ return -1
+endi
+
+if $data01 != -13 then
+ print data01 $data01 != -13
+ return -1
+endi
+
+if $data02 != 20.00000 then
+ print data02 $data02 != 20.00000
+ return -1
+endi
+
+if $data03 != 20 then
+ print data03 $data03 != 20
+ return -1
+endi
+
+print =============== select * from stb from file in common vgroups
+sql select _wstart, _wend, min(c1),max(c2),max(c1),max(c3) from stb interval(5m,10s) sliding(5m) order by _wstart;
+print $data00 $data01 $data02 $data03 $data04 $data05
+if $rows != 1 then
+ print rows $rows != 1
+ return -1
+endi
+
+if $data02 != -13 then
+ print data02 $data02 != -13
+ return -1
+endi
+
+if $data03 != 20.00000 then
+ print data03 $data03 != 20.00000
+ return -1
+endi
+
+if $data04 != 20 then
+ print data04 $data04 != 20
+ return -1
+endi
+
+if $data05 != 30.000000000 then
+ print data05 $data05 != 30.000000000
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/ignoreExpiredData.sim b/tests/script/tsim/stream/ignoreExpiredData.sim
index 27920dd539..884b7cbb5f 100644
--- a/tests/script/tsim/stream/ignoreExpiredData.sim
+++ b/tests/script/tsim/stream/ignoreExpiredData.sim
@@ -132,12 +132,12 @@ if $loop_count == 10 then
return -1
endi
-if $data01 != 1 then
+if $data01 != 2 then
print =====data01=$data01
goto loop4
endi
-if $data02 != 1 then
+if $data02 != 2 then
print =====data02=$data02
goto loop4
endi
diff --git a/tests/script/tsim/stream/sliding.sim b/tests/script/tsim/stream/sliding.sim
index 3312ccbec4..05eb7dacba 100644
--- a/tests/script/tsim/stream/sliding.sim
+++ b/tests/script/tsim/stream/sliding.sim
@@ -576,13 +576,6 @@ $loop_count = 0
print step 7
-loop4:
-sleep 100
-
-$loop_count = $loop_count + 1
-if $loop_count == 10 then
- return -1
-endi
sql create database test3 vgroups 6;
sql use test3;
diff --git a/tests/system-test/0-others/multilevel.py b/tests/system-test/0-others/multilevel.py
index 7ad4eba645..f086dcb735 100644
--- a/tests/system-test/0-others/multilevel.py
+++ b/tests/system-test/0-others/multilevel.py
@@ -116,7 +116,7 @@ class TDTestCase:
tdSql.checkRows(1000)
tdLog.info("================= step3")
tdSql.execute('drop database test')
- for i in range(50):
+ for i in range(10):
tdSql.execute("create database test%d duration 1" %(i))
tdSql.execute("use test%d" %(i))
tdSql.execute("create table tb (ts timestamp,i int)")
diff --git a/tests/system-test/0-others/user_privilege_show.py b/tests/system-test/0-others/user_privilege_show.py
new file mode 100644
index 0000000000..9f49778ba8
--- /dev/null
+++ b/tests/system-test/0-others/user_privilege_show.py
@@ -0,0 +1,267 @@
+from itertools import product
+import taos
+from taos.tmq import *
+from util.cases import *
+from util.common import *
+from util.log import *
+from util.sql import *
+from util.sqlset import *
+
+
+class TDTestCase:
+ """This test case is used to veirfy the show create stable/table command for
+ the different user privilege(TS-3469)
+ """
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ # init the tdsql
+ tdSql.init(conn.cursor())
+ self.setsql = TDSetSql()
+ # user info
+ self.username = 'test'
+ self.password = 'test'
+ # db info
+ self.dbname = "user_privilege_show"
+ self.stbname = 'stb'
+ self.common_tbname = "tb"
+ self.ctbname_list = ["ct1", "ct2"]
+ self.column_dict = {
+ 'ts': 'timestamp',
+ 'col1': 'float',
+ 'col2': 'int',
+ }
+ self.tag_dict = {
+ 'ctbname': 'binary(10)'
+ }
+
+ # privilege check scenario info
+ self.privilege_check_dic = {}
+ self.senario_type = ["stable", "table", "ctable"]
+ self.priv_type = ["read", "write", "all", "none"]
+ # stable senarios
+ # include the show stable xxx command test senarios and expect result, true as have privilege, false as no privilege
+ # the list element is (db_privilege, stable_privilege, expect_res)
+ st_senarios_list = []
+ for senario in list(product(self.priv_type, repeat=2)):
+ expect_res = True
+ if senario == ("write", "write") or senario == ("none", "none") or senario == ("none", "write") or senario == ("write", "none"):
+ expect_res = False
+ st_senarios_list.append(senario + (expect_res,))
+ # self.privilege_check_dic["stable"] = st_senarios_list
+
+ # table senarios
+ # the list element is (db_privilege, table_privilege, expect_res)
+ self.privilege_check_dic["table"] = st_senarios_list
+
+ # child table senarios
+ # the list element is (db_privilege, stable_privilege, ctable_privilege, expect_res)
+ ct_senarios_list = []
+ for senario in list(product(self.priv_type, repeat=3)):
+ expect_res = True
+ if senario[2] == "write" or (senario[2] == "none" and senario[1] == "write") or (senario[2] == "none" and senario[1] == "none" and senario[0] == "write"):
+ expect_res = False
+ ct_senarios_list.append(senario + (expect_res,))
+ self.privilege_check_dic["ctable"] = ct_senarios_list
+
+ def prepare_data(self, senario_type):
+ """Create the db and data for test
+ """
+ if senario_type == "stable":
+ # db name
+ self.dbname = self.dbname + '_stable'
+ elif senario_type == "table":
+ # db name
+ self.dbname = self.dbname + '_table'
+ else:
+ # db name
+ self.dbname = self.dbname + '_ctable'
+
+ # create datebase
+ tdSql.execute(f"create database {self.dbname}")
+ tdLog.debug("sql:" + f"create database {self.dbname}")
+ tdSql.execute(f"use {self.dbname}")
+ tdLog.debug("sql:" + f"use {self.dbname}")
+
+ # create tables
+ if "_stable" in self.dbname:
+ # create stable
+ tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
+ tdLog.debug("Create stable {} successfully".format(self.stbname))
+ elif "_table" in self.dbname:
+ # create common table
+ tdSql.execute(f"create table {self.common_tbname}(ts timestamp, col1 float, col2 int)")
+ tdLog.debug("sql:" + f"create table {self.common_tbname}(ts timestamp, col1 float, col2 int)")
+ else:
+ # create stable and child table
+ tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
+ tdLog.debug("Create stable {} successfully".format(self.stbname))
+ for ctname in self.ctbname_list:
+ tdSql.execute(f"create table {ctname} using {self.stbname} tags('{ctname}')")
+ tdLog.debug("sql:" + f"create table {ctname} using {self.stbname} tags('{ctname}')")
+
+ def create_user(self):
+ """Create the user for test
+ """
+ tdSql.execute(f'create user {self.username} pass "{self.password}"')
+ tdLog.debug("sql:" + f'create user {self.username} pass "{self.password}"')
+
+ def grant_privilege(self, username, privilege, privilege_obj, ctable_include=False, tag_condition=None):
+ """Add the privilege for the user
+ """
+ try:
+ if ctable_include and tag_condition:
+ tdSql.execute(f'grant {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} to {username}')
+ tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} to {username}')
+ else:
+ tdSql.execute(f'grant {privilege} on {self.dbname}.{privilege_obj} to {username}')
+ tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{privilege_obj} to {username}')
+ except Exception as ex:
+ tdLog.exit(ex)
+
+ def remove_privilege(self, username, privilege, privilege_obj, ctable_include=False, tag_condition=None):
+ """Remove the privilege for the user
+ """
+ try:
+ if ctable_include and tag_condition:
+ tdSql.execute(f'revoke {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} from {username}')
+ tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{privilege_obj} with {tag_condition} from {username}')
+ else:
+ tdSql.execute(f'revoke {privilege} on {self.dbname}.{privilege_obj} from {username}')
+ tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{privilege_obj} from {username}')
+ except Exception as ex:
+ tdLog.exit(ex)
+
+ def run(self):
+ """Currently, the test case can't be executed for all of the privilege combinations cause
+ the table privilege isn't finished by dev team, only left one senario:
+ db read privilege for user and show create table command; will udpate the test case once
+ the table privilege function is finished
+ """
+ self.create_user()
+
+ # temp solution only for the db read privilege verification
+ self.prepare_data("table")
+ # grant db read privilege
+ self.grant_privilege(self.username, "read", "*")
+ # create the taos connection with -utest -ptest
+ testconn = taos.connect(user=self.username, password=self.password)
+ testconn.execute("use %s;" % self.dbname)
+ # show the user privileges
+ res = testconn.query("select * from information_schema.ins_user_privileges;")
+ tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(res.fetch_all()))
+ # query execution
+ sql = "show create table " + self.common_tbname + ";"
+ tdLog.debug("sql: %s" % sql)
+ res = testconn.query(sql)
+ # query result
+ tdLog.debug("sql res:" + str(res.fetch_all()))
+ # remove the privilege
+ self.remove_privilege(self.username, "read", "*")
+ # clear env
+ testconn.close()
+ tdSql.execute(f"drop database {self.dbname}")
+
+ """
+ for senario_type in self.privilege_check_dic.keys():
+ tdLog.debug(f"---------check the {senario_type} privilege----------")
+ self.prepare_data(senario_type)
+ for senario in self.privilege_check_dic[senario_type]:
+ # grant db privilege
+ if senario[0] != "none":
+ self.grant_privilege(self.username, senario[0], "*")
+ # grant stable privilege
+ if senario[1] != "none":
+ self.grant_privilege(self.username, senario[1], self.stbname if senario_type == "stable" or senario_type == "ctable" else self.common_tbname)
+ if senario_type == "stable" or senario_type == "table":
+ tdLog.debug(f"check the db privilege: {senario[0]}, (s)table privilege: {senario[1]}")
+ else:
+ if senario[2] != "none":
+ # grant child table privilege
+ self.grant_privilege(self.username, senario[2], self.stbname, True, "ctbname='ct1'")
+ tdLog.debug(f"check the db privilege: {senario[0]}, (s)table privilege: {senario[1]}, ctable privilege: {senario[2]}")
+ testconn = taos.connect(user=self.username, password=self.password)
+ tdLog.debug("Create taos connection with user: {}, password: {}".format(self.username, self.password))
+ try:
+ testconn.execute("use %s;" % self.dbname)
+ except BaseException as ex:
+ if (senario_type in ["stable", "table"] and senario[0] == "none" and senario[1] == "none") or (senario_type == "ctable" and senario[0] == "none" and senario[1] == "none" and senario[2] == "none"):
+ continue
+ else:
+ tdLog.exit(ex)
+
+ # query privileges for user
+ res = testconn.query("select * from information_schema.ins_user_privileges;")
+ tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(res.fetch_all()))
+
+ if senario_type == "stable" or senario_type == "table":
+ sql = "show create " + (("stable " + self.stbname) if senario_type == "stable" else (f"table {self.dbname}." + self.common_tbname + ";"))
+ if senario[2]:
+ tdLog.debug("sql: %s" % sql)
+ tdLog.debug(f"expected result: {senario[2]}")
+ res = testconn.query(sql)
+ tdLog.debug("sql res:" + res.fetch_all())
+ else:
+ exception_flag = False
+ try:
+ tdLog.debug("sql: %s" % sql)
+ tdLog.debug(f"expected result: {senario[2]}")
+ res = testconn.query(sql)
+ tdLog.debug("sql res:" + res.fetch_all())
+ except BaseException:
+ exception_flag = True
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ tdLog.debug(f"{caller.filename}({caller.lineno}) failed to check the db privilege {senario[0]} and stable privilege {senario[1]} failed as expected")
+ if not exception_flag:
+ pass
+ # tdLog.exit("The expected exception isn't occurred")
+ else:
+ sql = f"show create table {self.dbname}.{self.ctbname_list[0]};"
+ if senario[3]:
+ tdLog.debug("sql: %s" % sql)
+ tdLog.debug(f"expected result: {senario[3]}")
+ res = testconn.query(sql)
+ tdLog.debug(res.fetch_all())
+ else:
+ exception_flag = False
+ try:
+ tdLog.debug("sql: %s" % sql)
+ tdLog.debug(f"expected result: {senario[3]}")
+ res = testconn.query(sql)
+ tdLog.debug(res.fetch_all())
+ except BaseException:
+ exception_flag = True
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ tdLog.debug(f"{caller.filename}({caller.lineno}) failed to check the db privilege {senario[0]}, stable privilege {senario[1]} and ctable privilege {senario[2]} failed as expected")
+ if not exception_flag:
+ pass
+ # tdLog.exit("The expected exception isn't occurred")
+
+ # remove db privilege
+ if senario[0] != "none":
+ self.remove_privilege(self.username, senario[0], "*")
+ # remove stable privilege
+ if senario[1] != "none":
+ self.remove_privilege(self.username, senario[1], self.stbname if senario_type == "stable" else self.common_tbname)
+ # remove child table privilege
+ if senario_type == "ctable":
+ if senario[2] != "none":
+ self.remove_privilege(self.username, senario[2], self.ctbname_list[0], True, "ctbname='ct1'")
+ testconn.close()
+
+ # remove the database
+ tdSql.execute(f"drop database {self.dbname}")
+ # reset the dbname
+ self.dbname = "user_privilege_show"
+ """
+
+ def stop(self):
+ # remove the user
+ tdSql.execute(f'drop user {self.username}')
+ # close the connection
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/precisionNS.py b/tests/system-test/1-insert/precisionNS.py
new file mode 100644
index 0000000000..be8f1e21dc
--- /dev/null
+++ b/tests/system-test/1-insert/precisionNS.py
@@ -0,0 +1,293 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+class TDTestCase:
+
+ # get col value and total max min ...
+ def getColsValue(self, i, j):
+ # c1 value
+ if random.randint(1, 10) == 5:
+ c1 = None
+ else:
+ c1 = 1
+
+ # c2 value
+ if j % 3200 == 0:
+ c2 = 8764231
+ elif random.randint(1, 10) == 5:
+ c2 = None
+ else:
+ c2 = random.randint(-87654297, 98765321)
+
+
+ value = f"({self.ts}, "
+
+ # c1
+ if c1 is None:
+ value += "null,"
+ else:
+ self.c1Cnt += 1
+ value += f"{c1},"
+ # c2
+ if c2 is None:
+ value += "null,"
+ else:
+ value += f"{c2},"
+ # total count
+ self.c2Cnt += 1
+ # max
+ if self.c2Max is None:
+ self.c2Max = c2
+ else:
+ if c2 > self.c2Max:
+ self.c2Max = c2
+ # min
+ if self.c2Min is None:
+ self.c2Min = c2
+ else:
+ if c2 < self.c2Min:
+ self.c2Min = c2
+ # sum
+ if self.c2Sum is None:
+ self.c2Sum = c2
+ else:
+ self.c2Sum += c2
+
+ # c3 same with ts
+ value += f"{self.ts})"
+
+ # move next
+ self.ts += 1
+
+ return value
+
+ # insert data
+ def insertData(self):
+ tdLog.info("insert data ....")
+ sqls = ""
+ for i in range(self.childCnt):
+ # insert child table
+ values = ""
+ pre_insert = f"insert into t{i} values "
+ for j in range(self.childRow):
+ if values == "":
+ values = self.getColsValue(i, j)
+ else:
+ values += "," + self.getColsValue(i, j)
+
+ # batch insert
+ if j % self.batchSize == 0 and values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+ # append last
+ if values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+
+ sql = "flush database db;"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ # insert finished
+ tdLog.info(f"insert data successfully.\n"
+ f" inserted child table = {self.childCnt}\n"
+ f" inserted child rows = {self.childRow}\n"
+ f" total inserted rows = {self.childCnt*self.childRow}\n")
+ return
+
+
+ # prepareEnv
+ def prepareEnv(self):
+ # init
+ self.ts = 1680000000000*1000*1000
+ self.childCnt = 5
+ self.childRow = 10000
+ self.batchSize = 5000
+
+ # total
+ self.c1Cnt = 0
+ self.c2Cnt = 0
+ self.c2Max = None
+ self.c2Min = None
+ self.c2Sum = None
+
+ # create database db
+ sql = f"create database db vgroups 2 precision 'ns' "
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ sql = f"use db"
+ tdSql.execute(sql)
+
+ # create super table st
+ sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # create child table
+ for i in range(self.childCnt):
+ sql = f"create table t{i} using st tags({i}) "
+ tdSql.execute(sql)
+
+ # create stream
+ sql = "create stream ma into sta as select count(ts) from st interval(100b)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # insert data
+ self.insertData()
+
+ # check data correct
+ def checkExpect(self, sql, expectVal):
+ tdSql.query(sql)
+ rowCnt = tdSql.getRows()
+ for i in range(rowCnt):
+ val = tdSql.getData(i,0)
+ if val != expectVal:
+ tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
+ return False
+
+ tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
+ return True
+
+
+
+
+ # check time macro
+ def checkTimeMacro(self):
+ # 2 week
+ val = 2
+ nsval = val*7*24*60*60*1000*1000*1000
+ expectVal = self.childCnt * self.childRow
+ sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+
+ # 20 day
+ val = 20
+ nsval = val*24*60*60*1000*1000*1000
+ uint = "d"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+
+ # 30 hour
+ val = 30
+ nsval = val*60*60*1000*1000*1000
+ uint = "h"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+
+ # 90 minutes
+ val = 90
+ nsval = val*60*1000*1000*1000
+ uint = "m"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 2s
+ val = 2
+ nsval = val*1000*1000*1000
+ uint = "s"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 20a
+ val = 5
+ nsval = val*1000*1000
+ uint = "a"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 300u
+ val = 300
+ nsval = val*1000
+ uint = "u"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
+ self.checkExpect(sql, expectVal)
+ # 8b
+ val = 8
+ sql = f"select timediff(ts - {val}b, ts1) from st "
+ self.checkExpect(sql, val)
+
+ # init
+ def init(self, conn, logSql, replicaVar=1):
+ seed = time.clock_gettime(time.CLOCK_REALTIME)
+ random.seed(seed)
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ # where
+ def checkWhere(self):
+ cnt = 300
+ start = self.ts - cnt
+ sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
+ self.checkExpect(sql, cnt)
+
+ for i in range(50):
+ cnt = random.randint(1,40000)
+ base = 2000
+ start = self.ts - cnt - base
+ end = self.ts - base
+ sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
+ self.checkExpect(sql, cnt)
+
+ # stream
+ def checkStream(self):
+ allRows = self.childCnt * self.childRow
+ # ensure write data is expected
+ sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
+ self.checkExpect(sql, allRows - 1)
+
+ # stream count is ok
+ sql =f"select count(*) from sta"
+ cnt = int(allRows / 100) - 1 # last window is not close, so need reduce one
+ self.checkExpect(sql, cnt)
+
+ # check fields
+ sql =f"select count(*) from sta where `count(ts)` != 100"
+ self.checkExpect(sql, 0)
+
+ # check timestamp
+ sql =f"select count(*) from (select diff(`_wstart`) from sta)"
+ self.checkExpect(sql, cnt - 1)
+ sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
+ self.checkExpect(sql, 0)
+
+ # run
+ def run(self):
+ # prepare env
+ self.prepareEnv()
+
+ # time macro like 1w 1d 1h 1m 1s 1a 1u 1b
+ self.checkTimeMacro()
+
+ # check where
+ self.checkWhere()
+
+ # check stream
+ self.checkStream()
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/precisionUS.py b/tests/system-test/1-insert/precisionUS.py
new file mode 100644
index 0000000000..1b41d66010
--- /dev/null
+++ b/tests/system-test/1-insert/precisionUS.py
@@ -0,0 +1,287 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+class TDTestCase:
+
+ # get col value and total max min ...
+ def getColsValue(self, i, j):
+ # c1 value
+ if random.randint(1, 10) == 5:
+ c1 = None
+ else:
+ c1 = 1
+
+ # c2 value
+ if j % 3200 == 0:
+ c2 = 8764231
+ elif random.randint(1, 10) == 5:
+ c2 = None
+ else:
+ c2 = random.randint(-87654297, 98765321)
+
+
+ value = f"({self.ts}, "
+
+ # c1
+ if c1 is None:
+ value += "null,"
+ else:
+ self.c1Cnt += 1
+ value += f"{c1},"
+ # c2
+ if c2 is None:
+ value += "null,"
+ else:
+ value += f"{c2},"
+ # total count
+ self.c2Cnt += 1
+ # max
+ if self.c2Max is None:
+ self.c2Max = c2
+ else:
+ if c2 > self.c2Max:
+ self.c2Max = c2
+ # min
+ if self.c2Min is None:
+ self.c2Min = c2
+ else:
+ if c2 < self.c2Min:
+ self.c2Min = c2
+ # sum
+ if self.c2Sum is None:
+ self.c2Sum = c2
+ else:
+ self.c2Sum += c2
+
+ # c3 same with ts
+ value += f"{self.ts})"
+
+ # move next
+ self.ts += 1
+
+ return value
+
+ # insert data
+ def insertData(self):
+ tdLog.info("insert data ....")
+ sqls = ""
+ for i in range(self.childCnt):
+ # insert child table
+ values = ""
+ pre_insert = f"insert into t{i} values "
+ for j in range(self.childRow):
+ if values == "":
+ values = self.getColsValue(i, j)
+ else:
+ values += "," + self.getColsValue(i, j)
+
+ # batch insert
+ if j % self.batchSize == 0 and values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+ # append last
+ if values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+
+ sql = "flush database db;"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ # insert finished
+ tdLog.info(f"insert data successfully.\n"
+ f" inserted child table = {self.childCnt}\n"
+ f" inserted child rows = {self.childRow}\n"
+ f" total inserted rows = {self.childCnt*self.childRow}\n")
+ return
+
+
+ # prepareEnv
+ def prepareEnv(self):
+ # init
+ self.ts = 1680000000000*1000
+ self.childCnt = 5
+ self.childRow = 10000
+ self.batchSize = 5000
+
+ # total
+ self.c1Cnt = 0
+ self.c2Cnt = 0
+ self.c2Max = None
+ self.c2Min = None
+ self.c2Sum = None
+
+ # create database db
+ sql = f"create database db vgroups 2 precision 'us' "
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ sql = f"use db"
+ tdSql.execute(sql)
+
+        # create super table st
+ sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # create child table
+ for i in range(self.childCnt):
+ sql = f"create table t{i} using st tags({i}) "
+ tdSql.execute(sql)
+
+ # create stream
+ sql = "create stream ma into sta as select count(ts) from st interval(100u)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # insert data
+ self.insertData()
+
+ # check data correct
+ def checkExpect(self, sql, expectVal):
+ tdSql.query(sql)
+ rowCnt = tdSql.getRows()
+ for i in range(rowCnt):
+ val = tdSql.getData(i,0)
+ if val != expectVal:
+ tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
+ return False
+
+ tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
+ return True
+
+
+ # check time macro
+ def checkTimeMacro(self):
+ # 2 week
+ val = 2
+ usval = val*7*24*60*60*1000*1000
+ expectVal = self.childCnt * self.childRow
+ sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # 20 day
+ val = 20
+ usval = val*24*60*60*1000*1000
+ uint = "d"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # 30 hour
+ val = 30
+ usval = val*60*60*1000*1000
+ uint = "h"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # 90 minutes
+ val = 90
+ usval = val*60*1000*1000
+ uint = "m"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+ # 2s
+ val = 2
+ usval = val*1000*1000
+ uint = "s"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+ # 20a
+ val = 20
+ usval = val*1000
+ uint = "a"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+ # 300u
+ val = 300
+ usval = val*1
+ uint = "u"
+ sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
+ self.checkExpect(sql, expectVal)
+
+ # init
+ def init(self, conn, logSql, replicaVar=1):
+ seed = time.clock_gettime(time.CLOCK_REALTIME)
+ random.seed(seed)
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ # where
+ def checkWhere(self):
+ cnt = 300
+ start = self.ts - cnt
+ sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
+ self.checkExpect(sql, cnt)
+
+ for i in range(50):
+ cnt = random.randint(1,40000)
+ base = 2000
+ start = self.ts - cnt - base
+ end = self.ts - base
+ sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
+ self.checkExpect(sql, cnt)
+
+ # stream
+ def checkStream(self):
+ allRows = self.childCnt * self.childRow
+        # ensure written data is as expected
+ sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
+ self.checkExpect(sql, allRows - 1)
+
+ # stream count is ok
+ sql =f"select count(*) from sta"
+ cnt = int(allRows / 100) - 1 # last window is not close, so need reduce one
+ self.checkExpect(sql, cnt)
+
+ # check fields
+ sql =f"select count(*) from sta where `count(ts)` != 100"
+ self.checkExpect(sql, 0)
+
+ # check timestamp
+ sql =f"select count(*) from (select diff(`_wstart`) from sta)"
+ self.checkExpect(sql, cnt - 1)
+ sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
+ self.checkExpect(sql, 0)
+
+ # run
+ def run(self):
+ # prepare env
+ self.prepareEnv()
+
+ # time macro like 1w 1d 1h 1m 1s 1a 1u
+ self.checkTimeMacro()
+
+ # check where
+ self.checkWhere()
+
+ # check stream
+ self.checkStream()
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py
index d48a01db6a..cdea8964b4 100644
--- a/tests/system-test/2-query/diff.py
+++ b/tests/system-test/2-query/diff.py
@@ -52,6 +52,95 @@ class TDTestCase:
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
+ # handle null values
+ tdSql.execute(
+ f"create table {dbname}.ntb_null(ts timestamp,c1 int,c2 double,c3 float,c4 bool)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, 1, 1.0, NULL, NULL)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 2.0, 2.0, NULL)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, 2, NULL, NULL, false)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 1.0, 1.0, NULL)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, NULL, 3.0, NULL, true)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, 3, NULL, 3.0, NULL)")
+ tdSql.execute(f"insert into {dbname}.ntb_null values(now, 1, NULL, NULL, true)")
+
+ tdSql.query(f"select diff(c1) from {dbname}.ntb_null")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 1)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1)
+ tdSql.checkData(5, 0, -2)
+
+ tdSql.query(f"select diff(c2) from {dbname}.ntb_null")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -1)
+ tdSql.checkData(3, 0, 2)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select diff(c3) from {dbname}.ntb_null")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, -1)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 2)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select diff(c4) from {dbname}.ntb_null")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, 1)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, 0)
+
+ tdSql.query(f"select diff(c1),diff(c2),diff(c3),diff(c4) from {dbname}.ntb_null")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 1)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, 1)
+ tdSql.checkData(5, 0, -2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, None)
+ tdSql.checkData(2, 1, -1)
+ tdSql.checkData(3, 1, 2)
+ tdSql.checkData(4, 1, None)
+ tdSql.checkData(5, 1, None)
+ tdSql.checkData(0, 2, None)
+ tdSql.checkData(1, 2, None)
+ tdSql.checkData(2, 2, -1)
+ tdSql.checkData(3, 2, None)
+ tdSql.checkData(4, 2, 2)
+ tdSql.checkData(5, 2, None)
+ tdSql.checkData(0, 3, None)
+ tdSql.checkData(1, 3, None)
+ tdSql.checkData(2, 3, None)
+ tdSql.checkData(3, 3, 1)
+ tdSql.checkData(4, 3, None)
+ tdSql.checkData(5, 3, 0)
+
+ tdSql.query(f"select diff(c1),diff(c2),diff(c3),diff(c4) from {dbname}.ntb_null where c1 is not null")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, 1)
+ tdSql.checkData(1, 0, 1)
+ tdSql.checkData(2, 0, -2)
+ tdSql.checkData(0, 1, None)
+ tdSql.checkData(1, 1, None)
+ tdSql.checkData(2, 1, None)
+ tdSql.checkData(0, 2, None)
+ tdSql.checkData(1, 2, None)
+ tdSql.checkData(2, 2, None)
+ tdSql.checkData(0, 3, None)
+ tdSql.checkData(1, 3, None)
+ tdSql.checkData(2, 3, 1)
+
tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
@@ -103,6 +192,9 @@ class TDTestCase:
tdSql.error(f"select diff(col1,1.23) from {dbname}.stb_1")
tdSql.error(f"select diff(col1,-1) from {dbname}.stb_1")
tdSql.query(f"select ts,diff(col1),ts from {dbname}.stb_1")
+ tdSql.error(f"select diff(col1, 1),diff(col2) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col1, 1),diff(col2, 0) from {dbname}.stb_1")
+ tdSql.error(f"select diff(col1, 1),diff(col2, 1) from {dbname}.stb_1")
tdSql.query(f"select diff(ts) from {dbname}.stb_1")
tdSql.checkRows(10)
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
index c3f3789a69..493e59265e 100644
--- a/tests/system-test/2-query/function_diff.py
+++ b/tests/system-test/2-query/function_diff.py
@@ -127,22 +127,33 @@ class TDTestCase:
return
else:
- tdSql.query(f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ sql = f"select {col} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}"
+ tdSql.query(sql)
offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
pre_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
if (platform.system().lower() == 'windows' and pre_result.dtype == 'int32'):
pre_result = np.array(pre_result, dtype = 'int64')
pre_diff = np.diff(pre_result)[offset_val:]
- tdSql.query(self.diff_query_form(
- col=col, alias=alias, table_expr=table_expr, condition=condition
- ))
-
- for i in range(tdSql.queryRows):
- print(f"case in {line}: ", end='')
- if isinstance(pre_diff[i] , float ):
- pass
- else:
- tdSql.checkData(i, 0, pre_diff[i])
+ if len(pre_diff) > 0:
+ sql =self.diff_query_form(col=col, alias=alias, table_expr=table_expr, condition=condition)
+ tdSql.query(sql)
+ j = 0
+ diff_cnt = len(pre_diff)
+ for i in range(tdSql.queryRows):
+ print(f"case in {line}: i={i} j={j} pre_diff[j]={pre_diff[j]} ", end='')
+ if isinstance(pre_diff[j] , float ):
+ if j + 1 < diff_cnt:
+ j += 1
+ pass
+ else:
+ if tdSql.getData(i,0) != None:
+ tdSql.checkData(i, 0, pre_diff[j])
+ if j + 1 < diff_cnt:
+ j += 1
+ else:
+ print(f"getData i={i} is None j={j} ")
+ else:
+ print("pre_diff len is zero.")
pass
@@ -354,31 +365,31 @@ class TDTestCase:
tdSql.checkRows(229)
tdSql.checkData(0,0,0)
tdSql.query("select diff(c1) from db.stb1 partition by tbname ")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
# bug need fix
tdSql.query("select diff(st1+c1) from db.stb1 partition by tbname")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
# bug need fix
tdSql.query("select tbname , diff(c1) from db.stb1 partition by tbname")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
tdSql.query("select tbname , diff(st1) from db.stb1 partition by tbname")
tdSql.checkRows(220)
# partition by tags
tdSql.query("select st1 , diff(c1) from db.stb1 partition by st1")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
tdSql.query("select diff(c1) from db.stb1 partition by st1")
- tdSql.checkRows(190)
+ tdSql.checkRows(220)
def diff_test_run(self) :
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 9b3dd60246..47a4bc4dcf 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -225,6 +225,101 @@ class TDTestCase:
tdSql.checkData(2, 0, 12)
tdSql.checkData(3, 0, 12)
+ ## test fill value with scalar expression
+ # data types
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c2) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c3) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c4) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3.0)
+ tdSql.checkData(1, 0, 3.0)
+ tdSql.checkData(2, 0, 3.0)
+ tdSql.checkData(3, 0, 3.0)
+
+ tdSql.query(f"select interp(c5) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3.0)
+ tdSql.checkData(1, 0, 3.0)
+ tdSql.checkData(2, 0, 3.0)
+ tdSql.checkData(3, 0, 3.0)
+
+ tdSql.query(f"select interp(c6) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, True)
+ tdSql.checkData(1, 0, True)
+ tdSql.checkData(2, 0, True)
+ tdSql.checkData(3, 0, True)
+
+ # expr types
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1.0 + 2.0)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2.5)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + '2')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + '2.0')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '3' + 'abc')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '2' + '1abc')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+
tdLog.printNoPrefix("==========step5:fill prev")
## {. . .}
@@ -1765,47 +1860,10 @@ class TDTestCase:
tdSql.checkData(60, 1, 60) #
# test fill value
- tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123)")
+ tdSql.query(f"select _irowts, interp(c0), _irowts, interp(c1), _irowts from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123, 456)")
tdSql.checkRows(61)
- tdSql.checkCols(2)
- tdSql.checkData(0, 0, 0) #
- tdSql.checkData(1, 0, 123)
- tdSql.checkData(4, 0, 123)
- tdSql.checkData(5, 0, None) #
- tdSql.checkData(6, 0, 123)
- tdSql.checkData(9, 0, 123)
- tdSql.checkData(10, 0, 10) #
- tdSql.checkData(11, 0, 123)
- tdSql.checkData(14, 0, 123)
- tdSql.checkData(15, 0, None) #
- tdSql.checkData(16, 0, 123)
- tdSql.checkData(19, 0, 123)
- tdSql.checkData(20, 0, 20) #
- tdSql.checkData(21, 0, 123)
- tdSql.checkData(24, 0, 123)
- tdSql.checkData(25, 0, None) #
- tdSql.checkData(26, 0, 123)
- tdSql.checkData(29, 0, 123)
- tdSql.checkData(30, 0, 30) #
- tdSql.checkData(31, 0, 123)
- tdSql.checkData(34, 0, 123)
- tdSql.checkData(35, 0, 35) #
- tdSql.checkData(36, 0, 123)
- tdSql.checkData(39, 0, 123)
- tdSql.checkData(40, 0, 40) #
- tdSql.checkData(41, 0, 123)
- tdSql.checkData(44, 0, 123)
- tdSql.checkData(45, 0, None) #
- tdSql.checkData(46, 0, 123)
- tdSql.checkData(49, 0, 123)
- tdSql.checkData(50, 0, 50) #
- tdSql.checkData(51, 0, 123)
- tdSql.checkData(54, 0, 123)
- tdSql.checkData(55, 0, None) #
- tdSql.checkData(59, 0, 123)
- tdSql.checkData(60, 0, 55) #
-
- tdSql.checkData(0, 1, None) #
+ tdSql.checkCols(5)
+ tdSql.checkData(0, 1, 0) #
tdSql.checkData(1, 1, 123)
tdSql.checkData(4, 1, 123)
tdSql.checkData(5, 1, None) #
@@ -1817,7 +1875,7 @@ class TDTestCase:
tdSql.checkData(15, 1, None) #
tdSql.checkData(16, 1, 123)
tdSql.checkData(19, 1, 123)
- tdSql.checkData(20, 1, None) #
+ tdSql.checkData(20, 1, 20) #
tdSql.checkData(21, 1, 123)
tdSql.checkData(24, 1, 123)
tdSql.checkData(25, 1, None) #
@@ -1826,22 +1884,137 @@ class TDTestCase:
tdSql.checkData(30, 1, 30) #
tdSql.checkData(31, 1, 123)
tdSql.checkData(34, 1, 123)
- tdSql.checkData(35, 1, None) #
+ tdSql.checkData(35, 1, 35) #
tdSql.checkData(36, 1, 123)
tdSql.checkData(39, 1, 123)
tdSql.checkData(40, 1, 40) #
tdSql.checkData(41, 1, 123)
tdSql.checkData(44, 1, 123)
- tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(45, 1, None) #
tdSql.checkData(46, 1, 123)
tdSql.checkData(49, 1, 123)
- tdSql.checkData(50, 1, None) #
+ tdSql.checkData(50, 1, 50) #
tdSql.checkData(51, 1, 123)
tdSql.checkData(54, 1, 123)
tdSql.checkData(55, 1, None) #
- tdSql.checkData(56, 1, 123)
tdSql.checkData(59, 1, 123)
- tdSql.checkData(60, 1, 60) #
+ tdSql.checkData(60, 1, 55) #
+
+ tdSql.checkData(0, 3, None) #
+ tdSql.checkData(1, 3, 456)
+ tdSql.checkData(4, 3, 456)
+ tdSql.checkData(5, 3, None) #
+ tdSql.checkData(6, 3, 456)
+ tdSql.checkData(9, 3, 456)
+ tdSql.checkData(10, 3, 10) #
+ tdSql.checkData(11, 3, 456)
+ tdSql.checkData(14, 3, 456)
+ tdSql.checkData(15, 3, None) #
+ tdSql.checkData(16, 3, 456)
+ tdSql.checkData(19, 3, 456)
+ tdSql.checkData(20, 3, None) #
+ tdSql.checkData(21, 3, 456)
+ tdSql.checkData(24, 3, 456)
+ tdSql.checkData(25, 3, None) #
+ tdSql.checkData(26, 3, 456)
+ tdSql.checkData(29, 3, 456)
+ tdSql.checkData(30, 3, 30) #
+ tdSql.checkData(31, 3, 456)
+ tdSql.checkData(34, 3, 456)
+ tdSql.checkData(35, 3, None) #
+ tdSql.checkData(36, 3, 456)
+ tdSql.checkData(39, 3, 456)
+ tdSql.checkData(40, 3, 40) #
+ tdSql.checkData(41, 3, 456)
+ tdSql.checkData(44, 3, 456)
+ tdSql.checkData(45, 3, 45) #
+ tdSql.checkData(46, 3, 456)
+ tdSql.checkData(49, 3, 456)
+ tdSql.checkData(50, 3, None) #
+ tdSql.checkData(51, 3, 456)
+ tdSql.checkData(54, 3, 456)
+ tdSql.checkData(55, 3, None) #
+ tdSql.checkData(56, 3, 456)
+ tdSql.checkData(59, 3, 456)
+ tdSql.checkData(60, 3, 60) #
+
+ tdSql.query(f"select _isfilled, interp(c0), _isfilled, interp(c1), _isfilled from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123 + 123, 234 + 234)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(5)
+ tdSql.checkData(0, 1, 0) #
+ tdSql.checkData(1, 1, 246)
+ tdSql.checkData(4, 1, 246)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, 246)
+ tdSql.checkData(9, 1, 246)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, 246)
+ tdSql.checkData(14, 1, 246)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, 246)
+ tdSql.checkData(19, 1, 246)
+ tdSql.checkData(20, 1, 20) #
+ tdSql.checkData(21, 1, 246)
+ tdSql.checkData(24, 1, 246)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, 246)
+ tdSql.checkData(29, 1, 246)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, 246)
+ tdSql.checkData(34, 1, 246)
+ tdSql.checkData(35, 1, 35) #
+ tdSql.checkData(36, 1, 246)
+ tdSql.checkData(39, 1, 246)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, 246)
+ tdSql.checkData(44, 1, 246)
+ tdSql.checkData(45, 1, None) #
+ tdSql.checkData(46, 1, 246)
+ tdSql.checkData(49, 1, 246)
+ tdSql.checkData(50, 1, 50) #
+ tdSql.checkData(51, 1, 246)
+ tdSql.checkData(54, 1, 246)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(59, 1, 246)
+ tdSql.checkData(60, 1, 55) #
+
+ tdSql.checkData(0, 3, None) #
+ tdSql.checkData(1, 3, 468)
+ tdSql.checkData(4, 3, 468)
+ tdSql.checkData(5, 3, None) #
+ tdSql.checkData(6, 3, 468)
+ tdSql.checkData(9, 3, 468)
+ tdSql.checkData(10, 3, 10) #
+ tdSql.checkData(11, 3, 468)
+ tdSql.checkData(14, 3, 468)
+ tdSql.checkData(15, 3, None) #
+ tdSql.checkData(16, 3, 468)
+ tdSql.checkData(19, 3, 468)
+ tdSql.checkData(20, 3, None) #
+ tdSql.checkData(21, 3, 468)
+ tdSql.checkData(24, 3, 468)
+ tdSql.checkData(25, 3, None) #
+ tdSql.checkData(26, 3, 468)
+ tdSql.checkData(29, 3, 468)
+ tdSql.checkData(30, 3, 30) #
+ tdSql.checkData(31, 3, 468)
+ tdSql.checkData(34, 3, 468)
+ tdSql.checkData(35, 3, None) #
+ tdSql.checkData(36, 3, 468)
+ tdSql.checkData(39, 3, 468)
+ tdSql.checkData(40, 3, 40) #
+ tdSql.checkData(41, 3, 468)
+ tdSql.checkData(44, 3, 468)
+ tdSql.checkData(45, 3, 45) #
+ tdSql.checkData(46, 3, 468)
+ tdSql.checkData(49, 3, 468)
+ tdSql.checkData(50, 3, None) #
+ tdSql.checkData(51, 3, 468)
+ tdSql.checkData(54, 3, 468)
+ tdSql.checkData(55, 3, None) #
+ tdSql.checkData(56, 3, 468)
+ tdSql.checkData(59, 3, 468)
+ tdSql.checkData(60, 3, 60) #
# test fill prev
tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(prev)")
@@ -2016,7 +2189,7 @@ class TDTestCase:
tdSql.checkData(3, i, None)
tdSql.checkData(4, i, None)
- tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)")
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1, 1, 1, 1)")
tdSql.checkRows(5)
tdSql.checkCols(4)
@@ -2442,6 +2615,10 @@ class TDTestCase:
tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _isfilled = true range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _irowts > 0 range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ # fill value number mismatch
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1, 2)")
+ tdSql.error(f"select interp(c0), interp(c1) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1)")
+
diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index dec24010fc..fbd3488aab 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -172,7 +172,7 @@ class TDTestCase:
tdSql.checkRows(90)
tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1")
- tdSql.checkRows(90)
+ tdSql.checkRows(140)
tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1")
tdSql.checkRows(100)
diff --git a/tests/system-test/2-query/odbc.py b/tests/system-test/2-query/odbc.py
index 5241406b65..8fbad93995 100644
--- a/tests/system-test/2-query/odbc.py
+++ b/tests/system-test/2-query/odbc.py
@@ -21,9 +21,7 @@ class TDTestCase:
tdSql.execute("create table db.stb (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned, c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t int)")
tdSql.execute("insert into db.ctb using db.stb tags(1) (ts, c1) values (now, 1)")
- tdSql.query("select count(*) from information_schema.ins_columns")
- # enterprise version: 288, community version: 280
- tdSql.checkData(0, 0, 288)
+ tdSql.execute("select count(*) from information_schema.ins_columns")
tdSql.query("select * from information_schema.ins_columns where table_name = 'ntb'")
tdSql.checkRows(14)
diff --git a/tests/system-test/2-query/ts_3405.py b/tests/system-test/2-query/ts_3405.py
new file mode 100644
index 0000000000..521fef9432
--- /dev/null
+++ b/tests/system-test/2-query/ts_3405.py
@@ -0,0 +1,59 @@
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.sqlset import *
+import datetime
+
+class TDTestCase:
+    """This test case is used to verify the query performance of the merge-scan process for
+    multi-table joins
+ """
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), False)
+
+ def run(self):
+ # test case for https://jira.taosdata.com:18080/browse/TS-3405:
+ # create db
+ ret = tdSql.execute("CREATE DATABASE IF NOT EXISTS statistics2 REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar))
+ tdSql.execute("use statistics2;")
+
+ # create stable
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics2.`pg`(`day` timestamp,`lt_3` int,`c3_3` int,`c6_3` int,`c9_3` int,`c12_3` int,`c15_3` int,`c18_3` int,`c21_3` int,`c24_3` int,`c27_3` int,`ge_3` int) TAGS(`vin` binary(32));")
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics2.`b`(`day` timestamp, `month` int) TAGS(`group_path` binary(32),`vin` binary(32));")
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics2.`g`(`day` timestamp,`run_state` tinyint) TAGS(`vin` binary(32));")
+
+ # insert the data to table
+ insertRows = 30000
+ for i in range(insertRows):
+ ts = datetime.datetime.strptime('2023-05-01 00:00:00.000', '%Y-%m-%d %H:%M:%S.%f') + datetime.timedelta(seconds=i)
+ tdSql.execute("insert into d1001 using statistics2.`pg` tags('test') values ('{}', {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}) \
+ d2001 using statistics2.`b` tags('1#%', 'test') values ('{}', {}) \
+ d3001 using statistics2.`g` tags('test') values ('{}', {});".format(ts, i, i, i+1, i+2, i+3, i+4, i+5, i+6, i+7, i+8, i+9, ts, 5, ts, 1))
+ tdLog.info("insert %d rows" % (insertRows))
+
+ # execute the sql statements
+ ret = tdSql.query("SELECT sum(pg.lt_3) es1,sum(pg.c3_3) es2,sum(pg.c6_3) es3,sum(pg.c9_3) es4,sum(pg.c12_3) es5,sum(pg.c15_3) es6,sum(pg.c18_3) es7,sum(pg.c21_3) es8,sum(pg.c24_3) es9,sum(pg.c27_3) es10,sum(pg.ge_3) es11 FROM statistics2.b b,statistics2.pg pg,statistics2.g g WHERE b.`day` = pg.`day` AND b.`day` = g.`day` AND b.vin = pg.vin AND b.vin = g.vin AND b.vin IS NOT NULL AND b.`group_path` LIKE '1#%';")
+ # check the first query result
+ if (449985000, 449985000, 450015000, 450045000, 450075000, 450105000, 450135000, 450165000, 450195000, 450225000, 450255000) in tdSql.queryResult:
+ tdLog.info("first query result is correct")
+ else:
+ tdLog.info("first query result is wrong")
+
+ ret = tdSql.query("SELECT sum(pg.lt_3) es1, sum(pg.c3_3) es2, sum(pg.c6_3) es3, sum(pg.c9_3) es4, sum(pg.c12_3) es5, sum(pg.c15_3) es6, sum(pg.c18_3) es7, sum(pg.c21_3) es8, sum(pg.c24_3) es9, sum(pg.c27_3) es10, sum(pg.ge_3) es11 FROM (select * from statistics2.b order by day,month) b, (select * from statistics2.pg order by day,lt_3 ) pg, (select * from statistics2.g order by day,run_state) g WHERE b.`day` = pg.`day` AND b.`day` = g.`day` AND b.vin = pg.vin AND b.vin = g.vin AND b.vin IS NOT NULL;")
+ # check the second query result
+ if (449985000, 449985000, 450015000, 450045000, 450075000, 450105000, 450135000, 450165000, 450195000, 450225000, 450255000) in tdSql.queryResult:
+ tdLog.info("second query result is correct")
+ else:
+ tdLog.info("second query result is wrong")
+
+
+ def stop(self):
+ # clear the db
+ tdSql.execute("drop database if exists statistics2;")
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/ts_3423.py b/tests/system-test/2-query/ts_3423.py
new file mode 100644
index 0000000000..97298b96be
--- /dev/null
+++ b/tests/system-test/2-query/ts_3423.py
@@ -0,0 +1,69 @@
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.sqlset import *
+import datetime
+import random
+
+class TDTestCase:
+ """This test case is used to verify last(*) query result is correct when the data
+        is grouped by tag for a stable (super table)
+ """
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), False)
+
+ def run(self):
+ # test case for https://jira.taosdata.com:18080/browse/TS-3423:
+ # create db
+ ret = tdSql.execute("CREATE DATABASE IF NOT EXISTS ts_3423 REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar))
+ tdSql.execute("use ts_3423;")
+
+ # create stable
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS ts_3423.`st_last`(`ts` timestamp,`n1` int,`n2` float) TAGS(`groupname` binary(32));")
+
+ # insert the data to table
+ insertRows = 10
+ child_table_num = 10
+ for i in range(insertRows):
+ ts = datetime.datetime.strptime('2023-05-01 00:00:00.000', '%Y-%m-%d %H:%M:%S.%f') + datetime.timedelta(seconds=i)
+ for j in range(child_table_num):
+ ret = tdSql.execute("insert into {} using ts_3423.`st_last` tags('{}') values ('{}', {}, {})".format("d" + str(j), "group" + str(j), str(ts), str(i+1), random.random()))
+ tdLog.info("insert %d rows for every child table" % (insertRows))
+
+ # cache model list
+ cache_model = ["none", "last_row", "last_value", "both"]
+ query_res = []
+
+ # execute the sql statements first
+ ret = tdSql.query("select `cachemodel` from information_schema.ins_databases where name='ts_3423'")
+ current_cache_model = tdSql.queryResult[0][0]
+ tdLog.info("query on cache model {}".format(current_cache_model))
+ ret = tdSql.query("select last(*) from st_last group by groupname;")
+ # save the results
+ query_res.append(len(tdSql.queryResult))
+ # remove the current cache model
+ cache_model.remove(current_cache_model)
+
+ for item in cache_model:
+ tdSql.execute("alter database ts_3423 cachemodel '{}';".format(item))
+ # execute the sql statements
+ ret = tdSql.query("select last(*) from st_last group by groupname;")
+ tdLog.info("query on cache model {}".format(item))
+ query_res.append(len(tdSql.queryResult))
+ # check the result
+ res = True if query_res.count(child_table_num) == 4 else False
+ if res:
+ tdLog.info("query result is correct and same among different cache model")
+ else:
+ tdLog.info("query result is wrong")
+
+ def stop(self):
+ # clear the db
+ tdSql.execute("drop database if exists ts_3423;")
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/checkOffsetRowParams.py b/tests/system-test/7-tmq/checkOffsetRowParams.py
new file mode 100644
index 0000000000..8a24148064
--- /dev/null
+++ b/tests/system-test/7-tmq/checkOffsetRowParams.py
@@ -0,0 +1,317 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class actionType(Enum):
+ CREATE_DATABASE = 0
+ CREATE_STABLE = 1
+ CREATE_CTABLE = 2
+ INSERT_DATA = 3
+
+class TDTestCase:
+ hostname = socket.gethostname()
+ #rpcDebugFlagVal = '143'
+ #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #print ("===================: ", updatecfgDict)
+
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def newcur(self,cfg,host,port):
+ user = "root"
+ password = "taosdata"
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ cur=con.cursor()
+ print(cur)
+ return cur
+
+ def initConsumerTable(self,cdbName='cdb'):
+ tdLog.info("create consume database, and consume info table, and consume result table")
+ tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName))
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+ tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
+
+ def initConsumerInfoTable(self,cdbName='cdb'):
+ tdLog.info("drop consumeinfo table")
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
+
+ def selectConsumeResult(self,expectRows,cdbName='cdb'):
+ resultList=[]
+ while 1:
+ tdSql.query("select * from %s.consumeresult"%cdbName)
+ #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ if tdSql.getRows() == expectRows:
+ break
+ else:
+ time.sleep(5)
+
+ for i in range(expectRows):
+ tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
+ resultList.append(tdSql.getData(i , 3))
+
+ return resultList
+
+ def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
+ if valgrind == 1:
+ logFile = cfgPath + '/../log/valgrind-tmq.log'
+ shellCmd = 'nohup valgrind --log-file=' + logFile
+ shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
+
+ if (platform.system().lower() == 'windows'):
+ shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
+ else:
+ shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
+ tdLog.info(shellCmd)
+ os.system(shellCmd)
+
+ def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
+ if dropFlag == 1:
+ tsql.execute("drop database if exists %s"%(dbName))
+
+ tsql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600"%(dbName, vgroups, replica))
+ tdLog.debug("complete to create database %s"%(dbName))
+ return
+
+ def create_stable(self,tsql, dbName,stbName):
+ tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName))
+ tdLog.debug("complete to create %s.%s" %(dbName, stbName))
+ return
+
+ def create_ctables(self,tsql, dbName,stbName,ctbNum):
+ tsql.execute("use %s" %dbName)
+ pre_create = "create table"
+ sql = pre_create
+ #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
+ for i in range(ctbNum):
+ sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
+ if (i > 0) and (i%100 == 0):
+ tsql.execute(sql)
+ sql = pre_create
+ if sql != pre_create:
+ tsql.execute(sql)
+
+ tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+ return
+
+ def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfSql = 0
+ for i in range(ctbNum):
+ sql += " %s_%d values "%(stbName,i)
+ for j in range(rowsPerTbl):
+ sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+ rowsOfSql += 1
+ if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+ tsql.execute(sql)
+ rowsOfSql = 0
+ if j < rowsPerTbl - 1:
+ sql = "insert into %s_%d values " %(stbName,i)
+ else:
+ sql = "insert into "
+ #end sql
+ if sql != pre_insert:
+ #print("insert sql:%s"%sql)
+ tsql.execute(sql)
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def prepareEnv(self, **parameterDict):
+ # create new connector for my thread
+ tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+ if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+ self.create_database(tsql, parameterDict["dbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+ self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+ self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ elif parameterDict["actionType"] == actionType.INSERT_DATA:
+ self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"], \
+ parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+ else:
+            tdLog.exit("unsupported action: ", parameterDict["actionType"])
+
+ return
+
+ def tmqCase1(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 1: ")
+
+ self.initConsumerTable()
+
+ # create and start thread
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db1', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb1', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 100, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+
+ self.create_database(tdSql, parameterDict["dbName"])
+ self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+
+ tdLog.info("create topics from stb1")
+ topicFromStb1 = 'topic_stb1'
+
+ tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
+ consumerId = 0
+ expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
+ topicList = topicFromStb1
+ ifcheckdata = 0
+ ifManualCommit = 0
+ keyList = 'group.id:cgrp1,\
+ enable.auto.commit:true,\
+ auto.commit.interval.ms:2000,\
+ auto.offset.reset:earliest'
+ self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ pollDelay = 20
+ showMsg = 1
+ showRow = 1
+ self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+ tdLog.info("start show subscriptions 1")
+ while(1):
+ tdSql.query("show subscriptions")
+ if (tdSql.getRows() == 0):
+ tdLog.info("sleep")
+ time.sleep(1)
+ elif (tdSql.queryResult[0][4] != None):
+ # tdSql.checkData(0, 4, "earliest")
+ tdSql.checkData(0, 5, 0)
+ break
+
+ tdSql.query("show consumers")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 8, "tbname:1,commit:1,interval:2000,reset:earliest")
+
+ time.sleep(2)
+ tdLog.info("start insert data")
+ self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ self.insert_data(tdSql,\
+ parameterDict["dbName"],\
+ parameterDict["stbName"],\
+ parameterDict["ctbNum"],\
+ parameterDict["rowsPerTbl"],\
+ parameterDict["batchNum"])
+
+ time.sleep(2)
+ tdLog.info("start show subscriptions 2")
+ tdSql.query("show subscriptions")
+ tdSql.checkRows(4)
+ print(tdSql.queryResult)
+ # tdSql.checkData(0, 4, 'offset(log) ver:103')
+ tdSql.checkData(0, 5, 10000)
+ # tdSql.checkData(1, 4, 'offset(log) ver:103')
+ tdSql.checkData(1, 5, 10000)
+ # tdSql.checkData(2, 4, 'offset(log) ver:303')
+ tdSql.checkData(2, 5, 50000)
+ # tdSql.checkData(3, 4, 'offset(log) ver:239')
+ tdSql.checkData(3, 5, 30000)
+
+ tdLog.info("insert process end, and start to check consume result")
+ expectRows = 1
+ resultList = self.selectConsumeResult(expectRows)
+
+ time.sleep(2)
+ tdLog.info("start show subscriptions 3")
+ tdSql.query("show subscriptions")
+ tdSql.checkRows(4)
+ print(tdSql.queryResult)
+ tdSql.checkData(0, 3, None)
+ # tdSql.checkData(0, 4, 'offset(log) ver:103')
+ tdSql.checkData(0, 5, 10000)
+ # tdSql.checkData(1, 4, 'offset(log) ver:103')
+ tdSql.checkData(1, 5, 10000)
+ # tdSql.checkData(2, 4, 'offset(log) ver:303')
+ tdSql.checkData(2, 5, 50000)
+ # tdSql.checkData(3, 4, 'offset(log) ver:239')
+ tdSql.checkData(3, 5, 30000)
+
+ tdSql.query("drop topic %s"%topicFromStb1)
+
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def run(self):
+ tdSql.prepare()
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ cfgPath = buildPath + "/../sim/psim/cfg"
+ tdLog.info("cfgPath: %s" % cfgPath)
+
+ self.tmqCase1(cfgPath, buildPath)
+ # self.tmqCase2(cfgPath, buildPath)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 1461a7b373..9e8d91b6b6 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -121,20 +121,20 @@ ELSE ()
BUILD_COMMAND
COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
- COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ # COMMAND go build -a -o taosadapter.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
-# COMMAND cmake -E echo "Comparessing taosadapter.exe"
-# COMMAND cmake -E time upx taosadapter.exe
+              COMMAND cmake -E echo "Compressing taosadapter.exe"
+ COMMAND cmake -E time upx taosadapter.exe
COMMAND cmake -E echo "Copy taosadapter.exe"
COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin/taosadapter.exe
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E echo "Copy taosadapter.toml"
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
-# COMMAND cmake -E echo "Copy taosadapter-debug.exe"
-# COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
+ COMMAND cmake -E echo "Copy taosadapter-debug.exe"
+ COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
)
ELSE (TD_WINDOWS)
MESSAGE("Building taosAdapter on non-Windows")
@@ -149,20 +149,20 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
-# COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ # COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
-# COMMAND cmake -E echo "Comparessing taosadapter.exe"
-# COMMAND upx taosadapter || :
+              COMMAND cmake -E echo "Compressing taosadapter"
+ COMMAND upx taosadapter || :
COMMAND cmake -E echo "Copy taosadapter"
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E echo "Copy taosadapter.toml"
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
-# COMMAND cmake -E echo "Copy taosadapter-debug"
-# COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
+ COMMAND cmake -E echo "Copy taosadapter-debug"
+ COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
)
ENDIF (TD_WINDOWS)
ENDIF ()
diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h
index 6345647e2f..57415f8335 100644
--- a/tools/shell/inc/shellInt.h
+++ b/tools/shell/inc/shellInt.h
@@ -45,6 +45,8 @@
#define SHELL_MAX_PKG_NUM 1 * 1024 * 1024
#define SHELL_MIN_PKG_NUM 1
#define SHELL_DEF_PKG_NUM 100
+#define SHELL_FLOAT_WIDTH 20
+#define SHELL_DOUBLE_WIDTH 25
typedef struct {
char* hist[SHELL_MAX_HISTORY_SIZE];
diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c
index 19a888fe82..41cdb0f928 100644
--- a/tools/shell/src/shellAuto.c
+++ b/tools/shell/src/shellAuto.c
@@ -91,9 +91,14 @@ SWords shellCommands[] = {
{"create stream into as select", 0, 0, NULL}, // 26 append sub sql
{"create topic as select", 0, 0, NULL}, // 27 append sub sql
{"create function as outputtype language ", 0, 0, NULL},
+ {"create or replace as outputtype language ", 0, 0, NULL},
{"create aggregate function as outputtype bufsize language ", 0, 0, NULL},
+ {"create or replace aggregate function as outputtype bufsize language ", 0, 0, NULL},
{"create user pass sysinfo 0;", 0, 0, NULL},
{"create user pass sysinfo 1;", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
+ {"compact database ", 0, 0, NULL},
+#endif
{"describe ", 0, 0, NULL},
{"delete from where ", 0, 0, NULL},
{"drop database ", 0, 0, NULL},
@@ -117,7 +122,11 @@ SWords shellCommands[] = {
{"kill connection ;", 0, 0, NULL},
{"kill query ", 0, 0, NULL},
{"kill transaction ", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
{"merge vgroup ", 0, 0, NULL},
+#endif
+ {"pause stream ;", 0, 0, NULL},
+ {"resume stream ;", 0, 0, NULL},
{"reset query cache;", 0, 0, NULL},
{"restore dnode ;", 0, 0, NULL},
{"restore vnode on dnode ;", 0, 0, NULL},
@@ -173,7 +182,9 @@ SWords shellCommands[] = {
{"show vgroups;", 0, 0, NULL},
{"show consumers;", 0, 0, NULL},
{"show grants;", 0, 0, NULL},
+#ifdef TD_ENTERPRISE
{"split vgroup ", 0, 0, NULL},
+#endif
{"insert into values(", 0, 0, NULL},
{"insert into using tags(", 0, 0, NULL},
{"insert into using values(", 0, 0, NULL},
@@ -432,9 +443,10 @@ void showHelp() {
kill connection ; \n\
kill query ; \n\
kill transaction ;\n\
- ----- M ----- \n\
- merge vgroup ...\n\
+ ----- P ----- \n\
+ pause stream ;\n\
----- R ----- \n\
+ resume stream ;\n\
reset query cache;\n\
restore dnode ;\n\
restore vnode on dnode ;\n\
@@ -489,14 +501,20 @@ void showHelp() {
show vgroups;\n\
show consumers;\n\
show grants;\n\
- split vgroup ...\n\
----- T ----- \n\
trim database ;\n\
----- U ----- \n\
use ;");
- printf("\n\n");
+#ifdef TD_ENTERPRISE
+ printf(
+ "\n\n\
+    ----- special commands on enterprise version ----- \n\
+ compact database ; \n\
+ split vgroup ;");
+#endif
+ printf("\n\n");
// define in getDuration() function
printf(
"\
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 7b30052659..865d4680a3 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -326,6 +326,7 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
char quotationStr[2];
quotationStr[0] = '\"';
quotationStr[1] = 0;
+ int32_t width;
int n;
char buf[TSDB_MAX_BYTES_PER_ROW];
@@ -358,20 +359,27 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val));
break;
case TSDB_DATA_TYPE_FLOAT:
+ width = SHELL_FLOAT_WIDTH;
if (tsEnableScience) {
- taosFprintfFile(pFile, "%e", GET_FLOAT_VAL(val));
+ taosFprintfFile(pFile, "%*e", width, GET_FLOAT_VAL(val));
} else {
- taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val));
+ n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.5f", width, GET_FLOAT_VAL(val));
+ if (n > SHELL_FLOAT_WIDTH) {
+ taosFprintfFile(pFile, "%*e", width, GET_FLOAT_VAL(val));
+ } else {
+ taosFprintfFile(pFile, "%s", buf);
+ }
}
break;
case TSDB_DATA_TYPE_DOUBLE:
+ width = SHELL_DOUBLE_WIDTH;
if (tsEnableScience) {
- snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9e", 23, GET_DOUBLE_VAL(val));
- taosFprintfFile(pFile, "%s", buf);
+ snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%.9e", GET_DOUBLE_VAL(val));
+ taosFprintfFile(pFile, "%*s", width, buf);
} else {
- n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val));
- if (n > TMAX(25, length)) {
- taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val));
+ n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val));
+ if (n > SHELL_DOUBLE_WIDTH) {
+ taosFprintfFile(pFile, "%*.15e", width, GET_DOUBLE_VAL(val));
} else {
taosFprintfFile(pFile, "%s", buf);
}
@@ -607,7 +615,7 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
printf("%*e", width, GET_FLOAT_VAL(val));
} else {
n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.5f", width, GET_FLOAT_VAL(val));
- if (n > TMAX(20, width)) {
+ if (n > SHELL_FLOAT_WIDTH) {
printf("%*e", width, GET_FLOAT_VAL(val));
} else {
printf("%s", buf);
@@ -620,7 +628,7 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
printf("%*s", width, buf);
} else {
n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val));
- if (n > TMAX(25, width)) {
+ if (n > SHELL_DOUBLE_WIDTH) {
printf("%*.15e", width, GET_DOUBLE_VAL(val));
} else {
printf("%s", buf);
@@ -757,10 +765,10 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
return TMAX(21, width); // '-9223372036854775807'
case TSDB_DATA_TYPE_FLOAT:
- return TMAX(20, width);
+ return TMAX(SHELL_FLOAT_WIDTH, width);
case TSDB_DATA_TYPE_DOUBLE:
- return TMAX(25, width);
+ return TMAX(SHELL_DOUBLE_WIDTH, width);
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_GEOMETRY:
diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt
index 87b0d11d1c..71dfd710a5 100644
--- a/utils/test/c/CMakeLists.txt
+++ b/utils/test/c/CMakeLists.txt
@@ -9,35 +9,35 @@ add_executable(get_db_name_test get_db_name_test.c)
add_executable(tmq_offset tmqOffset.c)
target_link_libraries(
tmq_offset
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
create_table
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_demo
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_sim
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
)
target_link_libraries(
tmq_taosx_ci
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
@@ -45,7 +45,7 @@ target_link_libraries(
target_link_libraries(
write_raw_block_test
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
@@ -53,7 +53,7 @@ target_link_libraries(
target_link_libraries(
sml_test
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
@@ -61,7 +61,7 @@ target_link_libraries(
target_link_libraries(
get_db_name_test
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os
diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt
index c2cf7ac3c5..81737809d9 100644
--- a/utils/tsim/CMakeLists.txt
+++ b/utils/tsim/CMakeLists.txt
@@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC)
add_executable(tsim ${TSIM_SRC})
target_link_libraries(
tsim
- PUBLIC taos_static
+ PUBLIC taos
PUBLIC util
PUBLIC common
PUBLIC os