fix:conflict

commit e019ed2cc3
@@ -52,7 +52,7 @@ TDengine also provides a set of auxiliary tools, taosTools, which currently includes taosBench

### Ubuntu 18.04 and above & Debian:

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```

#### Install build dependencies for taos-tools
@@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d

```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```

### CentOS 8/Fedora/Rocky Linux

```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```

#### Install dependencies for building taosTools on CentOS

@@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash

### macOS

```
brew install argp-standalone pkgconfig geos
brew install argp-standalone pkgconfig
```

### Set up the golang development environment
@@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t

### Ubuntu 18.04 and above or Debian

```bash
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
```

#### Install build dependencies for taosTools

@@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d

```bash
sudo yum install epel-release
sudo yum update
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
```

### CentOS 8/Fedora/Rocky Linux

```bash
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
```

#### Install build dependencies for taosTools on CentOS

@@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash

### macOS

```
brew install argp-standalone pkgconfig geos
brew install argp-standalone pkgconfig
```

### Setup golang environment
@@ -115,18 +115,6 @@ ELSE ()

SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GCC_COVERAGE_COMPILE_FLAGS} ${GCC_COVERAGE_LINK_FLAGS}")
ENDIF ()

IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()

# disable all assert
IF ((${DISABLE_ASSERT} MATCHES "true") OR (${DISABLE_ASSERTS} MATCHES "true"))
ADD_DEFINITIONS(-DDISABLE_ASSERT)

@@ -168,4 +156,20 @@ ELSE ()

MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
ENDIF()

# build mode
SET(CMAKE_C_FLAGS_REL "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS_REL "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")

IF (${BUILD_SANITIZER})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSEIF (${BUILD_RELEASE})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()

ENDIF ()
@@ -2,7 +2,7 @@

IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.4.3")
SET(TD_VER_NUMBER "3.0.5.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
|
@ -1,11 +1,16 @@
|
|||
|
||||
# rocksdb
|
||||
#set(librocksdb_src ${TD_CONTRIB_DIR}/rocksdb)
|
||||
#set(librocksdb_binary ${TD_CONTRIB_DIR}/rocksdb/build)
|
||||
|
||||
ExternalProject_Add(rocksdb
|
||||
GIT_REPOSITORY https://github.com/facebook/rocksdb.git
|
||||
GIT_TAG v8.1.1
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
URL https://github.com/facebook/rocksdb/archive/refs/tags/v8.1.1.tar.gz
|
||||
URL_HASH MD5=3b4c97ee45df9c8a5517308d31ab008b
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/rocksdb"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
||||
|
|
|
@@ -227,9 +227,22 @@ endif(${BUILD_WITH_LEVELDB})

# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if (${BUILD_WITH_UV})
if(${TD_LINUX})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
endif (${BUILD_WITH_UV})

if(${BUILD_WITH_ROCKSDB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
@@ -253,7 +266,7 @@ if(${BUILD_WITH_ROCKSDB})

endif(${TD_DARWIN})

if(${TD_WINDOWS})
option(WITH_JNI "" OFF)
option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})

if(${TD_WINDOWS})
@@ -265,7 +278,7 @@ if(${BUILD_WITH_ROCKSDB})

option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
option(PORTABLE "" OFF)
option(PORTABLE "" ON)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
@@ -274,7 +287,7 @@ if(${BUILD_WITH_ROCKSDB})

option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()
@@ -485,6 +498,13 @@ endif(${BUILD_ADDR2LINE})

# geos
if(${BUILD_GEOS})
if(${TD_LINUX})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS_REL}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_REL}")
IF ("${CMAKE_BUILD_TYPE}" STREQUAL "")
SET(CMAKE_BUILD_TYPE Release)
endif()
endif(${TD_LINUX})
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
add_subdirectory(geos EXCLUDE_FROM_ALL)
target_include_directories(
@@ -45,7 +45,7 @@ In TDengine, the data types below can be used when specifying a column or tag.

:::note

- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. A literal single quote inside the string must be escaped with a backslash, like `\'`.
- The length of BINARY can be up to 16,374 bytes (65,517 for a data column and 16,382 for a tag column since version 3.0.5.0). The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. A literal single quote inside the string must be escaped with a backslash, like `\'`.
- Numeric values in SQL statements will be determined as integer or float type according to whether there is a decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.

:::
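A minimal, hypothetical sketch of the BINARY rules above, using the Java connector documented later in this commit; the database `db`, table `t`, and connection URL are illustrative only:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BinaryLimitSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical REST connection; adjust host and credentials to your setup.
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS db");
            // binary(20) reserves 20 bytes: room for up to twenty single-byte characters.
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS db.t (ts TIMESTAMP, note BINARY(20))");
            // The literal single quote is escaped with a backslash, as described above.
            stmt.executeUpdate("INSERT INTO db.t VALUES (NOW, 'it\\'s ok')");
        }
    }
}
```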
@@ -45,7 +45,7 @@ table_option: {

1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
3. The maximum length of each row is 48k bytes; please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
3. The maximum length of each row is 48k bytes (64k since version 3.0.5.0); please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
6. Escape character "\`" can be used to avoid conflicts between table names and reserved keywords; the above rules are bypassed when using the escape character on table names, but the upper limit for the name length is still valid. Table names specified using the escape character are case sensitive.
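As a small follow-on sketch of rule 6 (reusing the `url` and imports from the previous example), a reserved word can serve as a table name when escaped with backticks; the names below are hypothetical:

```java
try (Connection conn = DriverManager.getConnection(url);
     Statement stmt = conn.createStatement()) {
    // Escaped with backticks, the name bypasses the keyword conflict and
    // becomes case sensitive; the 192-byte name limit still applies.
    stmt.executeUpdate("CREATE TABLE IF NOT EXISTS db.`table` (ts TIMESTAMP, v INT)");
}
```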
@@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),

- Maximum length of database name is 64 bytes
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- Maximum length of each data row is 48K bytes (64K since version 3.0.5.0). Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- The maximum length of a column name is 64 bytes.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- The maximum length of a tag name is 64 bytes
@@ -32,25 +32,22 @@ TDengine's JDBC driver implementation is as consistent as possible with the rela

Native connections are supported on the same platforms as the TDengine client driver.
REST connections are supported on all platforms that can run Java.

## Version support

Please refer to [version support list](/reference/connector#version-support)

## Recent update logs

| taos-jdbcdriver version | major changes |
| :---------------------: | :-----------: |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
| 3.2.0 | This version has been deprecated |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11; you are advised to use another version in a JDK 8 environment |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :-----------: | :--------------: |
| 3.2.2 | subscription adds the seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
| 3.0.1 - 3.0.4 | fix the resultSet data is parsed incorrectly sometimes. 3.0.1 is compiled on JDK 11; you are advised to use another version in a JDK 8 environment | - |
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
| 2.0.42 | fix wasNull interface return value in WebSocket connection | - |
| 2.0.41 | fix decode method of username and password in REST connection | - |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters | - |
| 2.0.38 | JDBC REST connections add bulk pull function | - |
| 2.0.37 | Support json tags | - |
| 2.0.36 | Support schemaless writing | - |

**Note**: adding `batchfetch` to the REST connection and setting it to true will enable the WebSocket connection.
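For example, a REST URL that enables the WebSocket connection looks like the one below (host and credentials are placeholders; the same URL shape appears in the schemaless example later on this page):

```java
// batchfetch=true upgrades the REST connection to WebSocket transport.
String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata&batchfetch=true";
Connection connection = DriverManager.getConnection(url);
```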
@@ -102,6 +99,8 @@ For specific error codes, please refer to:

| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
| 0x231d | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| 0x231e | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |

@@ -117,8 +116,8 @@ For specific error codes, please refer to:

| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
| - | can't create connection with server within | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| - | failed to complete the task within the specified time | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter cannot be negative. Use the correct parameter |
| 0x237a | vGroup not found in result set | subscription is not bound to the VGroup due to the rebalance mechanism |

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
@@ -169,7 +168,7 @@ Add following dependency in the `pom.xml` file of your Maven project:

<dependency>
    <groupId>com.taosdata.jdbc</groupId>
    <artifactId>taos-jdbcdriver</artifactId>
    <version>3.2.1</version>
    <version>3.2.2</version>
</dependency>
```
@@ -913,14 +912,15 @@ public class SchemalessWsTest {

    public static void main(String[] args) throws SQLException {
        final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
        Connection connection = DriverManager.getConnection(url);
        init(connection);
        try (Connection connection = DriverManager.getConnection(url)) {
            init(connection);

        SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
        writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
        writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
        writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
        System.exit(0);
            try (SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")) {
                writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
                writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
                writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
            }
        }
    }

    private static void init(Connection connection) throws SQLException {
@@ -991,6 +991,17 @@ while(true) {

`poll` obtains one message each time it is run.

#### Assignment subscription Offset

```java
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;

void seek(TopicPartition partition, long offset) throws SQLException;
```
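A brief usage sketch of these methods, assuming an already-subscribed consumer (`TaosConsumer`) and a hypothetical topic name `topic_meters`; error handling is omitted:

```java
// Rewind every partition of the topic to its earliest offset, then verify.
Map<TopicPartition, Long> beginning = consumer.beginningOffsets("topic_meters");
for (Map.Entry<TopicPartition, Long> entry : beginning.entrySet()) {
    consumer.seek(entry.getKey(), entry.getValue()); // jump back to the start
    long pos = consumer.position(entry.getKey());    // read back the new position
    System.out.println(entry.getKey() + " is now at offset " + pos);
}
```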

#### Close subscriptions

```java

@@ -1256,6 +1267,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:

- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
- mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
- consumer-demo: an example of consuming TDengine data, where the consumption rate can be controlled by a parameter.

[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
@@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.

## Version support

Please refer to [version support list](/reference/connector#version-support)
Please refer to [version support list](https://github.com/taosdata/driver-go#remind)

## Supported features
@@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose

Commit information.

* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`

Get Assignment (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
Note: `ignoredTimeoutMs` is reserved for compatibility purposes

Seek offset (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Unsubscribe() error`

Unsubscribe.
@@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose

Commit information.

* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`

Get Assignment (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
Note: `ignoredTimeoutMs` is reserved for compatibility purposes

Seek offset (TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).

* `func (c *Consumer) Unsubscribe() error`

Unsubscribe.
@@ -476,7 +494,7 @@ Unsubscribe.

Close consumer.

For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)

### parameter binding via WebSocket

@@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv

Closes the parameter binding.

For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)

## API Reference
@@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co

Native connections are supported on the same platforms as the TDengine client driver.
Websocket connections are supported on all platforms that can run Rust.

## Version support
## Version history

Please refer to [version support list](/reference/connector#version-support)
| connector-rust version | TDengine version | major features |
| :--------------------: | :--------------: | :------------: |
| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |

The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
@@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in

}
```

Get assignments:

Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0

```rust
let assignments = consumer.assignments().await.unwrap();
```

Seek offset:

Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0

```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```

Unsubscribe:

```rust
@@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m

- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.

For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).

For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
@@ -79,8 +79,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]

  -e, --escape-character      Use escaped character for database name
  -N, --without-property      Dump database without its properties.
  -s, --schemaonly            Only dump table schemas.
  -y, --answer-yes            Input yes for prompt. It will skip data file
                              checking!
  -d, --avro-codec=snappy     Choose an avro codec among null, deflate, snappy,
                              and lzma.
  -S, --start-time=START_TIME Start time to dump. Either epoch or
@@ -5,7 +5,7 @@ description: This document describes the configuration parameters for the TDengi

## Configuration File on Server Side

On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos` on Linux systems and `C:\TDengine` on Windows systems. The location of the configuration file can be specified with the `-c` parameter on the CLI of `taosd`. For example, on a Linux system the configuration file can be put under `/home/user` and used like below

```
taosd -c /home/user
@@ -365,6 +365,16 @@ The charset that takes effect is UTF-8.

| Unit          | GB  |
| Default Value | 2.0 |

### metaCacheMaxSize

| Attribute     | Description                                      |
| ------------- | ------------------------------------------------ |
| Applicable    | Client Only                                      |
| Meaning       | Maximum meta cache size in single client process |
| Unit          | MB                                               |
| Default Value | -1 (No limitation)                               |
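For instance, a client-side `taos.cfg` could cap the meta cache like this (the value is illustrative):

```
metaCacheMaxSize 512
```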

## Cluster Parameters

### supportVnodes
@@ -433,6 +443,26 @@ The charset that takes effect is UTF-8.

| Default Value | 0 |
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file was last changed |

### slowLogThreshold

| Attribute     | Description                                                                                              |
| ------------- | -------------------------------------------------------------------------------------------------------- |
| Applicable    | Client only                                                                                                |
| Meaning       | When an operation's execution time exceeds this threshold, the operation is logged in the slow log file   |
| Unit          | second                                                                                                     |
| Default Value | 3                                                                                                          |
| Note          | All slow operations are logged in the file "taosSlowLog" in the log directory                              |

### slowLogScope

| Attribute       | Description                                                              |
| --------------- | ------------------------------------------------------------------------ |
| Applicable      | Client only                                                               |
| Meaning         | Slow log type to be logged                                                |
| Optional Values | ALL, QUERY, INSERT, OTHERS, NONE                                          |
| Default Value   | ALL                                                                       |
| Note            | All slow operations are logged by default; a single option can be set     |
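As a sketch, a client-side `taos.cfg` that records only queries slower than five seconds might contain (values are illustrative):

```
slowLogThreshold 5
slowLogScope     QUERY
```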

### debugFlag

| Attribute | Description |
@@ -90,7 +90,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam

Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.

:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::

## Time resolution recognition
@@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se

![TDengine Database Kafka Connector -- streaming integration with kafka connect]

## What is Confluent?

[Confluent](https://www.confluent.io/) adds many extensions to Kafka, including:

1. Schema Registry
2. REST Proxy
3. Non-Java Clients
4. Many packaged Kafka Connect plugins
5. GUI for managing and monitoring Kafka - Confluent Control Center

Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
![TDengine Database Kafka Connector -- Kafka Connect structure]

Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.

## Prerequisites

1. Linux operating system
2. Java 8 and Maven installed
3. Git is installed
3. Git/curl/vi is installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)

## Install Confluent

Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
## Install Kafka

Execute in any directory:

```
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
tar xzf confluent-7.1.1.tar.gz -C /opt/
```
```shell
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
```

Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.

```title=".profile"
export CONFLUENT_HOME=/opt/confluent-7.1.1
export PATH=$CONFLUENT_HOME/bin:$PATH
export KAFKA_HOME=/opt/kafka
export PATH=$PATH:$KAFKA_HOME/bin
```

Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
After the installation is complete, you can enter `confluent version` for simple verification:

```
# confluent version
confluent - Confluent CLI

Version:     v2.6.1
Git Ref:     6d920590
Build Date:  2022-02-18T06:14:21Z
Go Version:  go1.17.6 (linux/amd64)
Development: false
```

## Install TDengine Connector plugin

### Install from source code

```
```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
mvn clean package -Dmaven.test.skip=true
unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```

The above script first clones the project source code and then compiles and packages it with Maven. After packaging is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a built-in plugin path.
The above script first clones the project source code and then compiles and packages it with Maven. After packaging is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to the plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.

### Install with confluent-hub
### Add configuration file

[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.

## Start Confluent

```
confluent local services start
```properties
plugin.path=/usr/share/java,/opt/kafka/components
```
:::note
Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
:::
## Start Kafka Services

:::tip
If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
Use the commands below to start all services:

```title="Console output log" {1}
Using CONFLUENT_CURRENT: /tmp/confluent.106668
Starting ZooKeeper
ZooKeeper is [UP]
Starting Kafka
Kafka is [UP]
Starting Schema Registry
Schema Registry is [UP]
Starting Kafka REST
Kafka REST is [UP]
Starting Connect
Connect is [UP]
Starting ksqlDB Server
ksqlDB Server is [UP]
Starting Control Center
Control Center is [UP]
```
```shell
zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties

To clear data, execute `rm -rf /tmp/confluent.106668`.
:::
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
### Check Confluent Services Status
connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties

Use the command below to check the status of all services:

```
confluent local services status
```

The expected output is:
```
Connect is [UP]
Control Center is [UP]
Kafka is [UP]
Kafka REST is [UP]
ksqlDB Server is [UP]
Schema Registry is [UP]
ZooKeeper is [UP]
```

### Check Successfully Loaded Plugin

After Kafka Connect has fully started, you can use the command below to check whether the plugins are installed successfully:
```
confluent local services connect plugin list

```shell
curl http://localhost:8083/connectors
```

The output should contain `TDengineSinkConnector` and `TDengineSourceConnector` as below:
The output is as below:

```txt
[]
```
Available Connect Plugins:
[
  {
    "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
    "type": "sink",
    "version": "1.0.0"
  },
  {
    "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
    "type": "source",
    "version": "1.0.0"
  },
......
```

If not, please check the log file of Kafka Connect. To view the log file path, please execute:

```
echo `cat /tmp/confluent.current`/connect/connect.stdout
```

It should produce a path like: `/tmp/confluent.104086/connect/connect.stdout`

Besides the log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path`, which is a series of paths joined by commas. If Kafka Connect did not find the plugins, it's probably because the installed path is not included in `plugin.path`.

## The use of TDengine Sink Connector
|
|||
|
||||
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
|
||||
|
||||
### Add configuration file
|
||||
### Add Sink Connector configuration file
|
||||
|
||||
```
|
||||
```shell
|
||||
mkdir ~/test
|
||||
cd ~/test
|
||||
vi sink-demo.properties
|
||||
vi sink-demo.json
|
||||
```
|
||||
|
||||
sink-demo.properties' content is following:
|
||||
sink-demo.json' content is following:
|
||||
|
||||
```ini title="sink-demo.properties"
|
||||
name=TDengineSinkConnector
|
||||
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
|
||||
tasks.max=1
|
||||
topics=meters
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.user=root
|
||||
connection.password=taosdata
|
||||
connection.database=power
|
||||
db.schemaless=line
|
||||
data.precision=ns
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="sink-demo.json"
|
||||
{
|
||||
"name": "TDengineSinkConnector",
|
||||
"config": {
|
||||
"connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.user": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "power",
|
||||
"db.schemaless": "line",
|
||||
"data.precision": "ns",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": 1
|
||||
}
|
||||
}
|
||||
```

Key configuration instructions:

1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
2. `db.schemaless=line` means the data is in the InfluxDB Line protocol format.
1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
2. `"db.schemaless": "line"` means the data is in the InfluxDB Line protocol format.

### Create Connector instance
### Create Sink Connector instance

```
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
```shell
curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```

If the above command is executed successfully, the output is as follows:
@@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:

    "tasks.max": "1",
    "topics": "meters",
    "value.converter": "org.apache.kafka.connect.storage.StringConverter",
    "name": "TDengineSinkConnector"
    "name": "TDengineSinkConnector",
    "errors.tolerance": "all",
    "errors.deadletterqueue.topic.name": "dead_letter_topic",
    "errors.deadletterqueue.topic.replication.factor": "1",
  },
  "tasks": [],
  "type": "sink"
@@ -257,8 +181,8 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0

Use kafka-console-producer to write test data to the topic `meters`.

```
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
```shell
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```

:::note
@@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat

Use the TDengine CLI to verify that the sync was successful.

```
```sql
taos> use power;
Database changed.

taos> select * from meters;
ts | current | voltage | phase | groupid | location |
_ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -291,31 +215,39 @@ The role of the TDengine Source Connector is to push all the data of a specific

TDengine Source Connector will convert the data in TDengine data tables into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json) and then write it to Kafka.

The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.

### Add configuration file
### Add Source Connector configuration file

```
vi source-demo.properties
```shell
vi source-demo.json
```

Input the following content:

```ini title="source-demo.properties"
name=TDengineSourceConnector
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
tasks.max=1
connection.url=jdbc:TAOS://127.0.0.1:6030
connection.username=root
connection.password=taosdata
connection.database=test
connection.attempts=3
connection.backoff.ms=5000
topic.prefix=tdengine-source-
poll.interval.ms=1000
fetch.max.rows=100
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.storage.StringConverter
```json title="source-demo.json"
{
  "name": "TDengineSourceConnector",
  "config": {
    "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
    "tasks.max": 1,
    "connection.url": "jdbc:TAOS://127.0.0.1:6030",
    "connection.username": "root",
    "connection.password": "taosdata",
    "connection.database": "test",
    "connection.attempts": 3,
    "connection.backoff.ms": 5000,
    "topic.prefix": "tdengine",
    "topic.delimiter": "-",
    "poll.interval.ms": 1000,
    "fetch.max.rows": 100,
    "topic.per.stable": true,
    "topic.ignore.db": false,
    "out.format": "line",
    "key.converter": "org.apache.kafka.connect.storage.StringConverter",
    "value.converter": "org.apache.kafka.connect.storage.StringConverter"
  }
}
```

### Prepare test data
@@ -340,40 +272,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1

Use the TDengine CLI to execute the SQL script:

```
```shell
taos -f prepare-source-data.sql
```

### Create Connector instance

```
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
```
```shell
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```

### View topic data

Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-test-meters. In the beginning, all historical data will be output. After two new rows are inserted into TDengine, kafka-console-consumer immediately outputs them. The output is in InfluxDB line protocol format.

```
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
```shell
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
```

output:

```
```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......
```

All historical data is displayed. Switch to the TDengine CLI and insert two new rows of data:

```
```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
```

Switch back to kafka-console-consumer; the command-line window has printed out the two rows just inserted.
@@ -383,16 +315,16 @@ After testing, use the unload command to stop the loaded connector.

View currently active connectors:

```
confluent local services connect connector status
```
```shell
curl http://localhost:8083/connectors
```

You should now have two active connectors if you followed the previous steps. Use the following command to unload:

```
confluent local services connect connector unload TDengineSinkConnector
confluent local services connect connector unload TDengineSourceConnector
```
```shell
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
```

## Configuration reference
@@ -427,22 +359,20 @@ The following configuration items apply to TDengine Sink Connector and TDengine

3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables; default value is 1000.
5. `fetch.max.rows`: The maximum number of rows retrieved when querying the database; default is 100.
6. `query.interval.ms`: The time range of reading data from TDengine each time, in milliseconds. It should be adjusted according to the data flow-in rate; the default value is 1000.
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, and the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, and the topic naming rule is `<topic.prefix>-<connection.database>`.

6. `query.interval.ms`: The time range of reading data from TDengine each time, in milliseconds. It should be adjusted according to the data flow-in rate; the default value is 0, which means fetching all the data up to the latest time.
7. `out.format`: Result output format. `line` indicates that the output format is InfluxDB line protocol format, `json` indicates that the output format is json. The default is line.
8. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, and the topic naming rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, and the topic naming rule is `<topic.prefix><topic.delimiter><connection.database>`.
9. `topic.ignore.db`: Whether the topic naming rule contains the database name: true indicates that the rule is `<topic.prefix><topic.delimiter><stable.name>`, false indicates that the rule is `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`; the default is false. This does not take effect when `topic.per.stable` is set to false.
10. `topic.delimiter`: topic name delimiter; the default is `-`. A worked example follows this list.
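Putting rules 8 to 10 together with the sample Source Connector configuration above (`topic.prefix` = `tdengine`, `topic.delimiter` = `-`, `connection.database` = `test`, super table `meters`), the resulting topic names are:

```
topic.per.stable = true,  topic.ignore.db = false  ->  tdengine-test-meters
topic.per.stable = true,  topic.ignore.db = true   ->  tdengine-meters
topic.per.stable = false                           ->  tdengine-test
```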

## Other notes

1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.

## Feedback

https://github.com/taosdata/kafka-connect-tdengine/issues
<https://github.com/taosdata/kafka-connect-tdengine/issues>

## Reference

1. https://www.confluent.io/what-is-apache-kafka
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
3. https://docs.confluent.io/platform/current/platform.html
1. For more information, see <https://kafka.apache.org/documentation/>
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

## 3.0.5.0

<Release type="tdengine" version="3.0.5.0" />

## 3.0.4.2

<Release type="tdengine" version="3.0.4.2" />
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

## 2.5.1

<Release type="tools" version="2.5.1" />

## 2.5.0

<Release type="tools" version="2.5.0" />
@@ -299,7 +299,7 @@ SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";

SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```

For table `d10`, perform average, maximum and minimum aggregation by 10 every second:
For table `d10`, perform average, maximum and minimum aggregation every 10 seconds:

```sql
SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
@@ -32,25 +32,22 @@ TDengine's JDBC driver implementation stays as consistent as possible with relational database drivers

Native connections are supported on the same platforms as the TDengine client driver.
REST connections are supported on all platforms that can run Java.

## Version support
## Version history

Please refer to the [version support list](../#版本支持)

## Recent update logs

| taos-jdbcdriver version | major changes |
| :---------------------: | :-----------: |
| 3.2.1 | New feature: WebSocket connections support schemaless and prepareStatement writing. Change: consumer poll now returns a ConsumerRecord result set, and the data can be obtained through value(). |
| 3.2.0 | Has connection issues; not recommended |
| 3.1.0 | WebSocket connections support the subscription function |
| 3.0.1 - 3.0.4 | Fix result-set data parsing errors in some cases. 3.0.1 is compiled on JDK 11; use another version in a JDK 8 environment |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | Fix the wasNull interface return value in WebSocket connections |
| 2.0.41 | Fix the encoding of username and password in REST connections |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout settings |
| 2.0.38 | JDBC REST connections add the bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :-----------: | :--------------: |
| 3.2.2 | New feature: data subscription supports the seek function. | 3.0.5.0 or later |
| 3.2.1 | New feature: WebSocket connections support schemaless and prepareStatement writing. Change: consumer poll now returns a ConsumerRecord result set, and the data can be obtained through value(). | 3.0.3.0 or later |
| 3.2.0 | Has connection issues; not recommended | - |
| 3.1.0 | WebSocket connections support the subscription function | - |
| 3.0.1 - 3.0.4 | Fix result-set data parsing errors in some cases. 3.0.1 is compiled on JDK 11; use another version in a JDK 8 environment | - |
| 3.0.0 | Support for TDengine 3.0 | 3.0.0.0 or later |
| 2.0.42 | Fix the wasNull interface return value in WebSocket connections | - |
| 2.0.41 | Fix the encoding of username and password in REST connections | - |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout settings | - |
| 2.0.38 | JDBC REST connections add the bulk pull function | - |
| 2.0.37 | Support json tags | - |
| 2.0.36 | Support schemaless writing | - |

**Note**: adding the `batchfetch` parameter to a REST connection and setting it to true enables the WebSocket connection.
@@ -80,45 +77,47 @@ There are four types of error codes that the JDBC connector may report:

For specific error codes, please refer to:

| Error Code | Description | Suggested Actions |
| ---------- | ----------- | ----------------- |
| 0x2301 | connection already closed | The connection has been closed. Check the connection status, or recreate the connection to execute the relevant commands. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported; consider switching to another connection method. |
| 0x2303 | invalid variables | Invalid parameters. Check the interface specification and adjust the parameter types and sizes. |
| 0x2304 | statement is closed | The statement has already been closed. Check whether the statement was closed before being used again, or whether the connection is normal. |
| 0x2305 | resultSet is closed | The resultSet has been released. Check whether it was released before being used again. |
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | Parameter out of range. Check the valid range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Check whether the Connection was closed before being used again, or whether the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the Data Types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | Cannot register the JNI driver. Check whether the url is filled in correctly. |
| 0x2312 | url is not set | Check whether the REST connection url is filled in correctly. |
| 0x2314 | numeric value out of range | Check whether the correct interface was used for the numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type was specified when converting between TDengine and JDBC data types. |
| 0x2317 | | The wrong request type was used in the REST connection. |
| 0x2318 | | A data transmission exception occurred in the REST connection. Check the network and retry. |
| 0x2319 | user is required | The user name is missing when creating the connection |
| 0x231a | password is required | The password is missing when creating the connection |
| 0x231c | httpEntity is null, sql: | An execution exception occurred in the REST connection |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2352 | Unsupported encoding | An unsupported character encoding set was specified under the native connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement on the native connection. Check the taos log to locate the problem. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when the command was executed on the native connection. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The result set obtained over the native connection is abnormal. Check the connection and retry. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained over the native connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL to execute. |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation failed on the native connection. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameters are null when creating a subscription. Fill in the correct parameters. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains an empty value. Fill in the correct parameters. |
| 0x2373 | failed to set consumer property, | The parameter value contains an empty value. Fill in the correct parameters. |
| 0x2375 | topic reference has been destroyed | The topic reference was released while creating the data subscription. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | The subscription topic name is empty while creating the data subscription. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create the data subscription. Check the taos log according to the error message to locate the problem. |
| - | can't create connection with server within | Increase the connection time via the httpConnectTimeout parameter, or check the connection to taosAdapter. |
| - | failed to complete the task within the specified time | Increase the execution time via the messageWaitTimeout parameter, or check the connection to taosAdapter. |
| Error Code | Description | Suggested Actions |
| ---------- | ----------- | ----------------- |
| 0x2301 | connection already closed | The connection has been closed. Check the connection status, or recreate the connection to execute the relevant commands. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported; consider switching to another connection method. |
| 0x2303 | invalid variables | Invalid parameters. Check the interface specification and adjust the parameter types and sizes. |
| 0x2304 | statement is closed | The statement has already been closed. Check whether the statement was closed before being used again, or whether the connection is normal. |
| 0x2305 | resultSet is closed | The resultSet has been released. Check whether it was released before being used again. |
| 0x2306 | Batch is empty! | Add parameters to prepareStatement before executing executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for query operations, not executeUpdate(). |
| 0x230d | parameter index out of range | Parameter out of range. Check the valid range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Check whether the Connection was closed before being used again, or whether the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the Data Types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | Cannot register the JNI driver. Check whether the url is filled in correctly. |
| 0x2312 | url is not set | Check whether the REST connection url is filled in correctly. |
| 0x2314 | numeric value out of range | Check whether the correct interface was used for the numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type was specified when converting between TDengine and JDBC data types. |
|
||||
| 0x2317 | | REST 连接中使用了错误的请求类型。 |
|
||||
| 0x2318 | | REST 连接中出现了数据传输异常,请检查网络情况并重试。 |
|
||||
| 0x2319 | user is required | 创建连接时缺少用户名信息 |
|
||||
| 0x231a | password is required | 创建连接时缺少密码信息 |
|
||||
| 0x231c | httpEntity is null, sql: | REST 连接中执行出现异常 |
|
||||
| 0x231d | can't create connection with server within | 通过增加参数 httpConnectTimeout 增加连接耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| 0x231e | failed to complete the task within the specified time | 通过增加参数 messageWaitTimeout 增加执行耗时,或是请检查与 taosAdapter 之间的连接情况。 |
|
||||
| 0x2350 | unknown error | 未知异常,请在 github 反馈给开发人员。 |
|
||||
| 0x2352 | Unsupported encoding | 本地连接下指定了不支持的字符编码集 |
|
||||
| 0x2353 | internal error of database, please see taoslog for more details | 本地连接执行 prepareStatement 时出现错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2354 | JNI connection is NULL | 本地连接执行命令时,Connection 已经关闭。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2355 | JNI result set is NULL | 本地连接获取结果集,结果集异常,请检查连接情况,并重试。 |
|
||||
| 0x2356 | invalid num of fields | 本地连接获取结果集的 meta 信息不匹配。 |
|
||||
| 0x2357 | empty sql string | 填写正确的 SQL 进行执行。 |
|
||||
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | 本地连接分配内存错误,请检查 taos log 进行问题定位。 |
|
||||
| 0x2371 | consumer properties must not be null! | 创建订阅时参数为空,请填写正确的参数。 |
|
||||
| 0x2372 | configs contain empty key, failed to set consumer property | 参数 key 中包含空值,请填写正确的参数。 |
|
||||
| 0x2373 | failed to set consumer property, | 参数 value 中包含空值,请填写正确的参数。 |
|
||||
| 0x2375 | topic reference has been destroyed | 创建数据订阅过程中,topic 引用被释放。请检查与 TDengine 的连接情况。 |
|
||||
| 0x2376 | failed to set consumer topic, topic name is empty | 创建数据订阅过程中,订阅 topic 名称为空。请检查指定的 topic 名称是否填写正确。 |
|
||||
| 0x2377 | consumer reference has been destroyed | 订阅数据传输通道已经关闭,请检查与 TDengine 的连接情况。 |
|
||||
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
||||
| 0x2379 | seek offset must not be a negative number | seek 接口参数不能为负值,请使用正确的参数 |
|
||||
| 0x237a | vGroup not found in result set | VGroup 没有分配给当前 consumer,由于 Rebalance 机制导致 Consumer 与 VGroup 不是绑定的关系 |
|
||||
|
||||
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
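在应用代码中,可以按下面的方式捕获 SQLException 并对照上表中的错误码做处理(一个最小示意,`url` 为假设值):

```java
// 捕获 JDBC 异常并读取错误码的最小示意
try (Connection conn = DriverManager.getConnection(url);
     Statement stmt = conn.createStatement()) {
    stmt.executeUpdate("create database if not exists demo");
} catch (SQLException e) {
    // getErrorCode() 返回的数值对应上表中的错误码,例如 0x2301 表示连接已关闭
    System.err.printf("errCode: 0x%x, message: %s%n", e.getErrorCode(), e.getMessage());
}
```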
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -169,7 +168,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.1</version>
|
||||
<version>3.2.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
@ -916,14 +915,15 @@ public class SchemalessWsTest {
|
|||
|
||||
public static void main(String[] args) throws SQLException {
|
||||
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
init(connection);
|
||||
try(Connection connection = DriverManager.getConnection(url)){
|
||||
init(connection);
|
||||
|
||||
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
System.exit(0);
|
||||
try(SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless")){
|
||||
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
|
||||
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
|
||||
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection connection) throws SQLException {
|
||||
|
@ -994,6 +994,17 @@ while(true) {
|
|||
|
||||
`poll` 每次调用获取一个消息。
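下面是一个基于 poll 的消费轮询示意(假设 consumer 已创建并完成订阅,Bean 为假设的反序列化目标类型):

```java
// running 为业务侧的循环控制标志(假设);每次 poll 返回本次拉取到的记录,逐条通过 value() 取出数据
while (running) {
    ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<Bean> record : records) {
        Bean bean = record.value();
        System.out.println(bean);
    }
}
```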
|
||||
|
||||
#### 指定订阅 Offset
|
||||
|
||||
```
|
||||
long position(TopicPartition partition) throws SQLException;
|
||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||
|
||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||
```
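基于上述接口的一个使用示意(假设 consumer 已创建并订阅,topic 名称 `topic_meters` 为假设值),先查询当前消费位置,再 seek 回起始位置:

```java
String topic = "topic_meters";
Map<TopicPartition, Long> begin = consumer.beginningOffsets(topic);
for (Map.Entry<TopicPartition, Long> entry : begin.entrySet()) {
    // 查询当前消费位置
    long pos = consumer.position(entry.getKey());
    System.out.println(entry.getKey() + " position=" + pos);
    // seek 回到最早位置,从头重新消费(offset 不能为负,参见错误码 0x2379)
    consumer.seek(entry.getKey(), entry.getValue());
}
```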
|
||||
|
||||
#### 关闭订阅
|
||||
|
||||
```java
|
||||
|
@ -1258,6 +1269,7 @@ public static void main(String[] args) throws Exception {
|
|||
- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。
|
||||
- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
|
||||
- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。
|
||||
- consumer-demo:Consumer 消费 TDengine 数据示例,可通过参数控制消费速度。
|
||||
|
||||
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
|
|||
|
||||
## 版本支持
|
||||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
请参考[版本支持列表](https://github.com/taosdata/driver-go#remind)
|
||||
|
||||
## 支持的功能特性
|
||||
|
||||
|
@ -383,6 +383,15 @@ func main() {
|
|||
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
|
||||
|
||||
获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
|
||||
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
|
||||
|
||||
按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭连接。
|
||||
|
@ -468,11 +477,20 @@ func main() {
|
|||
|
||||
提交消息。
|
||||
|
||||
* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
|
||||
|
||||
获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
|
||||
注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
|
||||
|
||||
按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
|
||||
|
||||
* `func (c *Consumer) Close() error`
|
||||
|
||||
关闭连接。
|
||||
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
|
||||
|
||||
### 通过 WebSocket 进行参数绑定
|
||||
|
||||
|
@ -520,7 +538,7 @@ func main() {
|
|||
|
||||
结束参数绑定。
|
||||
|
||||
完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
|
||||
完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
|
||||
|
||||
## API 参考
|
||||
|
||||
|
|
|
@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
|
|||
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
|
||||
Websocket 连接支持所有能运行 Rust 的平台。
|
||||
|
||||
## 版本支持
|
||||
## 版本历史
|
||||
|
||||
请参考[版本支持列表](../#版本支持)
|
||||
| Rust 连接器版本 | TDengine 版本 | 主要功能 |
|
||||
| :----------------: | :--------------: | :--------------------------------------------------: |
|
||||
| v0.8.10 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
|
||||
| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
|
||||
| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
|
||||
| v0.6.0 | 3.0.0.0 | 基础功能。 |
|
||||
|
||||
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
|
||||
|
||||
|
@ -502,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur
|
|||
}
|
||||
```
|
||||
|
||||
获取消费进度:
|
||||
|
||||
版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
|
||||
|
||||
```rust
|
||||
let assignments = consumer.assignments().await.unwrap();
|
||||
```
|
||||
|
||||
按照指定的进度消费:
|
||||
|
||||
版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
|
||||
|
||||
```rust
|
||||
consumer.offset_seek(topic, vgroup_id, offset).await;
|
||||
```
|
||||
|
||||
停止订阅:
|
||||
|
||||
```rust
|
||||
|
@ -516,7 +537,7 @@ consumer.unsubscribe().await;
|
|||
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
|
||||
- `auto.commit.interval.ms`: 自动标记的时间间隔。
|
||||
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
|
||||
完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
|
||||
|
||||
其他相关结构体 API 使用说明请移步 Rust 文档托管网页:<https://docs.rs/taos>。
|
||||
|
||||
|
|
|
@ -45,9 +45,9 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
|
||||
:::note
|
||||
|
||||
- 表的每行长度不能超过 48KB(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
|
||||
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
|
||||
- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
|
||||
- BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382)字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。用法示例参见本节之后的代码。
|
||||
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
|
||||
|
||||
:::
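下面用一段 Java(JDBC)片段演示上述 BINARY 的定长占用与单引号转义规则(连接 `url`、库名与表名均为假设值,仅作示意):

```java
try (Connection conn = DriverManager.getConnection(url);
     Statement stmt = conn.createStatement()) {
    // BINARY(20):最长 20 个单字节字符,固定占用 20 字节(外加 2 字节额外开销)
    stmt.executeUpdate("CREATE TABLE IF NOT EXISTS demo.t1 (ts TIMESTAMP, v BINARY(20))");
    // 字符串两端使用单引号;内部单引号以 \' 转义;超过 20 字节会报错
    stmt.executeUpdate("INSERT INTO demo.t1 VALUES (NOW, 'it\\'s ascii')");
}
```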
|
||||
|
|
|
@ -43,7 +43,7 @@ table_option: {
|
|||
|
||||
1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键;
|
||||
2. 表名最大长度为 192;
|
||||
3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
3. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
|
||||
5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节;
|
||||
6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
|
||||
|
|
|
@ -26,7 +26,7 @@ description: 合法字符集和命名中的限制规则
|
|||
|
||||
- 数据库名最大长度为 64 字节
|
||||
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
|
||||
- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB) (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 列名最大长度为 64 字节
|
||||
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
|
||||
- 标签名最大长度为 64 字节
|
||||
|
|
|
@ -82,8 +82,6 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-e, --escape-character Use escaped character for database name
|
||||
-N, --without-property Dump database without its properties.
|
||||
-s, --schemaonly Only dump tables' schema.
|
||||
-y, --answer-yes Input yes for prompt. It will skip data file
|
||||
checking!
|
||||
-d, --avro-codec=snappy Choose an avro codec among null, deflate, snappy,
|
||||
and lzma.
|
||||
-S, --start-time=START_TIME Start time to dump. Either epoch or
|
||||
|
|
|
@ -5,7 +5,7 @@ description: "TDengine 客户端和服务配置列表"
|
|||
|
||||
## 为服务端指定配置文件
|
||||
|
||||
TDengine 系统后台服务由 taosd 提供,可以在配置文件 taos.cfg 里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos 目录,可以通过 taosd 命令行执行参数 -c 指定配置文件目录。比如,指定配置文件位于`/home/user` 这个目录:
|
||||
TDengine 系统后台服务由 taosd 提供,可以在配置文件 taos.cfg 里修改配置参数,以满足不同场景的需求。在 Linux 系统上,配置文件的缺省位置在 `/etc/taos` 目录,在 Windows 系统上缺省位置在 `C:\TDengine` 。可以通过 taosd 命令行执行参数 -c 指定配置文件所在目录。比如,在 Linux 系统上可以指定配置文件位于 `/home/user` 这个目录:
|
||||
|
||||
```
|
||||
taosd -c /home/user
|
||||
|
@ -384,6 +384,15 @@ charset 的有效值是 UTF-8。
|
|||
| 单位 | GB |
|
||||
| 缺省值 | 2.0 |
|
||||
|
||||
### metaCacheMaxSize
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------------------------------- |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | 指定单个客户端元数据缓存大小的最大值 |
|
||||
| 单位 | MB |
|
||||
| 缺省值 | -1 (无限制) |
|
||||
|
||||
## 集群相关
|
||||
|
||||
### supportVnodes
|
||||
|
@ -452,6 +461,26 @@ charset 的有效值是 UTF-8。
|
|||
| 缺省值 | 0 |
|
||||
| 补充说明 | 大于 0 时,日志文件会被重命名为 taosdlog.xxx,其中 xxx 为日志文件最后修改的时间戳。 |
|
||||
|
||||
### slowLogThreshold
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------- |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | 指定慢查询门限值,大于等于门限值认为是慢查询 |
|
||||
| 单位 | 秒 |
|
||||
| 缺省值 | 3 |
|
||||
| 补充说明 | 每个客户端中所有慢查询会被记录在日志目录下的 taosSlowLog 文件中 |
|
||||
|
||||
### slowLogScope
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------------------------------------|
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | 指定启动记录哪些类型的慢查询 |
|
||||
| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE |
|
||||
| 缺省值 | ALL |
|
||||
| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 |
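以下是在 taos.cfg 中同时设置上述两个慢查询参数的示意(数值仅为示例):

```
# 慢查询门限设为 5 秒,且只记录查询类慢语句
slowLogThreshold 5
slowLogScope     QUERY
```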
|
||||
|
||||
### debugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
|
|
@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
|
||||
:::tip
|
||||
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
|
||||
48KB,标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
|
||||
48KB(从 3.0.5.0 版本开始为 64KB),标签值的总长度不超过 16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
|
|||
|
||||

|
||||
|
||||
## 什么是 Confluent?
|
||||
|
||||
[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
|
||||
|
||||
1. Schema Registry
|
||||
2. REST 代理
|
||||
3. 非 Java 客户端
|
||||
4. 很多打包好的 Kafka Connect 插件
|
||||
5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
|
||||
|
||||
这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
|
||||

|
||||
|
||||
Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
|
||||
|
||||
## 前置条件
|
||||
|
||||
运行本教程中的示例需要满足以下前提条件:
|
||||
|
||||
1. Linux 操作系统
|
||||
2. 已安装 Java 8 和 Maven
|
||||
3. 已安装 Git
|
||||
3. 已安装 Git、curl、vi
|
||||
4. 已安装并启动 TDengine。如果还没有,可参考[安装和卸载](/operation/pkg-install)
|
||||
|
||||
## 安装 Confluent
|
||||
|
||||
Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。
|
||||
## 安装 Kafka
|
||||
|
||||
在任意目录下执行:
|
||||
|
||||
```
|
||||
curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
|
||||
tar xzf confluent-7.1.1.tar.gz -C /opt/
|
||||
```shell
|
||||
curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
|
||||
tar xzf kafka_2.13-3.4.0.tgz -C /opt/
|
||||
ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
|
||||
```
|
||||
|
||||
然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。
|
||||
然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。
|
||||
|
||||
```title=".profile"
|
||||
export CONFLUENT_HOME=/opt/confluent-7.1.1
|
||||
export PATH=$CONFLUENT_HOME/bin:$PATH
|
||||
export KAFKA_HOME=/opt/kafka
|
||||
export PATH=$PATH:$KAFKA_HOME/bin
|
||||
```
|
||||
|
||||
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
|
||||
|
||||
安装完成之后,可以输入`confluent version`做简单验证:
|
||||
|
||||
```
|
||||
# confluent version
|
||||
confluent - Confluent CLI
|
||||
|
||||
Version: v2.6.1
|
||||
Git Ref: 6d920590
|
||||
Build Date: 2022-02-18T06:14:21Z
|
||||
Go Version: go1.17.6 (linux/amd64)
|
||||
Development: false
|
||||
```
|
||||
|
||||
## 安装 TDengine Connector 插件
|
||||
|
||||
### 从源码安装
|
||||
### 编译插件
|
||||
|
||||
```
|
||||
```shell
|
||||
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
|
||||
cd kafka-connect-tdengine
|
||||
mvn clean package
|
||||
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
```
|
||||
|
||||
以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。
|
||||
以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`。
|
||||
|
||||
### 用 confluent-hub 安装
|
||||
### 配置插件
|
||||
|
||||
[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。
|
||||
**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。
|
||||
将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中
|
||||
|
||||
## 启动 Confluent
|
||||
|
||||
```
|
||||
confluent local services start
|
||||
```properties
|
||||
plugin.path=/usr/share/java,/opt/kafka/components
|
||||
```
|
||||
|
||||
:::note
|
||||
一定要先安装插件再启动 Confluent, 否则加载插件会失败。
|
||||
:::
|
||||
## 启动 Kafka
|
||||
|
||||
:::tip
|
||||
若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 :
|
||||
```shell
|
||||
zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
|
||||
|
||||
```title="控制台输出日志" {1}
|
||||
Using CONFLUENT_CURRENT: /tmp/confluent.106668
|
||||
Starting ZooKeeper
|
||||
ZooKeeper is [UP]
|
||||
Starting Kafka
|
||||
Kafka is [UP]
|
||||
Starting Schema Registry
|
||||
Schema Registry is [UP]
|
||||
Starting Kafka REST
|
||||
Kafka REST is [UP]
|
||||
Starting Connect
|
||||
Connect is [UP]
|
||||
Starting ksqlDB Server
|
||||
ksqlDB Server is [UP]
|
||||
Starting Control Center
|
||||
Control Center is [UP]
|
||||
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
|
||||
|
||||
connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
|
||||
```
|
||||
|
||||
清空数据可执行 `rm -rf /tmp/confluent.106668`。
|
||||
:::
|
||||
|
||||
### 验证各个组件是否启动成功
|
||||
### 验证 kafka Connect 是否启动成功
|
||||
|
||||
输入命令:
|
||||
|
||||
```
|
||||
confluent local services status
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
如果各组件都启动成功,会得到如下输出:
|
||||
|
||||
```txt
|
||||
[]
|
||||
```
|
||||
Connect is [UP]
|
||||
Control Center is [UP]
|
||||
Kafka is [UP]
|
||||
Kafka REST is [UP]
|
||||
ksqlDB Server is [UP]
|
||||
Schema Registry is [UP]
|
||||
ZooKeeper is [UP]
|
||||
```
|
||||
|
||||
### 验证插件是否安装成功
|
||||
|
||||
在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
|
||||
|
||||
```
|
||||
confluent local services connect plugin list
|
||||
```
|
||||
|
||||
如果成功安装,会输出如下:
|
||||
|
||||
```txt {4,9}
|
||||
Available Connect Plugins:
|
||||
[
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"type": "sink",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"type": "source",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
......
|
||||
```
|
||||
|
||||
如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
|
||||
```
|
||||
echo `cat /tmp/confluent.current`/connect/connect.stdout
|
||||
```
|
||||
该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。
|
||||
|
||||
与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
|
||||
|
||||
|
||||
## TDengine Sink Connector 的使用
|
||||
|
||||
|
@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn
|
|||
|
||||
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
|
||||
|
||||
### 添加配置文件
|
||||
### 添加 Sink Connector 配置文件
|
||||
|
||||
```
|
||||
```shell
|
||||
mkdir ~/test
|
||||
cd ~/test
|
||||
vi sink-demo.properties
|
||||
vi sink-demo.json
|
||||
```
|
||||
|
||||
sink-demo.properties 内容如下:
|
||||
sink-demo.json 内容如下:
|
||||
|
||||
```ini title="sink-demo.properties"
|
||||
name=TDengineSinkConnector
|
||||
connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
|
||||
tasks.max=1
|
||||
topics=meters
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.user=root
|
||||
connection.password=taosdata
|
||||
connection.database=power
|
||||
db.schemaless=line
|
||||
data.precision=ns
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="sink-demo.json"
|
||||
{
|
||||
"name": "TDengineSinkConnector",
|
||||
"config": {
|
||||
"connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
|
||||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.user": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "power",
|
||||
"db.schemaless": "line",
|
||||
"data.precision": "ns",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": 1
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
关键配置说明:
|
||||
|
||||
1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。
|
||||
2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。
|
||||
1. `"topics": "meters"` 和 `"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。
|
||||
2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。
|
||||
|
||||
### 创建 Connector 实例
|
||||
### 创建 Sink Connector 实例
|
||||
|
||||
```
|
||||
confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
|
||||
```shell
|
||||
curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
若以上命令执行成功,则有如下输出:
|
||||
|
@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
|
|||
"tasks.max": "1",
|
||||
"topics": "meters",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"name": "TDengineSinkConnector"
|
||||
"name": "TDengineSinkConnector",
|
||||
"errors.tolerance": "all",
|
||||
"errors.deadletterqueue.topic.name": "dead_letter_topic",
|
||||
"errors.deadletterqueue.topic.replication.factor": "1",
|
||||
},
|
||||
"tasks": [],
|
||||
"type": "sink"
|
||||
|
@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
|
|||
|
||||
使用 kafka-console-producer 向主题 meters 添加测试数据。
|
||||
|
||||
```
|
||||
cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
|
||||
```shell
|
||||
cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
|
||||
```
|
||||
|
||||
:::note
|
||||
|
@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
|
|||
|
||||
使用 TDengine CLI 验证同步是否成功。
|
||||
|
||||
```
|
||||
```sql
|
||||
taos> use power;
|
||||
Database changed.
|
||||
|
||||
taos> select * from meters;
|
||||
ts | current | voltage | phase | groupid | location |
|
||||
_ts | current | voltage | phase | groupid | location |
|
||||
===============================================================================================================================================================
|
||||
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
|
||||
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
|
||||
|
@ -295,31 +214,39 @@ TDengine Source Connector 的作用是将 TDengine 某个数据库某一时刻
|
|||
|
||||
TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [InfluxDB Line 协议格式](/develop/insert-data/influxdb-line/) 或 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json), 然后写入 Kafka。
|
||||
|
||||
下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
|
||||
下面的示例程序同步数据库 test 中的数据到主题 tdengine-test-meters。
|
||||
|
||||
### 添加配置文件
|
||||
### 添加 Source Connector 配置文件
|
||||
|
||||
```
|
||||
vi source-demo.properties
|
||||
```shell
|
||||
vi source-demo.json
|
||||
```
|
||||
|
||||
输入以下内容:
|
||||
|
||||
```ini title="source-demo.properties"
|
||||
name=TDengineSourceConnector
|
||||
connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
|
||||
tasks.max=1
|
||||
connection.url=jdbc:TAOS://127.0.0.1:6030
|
||||
connection.username=root
|
||||
connection.password=taosdata
|
||||
connection.database=test
|
||||
connection.attempts=3
|
||||
connection.backoff.ms=5000
|
||||
topic.prefix=tdengine-source-
|
||||
poll.interval.ms=1000
|
||||
fetch.max.rows=100
|
||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||
```json title="source-demo.json"
|
||||
{
|
||||
"name":"TDengineSourceConnector",
|
||||
"config":{
|
||||
"connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
|
||||
"tasks.max": 1,
|
||||
"connection.url": "jdbc:TAOS://127.0.0.1:6030",
|
||||
"connection.username": "root",
|
||||
"connection.password": "taosdata",
|
||||
"connection.database": "test",
|
||||
"connection.attempts": 3,
|
||||
"connection.backoff.ms": 5000,
|
||||
"topic.prefix": "tdengine",
|
||||
"topic.delimiter": "-",
|
||||
"poll.interval.ms": 1000,
|
||||
"fetch.max.rows": 100,
|
||||
"topic.per.stable": true,
|
||||
"topic.ignore.db": false,
|
||||
"out.format": "line",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"value.converter": "org.apache.kafka.connect.storage.StringConverter"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 准备测试数据
|
||||
|
@ -344,27 +271,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
|
|||
|
||||
使用 TDengine CLI, 执行 SQL 文件。
|
||||
|
||||
```
|
||||
```shell
|
||||
taos -f prepare-source-data.sql
|
||||
```
|
||||
|
||||
### 创建 Connector 实例
|
||||
### 创建 Source Connector 实例
|
||||
|
||||
```
|
||||
confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
|
||||
```shell
|
||||
curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
### 查看 topic 数据
|
||||
|
||||
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
|
||||
使用 kafka-console-consumer 命令行工具监控主题 tdengine-test-meters 中的数据。一开始会输出所有历史数据,往 TDengine 插入两条新的数据之后,kafka-console-consumer 也会立即输出新增的两条数据。输出数据为 InfluxDB line protocol 格式。
|
||||
|
||||
```
|
||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||
```shell
|
||||
kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-test-meters
|
||||
```
|
||||
|
||||
输出:
|
||||
|
||||
```
|
||||
```txt
|
||||
......
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||
|
@ -373,7 +300,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
|
|||
|
||||
此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据:
|
||||
|
||||
```
|
||||
```sql
|
||||
USE test;
|
||||
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
|
||||
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
||||
|
@ -387,15 +314,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
|
|||
|
||||
查看当前活跃的 connector:
|
||||
|
||||
```
|
||||
confluent local services connect connector status
|
||||
```shell
|
||||
curl http://localhost:8083/connectors
|
||||
```
|
||||
|
||||
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
|
||||
|
||||
```
|
||||
confluent local services connect connector unload TDengineSinkConnector
|
||||
confluent local services connect connector unload TDengineSourceConnector
|
||||
```shell
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
|
||||
curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
|
||||
```
|
||||
|
||||
## 配置参考
|
||||
|
@ -437,20 +364,20 @@ confluent local services connect connector unload TDengineSourceConnector
|
|||
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始。
|
||||
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
|
||||
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
|
||||
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000.
|
||||
7. `topic.per.stable`: 如果设置为true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `<topic.prefix>-<connection.database>-<stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>`
|
||||
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 0,即获取到当前最新时间的所有数据。
|
||||
7. `out.format` : 结果集输出格式。`line` 表示输出格式为 InfluxDB Line 协议格式,`json` 表示输出格式是 json。默认为 line。
|
||||
8. `topic.per.stable`: 如果设置为 true,表示一个超级表对应一个 Kafka topic,topic 的命名规则为 `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix><topic.delimiter><connection.database>`
|
||||
9. `topic.ignore.db`: topic 命名规则是否包含 database 名称,true 表示规则为 `<topic.prefix><topic.delimiter><stable.name>`,false 表示规则为 `<topic.prefix><topic.delimiter><connection.database><topic.delimiter><stable.name>`,默认 false。此配置项在 `topic.per.stable` 设置为 false 时不生效。
|
||||
10. `topic.delimiter`: topic 名称分割符,默认为 `-`。
|
||||
|
||||
## 其他说明
|
||||
|
||||
1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
|
||||
2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
|
||||
1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>。
|
||||
|
||||
## 问题反馈
|
||||
|
||||
无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
|
||||
无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:<https://github.com/taosdata/kafka-connect-tdengine/issues>。
|
||||
|
||||
## 参考
|
||||
|
||||
1. https://www.confluent.io/what-is-apache-kafka
|
||||
2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
|
||||
3. https://docs.confluent.io/platform/current/platform.html
|
||||
1. <https://kafka.apache.org/documentation/>
|
||||
|
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.5.0
|
||||
|
||||
<Release type="tdengine" version="3.0.5.0" />
|
||||
|
||||
## 3.0.4.2
|
||||
|
||||
<Release type="tdengine" version="3.0.4.2" />
|
||||
|
|
|
@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.5.1
|
||||
|
||||
<Release type="tools" version="2.5.1" />
|
||||
|
||||
## 2.5.0
|
||||
|
||||
<Release type="tools" version="2.5.0" />
|
||||
|
|
|
@ -51,27 +51,27 @@ public class JdbcDemo {
|
|||
|
||||
private void createDatabase() {
|
||||
String sql = "create database if not exists " + dbName;
|
||||
exuete(sql);
|
||||
execute(sql);
|
||||
}
|
||||
|
||||
private void useDatabase() {
|
||||
String sql = "use " + dbName;
|
||||
exuete(sql);
|
||||
execute(sql);
|
||||
}
|
||||
|
||||
private void dropTable() {
|
||||
final String sql = "drop table if exists " + dbName + "." + tbName + "";
|
||||
exuete(sql);
|
||||
execute(sql);
|
||||
}
|
||||
|
||||
private void createTable() {
|
||||
final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int)";
|
||||
exuete(sql);
|
||||
execute(sql);
|
||||
}
|
||||
|
||||
private void insert() {
|
||||
final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)";
|
||||
exuete(sql);
|
||||
execute(sql);
|
||||
}
|
||||
|
||||
private void select() {
|
||||
|
@ -120,7 +120,7 @@ public class JdbcDemo {
|
|||
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
|
||||
}
|
||||
|
||||
private void exuete(String sql) {
|
||||
private void execute(String sql) {
|
||||
long start = System.currentTimeMillis();
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
boolean execute = statement.execute(sql);
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<groupId>com.taosdata</groupId>
|
||||
<artifactId>consumer</artifactId>
|
||||
<version>1.0-SNAPSHOT</version>
|
||||
|
||||
<properties>
|
||||
<maven.compiler.source>8</maven.compiler.source>
|
||||
<maven.compiler.target>8</maven.compiler.target>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.2.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
<version>32.0.0-jre</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-assembly-plugin</artifactId>
|
||||
<version>3.3.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>ConsumerDemo</id>
|
||||
<configuration>
|
||||
<finalName>ConsumerDemo</finalName>
|
||||
<archive>
|
||||
<manifest>
|
||||
<mainClass>com.taosdata.ConsumerDemo</mainClass>
|
||||
</manifest>
|
||||
</archive>
|
||||
<descriptorRefs>
|
||||
<descriptorRef>jar-with-dependencies</descriptorRef>
|
||||
</descriptorRefs>
|
||||
</configuration>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>single</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>8</source>
|
||||
<target>8</target>
|
||||
<encoding>UTF-8</encoding>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
</project>
|
|
@ -0,0 +1,52 @@
|
|||
# How to Run the Consumer Demo Code on Linux
|
||||
TDengine's Consumer demo is organized as a Maven project so that users can easily compile, package, and run it. If you don't have Maven on your server, you may install it using
|
||||
```
|
||||
sudo apt-get install maven
|
||||
```
|
||||
|
||||
## Install TDengine Client and TaosAdapter
|
||||
Make sure you have already installed the TDengine client in your current development environment.
|
||||
Download the TDengine package from our website: ``https://www.taosdata.com/cn/all-downloads/`` and install the client.
|
||||
|
||||
## Run Consumer Demo using mvn plugin
|
||||
Run the following command:
|
||||
```
|
||||
mvn clean compile exec:java -Dexec.mainClass="com.taosdata.ConsumerDemo"
|
||||
```
|
||||
|
||||
## Custom configuration
|
||||
```shell
|
||||
# the host of TDengine server
|
||||
export TAOS_HOST="127.0.0.1"
|
||||
|
||||
# the port of TDengine server
|
||||
export TAOS_PORT="6041"
|
||||
|
||||
# the consumer type, can be "ws" or "jni"
|
||||
export TAOS_TYPE="ws"
|
||||
|
||||
# the number of consumers
|
||||
export TAOS_JDBC_CONSUMER_NUM="1"
|
||||
|
||||
# the number of processors to consume
|
||||
export TAOS_JDBC_PROCESSOR_NUM="2"
|
||||
|
||||
# the number of records to be consumed per processor per second
|
||||
export TAOS_JDBC_RATE_PER_PROCESSOR="1000"
|
||||
|
||||
# poll wait time in ms
|
||||
export TAOS_JDBC_POLL_SLEEP="100"
|
||||
```
|
||||
|
||||
## Run Consumer Demo using jar
|
||||
|
||||
To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
|
||||
```
|
||||
mvn clean package assembly:single
|
||||
```
|
||||
|
||||
To run ConsumerDemo.jar, go to ``TDengine/tests/examples/JDBC/consumer-demo`` and execute
|
||||
```
|
||||
java -jar target/ConsumerDemo-jar-with-dependencies.jar
|
||||
```
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
package com.taosdata;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
public class Bean {
|
||||
private Timestamp ts;
|
||||
private Integer c1;
|
||||
private String c2;
|
||||
|
||||
public Timestamp getTs() {
|
||||
return ts;
|
||||
}
|
||||
|
||||
public void setTs(Timestamp ts) {
|
||||
this.ts = ts;
|
||||
}
|
||||
|
||||
public Integer getC1() {
|
||||
return c1;
|
||||
}
|
||||
|
||||
public void setC1(Integer c1) {
|
||||
this.c1 = c1;
|
||||
}
|
||||
|
||||
public String getC2() {
|
||||
return c2;
|
||||
}
|
||||
|
||||
public void setC2(String c2) {
|
||||
this.c2 = c2;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("Bean {");
|
||||
sb.append("ts=").append(ts);
|
||||
sb.append(", c1=").append(c1);
|
||||
sb.append(", c2='").append(c2).append('\'');
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package com.taosdata;
|
||||
|
||||
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
|
||||
|
||||
public class BeanDeserializer extends ReferenceDeserializer<Bean> {
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
package com.taosdata;
|
||||
|
||||
public class Config {
|
||||
public static final String TOPIC = "test_consumer";
|
||||
public static final String TAOS_HOST = "127.0.0.1";
|
||||
public static final String TAOS_PORT = "6041";
|
||||
public static final String TAOS_TYPE = "ws";
|
||||
public static final int TAOS_JDBC_CONSUMER_NUM = 1;
|
||||
public static final int TAOS_JDBC_PROCESSOR_NUM = 2;
|
||||
public static final int TAOS_JDBC_RATE_PER_PROCESSOR = 1000;
|
||||
public static final int TAOS_JDBC_POLL_SLEEP = 100;
|
||||
|
||||
private final int consumerNum;
|
||||
private final int processCapacity;
|
||||
private final int rate;
|
||||
private final int pollSleep;
|
||||
private final String type;
|
||||
private final String host;
|
||||
private final String port;
|
||||
|
||||
public Config(String type, String host, String port, int consumerNum, int processCapacity, int rate, int pollSleep) {
|
||||
this.type = type;
|
||||
this.consumerNum = consumerNum;
|
||||
this.processCapacity = processCapacity;
|
||||
this.rate = rate;
|
||||
this.pollSleep = pollSleep;
|
||||
this.host = host;
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public int getConsumerNum() {
|
||||
return consumerNum;
|
||||
}
|
||||
|
||||
public int getProcessCapacity() {
|
||||
return processCapacity;
|
||||
}
|
||||
|
||||
public int getRate() {
|
||||
return rate;
|
||||
}
|
||||
|
||||
public int getPollSleep() {
|
||||
return pollSleep;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public String getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public String getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public static Config getFromENV() {
|
||||
String host = System.getenv("TAOS_HOST") != null ? System.getenv("TAOS_HOST") : TAOS_HOST;
|
||||
String port = System.getenv("TAOS_PORT") != null ? System.getenv("TAOS_PORT") : TAOS_PORT;
|
||||
String type = System.getenv("TAOS_TYPE") != null ? System.getenv("TAOS_TYPE") : TAOS_TYPE;
|
||||
|
||||
String c = System.getenv("TAOS_JDBC_CONSUMER_NUM");
|
||||
int num = c != null ? Integer.parseInt(c) : TAOS_JDBC_CONSUMER_NUM;
|
||||
|
||||
String p = System.getenv("TAOS_JDBC_PROCESSOR_NUM");
|
||||
int capacity = p != null ? Integer.parseInt(p) : TAOS_JDBC_PROCESSOR_NUM;
|
||||
|
||||
String r = System.getenv("TAOS_JDBC_RATE_PER_PROCESSOR");
|
||||
int rate = r != null ? Integer.parseInt(r) : TAOS_JDBC_RATE_PER_PROCESSOR;
|
||||
|
||||
String s = System.getenv("TAOS_JDBC_POLL_SLEEP");
|
||||
int sleep = s != null ? Integer.parseInt(s) : TAOS_JDBC_POLL_SLEEP;
|
||||
|
||||
return new Config(type, host, port, num, capacity, rate, sleep);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
package com.taosdata;
|
||||
|
||||
import com.taosdata.jdbc.tmq.TMQConstants;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static com.taosdata.Config.*;
|
||||
|
||||
public class ConsumerDemo {
|
||||
public static void main(String[] args) throws SQLException {
|
||||
// Config
|
||||
Config config = Config.getFromENV();
|
||||
// Generated data
|
||||
mockData();
|
||||
|
||||
Properties prop = new Properties();
|
||||
prop.setProperty(TMQConstants.CONNECT_TYPE, config.getType());
|
||||
prop.setProperty(TMQConstants.BOOTSTRAP_SERVERS, config.getHost() + ":" + config.getPort());
|
||||
prop.setProperty(TMQConstants.CONNECT_USER, "root");
|
||||
prop.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
|
||||
prop.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
||||
prop.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||
prop.setProperty(TMQConstants.GROUP_ID, "gId");
|
||||
prop.setProperty(TMQConstants.VALUE_DESERIALIZER, "com.taosdata.BeanDeserializer");
|
||||
for (int i = 0; i < config.getConsumerNum() - 1; i++) {
|
||||
new Thread(new Worker(prop, config)).start();
|
||||
}
|
||||
new Worker(prop, config).run();
|
||||
}
|
||||
|
||||
public static void mockData() throws SQLException {
|
||||
String dbName = "test_consumer";
|
||||
String tableName = "st";
|
||||
String url = "jdbc:TAOS-RS://" + TAOS_HOST + ":" + TAOS_PORT + "/?user=root&password=taosdata&batchfetch=true";
|
||||
Connection connection = DriverManager.getConnection(url);
|
||||
Statement statement = connection.createStatement();
|
||||
statement.executeUpdate("create database if not exists " + dbName + " WAL_RETENTION_PERIOD 3650");
|
||||
statement.executeUpdate("use " + dbName);
|
||||
statement.executeUpdate("create table if not exists " + tableName + " (ts timestamp, c1 int, c2 nchar(100)) ");
|
||||
statement.executeUpdate("create topic if not exists " + TOPIC + " as select ts, c1, c2 from " + tableName);
|
||||
|
||||
ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(r -> {
|
||||
Thread t = new Thread(r);
|
||||
t.setName("mock-data-thread-" + t.getId());
|
||||
return t;
|
||||
});
|
||||
AtomicInteger atomic = new AtomicInteger();
|
||||
scheduledExecutorService.scheduleWithFixedDelay(() -> {
|
||||
int i = atomic.getAndIncrement();
|
||||
try {
|
||||
statement.executeUpdate("insert into " + tableName + " values(now, " + i + ",'" + i + "')");
|
||||
} catch (SQLException e) {
|
||||
// ignore
|
||||
}
|
||||
}, 0, 10, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
package com.taosdata;
|
||||
|
||||
import com.google.common.util.concurrent.RateLimiter;
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecord;
|
||||
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
||||
import com.taosdata.jdbc.tmq.TaosConsumer;
|
||||
|
||||
import java.sql.SQLException;
|
||||
import java.time.Duration;
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.Collections;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ForkJoinPool;
|
||||
import java.util.concurrent.Semaphore;
|
||||
|
||||
public class Worker implements Runnable {
|
||||
|
||||
int sleepTime;
|
||||
int rate;
|
||||
|
||||
ForkJoinPool pool = new ForkJoinPool();
|
||||
Semaphore semaphore;
|
||||
|
||||
TaosConsumer<Bean> consumer;
|
||||
|
||||
public Worker(Properties prop, Config config) throws SQLException {
|
||||
consumer = new TaosConsumer<>(prop);
|
||||
consumer.subscribe(Collections.singletonList(Config.TOPIC));
|
||||
semaphore = new Semaphore(config.getProcessCapacity());
|
||||
sleepTime = config.getPollSleep();
|
||||
rate = config.getRate();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
while (!Thread.interrupted()) {
|
||||
try {
|
||||
// 控制请求频率
|
||||
if (semaphore.tryAcquire()) {
|
||||
ConsumerRecords<Bean> records = consumer.poll(Duration.ofMillis(sleepTime));
|
||||
pool.submit(() -> {
|
||||
RateLimiter limiter = RateLimiter.create(rate);
|
||||
try {
|
||||
for (ConsumerRecord<Bean> record : records) {
|
||||
// 流量控制
|
||||
limiter.acquire();
|
||||
// 业务处理数据
|
||||
System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
|
||||
}
|
||||
} finally {
|
||||
semaphore.release();
|
||||
}
|
||||
});
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,7 +5,7 @@
|
|||
#spring.datasource.password=taosdata
|
||||
# datasource config - JDBC-RESTful
|
||||
spring.datasource.driver-class-name=com.taosdata.jdbc.rs.RestfulDriver
|
||||
spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test?timezone=UTC-8&charset=UTF-8&locale=en_US.UTF-8
|
||||
spring.datasource.url=jdbc:TAOS-RS://localhost:6041/test
|
||||
spring.datasource.username=root
|
||||
spring.datasource.password=taosdata
|
||||
spring.datasource.druid.initial-size=5
|
||||
|
|
|
@ -42,27 +42,27 @@ IF (TD_LINUX)
|
|||
)
|
||||
|
||||
target_link_libraries(tmq
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(stream_demo
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(schemaless
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(prepare
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(demo
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
target_link_libraries(asyncdemo
|
||||
taos_static
|
||||
taos
|
||||
)
|
||||
|
||||
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
|
||||
|
|
|
@ -231,6 +231,7 @@ typedef struct SColumnInfoData {
|
|||
};
|
||||
SColumnInfo info; // column info
|
||||
bool hasNull; // if current column data has null value.
|
||||
bool reassigned; // if current column data is reassigned.
|
||||
} SColumnInfoData;
|
||||
|
||||
typedef struct SQueryTableDataCond {
|
||||
|
|
|
@ -178,6 +178,7 @@ int32_t getJsonValueLen(const char* data);
|
|||
|
||||
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
|
||||
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
|
||||
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
|
||||
const SColumnInfoData* pSource, int32_t numOfRow2);
|
||||
|
|
|
@ -3354,6 +3354,17 @@ static FORCE_INLINE void tDeleteSMqAskEpRsp(SMqAskEpRsp* pRsp) {
|
|||
taosArrayDestroyEx(pRsp->topics, (FDelete)tDeleteMqSubTopicEp);
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
STqOffsetVal offset;
|
||||
int64_t rows;
|
||||
}OffsetRows;
|
||||
|
||||
typedef struct{
|
||||
char topicName[TSDB_TOPIC_FNAME_LEN];
|
||||
SArray* offsetRows;
|
||||
}TopicOffsetRows;
|
||||
|
||||
#define TD_AUTO_CREATE_TABLE 0x1
|
||||
typedef struct {
|
||||
int64_t suid;
|
||||
|
|
|
@ -163,6 +163,7 @@ typedef struct {
|
|||
int64_t checkPointId;
|
||||
int32_t taskId;
|
||||
int64_t streamId;
|
||||
int64_t streamBackendRid;
|
||||
} SStreamState;
|
||||
|
||||
typedef struct SFunctionStateStore {
|
||||
|
|
|
@ -344,7 +344,6 @@ typedef struct SStreamMeta {
|
|||
SRWLatch lock;
|
||||
int32_t walScanCounter;
|
||||
void* streamBackend;
|
||||
int32_t streamBackendId;
|
||||
int64_t streamBackendRid;
|
||||
SHashObj* pTaskBackendUnique;
|
||||
} SStreamMeta;
|
||||
|
|
|
@ -214,7 +214,6 @@ int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
|
|||
|
||||
void walRefFirstVer(SWal *, SWalRef *);
|
||||
void walRefLastVer(SWal *, SWalRef *);
|
||||
SWalRef *walRefCommittedVer(SWal *);
|
||||
|
||||
SWalRef *walOpenRef(SWal *);
|
||||
void walCloseRef(SWal *pWal, int64_t refId);
|
||||
|
|
|
@ -22,21 +22,20 @@ extern "C" {
|
|||
|
||||
// If the error is in a third-party library, place this header file under the third-party library header file.
|
||||
// When you want to use this feature, you should find or add the same function in the following section
|
||||
// #if !defined(WINDOWS)
|
||||
#if !defined(WINDOWS)
|
||||
|
||||
// #ifndef ALLOW_FORBID_FUNC
|
||||
// #define malloc MALLOC_FUNC_TAOS_FORBID
|
||||
// #define calloc CALLOC_FUNC_TAOS_FORBID
|
||||
// #define realloc REALLOC_FUNC_TAOS_FORBID
|
||||
// #define free FREE_FUNC_TAOS_FORBID
|
||||
// #ifdef strdup
|
||||
// #undef strdup
|
||||
// #define strdup STRDUP_FUNC_TAOS_FORBID
|
||||
// #endif
|
||||
// #endif // ifndef ALLOW_FORBID_FUNC
|
||||
// #endif // if !defined(WINDOWS)
|
||||
#ifndef ALLOW_FORBID_FUNC
|
||||
#define malloc MALLOC_FUNC_TAOS_FORBID
|
||||
#define calloc CALLOC_FUNC_TAOS_FORBID
|
||||
#define realloc REALLOC_FUNC_TAOS_FORBID
|
||||
#define free FREE_FUNC_TAOS_FORBID
|
||||
#ifdef strdup
|
||||
#undef strdup
|
||||
#define strdup STRDUP_FUNC_TAOS_FORBID
|
||||
#endif
|
||||
#endif // ifndef ALLOW_FORBID_FUNC
|
||||
#endif // if !defined(WINDOWS)
|
||||
|
||||
// // #define taosMemoryFree malloc
|
||||
// #define taosMemoryMalloc malloc
|
||||
// #define taosMemoryCalloc calloc
|
||||
// #define taosMemoryRealloc realloc
|
||||
|
|
|
@ -31,7 +31,7 @@ typedef void *(*__array_item_dup_fn_t)(void *);
|
|||
|
||||
typedef void (*FDelete)(void *);
|
||||
typedef int32_t (*FEncode)(void **buf, const void *dst);
|
||||
typedef void *(*FDecode)(const void *buf, void *dst);
|
||||
typedef void *(*FDecode)(const void *buf, void *dst, int8_t sver);
|
||||
|
||||
#define TD_EQ 0x1
|
||||
#define TD_GT 0x2
|
||||
|
|
|
@ -244,7 +244,7 @@ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t
|
|||
void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param);
|
||||
|
||||
int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode);
|
||||
void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz);
|
||||
void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -42,8 +42,8 @@ else:
|
|||
# os.system("rm -rf /var/lib/taos/*")
|
||||
# os.system("systemctl restart taosd ")
|
||||
|
||||
# wait a moment ,at least 5 seconds
|
||||
time.sleep(5)
|
||||
# wait a moment, at least 10 seconds
|
||||
time.sleep(10)
|
||||
|
||||
# prepare data by taosBenchmark
|
||||
|
||||
|
|
|
@ -80,5 +80,4 @@ fi
|
|||
|
||||
# there can not libtaos.so*, otherwise ln -s error
|
||||
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
|
||||
[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || :
|
||||
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :
|
||||
|
|
|
@ -40,7 +40,6 @@ else
|
|||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
[ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
|
||||
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
|
|
|
@ -31,7 +31,6 @@ cd ${pkg_dir}
|
|||
|
||||
libfile="libtaos.so.${tdengine_ver}"
|
||||
wslibfile="libtaosws.so"
|
||||
rocksdblib="librocksdb.so.8"
|
||||
|
||||
# create install dir
|
||||
install_home_path="/usr/local/taos"
|
||||
|
@ -95,7 +94,6 @@ fi
|
|||
|
||||
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
|
||||
[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||:
|
||||
[ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
|
||||
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
|
||||
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
|
||||
|
@ -126,12 +124,12 @@ if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
|
|||
cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/
|
||||
ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
|
||||
cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
|
||||
fi
|
||||
if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
|
||||
fi
|
||||
# if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then
|
||||
# cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/
|
||||
# fi
|
||||
# if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then
|
||||
# cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/
|
||||
# fi
|
||||
if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/
|
||||
fi
|
||||
|
|
|
@ -45,7 +45,6 @@ echo buildroot: %{buildroot}
|
|||
|
||||
libfile="libtaos.so.%{_version}"
|
||||
wslibfile="libtaosws.so"
|
||||
rocksdblib="librocksdb.so.8"
|
||||
|
||||
# create install path, and cp file
|
||||
mkdir -p %{buildroot}%{homepath}/bin
|
||||
|
@ -93,7 +92,6 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
|
|||
fi
|
||||
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
|
||||
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
|
||||
[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||:
|
||||
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
|
||||
|
@ -125,12 +123,12 @@ if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
|
|||
cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 %{buildroot}%{homepath}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib
|
||||
fi
|
||||
if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
|
||||
cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib
|
||||
fi
|
||||
# if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then
|
||||
# cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{homepath}/jemalloc/lib
|
||||
# fi
|
||||
# if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then
|
||||
# cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{homepath}/jemalloc/lib
|
||||
# fi
|
||||
if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{homepath}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
|
@ -176,7 +174,6 @@ fi
|
|||
|
||||
# there can not libtaos.so*, otherwise ln -s error
|
||||
${csudo}rm -f %{homepath}/driver/libtaos* || :
|
||||
${csudo}rm -f %{homepath}/driver/librocksdb* || :
|
||||
|
||||
#Scripts executed after installation
|
||||
%post
|
||||
|
@ -222,7 +219,6 @@ if [ $1 -eq 0 ];then
|
|||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
|
||||
|
||||
${csudo}rm -f ${log_link_dir} || :
|
||||
${csudo}rm -f ${data_link_dir} || :
|
||||
|
|
|
@ -250,30 +250,18 @@ function install_lib() {
|
|||
# Remove links
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
|
||||
${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
|
||||
${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
|
||||
${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
|
||||
|
||||
|
||||
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
|
||||
|
||||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
|
||||
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
|
||||
${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
|
||||
|
||||
${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || :
|
||||
${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || :
|
||||
|
||||
[ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
|
||||
fi
|
||||
|
||||
|
@ -327,13 +315,13 @@ function install_jemalloc() {
|
|||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
# fi
|
||||
# if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
# fi
|
||||
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
|
|
|
@ -214,13 +214,13 @@ function install_jemalloc() {
|
|||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
# fi
|
||||
# if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
# fi
|
||||
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
|
|
|
@ -241,10 +241,10 @@ function install_jemalloc() {
|
|||
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so > /dev/null 2>&1
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
[ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
|
||||
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
|
||||
[ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
|
||||
${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
|
||||
# [ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
|
||||
# [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
|
||||
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc \
|
||||
|
|
|
@ -118,12 +118,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
|||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
# if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
# cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
# fi
|
||||
# if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
# fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
|
|
|
@ -111,11 +111,9 @@ fi
|
|||
if [ "$osType" == "Darwin" ]; then
|
||||
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
|
||||
wslib_files="${build_dir}/lib/libtaosws.dylib"
|
||||
rocksdb_lib_files="${build_dir}/lib/librocksdb.dylib.8.1.1"
|
||||
else
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
wslib_files="${build_dir}/lib/libtaosws.so"
|
||||
rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1"
|
||||
fi
|
||||
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
|
||||
|
||||
|
@ -219,12 +217,12 @@ if [ -f ${build_dir}/bin/jemalloc-config ]; then
|
|||
cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib
|
||||
ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
fi
|
||||
# if [ -f ${build_dir}/lib/libjemalloc.a ]; then
|
||||
# cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib
|
||||
# fi
|
||||
# if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib
|
||||
# fi
|
||||
if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig
|
||||
fi
|
||||
|
@ -338,7 +336,6 @@ fi
|
|||
# Copy driver
|
||||
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
|
||||
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
|
||||
[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || :
|
||||
|
||||
# Copy connector
|
||||
if [ "$verMode" == "cluster" ]; then
|
||||
|
|
|
@ -169,13 +169,13 @@ function install_jemalloc() {
|
|||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib
|
||||
${csudo}ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
fi
|
||||
if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib
|
||||
# fi
|
||||
# if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then
|
||||
# ${csudo}/usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib
|
||||
# fi
|
||||
if [ -f ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc ]; then
|
||||
${csudo}/usr/bin/install -c -d /usr/local/lib/pkgconfig
|
||||
${csudo}/usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
|
||||
fi
|
||||
|
@ -202,19 +202,10 @@ function install_lib() {
|
|||
log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
|
||||
|
||||
#rocksdb
|
||||
[ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
|
||||
[ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
|
||||
|
||||
#rocksdb
|
||||
[ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
|
||||
[ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
|
||||
|
||||
[ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
|
||||
|
||||
${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
|
||||
${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
|
||||
|
||||
|
@ -223,7 +214,6 @@ function install_lib() {
|
|||
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
|
||||
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
|
||||
${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
|
||||
${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
|
||||
|
||||
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
|
||||
fi
|
||||
|
|
|
@ -142,11 +142,9 @@ function clean_local_bin() {
|
|||
function clean_lib() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
|
||||
[ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
|
||||
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
|
|
|
@@ -1553,17 +1553,8 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
      }
    }

    char cTmp = 0;  // for print tmp if is raw
    if (info->isRawLine) {
      cTmp = tmp[len];
      tmp[len] = '\0';
    }

    uDebug("SML:0x%" PRIx64 " smlParseLine israw:%d, numLines:%d, protocol:%d, len:%d, sql:%s", info->id,
           info->isRawLine, numLines, info->protocol, len, tmp);
    if (info->isRawLine) {
      tmp[len] = cTmp;
    }
           info->isRawLine, numLines, info->protocol, len, info->isRawLine ? "rawdata" : tmp);

    if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
      if (info->dataFormat) {

@@ -1584,8 +1575,7 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
      code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE;
    }
    if (code != TSDB_CODE_SUCCESS) {
      tmp[len] = '\0';
      uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp);
      uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, info->isRawLine ? "rawdata" : tmp);
      return code;
    }
    if (info->reRun) {

@@ -1756,9 +1746,8 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
    request->code = code;
    info->cost.endTime = taosGetTimestampUs();
    info->cost.code = code;
    if (code == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || code == TSDB_CODE_SDB_OBJ_CREATING ||
        code == TSDB_CODE_PAR_VALUE_TOO_LONG || code == TSDB_CODE_MND_TRANS_CONFLICT ||
        code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
    if (NEED_CLIENT_HANDLE_ERROR(code) || code == TSDB_CODE_SDB_OBJ_CREATING ||
        code == TSDB_CODE_PAR_VALUE_TOO_LONG || code == TSDB_CODE_MND_TRANS_CONFLICT) {
      if (cnt++ >= 10) {
        uInfo("SML:%" PRIx64 " retry:%d/10 end code:%d, msg:%s", info->id, cnt, code, tstrerror(code));
        break;
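Note: the smlParseLine hunks drop the save/terminate/restore dance around `tmp[len]` and simply log the literal string "rawdata" for raw payloads. The underlying hazard is that `%s` needs a NUL-terminated string while a raw line is only a length-delimited slice. A standard-C alternative is a precision-bounded format; this sketch is illustrative and is not the fix the commit chose:

```c
#include <stdio.h>

int main(void) {
  /* A length-delimited slice of a larger buffer: no NUL at the slice end. */
  const char *raw = "st,t1=3 c1=2 1626006833639000000 <next line...>";
  int         len = 32;  /* bytes that belong to the current line */

  /* printf("%s", raw) would run past the line boundary; "%.*s" stops after
   * len bytes, so no temporary terminator has to be written and restored. */
  printf("line: %.*s\n", len, raw);
  return 0;
}
```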
@@ -280,7 +280,7 @@ static const SSysDbTableSchema topicSchema[] = {
    {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
    {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
    {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
    {.name = "schema", .bytes = TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
    {.name = "schema", .bytes = TSDB_MAX_BINARY_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
    {.name = "meta", .bytes = 4 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
    {.name = "type", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};
@@ -23,6 +23,20 @@

int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
  if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
    if (pColumnInfoData->reassigned) {
      int32_t totalSize = 0;
      for (int32_t row = 0; row < numOfRows; ++row) {
        char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
        int32_t colSize = 0;
        if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) {
          colSize = getJsonValueLen(pColData);
        } else {
          colSize = varDataTLen(pColData);
        }
        totalSize += colSize;
      }
      return totalSize;
    }
    return pColumnInfoData->varmeta.length;
  } else {
    if (pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) {
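Note: `colDataGetLength` now walks per-row offsets when a column is flagged `reassigned`, because rows that alias a shared payload make `varmeta.length` (the payload byte count) diverge from the serialized size. A toy model of that arithmetic — the 2-byte length header mirrors the VARSTR idea, but every name and constant here is invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified var-data cell: 2-byte length header followed by the bytes. */
#define VAR_HDR 2

static int32_t varTotalLen(const char *cell) {
  uint16_t n;
  memcpy(&n, cell, VAR_HDR);
  return VAR_HDR + n;
}

/* When rows alias one payload, the serialized size must be recomputed by
 * walking the offsets, exactly as the hunk above does. */
static int32_t colTotalSize(const char *pData, const int32_t *offset, int32_t nRows) {
  int32_t total = 0;
  for (int32_t r = 0; r < nRows; ++r) total += varTotalLen(pData + offset[r]);
  return total;
}

int main(void) {
  char     pool[16] = {0};
  uint16_t n = 3;
  memcpy(pool, &n, VAR_HDR);
  memcpy(pool + VAR_HDR, "abc", 3);   /* one 5-byte cell in the payload */
  int32_t offs[2] = {0, 0};           /* two rows alias the same cell   */
  printf("payload bytes=5, serialized bytes=%d\n", colTotalSize(pool, offs, 2));
  return 0;
}
```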
@@ -126,6 +140,29 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
  return 0;
}

int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) {
  int32_t type = pColumnInfoData->info.type;
  if (IS_VAR_DATA_TYPE(type)) {
    int32_t dataLen = 0;
    if (type == TSDB_DATA_TYPE_JSON) {
      dataLen = getJsonValueLen(pData);
    } else {
      dataLen = varDataTLen(pData);
    }

    SVarColAttr* pAttr = &pColumnInfoData->varmeta;

    pColumnInfoData->varmeta.offset[dstRowIdx] = pColumnInfoData->varmeta.offset[srcRowIdx];
    pColumnInfoData->reassigned = true;
  } else {
    memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * dstRowIdx, pData, pColumnInfoData->info.bytes);
    colDataClearNull_f(pColumnInfoData->nullbitmap, dstRowIdx);
  }

  return 0;
}


int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
  if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
    return TSDB_CODE_SUCCESS;
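Note: in the fixed-width branch, `colDataReassignVal` copies the value in place and clears the destination row's NULL bit. A self-contained sketch of that kind of null bitmap — the bit ordering is this sketch's assumption, not lifted from the source:

```c
#include <stdint.h>
#include <stdio.h>

/* One bit per row, MSB-first within each byte (assumed layout). */
static void bmpSet(uint8_t *bmp, uint32_t row)   { bmp[row >> 3] |=  (uint8_t)(1u << (7 - (row & 7))); }
static void bmpClear(uint8_t *bmp, uint32_t row) { bmp[row >> 3] &= (uint8_t)~(1u << (7 - (row & 7))); }
static int  bmpGet(const uint8_t *bmp, uint32_t row) { return (bmp[row >> 3] >> (7 - (row & 7))) & 1; }

int main(void) {
  uint8_t bmp[2] = {0};
  bmpSet(bmp, 9);    /* row 9 starts out NULL                          */
  bmpClear(bmp, 9);  /* overwriting it with a real value clears the bit */
  printf("row 9 null? %d\n", bmpGet(bmp, 9));
  return 0;
}
```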
@@ -580,8 +617,22 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
      *(int32_t*)pStart = dataSize;
      pStart += sizeof(int32_t);

      memcpy(pStart, pCol->pData, dataSize);
      pStart += dataSize;
      if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) {
        for (int32_t row = 0; row < numOfRows; ++row) {
          char* pColData = pCol->pData + pCol->varmeta.offset[row];
          int32_t colSize = 0;
          if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
            colSize = getJsonValueLen(pColData);
          } else {
            colSize = varDataTLen(pColData);
          }
          memcpy(pStart, pColData, colSize);
          pStart += colSize;
        }
      } else {
        memcpy(pStart, pCol->pData, dataSize);
        pStart += dataSize;
      }
  }

  return 0;
@@ -1539,18 +1590,35 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int
  int32_t nRows = payloadSize / rowSize;
  ASSERT(nRows >= 1);

  // the true value must be less than the value of nRows
  int32_t additional = 0;
  int32_t numVarCols = 0;
  int32_t numFixCols = 0;
  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, i);
    if (IS_VAR_DATA_TYPE(pCol->info.type)) {
      additional += nRows * sizeof(int32_t);
      ++numVarCols;
    } else {
      additional += BitmapLen(nRows);
      ++numFixCols;
    }
  }

  int32_t newRows = (payloadSize - additional) / rowSize;
  // find the data payload whose size is greater than payloadSize
  int result = -1;
  int start = 1;
  int end = nRows;
  while (start <= end) {
    int mid = start + (end - start) / 2;
    // data size + var data type columns offset + fixed data type columns bitmap len
    int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
    if (midSize > payloadSize) {
      result = mid;
      end = mid - 1;
    } else {
      start = mid + 1;
    }
  }

  int32_t newRows = (result != -1) ? result - 1 : nRows;
  // the true value must be less than the value of nRows
  ASSERT(newRows <= nRows && newRows >= 1);

  return newRows;
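Note: `blockDataGetCapacityInRow` replaces the closed-form `(payloadSize - additional) / rowSize` estimate with a binary search for the first row count whose full cost — payload plus per-row offset entries for var-length columns plus null bitmaps for fixed-length columns — overflows the page, then steps back one. A compact version of the same search; the cost shape is taken from the hunk above but the constants in `main` are invented:

```c
#include <stdint.h>
#include <stdio.h>

/* Page cost of n rows: payload + 4-byte offset per row per var column
 * + a 1-bit-per-row bitmap per fixed column. */
static int64_t cost(int64_t n, int64_t rowSize, int64_t nVar, int64_t nFix) {
  int64_t bitmapLen = (n + 7) / 8;
  return n * rowSize + nVar * 4 * n + nFix * bitmapLen;
}

/* Largest n in [1, nRows] whose cost fits the budget: binary-search the
 * first overflowing n, then step back one. */
static int64_t maxRows(int64_t nRows, int64_t budget, int64_t rowSize, int64_t nVar, int64_t nFix) {
  int64_t lo = 1, hi = nRows, firstOver = -1;
  while (lo <= hi) {
    int64_t mid = lo + (hi - lo) / 2;
    if (cost(mid, rowSize, nVar, nFix) > budget) {
      firstOver = mid;
      hi = mid - 1;
    } else {
      lo = mid + 1;
    }
  }
  return (firstOver != -1) ? firstOver - 1 : nRows;
}

int main(void) {
  /* 16-byte rows, 1 var column, 2 fixed columns, 4 KiB page -> 202 rows */
  printf("rows that fit: %lld\n", (long long)maxRows(1000, 4096, 16, 1, 2));
  return 0;
}
```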
@@ -1741,7 +1809,20 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
    int32_t len = colDataGetLength(pColData, rows);
    tlen += taosEncodeFixedI32(buf, len);

    tlen += taosEncodeBinary(buf, pColData->pData, len);
    if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) {
      for (int32_t row = 0; row < rows; ++row) {
        char* pData = pColData->pData + pColData->varmeta.offset[row];
        int32_t colSize = 0;
        if (pColData->info.type == TSDB_DATA_TYPE_JSON) {
          colSize = getJsonValueLen(pData);
        } else {
          colSize = varDataTLen(pData);
        }
        tlen += taosEncodeBinary(buf, pData, colSize);
      }
    } else {
      tlen += taosEncodeBinary(buf, pColData->pData, len);
    }
  }
  return tlen;
}

@@ -2502,12 +2583,29 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
    data += metaSize;
    dataLen += metaSize;

    colSizes[col] = colDataGetLength(pColRes, numOfRows);
    dataLen += colSizes[col];
    if (pColRes->pData != NULL) {
      memmove(data, pColRes->pData, colSizes[col]);
    if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
      colSizes[col] = 0;
      for (int32_t row = 0; row < numOfRows; ++row) {
        char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
        int32_t colSize = 0;
        if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
          colSize = getJsonValueLen(pColData);
        } else {
          colSize = varDataTLen(pColData);
        }
        colSizes[col] += colSize;
        dataLen += colSize;
        memmove(data, pColData, colSize);
        data += colSize;
      }
    } else {
      colSizes[col] = colDataGetLength(pColRes, numOfRows);
      dataLen += colSizes[col];
      if (pColRes->pData != NULL) {
        memmove(data, pColRes->pData, colSizes[col]);
      }
      data += colSizes[col];
    }
    data += colSizes[col];

    colSizes[col] = htonl(colSizes[col]);
    // uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]);
@@ -23,10 +23,6 @@ static inline void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
  SEpSet epSet = {0};
  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);

  if (epSet.numOfEps == 1) {
    return;
  }

  const int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
  pMsg->pCont = rpcMallocCont(contLen);
  if (pMsg->pCont == NULL) {

@@ -105,6 +105,7 @@ typedef struct {
  SHashObj *dnodeHash;
  TdThreadRwlock lock;
  SMsgCb msgCb;
  bool validMnodeEps;
} SDnodeData;

typedef struct {

@@ -288,6 +288,8 @@ static void dmResetEps(SDnodeData *pData, SArray *dnodeEps) {
    taosHashPut(pData->dnodeHash, &pDnodeEp->id, sizeof(int32_t), pDnodeEp, sizeof(SDnodeEp));
  }

  pData->validMnodeEps = true;

  dmPrintEps(pData);
}

@@ -348,6 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) {
}

void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) {
  if(!pData->validMnodeEps) return;
  dmGetMnodeEpSet(pData, pEpSet);
  dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse);
  for (int32_t i = 0; i < pEpSet->numOfEps; ++i) {
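Note: the `validMnodeEps` flag makes redirects a no-op until `dmResetEps` has populated the endpoint list at least once. A minimal sketch of that publish-then-read guard — single-threaded for brevity, whereas the real struct holds an rwlock; all names here are stand-ins:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  int  numOfEps;
  bool valid;
} EpSet;

static void resetEps(EpSet *s, int n) {
  s->numOfEps = n;
  s->valid = true;  /* flip the flag only after the data is fully built */
}

static int epsForRedirect(const EpSet *s) {
  if (!s->valid) return 0;  /* refuse to redirect with an empty/stale list */
  return s->numOfEps;
}

int main(void) {
  EpSet s = {0};
  printf("before reset: %d eps\n", epsForRedirect(&s));
  resetEps(&s, 3);
  printf("after reset:  %d eps\n", epsForRedirect(&s));
  return 0;
}
```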
@@ -559,24 +559,25 @@ void* tDecodeSMqConsumerObj(const void* buf, SMqConsumerObj* pConsumer

typedef struct {
  int32_t vgId;
  char* qmsg;  // SubPlanToString
  // char* qmsg;  // SubPlanToString
  SEpSet epSet;
} SMqVgEp;

SMqVgEp* tCloneSMqVgEp(const SMqVgEp* pVgEp);
void tDeleteSMqVgEp(SMqVgEp* pVgEp);
int32_t tEncodeSMqVgEp(void** buf, const SMqVgEp* pVgEp);
void* tDecodeSMqVgEp(const void* buf, SMqVgEp* pVgEp);
void* tDecodeSMqVgEp(const void* buf, SMqVgEp* pVgEp, int8_t sver);

typedef struct {
  int64_t consumerId;  // -1 for unassigned
  SArray* vgs;         // SArray<SMqVgEp*>
  SArray* offsetRows;  // SArray<OffsetRows*>
} SMqConsumerEp;

SMqConsumerEp* tCloneSMqConsumerEp(const SMqConsumerEp* pEp);
void tDeleteSMqConsumerEp(void* pEp);
int32_t tEncodeSMqConsumerEp(void** buf, const SMqConsumerEp* pEp);
void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp);
void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp, int8_t sver);

typedef struct {
  char key[TSDB_SUBSCRIBE_KEY_LEN];

@@ -588,34 +589,36 @@ typedef struct {
  int64_t stbUid;
  SHashObj* consumerHash;   // consumerId -> SMqConsumerEp
  SArray* unassignedVgs;    // SArray<SMqVgEp*>
  SArray* offsetRows;
  char dbName[TSDB_DB_FNAME_LEN];
  char* qmsg;  // SubPlanToString
} SMqSubscribeObj;

SMqSubscribeObj* tNewSubscribeObj(const char key[TSDB_SUBSCRIBE_KEY_LEN]);
SMqSubscribeObj* tCloneSubscribeObj(const SMqSubscribeObj* pSub);
void tDeleteSubscribeObj(SMqSubscribeObj* pSub);
int32_t tEncodeSubscribeObj(void** buf, const SMqSubscribeObj* pSub);
void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub);
void* tDecodeSubscribeObj(const void* buf, SMqSubscribeObj* pSub, int8_t sver);

typedef struct {
  int32_t epoch;
  SArray* consumers;  // SArray<SMqConsumerEp*>
} SMqSubActionLogEntry;
//typedef struct {
//  int32_t epoch;
//  SArray* consumers;  // SArray<SMqConsumerEp*>
//} SMqSubActionLogEntry;

SMqSubActionLogEntry* tCloneSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
int32_t tEncodeSMqSubActionLogEntry(void** buf, const SMqSubActionLogEntry* pEntry);
void* tDecodeSMqSubActionLogEntry(const void* buf, SMqSubActionLogEntry* pEntry);

typedef struct {
  char key[TSDB_SUBSCRIBE_KEY_LEN];
  SArray* logs;  // SArray<SMqSubActionLogEntry*>
} SMqSubActionLogObj;

SMqSubActionLogObj* tCloneSMqSubActionLogObj(SMqSubActionLogObj* pLog);
void tDeleteSMqSubActionLogObj(SMqSubActionLogObj* pLog);
int32_t tEncodeSMqSubActionLogObj(void** buf, const SMqSubActionLogObj* pLog);
void* tDecodeSMqSubActionLogObj(const void* buf, SMqSubActionLogObj* pLog);
//SMqSubActionLogEntry* tCloneSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry* pEntry);
//int32_t tEncodeSMqSubActionLogEntry(void** buf, const SMqSubActionLogEntry* pEntry);
//void* tDecodeSMqSubActionLogEntry(const void* buf, SMqSubActionLogEntry* pEntry);
//
//typedef struct {
//  char key[TSDB_SUBSCRIBE_KEY_LEN];
//  SArray* logs;  // SArray<SMqSubActionLogEntry*>
//} SMqSubActionLogObj;
//
//SMqSubActionLogObj* tCloneSMqSubActionLogObj(SMqSubActionLogObj* pLog);
//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj* pLog);
//int32_t tEncodeSMqSubActionLogObj(void** buf, const SMqSubActionLogObj* pLog);
//void* tDecodeSMqSubActionLogObj(const void* buf, SMqSubActionLogObj* pLog);

typedef struct {
  int32_t oldConsumerNum;

@@ -634,7 +637,7 @@ typedef struct {
  SArray* removedConsumers;  // SArray<int64_t>
  SArray* modifyConsumers;   // SArray<int64_t>
  SMqSubscribeObj* pSub;
  SMqSubActionLogEntry* pLogEntry;
  // SMqSubActionLogEntry* pLogEntry;
} SMqRebOutputObj;

typedef struct {
@@ -642,7 +642,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
  SCMSubscribeReq subscribe = {0};
  tDeserializeSCMSubscribeReq(msgStr, &subscribe);

  uint64_t consumerId = subscribe.consumerId;
  int64_t consumerId = subscribe.consumerId;
  char *cgroup = subscribe.cgroup;
  SMqConsumerObj *pExistedConsumer = NULL;
  SMqConsumerObj *pConsumerNew = NULL;
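Note: switching `consumerId` from `uint64_t` to `int64_t` matters because -1 is the "unassigned consumer" sentinel (see the `SMqConsumerEp` comment above). A small demonstration of how the sentinel behaves in each width:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t u = (uint64_t)-1;  /* sentinel stored unsigned */
  int64_t  s = -1;            /* sentinel stored signed   */

  /* Equality against -1 still "works" for the unsigned field (both operands
   * convert), but the value itself is wrong for ordering and logging. */
  printf("u == (uint64_t)-1 ? %d\n", u == (uint64_t)-1);
  printf("unsigned view: %" PRIu64 "\n", u);  /* 18446744073709551615 */
  printf("signed view:   %" PRId64 "\n", s);  /* -1 */
  return 0;
}
```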
@@ -187,14 +187,14 @@ SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
  SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
  if (pVgEpNew == NULL) return NULL;
  pVgEpNew->vgId = pVgEp->vgId;
  pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
  // pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg);
  pVgEpNew->epSet = pVgEp->epSet;
  return pVgEpNew;
}

void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
  if (pVgEp) {
    taosMemoryFreeClear(pVgEp->qmsg);
    // taosMemoryFreeClear(pVgEp->qmsg);
    taosMemoryFree(pVgEp);
  }
}
@@ -202,14 +202,18 @@ void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
  int32_t tlen = 0;
  tlen += taosEncodeFixedI32(buf, pVgEp->vgId);
  tlen += taosEncodeString(buf, pVgEp->qmsg);
  // tlen += taosEncodeString(buf, pVgEp->qmsg);
  tlen += taosEncodeSEpSet(buf, &pVgEp->epSet);
  return tlen;
}

void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp) {
void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) {
  buf = taosDecodeFixedI32(buf, &pVgEp->vgId);
  buf = taosDecodeString(buf, &pVgEp->qmsg);
  if(sver == 1){
    uint64_t size = 0;
    buf = taosDecodeVariantU64(buf, &size);
    buf = POINTER_SHIFT(buf, size);
  }
  buf = taosDecodeSEpSet(buf, &pVgEp->epSet);
  return (void *)buf;
}
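Note: for `sver == 1` payloads the decoder must still step over the retired per-vgroup `qmsg` blob, so it reads the length and shifts the cursor without materializing a string. The same idea with a fixed 4-byte little-endian prefix — the real codec uses a varint and `POINTER_SHIFT`, and everything named here is a stand-in:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Skip a legacy length-prefixed blob instead of decoding it. */
static const uint8_t *skipLegacyBlob(const uint8_t *buf) {
  uint32_t len;
  memcpy(&len, buf, sizeof(len));      /* assumes a little-endian buffer */
  return buf + sizeof(len) + len;      /* jump over prefix + payload    */
}

int main(void) {
  /* [len=3]["abc"][next field: 0x7f] */
  uint8_t payload[] = {3, 0, 0, 0, 'a', 'b', 'c', 0x7f};
  const uint8_t *p = skipLegacyBlob(payload);
  printf("next field: 0x%02x\n", *p);  /* 0x7f */
  return 0;
}
```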
@@ -399,6 +403,22 @@ int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
  int32_t tlen = 0;
  tlen += taosEncodeFixedI64(buf, pConsumerEp->consumerId);
  tlen += taosEncodeArray(buf, pConsumerEp->vgs, (FEncode)tEncodeSMqVgEp);
  int32_t szVgs = taosArrayGetSize(pConsumerEp->offsetRows);
  tlen += taosEncodeFixedI32(buf, szVgs);
  for (int32_t j= 0; j < szVgs; ++j) {
    OffsetRows *offRows = taosArrayGet(pConsumerEp->offsetRows, j);
    tlen += taosEncodeFixedI32(buf, offRows->vgId);
    tlen += taosEncodeFixedI64(buf, offRows->rows);
    tlen += taosEncodeFixedI8(buf, offRows->offset.type);
    if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
      tlen += taosEncodeFixedI64(buf, offRows->offset.uid);
      tlen += taosEncodeFixedI64(buf, offRows->offset.ts);
    } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
      tlen += taosEncodeFixedI64(buf, offRows->offset.version);
    } else {
      // do nothing
    }
  }
#if 0
  int32_t sz = taosArrayGetSize(pConsumerEp->vgs);
  tlen += taosEncodeFixedI32(buf, sz);

@@ -410,9 +430,9 @@ int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) {
  return tlen;
}

void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) {
void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t sver) {
  buf = taosDecodeFixedI64(buf, &pConsumerEp->consumerId);
  buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp));
  buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
#if 0
  int32_t sz;
  buf = taosDecodeFixedI32(buf, &sz);

@@ -423,7 +443,28 @@ void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp) {
    taosArrayPush(pConsumerEp->vgs, &pVgEp);
  }
#endif

  if (sver > 1){
    int32_t szVgs = 0;
    buf = taosDecodeFixedI32(buf, &szVgs);
    if(szVgs > 0){
      pConsumerEp->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
      if (NULL == pConsumerEp->offsetRows) return NULL;
      for (int32_t j= 0; j < szVgs; ++j) {
        OffsetRows* offRows = taosArrayReserve(pConsumerEp->offsetRows, 1);
        buf = taosDecodeFixedI32(buf, &offRows->vgId);
        buf = taosDecodeFixedI64(buf, &offRows->rows);
        buf = taosDecodeFixedI8(buf, &offRows->offset.type);
        if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
          buf = taosDecodeFixedI64(buf, &offRows->offset.uid);
          buf = taosDecodeFixedI64(buf, &offRows->offset.ts);
        } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
          buf = taosDecodeFixedI64(buf, &offRows->offset.version);
        } else {
          // do nothing
        }
      }
    }
  }
  return (void *)buf;
}

@@ -472,7 +513,9 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {
    taosHashPut(pSubNew->consumerHash, &newEp.consumerId, sizeof(int64_t), &newEp, sizeof(SMqConsumerEp));
  }
  pSubNew->unassignedVgs = taosArrayDup(pSub->unassignedVgs, (__array_item_dup_fn_t)tCloneSMqVgEp);
  pSubNew->offsetRows = taosArrayDup(pSub->offsetRows, NULL);
  memcpy(pSubNew->dbName, pSub->dbName, TSDB_DB_FNAME_LEN);
  pSubNew->qmsg = taosStrdup(pSub->qmsg);
  return pSubNew;
}

@@ -486,6 +529,8 @@ void tDeleteSubscribeObj(SMqSubscribeObj *pSub) {
  }
  taosHashCleanup(pSub->consumerHash);
  taosArrayDestroyP(pSub->unassignedVgs, (FDelete)tDeleteSMqVgEp);
  taosMemoryFreeClear(pSub->qmsg);
  taosArrayDestroy(pSub->offsetRows);
}

int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {

@@ -512,10 +557,27 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) {
  if (cnt != sz) return -1;
  tlen += taosEncodeArray(buf, pSub->unassignedVgs, (FEncode)tEncodeSMqVgEp);
  tlen += taosEncodeString(buf, pSub->dbName);
  int32_t szVgs = taosArrayGetSize(pSub->offsetRows);
  tlen += taosEncodeFixedI32(buf, szVgs);
  for (int32_t j= 0; j < szVgs; ++j) {
    OffsetRows *offRows = taosArrayGet(pSub->offsetRows, j);
    tlen += taosEncodeFixedI32(buf, offRows->vgId);
    tlen += taosEncodeFixedI64(buf, offRows->rows);
    tlen += taosEncodeFixedI8(buf, offRows->offset.type);
    if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
      tlen += taosEncodeFixedI64(buf, offRows->offset.uid);
      tlen += taosEncodeFixedI64(buf, offRows->offset.ts);
    } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
      tlen += taosEncodeFixedI64(buf, offRows->offset.version);
    } else {
      // do nothing
    }
  }
  tlen += taosEncodeString(buf, pSub->qmsg);
  return tlen;
}

void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) {
  //
  buf = taosDecodeStringTo(buf, pSub->key);
  buf = taosDecodeFixedI64(buf, &pSub->dbUid);

@@ -530,74 +592,97 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub) {
  pSub->consumerHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
  for (int32_t i = 0; i < sz; i++) {
    SMqConsumerEp consumerEp = {0};
    buf = tDecodeSMqConsumerEp(buf, &consumerEp);
    buf = tDecodeSMqConsumerEp(buf, &consumerEp, sver);
    taosHashPut(pSub->consumerHash, &consumerEp.consumerId, sizeof(int64_t), &consumerEp, sizeof(SMqConsumerEp));
  }

  buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp));
  buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver);
  buf = taosDecodeStringTo(buf, pSub->dbName);
  if (sver > 1){
    int32_t szVgs = 0;
    buf = taosDecodeFixedI32(buf, &szVgs);
    if(szVgs > 0){
      pSub->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
      if (NULL == pSub->offsetRows) return NULL;
      for (int32_t j= 0; j < szVgs; ++j) {
        OffsetRows* offRows = taosArrayReserve(pSub->offsetRows, 1);
        buf = taosDecodeFixedI32(buf, &offRows->vgId);
        buf = taosDecodeFixedI64(buf, &offRows->rows);
        buf = taosDecodeFixedI8(buf, &offRows->offset.type);
        if (offRows->offset.type == TMQ_OFFSET__SNAPSHOT_DATA || offRows->offset.type == TMQ_OFFSET__SNAPSHOT_META) {
          buf = taosDecodeFixedI64(buf, &offRows->offset.uid);
          buf = taosDecodeFixedI64(buf, &offRows->offset.ts);
        } else if (offRows->offset.type == TMQ_OFFSET__LOG) {
          buf = taosDecodeFixedI64(buf, &offRows->offset.version);
        } else {
          // do nothing
        }
      }
    }
    buf = taosDecodeString(buf, &pSub->qmsg);
  }
  return (void *)buf;
}

SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
  SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
  if (pEntryNew == NULL) return NULL;
  pEntryNew->epoch = pEntry->epoch;
  pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
  return pEntryNew;
}

void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
  taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
}

int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
  int32_t tlen = 0;
  tlen += taosEncodeFixedI32(buf, pEntry->epoch);
  tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
  return tlen;
}

void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
  buf = taosDecodeFixedI32(buf, &pEntry->epoch);
  buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
  return (void *)buf;
}

SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
  SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
  if (pLogNew == NULL) return pLogNew;
  memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
  pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
  return pLogNew;
}

void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
  taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
}

int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
  int32_t tlen = 0;
  tlen += taosEncodeString(buf, pLog->key);
  tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
  return tlen;
}

void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
  buf = taosDecodeStringTo(buf, pLog->key);
  buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
  return (void *)buf;
}

int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
  int32_t tlen = 0;
  tlen += taosEncodeString(buf, pOffset->key);
  tlen += taosEncodeFixedI64(buf, pOffset->offset);
  return tlen;
}

void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
  buf = taosDecodeStringTo(buf, pOffset->key);
  buf = taosDecodeFixedI64(buf, &pOffset->offset);
  return buf;
}
//SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
//  SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry));
//  if (pEntryNew == NULL) return NULL;
//  pEntryNew->epoch = pEntry->epoch;
//  pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
//  return pEntryNew;
//}
//
//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) {
//  taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp);
//}
//
//int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) {
//  int32_t tlen = 0;
//  tlen += taosEncodeFixedI32(buf, pEntry->epoch);
//  tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry);
//  return tlen;
//}
//
//void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) {
//  buf = taosDecodeFixedI32(buf, &pEntry->epoch);
//  buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
//  return (void *)buf;
//}
//
//SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
//  SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj));
//  if (pLogNew == NULL) return pLogNew;
//  memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN);
//  pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp);
//  return pLogNew;
//}
//
//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) {
//  taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp);
//}
//
//int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) {
//  int32_t tlen = 0;
//  tlen += taosEncodeString(buf, pLog->key);
//  tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry);
//  return tlen;
//}
//
//void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) {
//  buf = taosDecodeStringTo(buf, pLog->key);
//  buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry));
//  return (void *)buf;
//}
//
//int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) {
//  int32_t tlen = 0;
//  tlen += taosEncodeString(buf, pOffset->key);
//  tlen += taosEncodeFixedI64(buf, pOffset->offset);
//  return tlen;
//}
//
//void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) {
//  buf = taosDecodeStringTo(buf, pOffset->key);
//  buf = taosDecodeFixedI64(buf, &pOffset->offset);
//  return buf;
//}
@@ -570,25 +570,21 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib

    mDebug("init subscription %s for topic:%s assign vgId:%d", pSub->key, pTopic->name, pVgEp->vgId);

    if (pSubplan) {
      int32_t msgLen;

      pSubplan->execNode.epSet = pVgEp->epSet;
      pSubplan->execNode.nodeId = pVgEp->vgId;

      if (qSubPlanToString(pSubplan, &pVgEp->qmsg, &msgLen) < 0) {
        sdbRelease(pSdb, pVgroup);
        qDestroyQueryPlan(pPlan);
        terrno = TSDB_CODE_QRY_INVALID_INPUT;
        return -1;
      }
    } else {
      pVgEp->qmsg = taosStrdup("");
    }

    sdbRelease(pSdb, pVgroup);
  }

  if (pSubplan) {
    int32_t msgLen;

    if (qSubPlanToString(pSubplan, &pSub->qmsg, &msgLen) < 0) {
      qDestroyQueryPlan(pPlan);
      terrno = TSDB_CODE_QRY_INVALID_INPUT;
      return -1;
    }
  } else {
    pSub->qmsg = taosStrdup("");
  }

  qDestroyQueryPlan(pPlan);
  return 0;
}
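Note: the serialized subscription plan moves from one copy per vgroup (`pVgEp->qmsg`) to a single copy per subscription (`pSub->qmsg`); per-vgroup specifics such as `epSet` and `nodeId` are patched into the plan only when a concrete request is built. The shape of that refactor, with stand-in names (nothing here is a TDengine API):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for an expensive plan-to-string serialization. */
static char *planToString(int planId) {
  char *s = malloc(32);
  if (s) snprintf(s, 32, "plan-%d", planId);
  return s;
}

int main(void) {
  int   vgIds[3] = {2, 3, 5};
  char *qmsg = planToString(42);  /* serialize once per subscription */
  if (qmsg == NULL) return 1;
  for (int i = 0; i < 3; ++i) {
    /* every vgroup reuses the same string; vgroup-specific fields are
     * filled in only when an actual request is assembled */
    printf("vgId %d -> %s\n", vgIds[i], qmsg);
  }
  free(qmsg);
  return 0;
}
```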
@@ -3159,8 +3159,14 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
  SSdb *pSdb = pMnode->pSdb;
  SStbObj *pStb = NULL;

  int32_t numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
  mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db);

  int32_t numOfRows = 0;
  if (!pShow->sysDbRsp) {
    numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
    mDebug("mndRetrieveStbCol get system table cols, rows:%d, db:%s", numOfRows, pShow->db);
    pShow->sysDbRsp = true;
  }

  SDbObj *pDb = NULL;
  if (strlen(pShow->db) > 0) {
    pDb = mndAcquireDb(pMnode, pShow->db);
@@ -24,7 +24,7 @@
#include "tcompare.h"
#include "tname.h"

#define MND_SUBSCRIBE_VER_NUMBER 1
#define MND_SUBSCRIBE_VER_NUMBER 2
#define MND_SUBSCRIBE_RESERVE_SIZE 64

#define MND_SUBSCRIBE_REBALANCE_CNT 3

@@ -99,13 +99,23 @@ static SMqSubscribeObj *mndCreateSubscription(SMnode *pMnode, const SMqTopicObj
  return pSub;
}

static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscribeObj *pSub,
                                    const SMqRebOutputVg *pRebVg) {
static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, SMqSubscribeObj *pSub,
                                    const SMqRebOutputVg *pRebVg, SSubplan* pPlan) {
  SMqRebVgReq req = {0};
  req.oldConsumerId = pRebVg->oldConsumerId;
  req.newConsumerId = pRebVg->newConsumerId;
  req.vgId = pRebVg->pVgEp->vgId;
  req.qmsg = pRebVg->pVgEp->qmsg;
  if(pPlan){
    pPlan->execNode.epSet = pRebVg->pVgEp->epSet;
    pPlan->execNode.nodeId = pRebVg->pVgEp->vgId;
    int32_t msgLen;
    if (qSubPlanToString(pPlan, &req.qmsg, &msgLen) < 0) {
      terrno = TSDB_CODE_QRY_INVALID_INPUT;
      return -1;
    }
  }else{
    req.qmsg = taosStrdup("");
  }
  req.subType = pSub->subType;
  req.withMeta = pSub->withMeta;
  req.suid = pSub->stbUid;

@@ -115,6 +125,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
  int32_t ret = 0;
  tEncodeSize(tEncodeSMqRebVgReq, &req, tlen, ret);
  if (ret < 0) {
    taosMemoryFree(req.qmsg);
    return -1;
  }

@@ -122,6 +133,7 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
  void *buf = taosMemoryMalloc(tlen);
  if (buf == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    taosMemoryFree(req.qmsg);
    return -1;
  }

@@ -135,17 +147,19 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
  if (tEncodeSMqRebVgReq(&encoder, &req) < 0) {
    taosMemoryFreeClear(buf);
    tEncoderClear(&encoder);
    taosMemoryFree(req.qmsg);
    return -1;
  }
  tEncoderClear(&encoder);
  *pBuf = buf;
  *pLen = tlen;

  taosMemoryFree(req.qmsg);
  return 0;
}

static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SMqSubscribeObj *pSub,
                                        const SMqRebOutputVg *pRebVg) {
static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub,
                                        const SMqRebOutputVg *pRebVg, SSubplan* pPlan) {
  if (pRebVg->oldConsumerId == pRebVg->newConsumerId) {
    terrno = TSDB_CODE_MND_INVALID_SUB_OPTION;
    return -1;

@@ -153,7 +167,7 @@ static int32_t mndPersistSubChangeVgReq(SMnode *pMnode, STrans *pTrans, const SM

  void *buf;
  int32_t tlen;
  if (mndBuildSubChangeReq(&buf, &tlen, pSub, pRebVg) < 0) {
  if (mndBuildSubChangeReq(&buf, &tlen, pSub, pRebVg, pPlan) < 0) {
    return -1;
  }

@@ -255,7 +269,7 @@ static void doAddNewConsumers(SMqRebOutputObj *pOutput, const SMqRebInputObj *pI
  for (int32_t i = 0; i < numOfNewConsumers; i++) {
    int64_t consumerId = *(int64_t *)taosArrayGet(pInput->pRebInfo->newConsumers, i);

    SMqConsumerEp newConsumerEp;
    SMqConsumerEp newConsumerEp = {0};
    newConsumerEp.consumerId = consumerId;
    newConsumerEp.vgs = taosArrayInit(0, sizeof(void *));

@@ -483,14 +497,25 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
}

static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) {
  struct SSubplan* pPlan = NULL;
  if(strcmp(pOutput->pSub->qmsg, "") != 0){
    int32_t code = qStringToSubplan(pOutput->pSub->qmsg, &pPlan);
    if (code != TSDB_CODE_SUCCESS) {
      terrno = code;
      return -1;
    }
  }

  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg, "tmq-reb");
  if (pTrans == NULL) {
    nodesDestroyNode((SNode*)pPlan);
    return -1;
  }

  mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL);
  if (mndTrancCheckConflict(pMnode, pTrans) != 0) {
    mndTransDrop(pTrans);
    nodesDestroyNode((SNode*)pPlan);
    return -1;
  }

@@ -500,11 +525,13 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
  int32_t vgNum = taosArrayGetSize(rebVgs);
  for (int32_t i = 0; i < vgNum; i++) {
    SMqRebOutputVg *pRebVg = taosArrayGet(rebVgs, i);
    if (mndPersistSubChangeVgReq(pMnode, pTrans, pOutput->pSub, pRebVg) < 0) {
    if (mndPersistSubChangeVgReq(pMnode, pTrans, pOutput->pSub, pRebVg, pPlan) < 0) {
      mndTransDrop(pTrans);
      nodesDestroyNode((SNode*)pPlan);
      return -1;
    }
  }
  nodesDestroyNode((SNode*)pPlan);

  // 2. redo log: subscribe and vg assignment
  // subscribe

@@ -815,7 +842,7 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) {
  int8_t sver = 0;
  if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto SUB_DECODE_OVER;

  if (sver != MND_SUBSCRIBE_VER_NUMBER) {
  if (sver > MND_SUBSCRIBE_VER_NUMBER || sver < 1) {
    terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
    goto SUB_DECODE_OVER;
  }

@@ -834,7 +861,7 @@ static SSdbRow *mndSubActionDecode(SSdbRaw *pRaw) {
  SDB_GET_BINARY(pRaw, dataPos, buf, tlen, SUB_DECODE_OVER);
  SDB_GET_RESERVE(pRaw, dataPos, MND_SUBSCRIBE_RESERVE_SIZE, SUB_DECODE_OVER);

  if (tDecodeSubscribeObj(buf, pSub) == NULL) {
  if (tDecodeSubscribeObj(buf, pSub, sver) == NULL) {
    goto SUB_DECODE_OVER;
  }
@@ -910,12 +910,14 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    colDataSetVal(pColInfo, numOfRows, (const char *)&pTopic->createTime, false);

    char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE] = {0};
    char *sql = taosMemoryMalloc(strlen(pTopic->sql) + VARSTR_HEADER_SIZE);
    STR_TO_VARSTR(sql, pTopic->sql);

    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
    colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);

    taosMemoryFree(sql);

    char *schemaJson = taosMemoryMalloc(TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE);
    if(pTopic->subType == TOPIC_SUB_TYPE__COLUMN){
      schemaToJson(pTopic->schema.pSchema, pTopic->schema.nCols, schemaJson);
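Note: `mndRetrieveTopic` stops formatting the topic SQL into a fixed `char sql[TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE]` stack buffer and instead allocates exactly `strlen(pTopic->sql) + VARSTR_HEADER_SIZE` bytes, so long statements neither overflow nor get truncated by a compile-time cap. A toy VARSTR writer making the same sizing decision — the 2-byte header is this sketch's assumption:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VARSTR_HDR 2  /* 2-byte length prefix, no terminating NUL (assumed) */

static char *toVarstr(const char *s) {
  uint16_t len = (uint16_t)strlen(s);
  /* Sized to the actual string, not to a fixed maximum. */
  char *cell = malloc(VARSTR_HDR + len);
  if (cell) {
    memcpy(cell, &len, VARSTR_HDR);
    memcpy(cell + VARSTR_HDR, s, len);
  }
  return cell;
}

int main(void) {
  char *cell = toVarstr("create topic t as select * from meters");
  if (cell == NULL) return 1;
  uint16_t len;
  memcpy(&len, cell, VARSTR_HDR);
  printf("varstr len=%u\n", len);
  free(cell);
  return 0;
}
```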
@@ -103,7 +103,7 @@ target_link_libraries(
# PUBLIC bdb
# PUBLIC scalar
PUBLIC rocksdb-shared
PUBLIC rocksdb
PUBLIC transport
PUBLIC stream
PUBLIC index

@@ -139,6 +139,7 @@ static STqMgmt tqMgmt = {0};

int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
void tqDestroyTqHandle(void* data);

// tqRead
int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);

@@ -161,6 +162,8 @@ int32_t tqMetaRestoreHandle(STQ* pTq);
int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen);
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);
int32_t tqMetaGetHandle(STQ* pTq, const char* key);
int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle);

STqOffsetStore* tqOffsetOpen(STQ* pTq);
void tqOffsetClose(STqOffsetStore*);

@@ -62,7 +62,7 @@ void tqCleanUp() {
  }
}

static void destroyTqHandle(void* data) {
void tqDestroyTqHandle(void* data) {
  STqHandle* pData = (STqHandle*)data;
  qDestroyTask(pData->execHandle.task);

@@ -102,7 +102,7 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
  pTq->walLogLastVer = pVnode->pWal->vers.lastVer;

  pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
  taosHashSetFreeFp(pTq->pHandle, destroyTqHandle);
  taosHashSetFreeFp(pTq->pHandle, tqDestroyTqHandle);

  taosInitRWLatch(&pTq->lock);
  pTq->pPushMgr = taosHashInit(64, MurmurHash3_32, false, HASH_NO_LOCK);
@@ -690,103 +690,33 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
    return -1;
  }

  SVnode* pVnode = pTq->pVnode;
  int32_t vgId = TD_VID(pVnode);

  tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pVnode->config.vgId, req.subKey,
  tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pTq->pVnode->config.vgId, req.subKey,
          req.oldConsumerId, req.newConsumerId);

  STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
  STqHandle* pHandle = NULL;
  while(1){
    pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
    if (pHandle || tqMetaGetHandle(pTq, req.subKey) < 0){
      break;
    }
  }

  if (pHandle == NULL) {
    if (req.oldConsumerId != -1) {
      tqError("vgId:%d, build new consumer handle %s for consumer:0x%" PRIx64 ", but old consumerId:0x%" PRIx64,
              req.vgId, req.subKey, req.newConsumerId, req.oldConsumerId);
    }

    if (req.newConsumerId == -1) {
      tqError("vgId:%d, tq invalid re-balance request, new consumerId %" PRId64 "", req.vgId, req.newConsumerId);
      goto end;
    }

    STqHandle tqHandle = {0};
    pHandle = &tqHandle;

    memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
    pHandle->consumerId = req.newConsumerId;
    pHandle->epoch = -1;

    pHandle->execHandle.subType = req.subType;
    pHandle->fetchMeta = req.withMeta;

    // TODO version should be assigned and refed during preprocess
    SWalRef* pRef = walRefCommittedVer(pVnode->pWal);
    if (pRef == NULL) {
      ret = -1;
    STqHandle handle = {0};
    ret = tqCreateHandle(pTq, &req, &handle);
    if(ret < 0){
      tqDestroyTqHandle(&handle);
      goto end;
    }

    int64_t ver = pRef->refVer;
    pHandle->pRef = pRef;

    SReadHandle handle = {.vnode = pVnode, .initTableReader = true, .initTqReader = true, .version = ver};
    initStorageAPI(&handle.api);

    pHandle->snapshotVer = ver;

    if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
      pHandle->execHandle.execCol.qmsg = taosStrdup(req.qmsg);

      pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, vgId,
                                                          &pHandle->execHandle.numOfCols, req.newConsumerId);
      void* scanner = NULL;
      qExtractStreamScanner(pHandle->execHandle.task, &scanner);
      pHandle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
    } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
      pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
      pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);

      pHandle->execHandle.execDb.pFilterOutTbUid =
          taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
      buildSnapContext(handle.vnode, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
                       (SSnapContext**)(&handle.sContext));

      pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
    } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
      pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
      pHandle->execHandle.execTb.suid = req.suid;
      pHandle->execHandle.execTb.qmsg = taosStrdup(req.qmsg);

      if (strcmp(pHandle->execHandle.execTb.qmsg, "") != 0) {
        if (nodesStringToNode(pHandle->execHandle.execTb.qmsg, &pHandle->execHandle.execTb.node) != 0) {
          tqError("nodesStringToNode error in sub stable, since %s, vgId:%d, subkey:%s consumer:0x%" PRIx64, terrstr(),
                  pVnode->config.vgId, req.subKey, pHandle->consumerId);
          return -1;
        }
      }

      buildSnapContext(handle.vnode, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
                       (SSnapContext**)(&handle.sContext));
      pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);

      SArray* tbUidList = NULL;
      ret = qGetTableList(req.suid, pVnode, pHandle->execHandle.execTb.node, &tbUidList, pHandle->execHandle.task);
      if (ret != TDB_CODE_SUCCESS) {
        tqError("qGetTableList error:%d vgId:%d, subkey:%s consumer:0x%" PRIx64, ret, pVnode->config.vgId, req.subKey,
                pHandle->consumerId);
        taosArrayDestroy(tbUidList);
        goto end;
      }
      tqDebug("tq try to get ctb for stb subscribe, vgId:%d, subkey:%s consumer:0x%" PRIx64 " suid:%" PRId64,
              pVnode->config.vgId, req.subKey, pHandle->consumerId, req.suid);
||||
pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
|
||||
tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList, NULL);
|
||||
taosArrayDestroy(tbUidList);
|
||||
}
|
||||
|
||||
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
|
||||
tqDebug("try to persist handle %s consumer:0x%" PRIx64, req.subKey, pHandle->consumerId);
|
||||
ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
|
||||
goto end;
|
||||
ret = tqMetaSaveHandle(pTq, req.subKey, &handle);
|
||||
} else {
|
||||
taosWLockLatch(&pTq->lock);
|
||||
|
||||
|
|
|
@@ -88,9 +88,9 @@ int32_t tqMetaOpen(STQ* pTq) {
return -1;
}

if (tqMetaRestoreHandle(pTq) < 0) {
return -1;
}
// if (tqMetaRestoreHandle(pTq) < 0) {
//   return -1;
// }

if (tqMetaRestoreCheckInfo(pTq) < 0) {
return -1;

@@ -274,6 +274,120 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
return 0;
}

static int buildHandle(STQ* pTq, STqHandle* handle){
SVnode* pVnode = pTq->pVnode;
int32_t vgId = TD_VID(pVnode);

handle->pRef = walOpenRef(pVnode->pWal);
if (handle->pRef == NULL) {
return -1;
}
walSetRefVer(handle->pRef, handle->snapshotVer);

SReadHandle reader = {
.vnode = pVnode,
.initTableReader = true,
.initTqReader = true,
.version = handle->snapshotVer,
};

initStorageAPI(&reader.api);

if (handle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
handle->execHandle.task =
qCreateQueueExecTaskInfo(handle->execHandle.execCol.qmsg, &reader, vgId, &handle->execHandle.numOfCols, handle->consumerId);
if (handle->execHandle.task == NULL) {
tqError("cannot create exec task for %s", handle->subKey);
return -1;
}
void* scanner = NULL;
qExtractStreamScanner(handle->execHandle.task, &scanner);
if (scanner == NULL) {
tqError("cannot extract stream scanner for %s", handle->subKey);
return -1;
}
handle->execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
if (handle->execHandle.pTqReader == NULL) {
tqError("cannot extract exec reader for %s", handle->subKey);
return -1;
}
} else if (handle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle->pWalReader = walOpenReader(pVnode->pWal, NULL);
handle->execHandle.pTqReader = tqReaderOpen(pVnode);

buildSnapContext(reader.vnode, reader.version, 0, handle->execHandle.subType, handle->fetchMeta,
(SSnapContext**)(&reader.sContext));
handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);
} else if (handle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
handle->pWalReader = walOpenReader(pVnode->pWal, NULL);

if(handle->execHandle.execTb.qmsg != NULL && strcmp(handle->execHandle.execTb.qmsg, "") != 0) {
if (nodesStringToNode(handle->execHandle.execTb.qmsg, &handle->execHandle.execTb.node) != 0) {
tqError("nodesStringToNode error in sub stable, since %s", terrstr());
return -1;
}
}
buildSnapContext(reader.vnode, reader.version, handle->execHandle.execTb.suid, handle->execHandle.subType,
handle->fetchMeta, (SSnapContext**)(&reader.sContext));
handle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, handle->consumerId);

SArray* tbUidList = NULL;
int ret = qGetTableList(handle->execHandle.execTb.suid, pVnode, handle->execHandle.execTb.node, &tbUidList, handle->execHandle.task);
if(ret != TDB_CODE_SUCCESS) {
tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle->subKey, handle->consumerId);
taosArrayDestroy(tbUidList);
return -1;
}
tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pVnode->config.vgId, handle->execHandle.execTb.suid);
handle->execHandle.pTqReader = tqReaderOpen(pVnode);
tqReaderSetTbUidList(handle->execHandle.pTqReader, tbUidList, NULL);
taosArrayDestroy(tbUidList);
}
return 0;
}

static int restoreHandle(STQ* pTq, void* pVal, int vLen, STqHandle* handle){
int32_t vgId = TD_VID(pTq->pVnode);
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqHandle(&decoder, handle);
tDecoderClear(&decoder);

if(buildHandle(pTq, handle) < 0){
return -1;
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
}

int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle){
int32_t vgId = TD_VID(pTq->pVnode);

memcpy(handle->subKey, req->subKey, TSDB_SUBSCRIBE_KEY_LEN);
handle->consumerId = req->newConsumerId;
handle->epoch = -1;

handle->execHandle.subType = req->subType;
handle->fetchMeta = req->withMeta;
if(req->subType == TOPIC_SUB_TYPE__COLUMN){
handle->execHandle.execCol.qmsg = taosStrdup(req->qmsg);
}else if(req->subType == TOPIC_SUB_TYPE__DB){
handle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
}else if(req->subType == TOPIC_SUB_TYPE__TABLE){
handle->execHandle.execTb.suid = req->suid;
handle->execHandle.execTb.qmsg = taosStrdup(req->qmsg);
}

handle->snapshotVer = walGetCommittedVer(pTq->pVnode->pWal);

if(buildHandle(pTq, handle) < 0){
return -1;
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
}

int32_t tqMetaRestoreHandle(STQ* pTq) {
int code = 0;
TBC* pCur = NULL;

@@ -281,97 +395,40 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
return -1;
}

int32_t vgId = TD_VID(pTq->pVnode);
void* pKey = NULL;
int kLen = 0;
void* pVal = NULL;
int vLen = 0;
SDecoder decoder;

tdbTbcMoveToFirst(pCur);

while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqHandle handle = {0};
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqHandle(&decoder, &handle);
tDecoderClear(&decoder);

handle.pRef = walOpenRef(pTq->pVnode->pWal);
if (handle.pRef == NULL) {
code = -1;
goto end;
code = restoreHandle(pTq, pVal, vLen, &handle);
if (code < 0) {
tqDestroyTqHandle(&handle);
break;
}
walSetRefVer(handle.pRef, handle.snapshotVer);

SReadHandle reader = {
.vnode = pTq->pVnode,
.initTableReader = true,
.initTqReader = true,
.version = handle.snapshotVer
};

initStorageAPI(&reader.api);

if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
handle.execHandle.task =
qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, vgId, &handle.execHandle.numOfCols, 0);
if (handle.execHandle.task == NULL) {
tqError("cannot create exec task for %s", handle.subKey);
code = -1;
goto end;
}
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.task, &scanner);
if (scanner == NULL) {
tqError("cannot extract stream scanner for %s", handle.subKey);
code = -1;
goto end;
}
handle.execHandle.pTqReader = qExtractReaderFromStreamScanner(scanner);
if (handle.execHandle.pTqReader == NULL) {
tqError("cannot extract exec reader for %s", handle.subKey);
code = -1;
goto end;
}
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);

buildSnapContext(reader.vnode, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
(SSnapContext**)(&reader.sContext));
handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);

if(handle.execHandle.execTb.qmsg != NULL && strcmp(handle.execHandle.execTb.qmsg, "") != 0) {
if (nodesStringToNode(handle.execHandle.execTb.qmsg, &handle.execHandle.execTb.node) != 0) {
tqError("nodesStringToNode error in sub stable, since %s", terrstr());
return -1;
}
}
buildSnapContext(reader.vnode, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
handle.fetchMeta, (SSnapContext**)(&reader.sContext));
handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);

SArray* tbUidList = NULL;
int ret = qGetTableList(handle.execHandle.execTb.suid, pTq->pVnode, handle.execHandle.execTb.node, &tbUidList, handle.execHandle.task);
if(ret != TDB_CODE_SUCCESS) {
tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle.subKey, handle.consumerId);
taosArrayDestroy(tbUidList);
goto end;
}
tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList, NULL);
taosArrayDestroy(tbUidList);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, vgId);
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
}

end:
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return code;
}

int32_t tqMetaGetHandle(STQ* pTq, const char* key) {
void* pVal = NULL;
int vLen = 0;

if (tdbTbGet(pTq->pExecStore, key, (int)strlen(key), &pVal, &vLen) < 0) {
return -1;
}
STqHandle handle = {0};
int code = restoreHandle(pTq, pVal, vLen, &handle);
if (code < 0){
tqDestroyTqHandle(&handle);
}
tdbFree(pVal);
return code;
}

@@ -691,6 +691,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
.colVal = COL_VAL_NONE(idxKey->key.cid, pr->pSchema->columns[slotIds[i]].type)};
if (!pLastCol) {
pLastCol = &noneCol;
reallocVarData(&pLastCol->colVal);
}

taosArraySet(pLastArray, idxKey->idx, pLastCol);

@@ -2848,14 +2849,16 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);

*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
if (pColVal->value.nData > 0) {
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
}

if (!COL_VAL_IS_VALUE(pColVal)) {

@@ -3016,14 +3019,16 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);

*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
if (IS_VAR_DATA_TYPE(pColVal->type) /*&& pColVal->value.nData > 0*/) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
if (pColVal->value.nData > 0) {
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
}

/*if (COL_VAL_IS_NONE(pColVal)) {

@@ -238,6 +238,10 @@ static int32_t vnodePreProcessSubmitMsg(SVnode *pVnode, SRpcMsg *pMsg) {
tEndDecode(pCoder);

_exit:
if (code) {
vError("vgId:%d, failed to preprocess submit request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
}
tDecoderClear(pCoder);
return code;
}

@@ -245,11 +249,11 @@ _exit:
static int32_t vnodePreProcessDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;

int32_t size;
int32_t ret;
uint8_t *pCont;
SEncoder *pCoder = &(SEncoder){0};
SDeleteRes res = {0};
int32_t size;
int32_t ret;
uint8_t *pCont;
SEncoder *pCoder = &(SEncoder){0};
SDeleteRes res = {0};

SReadHandle handle = {.config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
initStorageAPI(&handle.api);

@@ -297,7 +301,7 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {

_exit:
if (code) {
vError("vgId%d failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
vError("vgId:%d, failed to preprocess write request since %s, msg type:%d", TD_VID(pVnode), tstrerror(code),
pMsg->msgType);
}
return code;

@@ -316,8 +320,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
return -1;
}

vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
ver);
vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), ver);

ASSERT(pVnode->state.applyTerm <= pMsg->info.conn.applyTerm);
ASSERT(pVnode->state.applied + 1 == ver);

@@ -1494,8 +1497,7 @@ static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t ver, void *pR

code = vnodeConsolidateAlterHashRange(pVnode, ver);
if (code < 0) {
vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(),
ver);
vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(), ver);
goto _exit;
}
pVnode->config.hashChange = false;

@@ -950,8 +950,8 @@ int32_t ctgCloneMetaOutput(STableMetaOutput* output, STableMetaOutput** pOutput)
int32_t ctgGenerateVgList(SCatalog* pCtg, SHashObj* vgHash, SArray** pList);
void ctgFreeJob(void* job);
void ctgFreeHandleImpl(SCatalog* pCtg);
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup);
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup);
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
char* dbFName, SArray* pNames, bool update);
int32_t ctgGetVgIdsFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, char* dbFName, const char* pTbs[], int32_t tbNum,
int32_t* vgId);

@@ -568,7 +568,7 @@ int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName*
return TSDB_CODE_SUCCESS;
}

CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pConn ? &pConn->mgmtEps : NULL, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));

_return:

@@ -629,7 +629,7 @@ int32_t ctgGetCachedTbVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInf
return TSDB_CODE_SUCCESS;
}

CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, pVgroup));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, pVgroup));

ctgRUnlockVgInfo(dbCache);

@@ -1112,7 +1112,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;

SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo));

ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);

@@ -1132,7 +1132,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));

ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);

@@ -1282,7 +1282,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;

SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, pOut->dbVgroup, pName, &vgInfo));

ctgTaskDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);

@@ -1302,7 +1302,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));

ctgTaskDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);

@@ -1501,7 +1501,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}

CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));

CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;

@@ -1536,7 +1536,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SUseDbOutput* pOut = (SUseDbOutput*)pMsgCtx->out;

STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx);
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &pTask->pJob->conn.mgmtEps, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));

CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;

@@ -1799,7 +1799,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq* tReq, int32_t flag, SName* pName, int
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pName, &vgInfo));

ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);

@@ -1948,7 +1948,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask* pTask) {
if (NULL == pTask->res) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, &pConn->mgmtEps, dbCache->vgCache.vgInfo, pCtx->pName, (SVgroupInfo*)pTask->res));

ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;

@@ -1996,7 +1996,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
tReq.pTask = pTask;
tReq.msgIdx = -1;
CTG_ERR_JRET(
ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgGetVgInfosFromHashValue(pCtg, &pConn->mgmtEps, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));

ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;

@@ -2375,7 +2375,7 @@ int32_t ctgGetTbCfgCb(SCtgTask* pTask) {
SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res;

pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo));
}

CTG_RET(ctgLaunchGetTbCfgTask(pTask));

@@ -2395,7 +2395,7 @@ int32_t ctgGetTbTagCb(SCtgTask* pTask) {

if (NULL == pCtx->pVgInfo) {
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, &pTask->pJob->conn.mgmtEps, pDb, pCtx->pName, pCtx->pVgInfo));
}

CTG_RET(ctgLaunchGetTbTagTask(pTask));

@@ -2989,7 +2989,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg
}

*pVgroup = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pTableName, *pVgroup));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, NULL, dbCache->vgCache.vgInfo, pTableName, *pVgroup));

_return:

@@ -969,7 +969,7 @@ int32_t ctgHashValueComp(void const* lp, void const* rp) {
return 0;
}

int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) {
int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* dbInfo, const SName* pTableName, SVgroupInfo* pVgroup) {
int32_t code = 0;
CTG_ERR_RET(ctgMakeVgArray(dbInfo));

@@ -977,6 +977,14 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName
char db[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, db);

if (IS_SYS_DBNAME(pTableName->dbname)) {
pVgroup->vgId = MNODE_HANDLE;
if (pMgmtEps) {
memcpy(&pVgroup->epSet, pMgmtEps, sizeof(pVgroup->epSet));
}
return TSDB_CODE_SUCCESS;
}

if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", db, vgNum);
CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED);

@@ -1020,23 +1028,53 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SDBVgInfo* dbInfo, const SName
CTG_RET(code);
}

int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
int32_t ctgGetVgInfosFromHashValue(SCatalog* pCtg, SEpSet* pMgmgEpSet, SCtgTaskReq* tReq, SDBVgInfo* dbInfo, SCtgTbHashsCtx* pCtx,
char* dbFName, SArray* pNames, bool update) {
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SMetaRes res = {0};
SVgroupInfo* vgInfo = NULL;

CTG_ERR_RET(ctgMakeVgArray(dbInfo));

int32_t tbNum = taosArrayGetSize(pNames);

char* pSep = strchr(dbFName, '.');
if (pSep && IS_SYS_DBNAME(pSep + 1)) {
SVgroupInfo mgmtInfo = {0};
mgmtInfo.vgId = MNODE_HANDLE;
if (pMgmgEpSet) {
memcpy(&mgmtInfo.epSet, pMgmgEpSet, sizeof(mgmtInfo.epSet));
}
for (int32_t i = 0; i < tbNum; ++i) {
vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));
if (NULL == vgInfo) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}

memcpy(vgInfo, &mgmtInfo, sizeof(mgmtInfo));

ctgDebug("Got tb hash vgroup, vgId:%d, epNum %d, current %s port %d", vgInfo->vgId, vgInfo->epSet.numOfEps,
vgInfo->epSet.eps[vgInfo->epSet.inUse].fqdn, vgInfo->epSet.eps[vgInfo->epSet.inUse].port);

if (update) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, tReq->msgIdx);
SMetaRes* pRes = taosArrayGet(pCtx->pResList, pFetch->resIdx + i);
pRes->pRes = vgInfo;
} else {
res.pRes = vgInfo;
taosArrayPush(pCtx->pResList, &res);
}
}
return TSDB_CODE_SUCCESS;
}

int32_t vgNum = taosArrayGetSize(dbInfo->vgArray);
if (vgNum <= 0) {
ctgError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

SVgroupInfo* vgInfo = NULL;
int32_t tbNum = taosArrayGetSize(pNames);

if (1 == vgNum) {
for (int32_t i = 0; i < tbNum; ++i) {
vgInfo = taosMemoryMalloc(sizeof(SVgroupInfo));

@@ -457,6 +457,7 @@ typedef struct SStreamIntervalOperatorInfo {
int64_t dataVersion;
SStateStore statestore;
bool recvGetAll;
SHashObj* pFinalPullDataMap;
} SStreamIntervalOperatorInfo;

typedef struct SDataGroupInfo {

@@ -461,8 +461,12 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n

uint32_t defaultPgsz = 0;
uint32_t defaultBufsz = 0;
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);

code = getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (code) {
qError("failed to get buff page size, rowSize:%d", pAggSup->resultRowSize);
return code;
}

if (!osTempSpaceAvailable()) {
code = TSDB_CODE_NO_DISKSPACE;
qError("Init stream agg supporter failed since %s, key:%s, tempDir:%s", terrstr(code), pKey, tsTempDir);

@@ -174,6 +174,7 @@ void destroyEWindowOperatorInfo(void* param) {
colDataDestroy(&pInfo->twAggSup.timeWindowData);

cleanupAggSup(&pInfo->aggSup);
cleanupExprSupp(&pInfo->scalarSup);
taosMemoryFreeClear(param);
}

@@ -26,8 +26,8 @@
#include "executil.h"
#include "executorInt.h"
#include "querytask.h"
#include "tcompression.h"
#include "storageapi.h"
#include "tcompression.h"

typedef struct tagFilterAssist {
SHashObj* colHash;

@@ -42,13 +42,13 @@ typedef enum {
} FilterCondType;

static FilterCondType checkTagCond(SNode* cond);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SStorageAPI* pAPI);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond, SStorageAPI* pStoreAPI);
static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SStorageAPI* pAPI);
static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond, SStorageAPI* pStoreAPI);

static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond,
SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI);
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList,
void* pVnode, SStorageAPI* pStorageAPI);
static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI);
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
SStorageAPI* pStorageAPI);

static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }

@@ -302,7 +302,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}

int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI *pAPI) {
int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
SMetaReader mr = {0};

@@ -495,7 +495,8 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf
genTbGroupDigest((SNode*)listNode, digest, &context);
nodesFree(listNode);

pAPI->metaFn.metaGetCachedTbGroup(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), &tableList);
pAPI->metaFn.metaGetCachedTbGroup(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
&tableList);
if (tableList) {
taosArrayDestroy(pTableListInfo->pTableList);
pTableListInfo->pTableList = tableList;

@@ -632,7 +633,8 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf

if (tsTagFilterCache) {
tableList = taosArrayDup(pTableListInfo->pTableList, NULL);
pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo));
pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo));
}

// int64_t st2 = taosGetTimestampUs();

@@ -776,7 +778,8 @@ static int32_t optimizeTbnameInCond(void* pVnode, int64_t suid, SArray* list, SN
}

// only return uids that are not contained in pExistedUidList
static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, SNode* pTagCond, SStorageAPI* pStoreAPI) {
static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, SNode* pTagCond,
SStorageAPI* pStoreAPI) {
if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) {
return -1;
}

@@ -839,8 +842,8 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S
return -1;
}

static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList,
void* pVnode, SStorageAPI* pStorageAPI) {
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
SStorageAPI* pStorageAPI) {
SSDataBlock* pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -1080,8 +1083,8 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
genTagFilterDigest(pTagCond, &context);

bool acquired = false;
pStorageAPI->metaFn.getCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), pUidList,
&acquired);
pStorageAPI->metaFn.getCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest),
pUidList, &acquired);
if (acquired) {
digest[0] = 1;
memcpy(digest + 1, context.digest, tListLen(context.digest));

@@ -1097,13 +1100,15 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
if (pTagIndexCond) {
void* pIndex = pStorageAPI->metaFn.getInvertIndex(pVnode);

SIndexMetaArg metaArg = {
.metaEx = pVnode, .idx = pStorageAPI->metaFn.storeGetIndexInfo(pVnode), .ivtIdx = pIndex, .suid = pScanNode->uid};
SIndexMetaArg metaArg = {.metaEx = pVnode,
.idx = pStorageAPI->metaFn.storeGetIndexInfo(pVnode),
.ivtIdx = pIndex,
.suid = pScanNode->uid};

status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status, &pStorageAPI->metaFilter);
if (code != 0 || status == SFLT_NOT_INDEX) {  // temporarily disable it for performance sake
qWarn("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid);
qDebug("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid);
code = TSDB_CODE_SUCCESS;
} else {
qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList));

@@ -1128,7 +1133,8 @@ int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, S
memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t));
}

// metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
// metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload,
// size, 1);
digest[0] = 1;
memcpy(digest + 1, context.digest, tListLen(context.digest));
}

@@ -1152,15 +1158,17 @@ _end:
return code;
}

int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList, void* pTaskInfo){
SSubplan *pSubplan = (SSubplan *)node;
int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray** tableList, void* pTaskInfo) {
SSubplan* pSubplan = (SSubplan*)node;
SScanPhysiNode pNode = {0};
pNode.suid = suid;
pNode.uid = suid;
pNode.tableType = TSDB_SUPER_TABLE;
STableListInfo* pTableListInfo = tableListCreate();
uint8_t digest[17] = {0};
int code = getTableList(pVnode, &pNode, pSubplan ? pSubplan->pTagCond : NULL, pSubplan ? pSubplan->pTagIndexCond : NULL, pTableListInfo, digest, "qGetTableList", &((SExecTaskInfo*)pTaskInfo)->storageAPI);
uint8_t digest[17] = {0};
int code =
getTableList(pVnode, &pNode, pSubplan ? pSubplan->pTagCond : NULL, pSubplan ? pSubplan->pTagIndexCond : NULL,
pTableListInfo, digest, "qGetTableList", &((SExecTaskInfo*)pTaskInfo)->storageAPI);
*tableList = pTableListInfo->pTableList;
pTableListInfo->pTableList = NULL;
tableListDestroy(pTableListInfo);

@@ -1181,7 +1189,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
}

int32_t getGroupIdFromTagsVal(void* pVnode, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId,
SStorageAPI* pAPI) {
SStorageAPI* pAPI) {
SMetaReader mr = {0};

pAPI->metaReaderFn.initReader(&mr, pVnode, 0, &pAPI->metaFn);

@@ -1560,7 +1568,8 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
return TSDB_CODE_SUCCESS;
}

SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset, SFunctionStateStore* pStore) {
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset,
SFunctionStateStore* pStore) {
SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx));
if (pFuncCtx == NULL) {
return NULL;

@@ -1849,7 +1858,7 @@ void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t orde
}

struct tm tm;
time_t t = (time_t) key;
time_t t = (time_t)key;
taosLocalTime(&t, &tm, NULL);

int mon = (int)(tm.tm_year * 12 + tm.tm_mon + duration * factor);

@@ -2079,8 +2088,8 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
return TSDB_CODE_SUCCESS;
}

int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode, SNodeList* group,
bool groupSort, uint8_t *digest, SStorageAPI* pAPI) {
int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode,
SNodeList* group, bool groupSort, uint8_t* digest, SStorageAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;

bool groupByTbname = groupbyTbname(group);

@@ -2132,7 +2141,8 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}

uint8_t digest[17] = {0};
int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr, &pTaskInfo->storageAPI);
int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr,
&pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to getTableList, code: %s", tstrerror(code));
return code;

@@ -2150,7 +2160,8 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return TSDB_CODE_SUCCESS;
}

code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest, &pTaskInfo->storageAPI);
code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest,
&pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@@ -562,7 +562,6 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
int32_t numOfRows = 0;
if (IS_VAR_DATA_TYPE(pDst->info.type)) {
int32_t j = 0;
pDst->varmeta.length = 0;

while (j < totalRows) {
if (pIndicator[j] == 0) {

@@ -573,18 +572,8 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
if (colDataIsNull_var(pDst, j)) {
colDataSetNull_var(pDst, numOfRows);
} else {
// fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2
char* p1 = colDataGetVarData(pDst, j);
int32_t len = 0;
if (pDst->info.type == TSDB_DATA_TYPE_JSON) {
len = getJsonValueLen(p1);
} else {
len = varDataTLen(p1);
}
char* p2 = taosMemoryMalloc(len);
memcpy(p2, p1, len);
colDataSetVal(pDst, numOfRows, p2, false);
taosMemoryFree(p2);
colDataReassignVal(pDst, numOfRows, j, p1);
}
numOfRows += 1;
j += 1;

@@ -933,8 +922,13 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {

int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz) {
*defaultPgsz = 4096;
uint32_t last = *defaultPgsz;
while (*defaultPgsz < rowSize * 4) {
*defaultPgsz <<= 1u;
if (*defaultPgsz < last) {
return TSDB_CODE_INVALID_PARA;
}
last = *defaultPgsz;
}

// The default buffer for each operator in query is 10MB.

@@ -943,6 +937,9 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
*defaultBufsz = 4096 * 2560;
if ((*defaultBufsz) <= (*defaultPgsz)) {
(*defaultBufsz) = (*defaultPgsz) * 4;
if (*defaultBufsz < ((int64_t)(*defaultPgsz)) * 4) {
return TSDB_CODE_INVALID_PARA;
}
}

return 0;

@@ -869,7 +869,12 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
uint32_t defaultBufsz = 0;

pInfo->binfo.pRes = createDataBlockFromDescNode(pPartNode->node.pOutputDataBlockDesc);
getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
int32_t code = getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;
goto _error;
}

if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_DISKSPACE;

@@ -878,7 +883,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}

int32_t code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
code = createDiskbasedBuf(&pInfo->pBuf, defaultPgsz, defaultBufsz, pTaskInfo->id.str, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = code;

@@ -1865,7 +1865,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
TSKEY maxTs = pAPI->stateStore.updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
} else {
pInfo->pUpdateInfo->maxDataVersion = pTaskInfo->streamInfo.fillHistoryVer2;
pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pTaskInfo->streamInfo.fillHistoryVer2);
doCheckUpdate(pInfo, pInfo->pRecoverRes->info.window.ekey, pInfo->pRecoverRes);
}
}

@@ -1306,6 +1306,8 @@ static bool doDeleteWindow(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId)
return true;
}

static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }

static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDataBlock* pBlock, SArray* pUpWins,
SSHashObj* pUpdatedMap) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;

@@ -1340,8 +1342,14 @@ static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDa
SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
if (chIds) {
getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
continue;
int32_t childId = getChildIndex(pBlock);
SArray* chArray = *(void**)chIds;
int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
if (index != -1) {
qDebug("===stream===try push delete window%" PRId64 "chId:%d ,continue", win.skey, childId);
getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
continue;
}
}
bool res = doDeleteWindow(pOperator, win.skey, winGpId);
if (pUpWins && res) {

@@ -1497,6 +1505,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
taosArrayDestroy(*(void**)pIte);
}
taosHashCleanup(pInfo->pPullDataMap);
taosHashCleanup(pInfo->pFinalPullDataMap);
taosArrayDestroy(pInfo->pPullWins);
blockDataDestroy(pInfo->pPullDataRes);
taosArrayDestroy(pInfo->pDelWins);

@@ -2067,8 +2076,6 @@ void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*));
}

static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }

static void clearStreamIntervalOperator(SStreamIntervalOperatorInfo* pInfo) {
tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);

@@ -2112,7 +2119,7 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
blockDataUpdateTsWindow(pBlock, 0);
}

void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval) {
void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SHashObj* pFinalMap, SInterval* pInterval, SArray* pPullWins, int32_t numOfCh, SOperatorInfo* pOperator) {
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* tsData = (TSKEY*)pStartCol->pData;
SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);

@@ -2136,6 +2143,22 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
taosArrayDestroy(chArray);
taosHashRemove(pMap, &winRes, sizeof(SWinKey));
qDebug("===stream===retrive pull data over.window %" PRId64, winRes.ts);

void* pFinalCh = taosHashGet(pFinalMap, &winRes, sizeof(SWinKey));
if (pFinalCh) {
taosHashRemove(pFinalMap, &winRes, sizeof(SWinKey));
doDeleteWindow(pOperator, winRes.ts, winRes.groupId);
STimeWindow nextWin = getFinalTimeWindow(winRes.ts, pInterval);
SPullWindowInfo pull = {.window = nextWin,
.groupId = winRes.groupId,
.calWin.skey = nextWin.skey,
.calWin.ekey = nextWin.skey};
// add pull data request
if (savePullWindow(&pull, pPullWins) == TSDB_CODE_SUCCESS) {
addPullWindow(pMap, &winRes, numOfCh);
qDebug("===stream===prepare final retrive for delete %" PRId64 ", size:%d", winRes.ts, numOfCh);
}
}
}
}
}

@@ -2144,7 +2167,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
}
}

static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo, int32_t childId) {
int32_t size = taosArrayGetSize(wins);
for (int32_t i = 0; i < size; i++) {
SWinKey* winKey = taosArrayGet(wins, i);

@@ -2161,6 +2184,14 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
addPullWindow(pInfo->pPullDataMap, winKey, pInfo->numOfChild);
qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, pInfo->numOfChild);
}
} else {
SArray* chArray = *(void**)chIds;
int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
qDebug("===stream===check final retrive %" PRId64 ",chid:%d", winKey->ts, index);
if (index == -1) {
qDebug("===stream===add final retrive %" PRId64, winKey->ts);
taosHashPut(pInfo->pFinalPullDataMap, winKey, sizeof(SWinKey), NULL, 0);
}
}
}
}

@@ -2554,7 +2585,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pInfo->pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
addRetriveWindow(delWins, pInfo);
int32_t chId = getChildIndex(pBlock);
addRetriveWindow(delWins, pInfo, chId);
if (pBlock->info.type != STREAM_CLEAR) {
taosArrayAddAll(pInfo->pDelWins, delWins);
}

@@ -2589,7 +2621,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
continue;
} else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) {
processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval);
processPullOver(pBlock, pInfo->pPullDataMap, pInfo->pFinalPullDataMap, &pInfo->interval, pInfo->pPullWins, pInfo->numOfChild, pOperator);
continue;
} else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
return pBlock;

@@ -2772,6 +2804,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->pullIndex = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
pInfo->pFinalPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
pInfo->ignoreExpiredDataSaved = false;

@@ -4963,6 +4996,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys

pInfo->pPhyNode = NULL;  // create new child
pInfo->pPullDataMap = NULL;
pInfo->pFinalPullDataMap = NULL;
pInfo->pPullWins = NULL;  // SPullWindowInfo
pInfo->pullIndex = 0;
pInfo->pPullDataRes = NULL;

@@ -2660,7 +2660,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
} else {
pDiffInfo->ignoreNegative = false;
}
pDiffInfo->includeNull = false;
pDiffInfo->includeNull = true;
pDiffInfo->firstOutput = false;
return true;
}

@@ -791,7 +791,21 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
memcpy(udfCol->colData.varLenCol.varOffsets, col->varmeta.offset, udfCol->colData.varLenCol.varOffsetsLen);
udfCol->colData.varLenCol.payloadLen = colDataGetLength(col, udfBlock->numOfRows);
udfCol->colData.varLenCol.payload = taosMemoryMalloc(udfCol->colData.varLenCol.payloadLen);
memcpy(udfCol->colData.varLenCol.payload, col->pData, udfCol->colData.varLenCol.payloadLen);
if (col->reassigned) {
for (int32_t row = 0; row < udfCol->colData.numOfRows; ++row) {
char* pColData = col->pData + col->varmeta.offset[row];
int32_t colSize = 0;
if (col->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
memcpy(udfCol->colData.varLenCol.payload, pColData, colSize);
udfCol->colData.varLenCol.payload += colSize;
}
} else {
memcpy(udfCol->colData.varLenCol.payload, col->pData, udfCol->colData.varLenCol.payloadLen);
}
} else {
udfCol->colData.fixLenCol.nullBitmapLen = BitmapLen(udfCol->colData.numOfRows);
int32_t bitmapLen = udfCol->colData.fixLenCol.nullBitmapLen;

@@ -209,7 +209,7 @@ SNode* createCreateTopicStmtUseDb(SAstCreateContext* pCxt, bool ignoreExists, ST
SNode* createCreateTopicStmtUseTable(SAstCreateContext* pCxt, bool ignoreExists, SToken* pTopicName, SNode* pRealTable,
bool withMeta, SNode* pWhere);
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pTopicName);
SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, SToken* pTopicName);
SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pCGroupId, SToken* pTopicName);
SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue);
SNode* createDefaultExplainOptions(SAstCreateContext* pCxt);
SNode* setExplainVerbose(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal);

@@ -210,6 +210,15 @@ static bool checkTopicName(SAstCreateContext* pCxt, SToken* pTopicName) {
return true;
}

static bool checkCGroupName(SAstCreateContext* pCxt, SToken* pCGroup) {
trimEscape(pCGroup);
if (pCGroup->n >= TSDB_CGROUP_LEN) {
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pCGroup->z);
return false;
}
return true;
}

static bool checkStreamName(SAstCreateContext* pCxt, SToken* pStreamName) {
trimEscape(pStreamName);
if (pStreamName->n >= TSDB_STREAM_NAME_LEN) {

@@ -1746,12 +1755,15 @@ SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken
return (SNode*)pStmt;
}

SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId,
SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pCGroupId,
SToken* pTopicName) {
CHECK_PARSER_STATUS(pCxt);
if (!checkTopicName(pCxt, pTopicName)) {
return NULL;
}
if (!checkCGroupName(pCxt, pCGroupId)) {
return NULL;
}
SDropCGroupStmt* pStmt = (SDropCGroupStmt*)nodesMakeNode(QUERY_NODE_DROP_CGROUP_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->ignoreNotExists = ignoreNotExists;

@@ -311,6 +311,9 @@ static int32_t calcConstDelete(SCalcConstContext* pCxt, SDeleteStmt* pDelete) {
if (TSDB_CODE_SUCCESS == code) {
code = calcConstStmtCondition(pCxt, &pDelete->pWhere, &pDelete->deleteZeroRows);
}
if (code == TSDB_CODE_SUCCESS && pDelete->timeRange.skey > pDelete->timeRange.ekey) {
pDelete->deleteZeroRows = true;
}
return code;
}

@@ -465,6 +468,9 @@ static bool isEmptyResultQuery(SNode* pStmt) {
}
break;
}
case QUERY_NODE_DELETE_STMT:
isEmptyResult = ((SDeleteStmt*)pStmt)->deleteZeroRows;
break;
default:
break;
}

@@ -1352,13 +1352,33 @@ static bool isCountStar(SFunctionNode* pFunc) {
return (QUERY_NODE_COLUMN == nodeType(pPara) && 0 == strcmp(((SColumnNode*)pPara)->colName, "*"));
}

static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) {
int32_t code = TSDB_CODE_SUCCESS;
SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
if (NULL == pVal) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pVal->node.resType.type = TSDB_DATA_TYPE_INT;
pVal->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_INT].bytes;
const int32_t val = 1;
nodesSetValueNodeValue(pVal, (void*)&val);
pVal->translate = true;
nodesListErase(pCount->pParameterList, nodesListGetCell(pCount->pParameterList, 0));
code = nodesListAppend(pCount->pParameterList, (SNode*)pVal);
return code;
}

// count(*) is rewritten as count(ts) for scanning optimization
static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount) {
SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pCount->pParameterList, 0);
STableNode* pTable = NULL;
int32_t code = findTable(pCxt, ('\0' == pCol->tableAlias[0] ? NULL : pCol->tableAlias), &pTable);
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol);
if (TSDB_CODE_SUCCESS == code) {
if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol);
} else {
code = rewriteCountStarAsCount1(pCxt, pCount);
}
}
return code;
}