Merge branch 'mark/tmq' of https://github.com/taosdata/TDengine into mark/tmq

This commit is contained in:
wangmm0220 2023-07-06 17:03:15 +08:00
commit 8e29de0c4d
266 changed files with 20115 additions and 5482 deletions

28
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,28 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
      - id: check-yaml
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
  - repo: https://github.com/psf/black
    rev: stable
    hooks:
      - id: black
  - repo: https://github.com/pocc/pre-commit-hooks
    rev: master
    hooks:
      - id: cppcheck
        args: ["--error-exitcode=0"]
  - repo: https://github.com/crate-ci/typos
    rev: v1.15.7
    hooks:
      - id: typos

View File

@ -314,7 +314,7 @@ def pre_test_build_win() {
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.6
python -m pip install taospy==2.7.10
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1

View File

@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory

View File

@ -121,6 +121,12 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LOONGARCH_64 TRUE)
ADD_DEFINITIONS("-D_TD_LOONGARCH_")
ADD_DEFINITIONS("-D_TD_LOONGARCH_64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(PLATFORM_ARCH_STR "mips")
MESSAGE(STATUS "input cpuType: mips64")
SET(TD_MIPS_64 TRUE)
ADD_DEFINITIONS("-D_TD_MIPS_")
ADD_DEFINITIONS("-D_TD_MIPS_64")
ENDIF ()
ELSE ()
# if generate ARM version:
@ -176,11 +182,13 @@ set(TD_DEPS_DIR "x86")
if (TD_LINUX)
IF (TD_ARM_64 OR TD_ARM_32)
set(TD_DEPS_DIR "arm")
ELSEIF (TD_MIPS_64)
set(TD_DEPS_DIR "mips")
ELSE()
set(TD_DEPS_DIR "x86")
ENDIF()
endif()
MESSAGE(STATUS "DEPS_DIR" ${TD_DEPS_DIR})
MESSAGE(STATUS "DEPS_DIR: " ${TD_DEPS_DIR})
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.5.2.alpha")
SET(TD_VER_NUMBER "3.0.6.1.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 3.0
GIT_TAG main
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -77,7 +77,6 @@ if(${BUILD_WITH_LEVELDB})
cat("${TD_SUPPORT_DIR}/leveldb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_LEVELDB})
if (${BUILD_CONTRIB})
if(${BUILD_WITH_ROCKSDB})
cat("${TD_SUPPORT_DIR}/rocksdb_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -96,7 +95,6 @@ else()
endif()
endif()
# canonical-raft
if(${BUILD_WITH_CRAFT})
cat("${TD_SUPPORT_DIR}/craft_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

BIN
deps/mips/rocksdb_static/librocksdb.a vendored Normal file

Binary file not shown.

2844
deps/mips/rocksdb_static/rocksdb/c.h vendored Normal file

File diff suppressed because it is too large

View File

@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
```
`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
`ts1` is a Unix timestamp. Only timestamps no earlier than the current time minus the KEEP parameter in the configuration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
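To make the rule concrete, the small Python sketch below computes the earliest timestamp an insert may carry, assuming a hypothetical `KEEP` of 3650 days (the default):

```python
from datetime import datetime, timedelta

# hypothetical KEEP value in days; check the actual value with SHOW DATABASES
keep_days = 3650

# inserts with a timestamp older than this are rejected
earliest_allowed = datetime.now() - timedelta(days=keep_days)
print(earliest_allowed)
```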
### Insert Multiple Rows
@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
INSERT INTO d1001 VALUES (ts1, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
```
`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
`ts1` and `ts2` are Unix timestamps. Only timestamps no earlier than the current time minus the KEEP parameter in the configuration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
### Insert into Multiple Tables
@ -53,7 +53,7 @@ Data can be inserted into multiple tables in the same SQL statement. The example
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
```
`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detial, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
`ts1`, `ts2`, and `ts3` are Unix timestamps. Only timestamps no earlier than the current time minus the KEEP parameter in the configuration are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).

View File

@ -105,6 +105,12 @@ class Consumer:
    def poll(self, timeout: float = 1.0):
        pass

    def assignment(self):
        pass

    def poll(self, timeout: float = 1.0):
        pass

    def close(self):
        pass
@ -238,6 +244,8 @@ The following SQL statement creates a topic in TDengine:
CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
```
- There is an upper limit to the number of topics created, controlled by the parameter tmqMaxTopicNum, with a default of 20
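As a rough sanity check before creating a new topic, you can count the existing ones; a minimal sketch using the Python connector, assuming a local server with default credentials:

```python
import taos

conn = taos.connect()  # assumes a local TDengine server with default credentials
result = conn.query("SHOW TOPICS")
topics = result.fetch_all()
print(f"{len(topics)} topics in use; the limit is controlled by tmqMaxTopicNum")
```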
Multiple subscription types are supported.
#### Subscribe to a Column
@ -259,14 +267,15 @@ You can subscribe to a topic through a SELECT statement. Statements that specify
Syntax:
```sql
CREATE TOPIC topic_name AS STABLE stb_name
CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition]
```
Creating a topic in this manner differs from a `SELECT * from stbName` statement as follows:
- The table schema can be modified.
- Unstructured data is returned. The format of the data returned changes based on the supertable schema.
- A different table schema may exist for every data block to be processed.
- The `with meta` parameter is optional. When specified, statements that create the supertable and its subtables are also returned; this is mainly used by taosX to migrate supertables.
- The `where_condition` parameter is optional and is used to filter and subscribe only to subtables that meet the condition. The condition cannot reference ordinary columns, only tags or `tbname`; functions may be used in it to filter tags, but aggregate functions are not allowed, because subtable tag values cannot be aggregated. The condition can also be a constant expression, such as `2>1` (subscribe to all child tables) or `false` (subscribe to no child tables); see the sketch after this list.
- The data returned does not include tags.
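A minimal sketch of creating such a topic through the Python connector; the topic, database, supertable, and tag names are hypothetical:

```python
import taos

conn = taos.connect()  # assumes a local TDengine server
# subscribe, including schema-change statements, to subtables of `meters`
# whose `groupid` tag is greater than 1
conn.execute(
    "CREATE TOPIC IF NOT EXISTS topic_meters_g1 WITH META "
    "AS STABLE power.meters WHERE groupid > 1"
)
```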
### Subscribe to a Database
@ -274,10 +283,12 @@ Creating a topic in this manner differs from a `SELECT * from stbName` statement
Syntax:
```sql
CREATE TOPIC topic_name [WITH META] AS DATABASE db_name;
CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
```
This SQL statement creates a subscription to all tables in the database. You can add the `WITH META` parameter to include schema changes in the subscription, including creating and deleting supertables; adding, deleting, and modifying columns; and creating, deleting, and modifying the tags of subtables. Consumers can determine the message type from the API. Note that this differs from Kafka.
This SQL statement creates a subscription to all tables in the database.
- The `with meta` parameter is optional. When specified, statements that create all supertables and subtables in the database are also returned; this is mainly used by taosX for database migration.
## Create a Consumer
@ -289,7 +300,7 @@ You configure the following parameters when creating a consumer:
| `td.connect.user` | string | User Name | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | string | Port of the server side | |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. Each topic can create up to 100 consumer groups. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
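To make these parameters concrete, a minimal consumer configuration using the Python connector is sketched below; the group ID and server address are placeholders:

```python
from taos.tmq import Consumer

# placeholder values; omitted td.connect.* parameters fall back to their defaults
consumer = Consumer({
    "group.id": "grp1",
    "td.connect.ip": "127.0.0.1",
    "auto.offset.reset": "earliest",
    "enable.auto.commit": "true",
})
```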

View File

@ -17,7 +17,7 @@ When you create a user-defined function, you must implement standard interface f
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
- To initialize your function, implement the `udf_init` function. To terminate your function, implement the `udf_destroy` function.
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be `<udf-name>_start`, `<udf-name>_finish`, `<udf-name>_init`, and `<udf-name>_destroy`, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
@ -318,7 +318,7 @@ The implementation of a scalar UDF is described as follows:
def process(input: datablock) -> tuple[output_type]:
```
Description: this function prcesses datablock, which is the input; you can use datablock.data(row, col) to access the python object at location(row,col); the output is a tuple object consisted of objects of type outputtype
Description: this function processes datablock, which is the input; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of the output type
#### Aggregate UDF Interface
@ -356,7 +356,7 @@ def process(input: datablock) -> tuple[output_type]:
# return tuple object consisted of object of type outputtype
```
Note: process() must be implemeted, init() and destroy() must be defined too but they can do nothing.
Note: process() must be implemented; init() and destroy() must also be defined, but they can do nothing.
#### Aggregate Template
@ -377,7 +377,7 @@ def finish(buf: bytes) -> output_type:
#return obj of type outputtype
```
Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be implemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
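Putting the template together, a minimal sketch of an aggregate UDF that tracks the maximum of its first input column is shown below, assuming the datablock API described above (`shape()` and `data(row, col)`) and pickled bytes as the intermediate buffer:

```python
import pickle

def init():
    pass

def destroy():
    pass

def start():
    # initial intermediate result: no value seen yet
    return pickle.dumps(None)

def reduce(block, buf):
    # fold one data block into the running maximum
    state = pickle.loads(buf)
    rows, _ = block.shape()
    for row in range(rows):
        v = block.data(row, 0)
        if v is not None and (state is None or v > state):
            state = v
    return pickle.dumps(state)

def finish(buf):
    return pickle.loads(buf)
```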
### Data Mapping between TDengine SQL and Python UDF
@ -559,7 +559,7 @@ Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart
#### Sample 3: UDF with n arguments
A UDF which accepts n intergers, likee (x1, x2, ..., xn) and output the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
```python
def init():
@ -607,7 +607,7 @@ Query OK, 4 row(s) in set (0.010653s)
#### Sample 4: Utilize 3rd party package
A UDF which accepts a timestamp and output the next closed Sunday. This sample requires to use third party package `moment`, you need to install it firslty.
A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third-party package `moment`, so you need to install it first.
```shell
pip3 install moment
@ -701,7 +701,7 @@ Query OK, 4 row(s) in set (1.011474s)
#### Sample 5: Aggregate Function
An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate funnction takes multiple rows as input and output only one data. The execution process of an aggregate UDF is like map-reduce, the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the result of the mappers. The reduce() of Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed; and the result of other tasks executing reduce(). For exmaple, assume the code is in `/root/udf/myspread.py`.
An aggregate function which calculates the difference between the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one value. The execution process of an aggregate UDF is like map-reduce: the framework divides the input into multiple parts, each mapper processes one block, and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
```python
import io
@ -755,7 +755,7 @@ In this example, we implemented an aggregate function, and added some logging.
2. log() is the function for logging; it converts the input object to a string and outputs it with an end-of-line character
3. destroy() closes the log file
4. start() returns the initial buffer for storing the intermediate result
5. reduce() processes each daa block and aggregates the result
5. reduce() processes each data block and aggregates the result
6. finish() converts the final buffer to the final result
Create the UDF.

View File

@ -43,7 +43,7 @@ database_option: {
## Parameters
- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 96.
- BUFFER: specifies the size (in MB) of the write buffer for each vnode. Enter a value between 3 and 16384. The default value is 256.
- CACHEMODEL: specifies how the latest data in subtables is stored in the cache. The default value is none.
- none: The latest data is not cached.
- last_row: The last row of each subtable is cached. This option significantly improves the performance of the LAST_ROW function.

View File

@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...

View File

@ -672,7 +672,7 @@ If you input a specific column, the number of non-null values in the column is r
ELAPSED(ts_primary_key [, time_unit])
```
**Description**: `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
**Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` periods in the calculated time length.
**Return value type**: Double if the input value is not NULL;
@ -889,9 +889,10 @@ ignore_null_values: {
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter. For more information about the FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- When only one timestamp value is specified in the `RANGE` clause, `INTERP` is used to generate an interpolation at that point in time. In this case, the `EVERY` clause can be omitted. For example: `SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear)`.
- `INTERP` can be applied to a supertable by interpolating the primary-key-sorted data of all its child tables. It can also be used with `partition by tbname` when applied to a supertable to generate interpolation on each single timeline.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported from version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported from version 3.0.3.0).
@ -902,7 +903,7 @@ ignore_null_values: {
- We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
```sql
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
```
### LAST
@ -998,18 +999,14 @@ SAMPLE(expr, k)
**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000].
**Return value type**: Same as the column being operated plus the associated timestamp
**Return value type**: Same as the column being operated
**Applicable data types**: Any data type except for tags of STable
**Applicable data types**: Any data type
**Applicable nested query**: Inner query and Outer query
**Applicable table types**: standard tables and supertables
**More explanations**:
- This function cannot be used in expression calculation.
### TAIL
@ -1054,11 +1051,11 @@ TOP(expr, k)
UNIQUE(expr)
```
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used.
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword.
**Return value type**: Same as the data type of the column being operated upon
**Applicable column types**: Any data types except for timestamp
**Applicable column types**: Any data types
**Applicable table types**: table, STable
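To illustrate the difference from a plain projection, a hedged Python sketch querying first occurrences from a hypothetical `meters` supertable:

```python
import taos

conn = taos.connect()  # assumes a local TDengine server
# each voltage value is returned only at its first occurrence, similar to DISTINCT
result = conn.query("SELECT UNIQUE(voltage) FROM power.meters")
for row in result.fetch_all():
    print(row)
```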

View File

@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct
A PARTITION BY clause is processed as follows:
- The PARTITION BY clause must occur after the WHERE clause
- The PARTITION BY caluse partitions the data according to the specified dimensions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The performed computation is determined by the rest of the statement: a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
```sql
select max(current) from meters partition by location interval(10m)
```

View File

@ -36,7 +36,7 @@ Shows information about connections to the system.
SHOW CONSUMERS;
```
Shows information about all active consumers in the system.
Shows information about all consumers in the system.
## SHOW CREATE DATABASE

View File

@ -36,7 +36,8 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.1 | subscription add seek function | 3.0.5.0 or later |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | 3.0.5.0 or later |
| 3.2.2 | subscription add seek function | 3.0.5.0 or later |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket | 3.0.3.0 or later |
| 3.2.0 | This version has been deprecated | - |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket | - |
@ -284,9 +285,9 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is false. When batchfetch is true, taos-jdbcdriver and TDengine transfer data via a WebSocket connection instead of HTTP, which enables the JDBC REST connection to support large data volumes and improves query performance.
- charset: specify the charset to parse the string, this parameter is valid only when set batchfetch to true.
- batchErrorIgnore: true: when executing executeBatch of Statement, if one SQL execution fails in the middle, continue to execute the following SQL. false: no longer execute any statement after the failed SQL. The default value is: false.
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 5000 ms.
- httpSocketTimeout: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when batchfetch is true.
- httpConnectTimeout: REST connection timeout in milliseconds, the default value is 60000 ms.
- httpSocketTimeout: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is false.
- messageWaitTimeout: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when batchfetch is true.
- useSSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection.
- httpPoolSize: size of REST concurrent requests. The default value is 20.
@ -352,9 +353,9 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_CHARSET: In the character set used by the client, the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: this only takes effect when using JDBC native connection. Client language environment, the default value is system current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. In the time zone used by the client, the default value is the system's current time zone.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 5000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 3000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).

File diff suppressed because it is too large

View File

@ -31,21 +31,57 @@ Websocket connections are supported on all platforms that can run Go.
| connector-rust version | TDengine version | major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.10 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.12 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
| v0.7.6 | 3.0.3.0 | Support req_id in query. |
| v0.6.0 | 3.0.0.0 | Base features. |
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
## Installation
## Handling exceptions
After the error is reported, the specific information of the error can be obtained:
```rust
match conn.exec(sql) {
    Ok(_) => {
        Ok(())
    }
    Err(e) => {
        eprintln!("ERROR: {:?}", e);
        Err(e)
    }
}
```
## TDengine DataType vs. Rust DataType
TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversions with Rust are as follows:
| TDengine DataType | Rust DataType |
| ----------------- | ----------------- |
| TIMESTAMP | Timestamp |
| INT | i32 |
| BIGINT | i64 |
| FLOAT | f32 |
| DOUBLE | f64 |
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| NCHAR | String |
| JSON | serde_json::Value |
Note: Only tags support the JSON type
## Installation Steps
### Pre-installation preparation
* Install the Rust development toolchain
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
### Add taos dependency
### Install the connectors
Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows:
@ -146,7 +182,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
let conn1 = builder.build();
// use websocket protocol.
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let conn2 = builder2.build();
```
After the connection is established, you can perform operations on your database.
@ -228,41 +265,191 @@ There are two ways to query data: Using built-in types or the [serde](https://se
## Usage examples
### Write data
### Create database and tables
#### SQL Write
```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let dsn = "taos://localhost:6030";
    let builder = TaosBuilder::from_dsn(dsn)?;

    let taos = builder.build()?;

    let db = "query";

    // create database
    taos.exec_many([
        format!("DROP DATABASE IF EXISTS `{db}`"),
        format!("CREATE DATABASE `{db}`"),
        format!("USE `{db}`"),
    ])
    .await?;

    // create table
    taos.exec_many([
        // create super table
        "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
         TAGS (`groupid` INT, `location` BINARY(16))",
        // create child table
        "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angeles')",
    ]).await?;

    Ok(())
}
```
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### Insert data
<RustInsert />
#### STMT Write
<RustBind />
#### Schemaless Write
<RustSml />
### Query data
<RustQuery />
## API Reference
### execute SQL with req_id
### Connector Constructor
You create a connector constructor by using a DSN.
This req_id can be used to request link tracing.
```rust
let cfg = TaosBuilder::default().build()?;
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
```
You use the builder object to create multiple connections.
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
For parameter binding details, see the [API Reference](#stmt-api).
<RustBind />
### Schemaless Writing
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
<RustSml />
### Schemaless with req_id
This req_id can be used to request link tracing.
```rust
let conn: Taos = cfg.build();
let sml_data = SmlDataBuilder::default()
    .protocol(SchemalessProtocol::Line)
    .data(data)
    .req_id(100u64)
    .build()?;

client.put(&sml_data)?
```
### Connection pooling
### Data Subscription
TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).
#### Create a Topic
```rust
taos.exec_many([
    // create topic for subscription
    format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
```
#### Create a Consumer
You create a TMQ connector by using a DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
#### Subscribe to consume data
A single consumer can subscribe to one or more topics.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
```rust
{
    let mut stream = consumer.stream();

    while let Some((offset, message)) = stream.try_next().await? {
        // get information from offset

        // the topic
        let topic = offset.topic();
        // the vgroup id, like partition id in kafka.
        let vgroup_id = offset.vgroup_id();
        println!("* in vgroup id {vgroup_id} of topic {topic}\n");

        if let Some(data) = message.into_data() {
            while let Some(block) = data.fetch_raw_block().await? {
                // one block for one table, get table name if needed
                let name = block.table_name();
                let records: Vec<Record> = block.deserialize().try_collect()?;
                println!(
                    "** table: {}, got {} records: {:#?}\n",
                    name.unwrap(),
                    records.len(),
                    records
                );
            }
        }
        consumer.commit(offset).await?;
    }
}
```
Get assignments
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
#### Assignment subscription Offset
Seek offset
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
#### Close subscriptions
```rust
consumer.unsubscribe().await;
```
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory.
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
#### Full Sample Code
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
### Use with connection pool
In complex applications, we recommend enabling connection pools. [taos] implements connection pools based on [r2d2].
@ -292,7 +479,17 @@ In the application code, use `pool.get()? ` to get a connection object [Taos].
let taos = pool.get()?;
```
### Connectors
### More sample programs
The source code of the sample application is under `TDengine/examples/rust`:
[rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust)
## Frequently Asked Questions
For additional troubleshooting, see [FAQ](../../../train-faq/faq).
## API Reference
The [Taos][struct.Taos] object provides an API to perform operations on multiple databases.
@ -378,9 +575,13 @@ Note that Rust asynchronous functions and an asynchronous runtime are required.
- `.create_database(database: &str)`: Executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: Executes the `USE` statement.
In addition, this structure is also the entry point for [Parameter Binding](#Parameter Binding Interface) and [Line Protocol Interface](#Line Protocol Interface). Please refer to the specific API descriptions for usage.
In addition, this structure is also the entry point for Parameter Binding and Line Protocol Interface. Please refer to the specific API descriptions for usage.
### Bind Interface
<p>
<a id="stmt-api" style={{color:'#141414'}}>
Bind Interface
</a>
</p>
Similar to the C interface, Rust provides a wrapper for the bind interface. First, the [Taos][struct.taos] object creates a parameter binding object [Stmt] for an SQL statement.
@ -391,7 +592,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
The bind object provides a set of interfaces for implementing parameter binding.
#### `.set_tbname(name)`
`.set_tbname(name)`
To bind table names.
@ -400,7 +601,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
stmt.set_tbname("d0")?;
```
#### `.set_tags(&[tag])`
`.set_tags(&[tag])`
Bind sub-table table names and tag values when the SQL statement uses a super table.
@ -410,7 +611,7 @@ stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("taos".to_string())])?;
```
#### `.bind(&[column])`
`.bind(&[column])`
Bind value types. Use the [ColumnView] structure to create and bind the required types.
@ -434,7 +635,7 @@ let params = vec![
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```
#### `.execute()`
`.execute()`
Execute SQL. [Stmt] objects can be reused, re-bound, and executed after execution. Before execution, ensure that all data has been added to the queue with `.add_batch`.
@ -449,92 +650,6 @@ stmt.execute()?;
For a working example, see [GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
### Subscriptions
TDengine starts subscriptions through [TMQ](../../../taos-sql/tmq/).
You create a TMQ connector by using a DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
A single consumer can subscribe to one or more topics.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html) type. You can use the corresponding API to consume each message in the queue and then use `.commit` to mark them as consumed.
```rust
{
    let mut stream = consumer.stream();

    while let Some((offset, message)) = stream.try_next().await? {
        // get information from offset

        // the topic
        let topic = offset.topic();
        // the vgroup id, like partition id in kafka.
        let vgroup_id = offset.vgroup_id();
        println!("* in vgroup id {vgroup_id} of topic {topic}\n");

        if let Some(data) = message.into_data() {
            while let Some(block) = data.fetch_raw_block().await? {
                // one block for one table, get table name if needed
                let name = block.table_name();
                let records: Vec<Record> = block.deserialize().try_collect()?;
                println!(
                    "** table: {}, got {} records: {:#?}\n",
                    name.unwrap(),
                    records.len(),
                    records
                );
            }
        }
        consumer.commit(offset).await?;
    }
}
```
Get assignments
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
Seek offset
Version requirements connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
Unsubscribe:
```rust
consumer.unsubscribe().await;
```
The following parameters can be configured for the TMQ DSN. Only `group.id` is mandatory.
- `group.id`: Within a consumer group, load balancing is implemented by consuming messages on an at-least-once basis.
- `client.id`: Subscriber client ID.
- `auto.offset.reset`: Initial point of subscription. *earliest* subscribes from the beginning, and *latest* subscribes from the newest message. The default is earliest. Note: This parameter is set per consumer group.
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).

View File

@ -20,14 +20,72 @@ The source code for the Python connector is hosted on [GitHub](https://github.co
- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.
### Supported features
- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.).
## Version selection
We recommend using the latest version of `taospy`, regardless of the version of TDengine.
## Supported features
|Python Connector Version|major changes|
|:-------------------:|:----:|
|2.7.9|support for getting assignment and seek function on subscription|
|2.7.8|add `execute_many` method|
- Native connections support all the core features of TDengine, including connection management, SQL execution, bind interface, subscriptions, and schemaless writing.
- REST connections support features such as connection management and SQL execution. (SQL execution allows you to: manage databases, tables, and supertables, write data, query data, create continuous queries, etc.).
|Python Websocket Connector Version|major changes|
|:----------------------------:|:-----:|
|0.2.5|1. support for getting assignment and seek function on subscription <br/> 2. support schemaless <br/> 3. support STMT|
|0.2.4|support `unsubscribe` on subscription|
## Handling Exceptions
There are several types of exceptions in the Python connector:
- The exception of Python Connector itself.
- The exception of native library.
- The exception of websocket
- The exception of subscription.
- The exception of other TDengine function modules.
|Error Type|Description|Suggested Actions|
|:--------:|:---------:|:---------------:|
|InterfaceError|the native library is too old to support the function|please check the TDengine client version|
|ConnectionError|connection error|please check TDengine's status and the connection params|
|DatabaseError|database error|please upgrade the Python connector to the latest version|
|OperationalError|operation error||
|ProgrammingError|||
|StatementError|the exception of stmt||
|ResultError|||
|SchemalessError|the exception of schemaless insert||
|TmqError|the exception of tmq||
Python applications usually use try-except to handle exceptions. For exception handling, please refer to [Python Errors and Exceptions Documentation](https://docs.python.org/3/tutorial/errors.html).
All exceptions from the Python Connector are thrown directly. Applications should handle these exceptions. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
## TDengine DataType vs. Python DataType
TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversions with Python are as follows:
|TDengine DataType|Python DataType|
|:---------------:|:-------------:|
|TIMESTAMP|datetime|
|INT|int|
|BIGINT|int|
|FLOAT|float|
|DOUBLE|float|
|SMALLINT|int|
|TINYINT|int|
|BOOL|bool|
|BINARY|str|
|NCHAR|str|
|JSON|str|
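A small sketch of how these mappings surface when fetching rows; the database, table, and column names are hypothetical:

```python
import taos

conn = taos.connect()  # assumes a local TDengine server
result = conn.query("SELECT ts, voltage, phase FROM power.meters LIMIT 1")
for row in result.fetch_all():
    # per the table above, e.g. ['datetime', 'int', 'float']
    print([type(v).__name__ for v in row])
```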
## Installation
@ -343,6 +401,8 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `Connection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
@ -353,6 +413,46 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Querying Data
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
```python
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
```
:::tip
The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
:::
</TabItem>
<TabItem value="rest" label="REST connection">
The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
```
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `query` method of the `TaosConnection` class can be used to query data and return the result data of type `TaosResult`.
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
</TabItem>
</Tabs>
### Usage with req_id
By using the optional req_id parameter, you can specify a request ID that can be used for tracing.
@ -453,6 +553,170 @@ As the way to connect introduced above but add `req_id` argument.
</TabItem>
</Tabs>
### Subscription
The connector supports data subscription. For more information about subscription, please refer to [Data Subscription](../../../develop/tmq/).
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
The `consumer` in the connector contains the subscription api.
##### Create Consumer
The syntax for creating a consumer is `consumer = Consumer(configs)`. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/).
```python
from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
##### Subscribe topics
The `subscribe` function is used to subscribe to a list of topics.
```python
consumer.subscribe(['topic1', 'topic2'])
```
##### Consume
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
```python
while True:
    res = consumer.poll(1)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    val = res.value()

    for block in val:
        print(block.fetchall())
```
##### assignment
The `assignment` function is used to get the assignment of the topic.
```python
assignments = consumer.assignment()
```
##### Seek
The `seek` function is used to reset the assignment of the topic.
```python
tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
```
##### After consuming data
You should unsubscribe from the topics and close the consumer after consuming.
```python
consumer.unsubscribe()
consumer.close()
```
##### Tmq subscription example
```python
{{#include docs/examples/python/tmq_example.py}}
```
##### assignment and seek example
```python
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
In addition to native connections, the connector also supports subscriptions via websockets.
##### Create Consumer
The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../../develop/tmq/#create-a-consumer).
```python
import taosws

consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
##### subscribe topics
The `subscribe` function is used to subscribe to a list of topics.
```python
consumer.subscribe(['topic1', 'topic2'])
```
##### Consume
The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
```python
while True:
    res = consumer.poll(timeout=1.0)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err

    for block in res:
        for row in block:
            print(row)
```
##### assignment
The `assignment` function is used to get the assignment of the topic.
```python
assignments = consumer.assignment()
```
##### Seek
The `seek` function is used to reset the assignment of the topic.
```python
consumer.seek(topic='topic1', partition=0, offset=0)
```
##### After consuming data
You should unsubscribe from the topics and close the consumer after consuming.
```python
consumer.unsubscribe()
consumer.close()
```
##### Subscription example
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
##### Assignment and seek example
```python
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
@ -460,19 +724,19 @@ Connector support schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
##### Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with ttl argument
##### Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with req_id argument
##### Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
@ -482,19 +746,19 @@ Insert with req_id argument
<TabItem value="raw" label="Raw Insert">
Simple insert
##### Simple insert
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with ttl argument
##### Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with req_id argument
##### Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
@ -503,11 +767,143 @@ Insert with req_id argument
</TabItem>
</Tabs>
### Parameter Binding
The Python connector provides a parameter binding api for inserting data. Similar to most databases, TDengine currently only supports the question mark `?` to indicate the parameters to be bound.
<Tabs>
<TabItem value="native" label="native connection">
##### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
##### parameter binding
Call the `new_multi_binds` function to create the parameter list for parameter bindings.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Call the `bind_param` (for a single row) method or the `bind_param_batch` (for multiple rows) method to set the values.
```
stmt.bind_param_batch(params)
```
##### execute sql
Call `execute` method to execute sql.
```
stmt.execute()
```
##### Close Stmt
```
stmt.close()
```
##### Example
```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
##### Create Stmt
Call the `statement` method in `Connection` to create the `stmt` for parameter binding.
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
##### Prepare sql
Call `prepare` method in stmt to prepare sql.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
##### parameter binding
Call the `bind_param` method to bind parameters.
```
stmt.bind_param([
    taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
    taosws.ints_to_column([1, 2, 3, 4]),
    taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
    taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the `add_batch` method to add parameters to the batch.
```
stmt.add_batch()
```
##### execute sql
Call `execute` method to execute sql.
```
stmt.execute()
```
##### Close Stmt
```
stmt.close()
```
##### Example
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>
### Other sample programs
| Example program links | Example program content |
| ------------------------------------------------------------------------------------------------------------- | ----------------------- |
| [bind_multi.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-multi.py) | parameter binding, bind multiple rows at once |
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | parameter binding, bind one row at a time |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB line protocol writing |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | Use JSON type tags |
@ -515,14 +911,6 @@ Insert with req_id argument
## Other notes
### Exception handling
All errors from database operations are thrown directly as exceptions and the error message from the database is passed up the exception stack. The application is responsible for exception handling. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
### About nanoseconds
Due to the current imperfection of Python's nanosecond support (see link below), the current implementation returns integers at nanosecond precision instead of the `datetime` type produced by `ms` and `us`, which application developers will need to handle on their own. And it is recommended to use pandas' to_datetime(). The Python Connector may modify the interface in the future if Python officially supports nanoseconds in full.
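As suggested above, a hedged sketch of converting such a nanosecond integer with pandas; the timestamp value is made up:

```python
import pandas as pd

# hypothetical nanosecond-precision value returned by the connector as an int
ns_value = 1686844800123456789

ts = pd.to_datetime(ns_value, unit="ns")
print(ts)  # 2023-06-15 16:00:00.123456789
```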

36
docs/en/20-third-party/14-dbeaver.md vendored Normal file
View File

@ -0,0 +1,36 @@
---
sidebar_label: DBeaver
title: DBeaver
description: You can use DBeaver to access your data stored in TDengine and TDengine Cloud.
---
[DBeaver](https://dbeaver.io/) is a popular cross-platform database management tool that facilitates data management for developers, database administrators, data analysts, and other users. Starting from version 23.1.1, DBeaver natively supports TDengine and can be used to manage TDengine Cloud as well as TDengine clusters deployed on-premises.
## Prerequisites
To use DBeaver to manage TDengine, you need to prepare the following:
- Install DBeaver. DBeaver supports mainstream operating systems including Windows, macOS, and Linux. Please make sure you download and install the correct version (23.1.1+) and platform package. Please refer to the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation) for detailed installation steps.
- If you use an on-premises TDengine cluster, please make sure that TDengine and taosAdapter are deployed and running properly. For detailed information, please refer to the taosAdapter User Manual.
## Use DBeaver to access on-premises TDengine cluster
1. Start the DBeaver application, click the button or menu item to choose **New Database Connection**, and then select **TDengine** in the **Timeseries** category.
![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-en.webp)
2. Configure the TDengine connection by filling in the host address, port number, username, and password. If TDengine is deployed on the local machine, you are only required to fill in the username and password. The default username is root and the default password is taosdata. Click **Test Connection** to check whether the connection is workable. If you do not have the TDengine Java connector installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-en.webp)
3. If the connection is successful, it will be displayed as shown in the following figure. If the connection fails, please check whether the TDengine service and taosAdapter are running correctly and whether the host address, port number, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-en.webp)
4. Use DBeaver to select databases and tables and browse your data stored in TDengine.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-en.webp)
5. You can also manipulate TDengine data by executing SQL commands.
![Use SQL commands to manipulate TDengine data in DBeaver](./dbeaver/dbeaver-sql-execution-en.webp)

(Binary files not shown: ten .webp screenshots added for the DBeaver documentation, 35-73 KiB each.)
View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.6.0
<Release type="tdengine" version="3.0.6.0" />
## 3.0.5.1
<Release type="tdengine" version="3.0.5.1" />

View File

@ -0,0 +1,82 @@
#!
import taosws
import taos
db_name = 'test_ws_stmt'
def before():
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
taos_conn.close()
def stmt_insert():
before()
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
while True:
try:
stmt = conn.statement()
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
print(rows)
stmt.close()
except Exception as e:
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
continue
else:
raise e
break
def stmt_insert_into_stable():
before()
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
while True:
try:
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')
stmt.set_tags([
taosws.int_to_tag(1),
taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
print(rows)
stmt.close()
except Exception as e:
if 'Retry needed' in e.args[0]: # deal with [0x0125] Retry needed
continue
else:
raise e
break

View File

@ -0,0 +1,78 @@
#!
import time
import taosws
import taos
def before_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(
"create table stb1 (ts timestamp, a int, b float, c varchar(10)) tags (t1 int, t2 binary(10))")
taos_conn.close()
def after_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.close()
def stmt_insert():
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
before_test(db_name)
conn = taosws.connect('taosws://root:taosdata@localhost:6041/%s' % db_name)
stmt = conn.statement()
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
assert rows == 4
stmt.close()
after_test(db_name)
def stmt_insert_into_stable():
db_name = 'test_ws_stmt_{}'.format(int(time.time()))
before_test(db_name)
conn = taosws.connect("taosws://root:taosdata@localhost:6041/%s" % db_name)
stmt = conn.statement()
stmt.prepare("insert into ? using stb1 tags (?, ?) values (?, ?, ?, ?)")
stmt.set_tbname('stb1_1')
stmt.set_tags([
taosws.int_to_tag(1),
taosws.varchar_to_tag('aaa'),
])
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
stmt.add_batch()
rows = stmt.execute()
assert rows == 4
stmt.close()
after_test(db_name)
if __name__ == '__main__':
stmt_insert()
stmt_insert_into_stable()

View File

@ -0,0 +1,58 @@
import taos
from taos.tmq import Consumer
import taosws
def prepare():
conn = taos.connect()
conn.execute("drop topic if exists tmq_assignment_demo_topic")
conn.execute("drop database if exists tmq_assignment_demo_db")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
conn.select_db("tmq_assignment_demo_db")
conn.execute(
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
conn.execute(
"create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
def taos_get_assignment_and_seek_demo():
prepare()
consumer = Consumer(
{
"group.id": "0",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
}
)
consumer.subscribe(["tmq_assignment_demo_topic"])
# get topic assignment
assignments = consumer.assignment()
for assignment in assignments:
print(assignment)
# poll
consumer.poll(1)
consumer.poll(1)
# get topic assignment again
after_poll_assignments = consumer.assignment()
for assignment in after_poll_assignments:
print(assignment)
# seek to the beginning
for assignment in assignments:
consumer.seek(assignment)
# now the assignment should be the same as before poll
assignments = consumer.assignment()
for assignment in assignments:
print(assignment)
if __name__ == '__main__':
taos_get_assignment_and_seek_demo()

View File

@ -0,0 +1,57 @@
import taos
import taosws
def prepare():
conn = taos.connect()
conn.execute("drop topic if exists tmq_assignment_demo_topic")
conn.execute("drop database if exists tmq_assignment_demo_db")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
conn.select_db("tmq_assignment_demo_db")
conn.execute(
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
conn.execute(
"create topic if not exists tmq_assignment_demo_topic as select ts, c1, c2, c3 from tmq_assignment_demo_table")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-2s, 1, 1.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now-1s, 2, 2.0, 'tmq test')")
conn.execute("insert into d0 using tmq_assignment_demo_table tags (0) values (now, 3, 3.0, 'tmq test')")
def taosws_get_assignment_and_seek_demo():
prepare()
consumer = taosws.Consumer(conf={
"td.connect.websocket.scheme": "ws",
# should disable snapshot,
# otherwise it will cause invalid params error
"experimental.snapshot.enable": "false",
"group.id": "0",
})
consumer.subscribe(["tmq_assignment_demo_topic"])
# get topic assignment
assignments = consumer.assignment()
for assignment in assignments:
print(assignment.to_string())
# poll
consumer.poll(1)
consumer.poll(1)
# get topic assignment again
after_poll_assignments = consumer.assignment()
for assignment in after_poll_assignments:
print(assignment.to_string())
# seek to the beginning
for assignment in assignments:
for a in assignment.assignments():
consumer.seek(assignment.topic(), a.vg_id(), a.offset())
# now the assignment should be the same as before poll
assignments = consumer.assignment()
for assignment in assignments:
print(assignment.to_string())
if __name__ == '__main__':
taosws_get_assignment_and_seek_demo()

View File

@ -105,6 +105,12 @@ class Consumer:
def poll(self, timeout: float = 1.0):
pass
def assignment(self):
pass
def seek(self, partition):
pass
def close(self):
pass
@ -237,6 +243,7 @@ TDengine uses SQL to create a topic
```sql
CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
```
- There is an upper limit on the number of topics; it is controlled by the parameter tmqMaxTopicNum and defaults to 20
TMQ supports multiple subscription types:
@ -259,14 +266,15 @@ CREATE TOPIC topic_name as subquery
Syntax:
```sql
CREATE TOPIC topic_name AS STABLE stb_name
CREATE TOPIC topic_name [with meta] AS STABLE stb_name [where_condition]
```
Compared with a `SELECT * from stbName` subscription, the differences are:
- It does not restrict the user's table schema changes.
- It returns unstructured data: the structure of the returned data follows changes to the supertable's schema.
- Each data block the user processes may have a different schema.
- The `with meta` parameter is optional; when specified, statements such as those creating the supertable and its child tables are also returned; it is mainly used by taosX for supertable migration.
- The `where_condition` parameter is optional; when specified, it filters the child tables matching the condition, and only these child tables are subscribed. The WHERE condition may not reference ordinary columns, only tags or tbname; functions may be used on tags for filtering, but not aggregate functions, because child-table tag values cannot be aggregated. A constant expression is also allowed, e.g. `2 > 1` (subscribe to all child tables) or `false` (subscribe to no child tables); see the sketch after this list.
- The returned data does not include tags.
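For illustration, a hedged Python sketch of creating such a filtered topic; the database, supertable, tag, and topic names are hypothetical:
```python
# A minimal sketch, assuming taospy is installed and a supertable `meters`
# with an integer tag `groupid` exists in database `power`; all of these
# names are hypothetical.
import taos

conn = taos.connect(database="power")
# Subscribe only to child tables whose tag matches, and include meta events.
conn.execute(
    "CREATE TOPIC IF NOT EXISTS topic_meters_g1 "
    "WITH META AS STABLE meters WHERE groupid = 1"
)
conn.close()
```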
### Database subscription
@ -274,11 +282,13 @@ CREATE TOPIC topic_name AS STABLE stb_name
Syntax:
```sql
CREATE TOPIC topic_name AS DATABASE db_name;
CREATE TOPIC topic_name [with meta] AS DATABASE db_name;
```
This statement creates a subscription covering the data of all tables in the database
- The `with meta` parameter is optional; when specified, the statements creating all supertables and child tables in the database are also returned; it is mainly used by taosX for database migration
## Creating a consumer *consumer*
A consumer is created through a set of configuration options; the basic options are listed in the table below (a configuration sketch follows the table):
@ -289,7 +299,7 @@ CREATE TOPIC topic_name AS DATABASE db_name;
| `td.connect.user` | string | User name | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | integer | Server port | |
| `group.id` | string | Consumer group ID; the same group shares consumption progress | **Required**. Max length: 192. |
| `group.id` | string | Consumer group ID; the same group shares consumption progress | <br />**Required**. Max length: 192.<br />At most 100 consumer groups per topic |
| `client.id` | string | Client ID | Max length: 192. |
| `auto.offset.reset` | enum | Initial subscription position of the consumer group | <br />`earliest`: default, subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic offset commits; true: commit automatically, no commit needed from the client application; false: the client application must commit manually | Default: true |
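A hedged configuration sketch built from the options above; the host, group, and client IDs are hypothetical:
```python
# A minimal sketch, assuming taospy is installed and a local TDengine
# server is running; the group and client IDs are hypothetical.
from taos.tmq import Consumer

consumer = Consumer(
    {
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "group.id": "demo_group",         # required, max length 192
        "client.id": "demo_client",
        "auto.offset.reset": "earliest",  # subscribe from the beginning
        "enable.auto.commit": "true",
    }
)
```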

View File

@ -17,7 +17,7 @@ TDengine supports defining UDFs in C and Python. The following sections illustrate this with examples
- An aggregate function must implement the aggregate interface functions aggfn_start, aggfn, and aggfn_finish.
- If initialization is needed, implement udf_init; if cleanup is needed, implement udf_destroy.
The interface function names are the UDF name, or the UDF name concatenated with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf in the list must be replaced with the UDF function name.
The interface function names are the UDF name, or the UDF name concatenated with a specific suffix (`_start`, `_finish`, `_init`, `_destroy`). scalarfn, aggfn, and udf in the list must be replaced with the UDF function name.
### Implementing a scalar function in C
The template for implementing a scalar function is as follows

View File

@ -36,14 +36,15 @@ The REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | Major changes | TDengine version |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.3 | Fixed ResultSet data parsing failures in some cases | - |
| 3.2.2 | New feature: data subscription supports seek. | 3.0.5.0 and later |
| 3.2.1 | New features: the WebSocket connection supports schemaless and prepareStatement writes. Change: consumer poll returns a ConsumerRecord result set, whose data can be obtained via value(). | 3.0.3.0 and later |
| 3.2.0 | Has connection issues; not recommended | - |
| 3.1.0 | The WebSocket connection supports subscription | - |
| 3.0.1 - 3.0.4 | Fixed result-set data parsing errors in some cases. 3.0.1 is built under JDK 11; in JDK 8 environments, other versions are recommended | - |
| 3.0.0 | Supports TDengine 3.0 | 3.0.0.0 and later |
| 2.0.42 | Fixed the wasNull return value of the WebSocket connection | - |
| 2.0.41 | Fixed the encoding of the user name and password in the REST connection | - |
| 2.0.42 | Fixed the wasNull return value of the WebSocket connection | - |
| 2.0.41 | Fixed the encoding of the user name and password in the REST connection | - |
| 2.0.39 - 2.0.40 | Added REST connection/request timeout settings | - |
| 2.0.38 | Added batch fetching to the JDBC REST connection | - |
| 2.0.37 | Added support for json tags | - |
@ -287,9 +288,9 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: fetch the result set in batches when executing queries; false: fetch the result set row by row. The default is false. Row-by-row fetching transfers data over HTTP. The JDBC REST connection supports batch fetching, in which taos-jdbcdriver and TDengine transfer data over a WebSocket connection. Compared with HTTP, WebSocket lets the JDBC REST connection handle large query results and improves query performance.
- charset: the character set used to parse string data when batch fetching is enabled.
- batchErrorIgnore: true: when one SQL statement fails during a Statement's executeBatch, the following statements are still executed; false: no statements after the failed SQL are executed. The default is false.
- httpConnectTimeout: connection timeout in ms, default 5000.
- httpSocketTimeout: socket timeout in ms, default 5000. Effective only when batchfetch is false.
- messageWaitTimeout: message timeout in ms, default 3000. Effective only when batchfetch is true.
- httpConnectTimeout: connection timeout in ms, default 60000.
- httpSocketTimeout: socket timeout in ms, default 60000. Effective only when batchfetch is false.
- messageWaitTimeout: message timeout in ms, default 60000. Effective only when batchfetch is true.
- useSSL: whether SSL is used in the connection.
- httpPoolSize: REST concurrent request pool size, default 20.
@ -355,9 +356,9 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client, defaults to the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: effective only for JDBC native connections. The client locale, defaults to the current system locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: effective only for JDBC native connections. The client time zone, defaults to the current system time zone.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: connection timeout in ms, default 5000. Effective only for REST connections.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms, default 5000. Effective only for REST connections with batchfetch set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in ms, default 3000. Effective only for REST connections with batchfetch set to true.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: connection timeout in ms, default 60000. Effective only for REST connections.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms, default 60000. Effective only for REST connections with batchfetch set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in ms, default 60000. Effective only for REST connections with batchfetch set to true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether SSL is used in the connection. Effective only for REST connections.
- TSDBDriver.HTTP_POOL_SIZE: REST concurrent request pool size, default 20.
In addition, for JDBC native connections, other parameters such as the log level and SQL length can be specified via the URL and Properties. For more detailed configuration, see [Client configuration](/reference/config/#仅客户端适用).

File diff suppressed because it is too large Load Diff

View File

@ -30,21 +30,57 @@ The Websocket connection supports all platforms that can run Rust.
| Rust connector version | TDengine version | Major features |
| :----------------: | :--------------: | :--------------------------------------------------: |
| v0.8.10 | 3.0.5.0 or later | Message subscription: get consumption progress and start consuming from a specified offset. |
| v0.8.12 | 3.0.5.0 or later | Message subscription: get consumption progress and start consuming from a specified offset. |
| v0.8.0 | 3.0.4.0 | Supports schemaless writes. |
| v0.7.6 | 3.0.3.0 | Supports using req_id in requests. |
| v0.6.0 | 3.0.0.0 | Basic features. |
The Rust connector is still evolving rapidly, and backward compatibility is not guaranteed before 1.0. Using TDengine 3.0 or later is recommended to avoid known issues.
## Installation
## Error handling
After an error occurs, the detailed error information can be retrieved:
```rust
match conn.exec(sql) {
Ok(_) => {
Ok(())
}
Err(e) => {
eprintln!("ERROR: {:?}", e);
Err(e)
}
}
```
## TDengine DataType and Rust DataType
TDengine currently supports timestamp, numeric, character, and boolean types; the corresponding Rust types are:
| TDengine DataType | Rust DataType |
| ----------------- | ----------------- |
| TIMESTAMP | Timestamp |
| INT | i32 |
| BIGINT | i64 |
| FLOAT | f32 |
| DOUBLE | f64 |
| SMALLINT | i16 |
| TINYINT | i8 |
| BOOL | bool |
| BINARY | Vec<u8\> |
| NCHAR | String |
| JSON | serde_json::Value |
**Note**: the JSON type is supported only in tags.
## Installation steps
### Pre-installation preparation
* Install the Rust development toolchain
* If you use the native connection, install the TDengine client driver; see [Install client driver](../#安装客户端驱动) for the steps
### Add the taos dependency
### Install the connector
Depending on the chosen connection method, add the [taos][taos] dependency to your [Rust](https://rust-lang.org) project as follows:
@ -151,7 +187,8 @@ let builder = TaosBuilder::from_dsn("taos://localhost:6030")?;
let conn1 = builder.build();
// use websocket protocol.
let conn2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let builder2 = TaosBuilder::from_dsn("taos+ws://localhost:6041")?;
let conn2 = builder2.build();
```
Once the connection is established, you can perform database operations:
@ -233,41 +270,191 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
## Usage examples
### Writing data
### Creating databases and tables
#### SQL write
```rust
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build()?;
let db = "query";
// create database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
// create table
taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
]).await?;
    Ok(())
}
```
> **Note**: if you do not use `use db` to specify the database, subsequent table operations must prefix table names with the database name, e.g. db.tb.
### Inserting data
<RustInsert />
#### STMT write
<RustBind />
#### Schemaless write
<RustSml />
### Querying data
<RustQuery />
## API reference
### Executing SQL with a req_id
### Connection builder
Build a connection builder from a DSN.
The req_id can be used for request tracing.
```rust
let cfg = TaosBuilder::default().build()?;
let rs = taos.query_with_req_id("select * from stable where tag1 is null", 1)?;
```
Use the `builder` object to create multiple connections:
### Writing data with parameter binding
TDengine's Rust connector implements parameter binding for the data write (INSERT) scenario. Writing data this way avoids the overhead of SQL parsing and can significantly improve write performance in many cases.
See the [API reference](#stmt-api) for the parameter binding interface
<RustBind />
### Schemaless writes
TDengine supports schemaless writes, compatible with the InfluxDB line protocol, the OpenTSDB telnet line protocol, and the OpenTSDB JSON protocol. See [Schemaless writes](../../reference/schemaless/) for details.
<RustSml />
### Schemaless writes with a req_id
The req_id can be used for request tracing.
```rust
let conn: Taos = cfg.build();
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(100u64)
.build()?;
client.put(&sml_data)?
```
### Connection pool
### Data subscription
TDengine starts a subscription through the message queue [TMQ](../../../taos-sql/tmq/).
#### Creating a topic
```rust
taos.exec_many([
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
```
#### Creating a consumer
Build a TMQ connector from the DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
#### Subscribing and consuming data
A consumer can subscribe to one or more `TOPIC`s.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ message queue is a [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html); each message can be consumed with the corresponding API and marked as consumed with `.commit`.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
```
Get the consumption progress:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
#### Seeking to a subscription offset
Consume from a specified offset:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
#### Closing the subscription
```rust
consumer.unsubscribe().await;
```
The following options can be configured in the TMQ DSN; note that `group.id` is required.
- `group.id`: within the same consumer group, messages are load-balanced with at-least-once consumption.
- `client.id`: an optional subscription client identifier.
- `auto.offset.reset`: optionally sets the initial subscription position; *earliest* subscribes from the beginning, *latest* only from the latest data; the default is to subscribe from the beginning. Note that this option takes effect only once for a given `group.id`.
- `enable.auto.commit`: when set to `true`, enables automatic commit mode, which can be used when data consistency is not critical.
- `auto.commit.interval.ms`: the interval for automatic commits.
#### Complete example
For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
### Using a connection pool
In complex applications, enabling a connection pool is recommended. The [taos] connection pool uses [deadpool] by default (async mode).
@ -295,7 +482,17 @@ let pool: Pool<TaosBuilder> = Pool::builder(Manager::from_dsn(self.dsn.clone()).
let taos = pool.get()?;
```
### Connection
### More sample programs
The sample program sources are under `TDengine/examples/rust`:
See: [rust example](https://github.com/taosdata/TDengine/tree/3.0/examples/rust)
## FAQ
See the [FAQ](../../../train-faq/faq)
## API reference
The [Taos][struct.Taos] object provides APIs for several database operations
@ -381,9 +578,13 @@ let taos = pool.get()?;
- `.create_database(database: &str)`: executes the `CREATE DATABASE` statement.
- `.use_database(database: &str)`: executes the `USE` statement.
Besides that, this struct is also the entry point for [parameter binding](#参数绑定接口) and the [line protocol interface](#行协议接口); see the specific API descriptions for usage.
Besides that, this struct is also the entry point for parameter binding and the line protocol interface; see the specific API descriptions for usage.
### Parameter binding interface
<p>
<a id="stmt-api" style={{color:'#141414'}}>
Parameter binding interface
</a>
</p>
Similar to the C interface, Rust provides a parameter binding interface. First, create a parameter binding object [Stmt] for a SQL statement from the [Taos][struct.Taos] object:
@ -394,7 +595,7 @@ stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)")?;
The parameter binding object provides a set of interfaces for parameter binding:
#### `.set_tbname(name)`
`.set_tbname(name)`
Binds the table name.
@ -403,7 +604,7 @@ let mut stmt = taos.stmt("insert into ? values(? ,?)")?;
stmt.set_tbname("d0")?;
```
#### `.set_tags(&[tag])`
`.set_tags(&[tag])`
When the SQL statement uses a supertable, binds the child table name and tag values:
@ -413,7 +614,7 @@ stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
```
#### `.bind(&[column])`
`.bind(&[column])`
Binds value types. Build the required types with the [ColumnView] struct and bind them:
@ -437,7 +638,7 @@ let params = vec![
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
```
#### `.execute()`
`.execute()`
Executes the SQL. A [Stmt] object can be reused and re-bound after execution. Before executing, make sure all data has been added to the execution queue via `.add_batch`.
@ -452,92 +653,6 @@ stmt.execute()?;
For a runnable example, see the [example on GitHub](https://github.com/taosdata/taos-connector-rust/blob/main/examples/bind.rs).
### Subscription
TDengine starts a subscription through the message queue [TMQ](../../../taos-sql/tmq/).
Build a TMQ connector from the DSN.
```rust
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
```
Create a consumer:
```rust
let mut consumer = tmq.build()?;
```
A consumer can subscribe to one or more `TOPIC`s.
```rust
consumer.subscribe(["tmq_meters"]).await?;
```
The TMQ message queue is a [futures::Stream](https://docs.rs/futures/latest/futures/stream/index.html); each message can be consumed with the corresponding API and marked as consumed with `.commit`.
```rust
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
```
Get the consumption progress:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
let assignments = consumer.assignments().await.unwrap();
```
Consume from a specified offset:
Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
```rust
consumer.offset_seek(topic, vgroup_id, offset).await;
```
Stop the subscription:
```rust
consumer.unsubscribe().await;
```
The following options can be configured in the TMQ DSN; note that `group.id` is required.
- `group.id`: within the same consumer group, messages are load-balanced with at-least-once consumption.
- `client.id`: an optional subscription client identifier.
- `auto.offset.reset`: optionally sets the initial subscription position; *earliest* subscribes from the beginning, *latest* only from the latest data; the default is to subscribe from the beginning. Note that this option takes effect only once for a given `group.id`.
- `enable.auto.commit`: when set to `true`, enables automatic commit mode, which can be used when data consistency is not critical.
- `auto.commit.interval.ms`: the interval for automatic commits.
For a complete subscription example, see the [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
For usage of other related struct APIs, see the hosted Rust documentation: <https://docs.rs/taos>.

View File

@ -1,4 +1,5 @@
---
toc_max_heading_level: 4
sidebar_label: Python
title: TDengine Python Connector
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. taospy wraps both the native interface and the REST interface of TDengine, in the taos and taosrest submodules respectively. Beyond those wrappers, taospy also provides a programming interface that conforms to the Python Database API Specification (PEP 249), which makes it easy to integrate taospy with third-party tools such as SQLAlchemy and pandas"
@ -21,18 +22,76 @@ The Python connector's source code is hosted on [GitHub](https://github.com/taosdata/taos-con
- The native connection supports the same platforms as the TDengine client; see [supported platforms](../#支持的平台).
- The REST connection supports all platforms that can run Python.
## Version selection
Whatever version of TDengine you use, the latest version of `taospy` is recommended.
## Supported features
### Supported features
- The native connection supports all core features of TDengine, including connection management, SQL execution, parameter binding, subscription, and schemaless writes.
- The REST connection supports connection management and SQL execution. (Through SQL you can manage databases, tables, and supertables, write data, query data, create continuous queries, and so on.)
## Installation
## Version history
### Preparation
Whatever version of TDengine you use, the latest version of `taospy` is recommended.
|Python Connector version|Major changes|
|:-------------------:|:----:|
|2.7.9|Data subscription supports getting and resetting consumption progress|
|2.7.8|Added `execute_many` (a sketch follows this table)|
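A hedged sketch of `execute_many`; the table name is hypothetical, and the %s-style placeholder shown here is an assumption about the parameter-substitution convention:
```python
# A minimal sketch, assuming taospy >= 2.7.8 and a table `tb` with
# (ts timestamp, n int) in database `test`; all names are hypothetical.
import taos

conn = taos.connect(database="test")
# Execute one parameterized statement over a sequence of parameter tuples.
affected = conn.execute_many(
    "INSERT INTO tb VALUES('%s', %s)",
    [
        ("2023-07-01 00:00:00.000", 1),
        ("2023-07-01 00:00:01.000", 2),
    ],
)
print(affected)
conn.close()
```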
|Python Websocket Connector version|Major changes|
|:----------------------------:|:-----:|
|0.2.5|1. Data subscription supports getting and resetting consumption progress <br/> 2. Supports schemaless <br/> 3. Supports STMT|
|0.2.4|Added an unsubscribe method to data subscription|
## Exception handling
The Python connector may raise the following kinds of exceptions:
- exceptions from the Python connector itself
- exceptions from the native connection method
- exceptions from the websocket connection method
- data subscription exceptions
- exceptions from other TDengine modules
|Error Type|Description|Suggested Actions|
|:--------:|:---------:|:---------------:|
|InterfaceError|the taosc version is too low and does not support the interface used|check the TDengine client version|
|ConnectionError|database connection error|check the TDengine server status and the connection parameters|
|DatabaseError|database error|check the TDengine server version and upgrade the Python connector to the latest version|
|OperationalError|operational error|API usage error; check your code|
|ProgrammingError|||
|StatementError|stmt-related exception||
|ResultError|||
|SchemalessError|schemaless-related exception||
|TmqError|tmq-related exception||
Exceptions are usually handled in Python with try-except; see the [Python Errors and Exceptions documentation](https://docs.python.org/3/tutorial/errors.html) for details.
If any database operation in the Python connector fails, the exception is raised directly; the application is responsible for handling it. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
## TDengine DataType and Python DataType
TDengine currently supports timestamp, numeric, character, and boolean types; the corresponding Python types are:
|TDengine DataType|Python DataType|
|:---------------:|:-------------:|
|TIMESTAMP|datetime|
|INT|int|
|BIGINT|int|
|FLOAT|float|
|DOUBLE|float|
|SMALLINT|int|
|TINYINT|int|
|BOOL|bool|
|BINARY|str|
|NCHAR|str|
|JSON|str|
## Installation steps
### Pre-installation preparation
1. Install Python. Recent versions of taospy require Python 3.6.2+ (earlier versions required Python 3.7+). taos-ws-py requires Python 3.7+. If Python is not installed yet, see the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download).
2. Install [pip](https://pypi.org/project/pip/). In most cases the Python installer ships with pip; if not, see the [pip documentation](https://pip.pypa.io/en/stable/installation/).
@ -274,7 +333,7 @@ Transfer-Encoding: chunked
</TabItem>
</Tabs>
## Sample programs
## Usage examples
### Basic usage
@ -343,6 +402,10 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
##### Using the Connection class
The `Connection` class both implements the PEP249 Connection interface (e.g. the cursor and close methods) and provides many extensions (e.g. the execute, query, schemaless_insert, and subscribe methods).
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
@ -353,6 +416,46 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
</TabItem>
</Tabs>
### Querying data
<Tabs defaultValue="rest">
<TabItem value="native" label="Native connection">
The `query` method of the `TaosConnection` class can be used to query data; it returns results of type `TaosResult`.
```python
{{#include docs/examples/python/connection_usage_native_reference.py:query}}
```
:::tip
Query results can be fetched only once. For example, in the snippet above only one of `fetch_all()` and `fetch_all_into_dict()` can be used; fetching again returns an empty list, as the sketch after this tip demonstrates.
:::
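A minimal hedged sketch of the fetch-once behavior; the database and table names are hypothetical:
```python
# A minimal sketch, assuming taospy is installed; `test.tb` is hypothetical.
import taos

conn = taos.connect(database="test")
result = conn.query("SELECT * FROM tb")
first = result.fetch_all()   # returns the rows
second = result.fetch_all()  # already consumed: returns an empty list
print(len(first), len(second))
```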
</TabItem>
<TabItem value="rest" label="REST connection">
The RestClient class is a direct wrapper of the REST API. It contains only a sql() method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
```
For a more detailed introduction to the `sql()` method, see [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
The `query` method of the `TaosConnection` class can be used to query data; it returns results of type `TaosResult`.
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
</TabItem>
</Tabs>
### Using req_id
Use the optional req_id parameter to specify a request id, which can be used for tracing
@ -456,27 +559,169 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
### Data subscription
The connector supports data subscription; see [Data subscription](../../develop/tmq/).
The connector supports data subscription; see the [data subscription documentation](../../develop/tmq/).
<Tabs defaultValue="native">
<TabItem value="native" label="Native connection">
`Consumer` provides the Python connector API for subscribing to TMQ data; for the API definitions, see the [data subscription documentation](../../develop/tmq/#%E4%B8%BB%E8%A6%81%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E5%92%8C-api).
`Consumer` provides the Python connector API for subscribing to TMQ data.
##### Creating a Consumer
The syntax for creating a Consumer is `consumer = Consumer(configs)`; for the parameter definitions, see the [data subscription documentation](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer).
```python
from taos.tmq import Consumer
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
```
##### Subscribing to topics
The Consumer API's `subscribe` method subscribes to topics; a consumer can subscribe to multiple topics at once.
```python
consumer.subscribe(['topic1', 'topic2'])
```
##### Consuming data
The Consumer API's `poll` method consumes data. `poll` takes a float timeout in seconds; it returns one Message before the timeout, or `None` on timeout. The consumer must check the returned data's error information with the Message's `error()` method.
```python
while True:
res = consumer.poll(1)
if not res:
continue
err = res.error()
if err is not None:
raise err
val = res.value()
for block in val:
print(block.fetchall())
```
##### Getting consumption progress
The Consumer API's `assignment` method returns the consumption progress of all topics the Consumer subscribes to, as a list of TopicPartition.
```python
assignments = consumer.assignment()
```
##### Seeking to an offset
The Consumer API's `seek` method resets the Consumer's consumption progress to a specified position; its parameter type is TopicPartition.
```python
tp = TopicPartition(topic='topic1', partition=0, offset=0)
consumer.seek(tp)
```
##### Closing the subscription
After consumption is finished, unsubscribe and close the Consumer.
```python
consumer.unsubscribe()
consumer.close()
```
##### Complete example
```python
{{#include docs/examples/python/tmq_example.py}}
```
##### Sample code for getting and resetting consumption progress
```python
{{#include docs/examples/python/tmq_assignment_example.py:taos_get_assignment_and_seek_demo}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
Besides the native connection, the Python connector also supports subscribing to TMQ data over websocket.
Besides the native connection, the Python connector also supports subscribing to TMQ data over websocket; this requires installing `taos-ws-py`.
The taosws `Consumer` API provides a Websocket-based API for subscribing to TMQ data.
##### Creating a Consumer
The syntax for creating a Consumer is `consumer = Consumer(conf=configs)`; the `td.connect.websocket.scheme` parameter must be set to "ws". For the parameter definitions, see the [data subscription documentation](../../develop/tmq/#%E5%88%9B%E5%BB%BA%E6%B6%88%E8%B4%B9%E8%80%85-consumer).
```python
import taosws
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
##### Subscribing to topics
The Consumer API's `subscribe` method subscribes to topics; a consumer can subscribe to multiple topics at once.
```python
consumer.subscribe(['topic1', 'topic2'])
```
##### Consuming data
The Consumer API's `poll` method consumes data. `poll` takes a float timeout in seconds; it returns one Message before the timeout, or `None` on timeout. The consumer must check the returned data's error information with the Message's `error()` method.
```python
while True:
res = consumer.poll(timeout=1.0)
if not res:
continue
err = res.error()
if err is not None:
raise err
for block in res:
for row in block:
print(row)
```
##### Getting consumption progress
The Consumer API's `assignment` method returns the consumption progress of all topics the Consumer subscribes to, as a list of TopicPartition.
```python
assignments = consumer.assignment()
```
##### Resetting consumption progress
The Consumer API's `seek` method resets the Consumer's consumption progress to a specified position.
```python
consumer.seek(topic='topic1', partition=0, offset=0)
```
##### Ending consumption
After consumption is finished, unsubscribe and close the Consumer.
```python
consumer.unsubscribe()
consumer.close()
```
##### TMQ subscription sample code
```python
{{#include docs/examples/python/tmq_websocket_example.py}}
```
The connector provides an `assignment` interface for obtaining the topic assignment and querying the consumption progress of subscribed topics, and a `seek` interface for resetting a topic's consumption progress.
##### Sample code for getting and resetting consumption progress
```python
{{#include docs/examples/python/tmq_websocket_assgnment_example.py:taosws_get_assignment_and_seek_demo}}
```
</TabItem>
</Tabs>
@ -487,19 +732,19 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
<Tabs defaultValue="list">
<TabItem value="list" label="List write">
Simple write
##### Simple write
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Write with the ttl parameter
##### Write with the ttl parameter
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Write with the req_id parameter
##### Write with the req_id parameter
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
@ -509,19 +754,19 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
<TabItem value="raw" label="Raw write">
Simple write
##### Simple write
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Write with the ttl parameter
##### Write with the ttl parameter
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Write with the req_id parameter
##### Write with the req_id parameter
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
@ -530,7 +775,142 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
</TabItem>
</Tabs>
### Other sample programs
### Writing data with parameter binding
TDengine's Python connector supports the parameter-binding-style Prepare API for writing data; like most databases, it currently supports only `?` as the placeholder for the parameters to bind.
<Tabs>
<TabItem value="native" label="Native connection">
##### Creating a stmt
The Python connector's `Connection` provides a `statement` method for creating the parameter-binding object stmt; the method takes a sql string, which currently supports only `?` as the placeholder for the bound parameters.
```
import taos
conn = taos.connect()
stmt = conn.statement("insert into log values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
```
##### Binding parameters
Call the `new_multi_binds` function to create a params list for parameter binding.
```
params = new_multi_binds(16)
params[0].timestamp((1626861392589, 1626861392590, 1626861392591))
params[1].bool((True, None, False))
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
params[3].tinyint([0, 127, None])
params[4].smallint([3, None, 2])
params[5].int([3, 4, None])
params[6].bigint([3, 4, None])
params[7].tinyint_unsigned([3, 4, None])
params[8].smallint_unsigned([3, 4, None])
params[9].int_unsigned([3, 4, None])
params[10].bigint_unsigned([3, 4, None])
params[11].float([3, None, 1])
params[12].double([3, None, 1.2])
params[13].binary(["abc", "dddafadfadfadfadfa", None])
params[14].nchar(["涛思数据", None, "a long string with 中文字符"])
params[15].timestamp([None, None, 1626861392591])
```
Bind the parameters by calling the stmt's `bind_param` method (set values one row at a time) or `bind_param_batch` method (set values multiple rows at a time).
```
stmt.bind_param_batch(params)
```
##### Executing the sql
Call the stmt's `execute` method to execute the sql
```
stmt.execute()
```
##### Closing the stmt
Finally, close the stmt.
```
stmt.close()
```
##### Sample code
```python
{{#include docs/examples/python/stmt_example.py}}
```
</TabItem>
<TabItem value="websocket" label="WebSocket connection">
##### Creating a stmt
The Python WebSocket connector's `Connection` provides a `statement` method for creating the parameter-binding object stmt; the sql string currently supports only `?` as the placeholder for the bound parameters.
```
import taosws
conn = taosws.connect('taosws://localhost:6041/test')
stmt = conn.statement()
```
##### Parsing the sql
Call the stmt's `prepare` method to parse the insert statement.
```
stmt.prepare("insert into t1 values (?, ?, ?, ?)")
```
##### Binding parameters
Call the stmt's `bind_param` method to bind parameters.
```
stmt.bind_param([
taosws.millis_timestamps_to_column([1686844800000, 1686844801000, 1686844802000, 1686844803000]),
taosws.ints_to_column([1, 2, 3, 4]),
taosws.floats_to_column([1.1, 2.2, 3.3, 4.4]),
taosws.varchar_to_column(['a', 'b', 'c', 'd']),
])
```
Call the stmt's `add_batch` method to add the bound parameters to the batch.
```
stmt.add_batch()
```
##### Executing the sql
Call the stmt's `execute` method to execute the sql
```
stmt.execute()
```
##### Closing the stmt
Finally, close the stmt.
```
stmt.close()
```
##### Sample code
```python
{{#include docs/examples/python/stmt_websocket_example.py}}
```
</TabItem>
</Tabs>
### More sample programs
| Sample program link | Sample program content |
| ------------------------------------------------------------------------------------------------------------- | ----------------------- |
@ -542,14 +922,6 @@ The TaosCursor class uses the native connection for writes and queries. In a multi-threaded client
## Other notes
### Exception handling
If any database operation fails, the exception is raised directly; the application is responsible for handling it. For example:
```python
{{#include docs/examples/python/handle_exception.py}}
```
### About nanoseconds
Because Python's nanosecond support is still incomplete (see the links below), the current implementation returns integers at nanosecond precision instead of the datetime type returned at ms and us precision. Application developers need to handle this themselves; pandas' to_datetime() is recommended. The Python connector may change this interface once Python fully supports nanoseconds.

View File

@ -42,7 +42,7 @@ database_option: {
### Parameter description
- BUFFER: the size of a vnode's write memory pool, in MB; default 96, minimum 3, maximum 16384.
- BUFFER: the size of a vnode's write memory pool, in MB; default 256, minimum 3, maximum 16384.
- CACHEMODEL: whether to cache recent child-table data in memory; default none. A sketch of these options follows this list.
  - none: do not cache.
  - last_row: cache the most recent row of each child table, which significantly improves the performance of the LAST_ROW function.
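A hedged sketch of setting these options when creating a database from Python; the database name is hypothetical:
```python
# A minimal sketch, assuming taospy is installed; the database name and
# option values are hypothetical and simply mirror the text above.
import taos

conn = taos.connect()
conn.execute("CREATE DATABASE IF NOT EXISTS power BUFFER 256 CACHEMODEL 'last_row'")
conn.close()
```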

View File

@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...

View File

@ -890,9 +890,10 @@ ignore_null_values: {
- INTERP is used to obtain the value of a specified column at a specified time cross-section; if no row satisfies the condition at that time, interpolation is performed according to the FILL settings.
- INTERP's input is the data of the specified column; a where clause can pre-filter the original column data. If no filter is specified, the input is all of the data.
- INTERP must be used together with RANGE, EVERY, and FILL.
- The output time range of INTERP is specified by RANGE(timestamp1,timestamp2), which must satisfy timestamp1 <= timestamp2. timestamp1 (required) is the start of the output range: if timestamp1 satisfies the interpolation condition, it is the first output record. timestamp2 (required) is the end of the output range: the timestamp of the last output record cannot be greater than timestamp2.
- The output time range of INTERP is specified by RANGE(timestamp1, timestamp2), which must satisfy timestamp1 <= timestamp2. timestamp1 is the start of the output range: if timestamp1 satisfies the interpolation condition, it is the first output record. timestamp2 is the end of the output range: the timestamp of the last output record cannot be greater than timestamp2.
- INTERP determines the number of output results in the range by EVERY(time_unit): interpolation happens at fixed intervals of time_unit starting from timestamp1. time_unit accepts time units such as 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), 1w (week). For example, EVERY(500a) interpolates the data every 500 milliseconds.
- INTERP decides how to interpolate at each qualifying output time according to the FILL clause; see the [FILL clause](../distinguished/#fill-子句) for usage.
- INTERP can specify a single timestamp in RANGE to interpolate at a single point, in which case EVERY can be omitted, e.g. SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
- When applied to a supertable, INTERP sorts all child-table data by the primary key column before interpolating; it can also be combined with PARTITION BY tbname to force the result onto a single timeline.
- INTERP can be used with the pseudocolumn _irowts to return the timestamp of the interpolated point (supported since 3.0.2.0).
- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned row is an original record or generated by interpolation (supported since 3.0.3.0); a query sketch follows this list.
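A hedged Python sketch of an INTERP query combining RANGE, EVERY, FILL, and the pseudocolumns above; the database, table, and column names are hypothetical:
```python
# A minimal sketch, assuming taospy is installed and a table `d0` with a
# float column `current` exists in database `power`; all names are
# hypothetical.
import taos

conn = taos.connect(database="power")
result = conn.query(
    "SELECT _irowts, _isfilled, INTERP(current) FROM d0 "
    "RANGE('2023-07-01 00:00:00', '2023-07-01 00:01:00') "
    "EVERY(10s) FILL(linear)"
)
print(result.fetch_all())
```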
@ -990,18 +991,14 @@ SAMPLE(expr, k)
**Description**: returns k sample values of the data. The valid input range of k is 1 ≤ k ≤ 1000.
**Return type**: same as the original data type; the result also carries the timestamp of the sampled rows
**Return type**: same as the original data type.
**Applicable data types**: when used in a supertable query, it cannot be applied to tag columns
**Applicable data types**: all type fields
**Nested subquery support**: applicable to both inner and outer queries.
**Applicable to**: tables and supertables.
**Usage notes**:
- cannot take part in expression computation; this function can be applied to normal tables and supertables;
### TAIL
@ -1046,11 +1043,11 @@ TOP(expr, k)
UNIQUE(expr)
```
**Description**: returns the first occurrence of each value in the column. Similar in function to distinct, but it can also match tag and timestamp information; it can query any field except the time column, returning the tag and timestamp of the first occurrence.
**Description**: returns the first occurrence of each value in the column. Similar in function to distinct.
**Return type**: same as the applied field.
**Applicable data types**: all fields except time types.
**Applicable data types**: all type fields.
**Applicable to**: tables and supertables.

View File

@ -36,7 +36,7 @@ SHOW CONNECTIONS;
SHOW CONSUMERS;
```
Shows information about all active consumers in the current database.
Shows information about all consumers in the current database.
## SHOW CREATE DATABASE

37
docs/zh/20-third-party/13-dbeaver.md vendored Normal file
View File

@ -0,0 +1,37 @@
---
sidebar_label: DBeaver
title: DBeaver
description: A detailed guide to accessing TDengine data with DBeaver
---
DBeaver is a popular cross-platform database management tool that makes data management easy for developers, database administrators, data analysts, and other users. Starting with version 23.1.1, DBeaver has built-in support for TDengine, covering both on-premises TDengine clusters and TDengine Cloud.
## Prerequisites
Using DBeaver to manage TDengine requires the following preparation.
- Install DBeaver. DBeaver supports mainstream operating systems, including Windows, macOS, and Linux. Make sure to [download](https://dbeaver.io/download/) the installer for the correct platform and version (23.1.1+). For detailed installation steps, see the [official DBeaver documentation](https://github.com/dbeaver/dbeaver/wiki/Installation).
- If you use an on-premises TDengine cluster, make sure TDengine is running normally and taosAdapter is installed and running; for details, see the [taosAdapter user manual](/reference/taosadapter).
## Using DBeaver to access an on-premises TDengine
1. Start the DBeaver application, click the button or menu item for "Connect to a database", and select TDengine in the Timeseries category.
![Connect TDengine with DBeaver](./dbeaver/dbeaver-connect-tdengine-zh.webp)
2. Configure the TDengine connection by filling in the host address, port, username, and password. If TDengine is deployed on the local machine, you can fill in only the username and password; the default username is root and the default password is taosdata. Click "Test Connection" to check whether the connection is usable. If the TDengine Java connector is not installed on the local machine, DBeaver will prompt you to download and install it.
![Configure the TDengine connection](./dbeaver/dbeaver-config-tdengine-zh.webp)
3. A successful connection is displayed as shown below. If the connection fails, check whether the TDengine service and taosAdapter are running correctly and whether the host address, port, username, and password are correct.
![Connection successful](./dbeaver/dbeaver-connect-tdengine-test-zh.webp)
4. Use DBeaver to select databases and tables and browse the data on the TDengine server.
![Browse TDengine data with DBeaver](./dbeaver/dbeaver-browse-data-zh.webp)
5. You can also operate on TDengine data by executing SQL commands.
![DBeaver SQL commands](./dbeaver/dbeaver-sql-execution-zh.webp)

(Binary files not shown: ten .webp screenshots added for the DBeaver documentation, 37-79 KiB each.)
View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.6.0
<Release type="tdengine" version="3.0.6.0" />
## 3.0.5.1
<Release type="tdengine" version="3.0.5.1" />

View File

@ -198,6 +198,7 @@ typedef struct SDataBlockInfo {
SBlockID id;
int16_t hasVarCol;
int16_t dataLoad; // denote if the data is loaded or not
uint8_t scanFlag;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization

View File

@ -187,6 +187,7 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p
int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex);
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows);
int32_t colDataGetRowLength(const SColumnInfoData* pColumnInfoData, int32_t rowIdx);
void colDataTrim(SColumnInfoData* pColumnInfoData);
size_t blockDataGetNumOfCols(const SSDataBlock* pBlock);
@ -248,6 +249,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);

View File

@ -83,6 +83,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention;
extern int8_t tsGrant;
extern bool tsMndSkipGrant;
// monitor
extern bool tsEnableMonitor;
@ -119,6 +120,7 @@ extern bool tsQueryUseNodeAllocator;
extern bool tsKeepColumnName;
extern bool tsEnableQueryHb;
extern bool tsEnableScience;
extern bool tsTtlChangeOnWrite;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
@ -184,6 +186,7 @@ extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval;
extern bool tsFilterScalarMode;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

View File

@ -106,7 +106,6 @@ enum {
HEARTBEAT_KEY_DBINFO,
HEARTBEAT_KEY_STBINFO,
HEARTBEAT_KEY_TMQ,
HEARTBEAT_KEY_USER_PASSINFO,
};
typedef enum _mgmt_table {
@ -636,6 +635,7 @@ typedef struct {
SEpSet epSet;
int32_t svrTimestamp;
int32_t passVer;
int32_t authVer;
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
} SConnectRsp;
@ -703,6 +703,7 @@ int32_t tDeserializeSGetUserAuthReq(void* buf, int32_t bufLen, SGetUserAuthReq*
typedef struct {
char user[TSDB_USER_LEN];
int32_t version;
int32_t passVer;
int8_t superAuth;
int8_t sysInfo;
int8_t enable;
@ -719,14 +720,6 @@ int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pR
int32_t tDeserializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp);
void tFreeSGetUserAuthRsp(SGetUserAuthRsp* pRsp);
typedef struct SUserPassVersion {
char user[TSDB_USER_LEN];
int32_t version;
} SUserPassVersion;
typedef SGetUserAuthReq SGetUserPassReq;
typedef SUserPassVersion SGetUserPassRsp;
/*
* for client side struct, only column id, type, bytes are necessary
* But for data in vnode side, we need all the following information.
@ -945,7 +938,7 @@ int32_t tSerializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
typedef struct {
int32_t timestamp;
int32_t timestampSec;
} SVDropTtlTableReq;
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
@ -1070,14 +1063,6 @@ int32_t tSerializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp
int32_t tDeserializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchRsp* pRsp);
void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);
typedef struct {
SArray* pArray; // Array of SGetUserPassRsp
} SUserPassBatchRsp;
int32_t tSerializeSUserPassBatchRsp(void* buf, int32_t bufLen, SUserPassBatchRsp* pRsp);
int32_t tDeserializeSUserPassBatchRsp(void* buf, int32_t bufLen, SUserPassBatchRsp* pRsp);
void tFreeSUserPassBatchRsp(SUserPassBatchRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
STimeWindow timeRange;
@ -1975,6 +1960,7 @@ typedef struct {
SArray* fillNullCols; // array of SColLocation
int64_t deleteMark;
int8_t igUpdate;
int64_t lastTs;
} SCMCreateStreamReq;
typedef struct {
@ -2033,6 +2019,11 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
char clientId[256];
SArray* topicNames; // SArray<char**>
int8_t withTbName;
int8_t autoCommit;
int32_t autoCommitInterval;
int8_t resetOffsetCfg;
} SCMSubscribeReq;
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
@ -2047,6 +2038,12 @@ static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubsc
for (int32_t i = 0; i < topicNum; i++) {
tlen += taosEncodeString(buf, (char*)taosArrayGetP(pReq->topicNames, i));
}
tlen += taosEncodeFixedI8(buf, pReq->withTbName);
tlen += taosEncodeFixedI8(buf, pReq->autoCommit);
tlen += taosEncodeFixedI32(buf, pReq->autoCommitInterval);
tlen += taosEncodeFixedI8(buf, pReq->resetOffsetCfg);
return tlen;
}
@ -2064,6 +2061,11 @@ static FORCE_INLINE void* tDeserializeSCMSubscribeReq(void* buf, SCMSubscribeReq
buf = taosDecodeString(buf, &name);
taosArrayPush(pReq->topicNames, &name);
}
buf = taosDecodeFixedI8(buf, &pReq->withTbName);
buf = taosDecodeFixedI8(buf, &pReq->autoCommit);
buf = taosDecodeFixedI32(buf, &pReq->autoCommitInterval);
buf = taosDecodeFixedI8(buf, &pReq->resetOffsetCfg);
return buf;
}
@ -2261,7 +2263,7 @@ typedef struct SVCreateTbReq {
int32_t flags;
char* name;
tb_uid_t uid;
int64_t ctime;
int64_t btime;
int32_t ttl;
int32_t commentLen;
char* comment;
@ -2398,10 +2400,12 @@ typedef struct {
int32_t newTTL;
int32_t newCommentLen;
char* newComment;
int64_t ctimeMs; // fill by vnode
} SVAlterTbReq;
int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq);
int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs);
typedef struct {
int32_t code;
@ -2455,15 +2459,6 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
} SMqAskEpReq;
typedef struct {
int64_t consumerId;
int32_t epoch;
} SMqHbReq;
typedef struct {
int8_t reserved;
} SMqHbRsp;
typedef struct {
int32_t key;
int32_t valueLen;
@ -2487,6 +2482,7 @@ typedef struct {
int64_t stime; // timestamp precision ms
int64_t reqRid;
bool stableQuery;
bool isSubQuery;
char fqdn[TSDB_FQDN_LEN];
int32_t subPlanNum;
SArray* subDesc; // SArray<SQuerySubDesc>
@ -2891,7 +2887,7 @@ int32_t tDecodeSMqCMCommitOffsetReq(SDecoder* decoder, SMqCMCommitOffsetReq* pRe
// tqOffset
enum {
TMQ_OFFSET__RESET_NONE = -3,
TMQ_OFFSET__RESET_EARLIEAST = -2,
TMQ_OFFSET__RESET_EARLIEST = -2,
TMQ_OFFSET__RESET_LATEST = -1,
TMQ_OFFSET__LOG = 1,
TMQ_OFFSET__SNAPSHOT_DATA = 2,
@ -3365,6 +3361,16 @@ typedef struct{
SArray* offsetRows;
}TopicOffsetRows;
typedef struct {
int64_t consumerId;
int32_t epoch;
SArray* topics;
} SMqHbReq;
typedef struct {
int8_t reserved;
} SMqHbRsp;
#define TD_AUTO_CREATE_TABLE 0x1
typedef struct {
int64_t suid;
@ -3416,6 +3422,7 @@ typedef struct SDeleteRes {
int64_t affectedRows;
char tableFName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
int64_t ctimeMs; // fill by vnode
} SDeleteRes;
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
@ -3434,10 +3441,12 @@ int32_t tDecodeSSingleDeleteReq(SDecoder* pCoder, SSingleDeleteReq* pReq);
typedef struct {
int64_t suid;
SArray* deleteReqs; // SArray<SSingleDeleteReq>
int64_t ctimeMs; // fill by vnode
} SBatchDeleteReq;
int32_t tEncodeSBatchDeleteReq(SEncoder* pCoder, const SBatchDeleteReq* pReq);
int32_t tDecodeSBatchDeleteReq(SDecoder* pCoder, SBatchDeleteReq* pReq);
int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder* pDecoder, SBatchDeleteReq* pReq, int64_t ctimeMs);
typedef struct {
int32_t msgIdx;
@ -3489,10 +3498,8 @@ int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
int32_t tDeatroySMqHbReq(SMqHbReq* pReq);
#define SUBMIT_REQ_AUTO_CREATE_TABLE 0x1
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
@ -3507,6 +3514,7 @@ typedef struct {
SArray* aRowP;
SArray* aCol;
};
int64_t ctimeMs;
} SSubmitTbData;
typedef struct {

View File

@ -150,7 +150,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_HB, "consumer-hb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
TD_DEF_MSG_TYPE(TDMT_MND_UNUSED2, "unused2", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_VG, "create-vg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_TIMER, "tmq-tmr", SMTimerReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)

View File

@ -87,6 +87,7 @@ typedef struct SCatalogReq {
bool dNodeRequired; // valid dnode
bool svrVerRequired;
bool forceUpdate;
bool cloned;
} SCatalogReq;
typedef struct SMetaRes {

View File

@ -16,13 +16,13 @@
#ifndef TDENGINE_STORAGEAPI_H
#define TDENGINE_STORAGEAPI_H
#include "tsimplehash.h"
#include "tscalablebf.h"
#include "taosdef.h"
#include "tmsg.h"
#include "tcommon.h"
#include "index.h"
#include "function.h"
#include "index.h"
#include "taosdef.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tscalablebf.h"
#include "tsimplehash.h"
#ifdef __cplusplus
extern "C" {
@ -46,7 +46,7 @@ typedef struct SMetaEntry {
int8_t type;
int8_t flags; // TODO: need refactor?
tb_uid_t uid;
char * name;
char* name;
union {
struct {
SSchemaWrapper schemaRow;
@ -54,46 +54,48 @@ typedef struct SMetaEntry {
SRSmaParam rsmaParam;
} stbEntry;
struct {
int64_t ctime;
int64_t btime;
int32_t ttlDays;
int32_t commentLen;
char * comment;
char* comment;
tb_uid_t suid;
uint8_t *pTags;
uint8_t* pTags;
} ctbEntry;
struct {
int64_t ctime;
int64_t btime;
int32_t ttlDays;
int32_t commentLen;
char * comment;
char* comment;
int32_t ncid; // next column id
SSchemaWrapper schemaRow;
} ntbEntry;
struct {
STSma *tsma;
STSma* tsma;
} smaEntry;
};
uint8_t *pBuf;
uint8_t* pBuf;
} SMetaEntry;
typedef struct SMetaReader {
int32_t flags;
void * pMeta;
SDecoder coder;
SMetaEntry me;
void * pBuf;
int32_t szBuf;
struct SStoreMeta* pAPI;
int32_t flags;
void* pMeta;
SDecoder coder;
SMetaEntry me;
void* pBuf;
int32_t szBuf;
struct SStoreMeta* pAPI;
} SMetaReader;
typedef struct SMTbCursor {
void * pDbc;
void * pKey;
void * pVal;
void* pMeta;
void* pDbc;
void* pKey;
void* pVal;
int32_t kLen;
int32_t vLen;
SMetaReader mr;
int8_t paused;
} SMTbCursor;
typedef struct SRowBuffPos {
@ -107,22 +109,22 @@ typedef struct SRowBuffPos {
typedef struct SMetaTableInfo {
int64_t suid;
int64_t uid;
SSchemaWrapper *schema;
SSchemaWrapper* schema;
char tbName[TSDB_TABLE_NAME_LEN];
} SMetaTableInfo;
typedef struct SSnapContext {
SMeta * pMeta; // todo remove it
int64_t snapVersion;
void * pCur;
int64_t suid;
int8_t subType;
SHashObj * idVersion;
SHashObj * suidInfo;
SArray * idList;
int32_t index;
bool withMeta;
bool queryMeta; // true-get meta, false-get data
SMeta* pMeta; // todo remove it
int64_t snapVersion;
void* pCur;
int64_t suid;
int8_t subType;
SHashObj* idVersion;
SHashObj* suidInfo;
SArray* idList;
int32_t index;
bool withMeta;
bool queryMeta; // true-get meta, false-get data
} SSnapContext;
typedef struct {
@ -139,10 +141,9 @@ typedef struct {
// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
// bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
// bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
// SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx);
// int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
// int32_t destroySnapContext(SSnapContext *ctx);
// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
// *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
// *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);
// clang-format off
/*-------------------------------------------------new api format---------------------------------------------------*/
@ -219,16 +220,16 @@ typedef struct SStoreTqReader {
bool (*tqReaderIsQueriedTable)();
bool (*tqReaderCurrentBlockConsumed)();
struct SWalReader *(*tqReaderGetWalReader)(); // todo remove it
int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
struct SWalReader* (*tqReaderGetWalReader)(); // todo remove it
int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
int32_t (*tqReaderSetSubmitMsg)(); // todo remove it
bool (*tqReaderNextBlockFilterOut)();
} SStoreTqReader;
typedef struct SStoreSnapshotFn {
int32_t (*createSnapshot)(SSnapContext *ctx, int64_t uid);
int32_t (*destroySnapshot)(SSnapContext *ctx);
int32_t (*createSnapshot)(SSnapContext* ctx, int64_t uid);
int32_t (*destroySnapshot)(SSnapContext* ctx);
SMetaTableInfo (*getMetaTableInfoFromSnapshot)(SSnapContext* ctx);
int32_t (*getTableInfoFromSnapshot)(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid);
} SStoreSnapshotFn;
@ -252,42 +253,54 @@ int32_t metaUidFilterCachePut(SMeta *pMeta, uint64_t suid, const void *pKey, in
int32_t payloadLen, double selectivityRatio);
tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
int32_t metaGetCachedTbGroup(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen);
int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t
payloadLen);
*/
typedef struct SStoreMeta {
SMTbCursor *(*openTableMetaCursor)(void *pVnode); // metaOpenTbCursor
void (*closeTableMetaCursor)(SMTbCursor *pTbCur); // metaCloseTbCursor
int32_t (*cursorNext)(SMTbCursor *pTbCur, ETableType jumpTableType); // metaTbCursorNext
int32_t (*cursorPrev)(SMTbCursor *pTbCur, ETableType jumpTableType); // metaTbCursorPrev
SMTbCursor* (*openTableMetaCursor)(void* pVnode); // metaOpenTbCursor
void (*closeTableMetaCursor)(SMTbCursor* pTbCur); // metaCloseTbCursor
void (*pauseTableMetaCursor)(SMTbCursor* pTbCur); // metaPauseTbCursor
void (*resumeTableMetaCursor)(SMTbCursor* pTbCur, int8_t first); // metaResumeTbCursor
int32_t (*cursorNext)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorNext
int32_t (*cursorPrev)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorPrev
int32_t (*getTableTags)(void *pVnode, uint64_t suid, SArray *uidList);
int32_t (*getTableTagsByUid)(void *pVnode, int64_t suid, SArray *uidList);
const void *(*extractTagVal)(const void *tag, int16_t type, STagVal *tagVal); // todo remove it
int32_t (*getTableTags)(void* pVnode, uint64_t suid, SArray* uidList);
int32_t (*getTableTagsByUid)(void* pVnode, int64_t suid, SArray* uidList);
const void* (*extractTagVal)(const void* tag, int16_t type, STagVal* tagVal); // todo remove it
int32_t (*getTableUidByName)(void *pVnode, char *tbName, uint64_t *uid);
int32_t (*getTableTypeByName)(void *pVnode, char *tbName, ETableType *tbType);
int32_t (*getTableNameByUid)(void *pVnode, uint64_t uid, char *tbName);
bool (*isTableExisted)(void *pVnode, tb_uid_t uid);
int32_t (*getTableUidByName)(void* pVnode, char* tbName, uint64_t* uid);
int32_t (*getTableTypeByName)(void* pVnode, char* tbName, ETableType* tbType);
int32_t (*getTableNameByUid)(void* pVnode, uint64_t uid, char* tbName);
bool (*isTableExisted)(void* pVnode, tb_uid_t uid);
int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen);
int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen);
int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1, bool* acquireRes);
int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t payloadLen, double selectivityRatio);
int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes);
int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio);
void *(*storeGetIndexInfo)();
void *(*getInvertIndex)(void* pVnode);
int32_t (*getChildTableList)(void *pVnode, int64_t suid, SArray *list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
void *storeGetVersionRange;
void *storeGetLastTimestamp;
void* (*storeGetIndexInfo)();
void* (*getInvertIndex)(void* pVnode);
int32_t (*getChildTableList)(
void* pVnode, int64_t suid,
SArray* list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
void* storeGetVersionRange;
void* storeGetLastTimestamp;
int32_t (*getTableSchema)(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid); // tsdbGetTableSchema
int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
// db name, vgId, numOfTables, numOfSTables
int32_t (*getNumOfChildTables)(void* pVnode, int64_t uid, int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
void (*getBasicInfo)(void *pVnode, const char **dbname, int32_t *vgId, int64_t* numOfTables, int64_t* numOfNormalTables);// vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int32_t (*getNumOfChildTables)(
void* pVnode, int64_t uid,
int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int64_t (*getNumOfRowsInMem)(void* pVnode);
/**
@ -298,24 +311,24 @@ int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
} SStoreMeta;
typedef struct SStoreMetaReader {
void (*initReader)(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta* pAPI);
void (*clearReader)(SMetaReader *pReader);
void (*readerReleaseLock)(SMetaReader *pReader);
int32_t (*getTableEntryByUid)(SMetaReader *pReader, tb_uid_t uid);
int32_t (*getTableEntryByName)(SMetaReader *pReader, const char *name);
int32_t (*getEntryGetUidCache)(SMetaReader *pReader, tb_uid_t uid);
void (*initReader)(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI);
void (*clearReader)(SMetaReader* pReader);
void (*readerReleaseLock)(SMetaReader* pReader);
int32_t (*getTableEntryByUid)(SMetaReader* pReader, tb_uid_t uid);
int32_t (*getTableEntryByName)(SMetaReader* pReader, const char* name);
int32_t (*getEntryGetUidCache)(SMetaReader* pReader, tb_uid_t uid);
} SStoreMetaReader;
typedef struct SUpdateInfo {
SArray *pTsBuckets;
SArray* pTsBuckets;
uint64_t numBuckets;
SArray *pTsSBFs;
SArray* pTsSBFs;
uint64_t numSBFs;
int64_t interval;
int64_t watermark;
TSKEY minTS;
SScalableBf *pCloseWinSBF;
SHashObj *pMap;
SScalableBf* pCloseWinSBF;
SHashObj* pMap;
uint64_t maxDataVersion;
} SUpdateInfo;
@ -334,15 +347,15 @@ typedef struct SStateStore {
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
void (*streamStateFreeVal)(void* val);
void (*streamStateFreeVal)(void* val);
int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key);
bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateGetByPos)(SStreamState* pState, void* pos, void** pVal);
int32_t (*streamStateDel)(SStreamState* pState, const SWinKey* key);
int32_t (*streamStateClear)(SStreamState* pState);
void (*streamStateSetNumber)(SStreamState* pState, int32_t number);
void (*streamStateSetNumber)(SStreamState* pState, int32_t number);
int32_t (*streamStateSaveInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
@ -353,36 +366,37 @@ typedef struct SStateStore {
int32_t (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
int32_t (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key);
SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur);
SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key);
SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key);
SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
void (*streamStateFreeCur)(SStreamStateCur* pCur);
int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
int32_t* pVLen);
int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
int32_t (*streamStateSessionClear)(SStreamState* pState);
int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo *pInfo, int64_t tbUid);
void (*updateInfoDestroy)(SUpdateInfo *pInfo);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
void (*updateInfoDestroy)(SUpdateInfo* pInfo);
SUpdateInfo* (*updateInfoInitP)(SInterval *pInterval, int64_t watermark);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo *pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo *pInfo);
int32_t (*updateInfoSerialize)(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
int32_t (*updateInfoDeserialize)(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);
int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
@ -396,11 +410,11 @@ typedef struct SStateStore {
bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);
SStreamState* (*streamStateOpen)(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
void (*streamStateClose)(SStreamState* pState, bool remove);
int32_t (*streamStateBegin)(SStreamState* pState);
int32_t (*streamStateCommit)(SStreamState* pState);
void (*streamStateDestroy)(SStreamState* pState, bool remove);
int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark);
void (*streamStateClose)(SStreamState* pState, bool remove);
int32_t (*streamStateBegin)(SStreamState* pState);
int32_t (*streamStateCommit)(SStreamState* pState);
void (*streamStateDestroy)(SStreamState* pState, bool remove);
int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark);
} SStateStore;
typedef struct SStorageAPI {

View File

@ -233,6 +233,7 @@ bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType);
SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);

View File

@ -111,6 +111,12 @@ int32_t udfStartUdfd(int32_t startDnodeId);
*/
int32_t udfStopUdfd();
/**
 * get the process id of the running udfd daemon
*
*/
int32_t udfGetUdfdPid(int32_t* pUdfdPid);
#ifdef __cplusplus
}
#endif

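A minimal usage sketch for the udfGetUdfdPid() declaration above (the zero-on-success return convention and the header name are assumptions; the declaration does not document them):

#include <stdint.h>
#include <stdio.h>
// #include "tudf.h"  /* assumed header that declares udfGetUdfdPid() */

static void reportUdfdPid(void) {
  int32_t pid = 0;
  if (udfGetUdfdPid(&pid) == 0) {  /* assumed: 0 means success */
    printf("udfd is running, pid:%d\n", (int)pid);
  } else {
    printf("udfd pid not available\n");
  }
}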
View File

@ -425,16 +425,18 @@ typedef struct SStreamOptions {
} SStreamOptions;
typedef struct SCreateStreamStmt {
ENodeType type;
char streamName[TSDB_TABLE_NAME_LEN];
char targetDbName[TSDB_DB_NAME_LEN];
char targetTabName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SStreamOptions* pOptions;
SNode* pQuery;
SNodeList* pTags;
SNode* pSubtable;
SNodeList* pCols;
ENodeType type;
char streamName[TSDB_TABLE_NAME_LEN];
char targetDbName[TSDB_DB_NAME_LEN];
char targetTabName[TSDB_TABLE_NAME_LEN];
bool ignoreExists;
SStreamOptions* pOptions;
SNode* pQuery;
SNode* pPrevQuery;
SNodeList* pTags;
SNode* pSubtable;
SNodeList* pCols;
SCMCreateStreamReq* pReq;
} SCreateStreamStmt;
typedef struct SDropStreamStmt {

View File

@ -328,6 +328,8 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* p
SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList);
bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList);
// Only clear the linked list structure, without releasing the elements inside
void nodesClearList(SNodeList* pList);
@ -346,6 +348,7 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon
void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext);
bool nodesEqualNode(const SNode* a, const SNode* b);
bool nodesMatchNode(const SNode* pSub, const SNode* pNode);
SNode* nodesCloneNode(const SNode* pNode);
SNodeList* nodesCloneList(const SNodeList* pList);

View File

@ -53,6 +53,8 @@ typedef struct SLogicNode {
EDataOrderLevel requireDataOrder; // requirements for input data
EDataOrderLevel resultDataOrder; // properties of the output data
EGroupAction groupAction;
EOrder inputTsOrder;
EOrder outputTsOrder;
} SLogicNode;
typedef enum EScanType {
@ -111,7 +113,6 @@ typedef struct SJoinLogicNode {
SNode* pMergeCondition;
SNode* pOnConditions;
bool isSingleTableJoin;
EOrder inputTsOrder;
SNode* pColEqualOnConditions;
} SJoinLogicNode;
@ -229,8 +230,6 @@ typedef struct SWindowLogicNode {
int8_t igExpired;
int8_t igCheckUpdate;
EWindowAlgorithm windowAlgo;
EOrder inputTsOrder;
EOrder outputTsOrder;
} SWindowLogicNode;
typedef struct SFillLogicNode {
@ -241,13 +240,13 @@ typedef struct SFillLogicNode {
SNode* pWStartTs;
SNode* pValues; // SNodeListNode
STimeWindow timeRange;
EOrder inputTsOrder;
} SFillLogicNode;
typedef struct SSortLogicNode {
SLogicNode node;
SNodeList* pSortKeys;
bool groupSort;
int64_t maxRows;
} SSortLogicNode;
typedef struct SPartitionLogicNode {
@ -310,6 +309,8 @@ typedef struct SDataBlockDescNode {
typedef struct SPhysiNode {
ENodeType type;
EOrder inputTsOrder;
EOrder outputTsOrder;
SDataBlockDescNode* pOutputDataBlockDesc;
SNode* pConditions;
SNodeList* pChildren;
@ -406,7 +407,6 @@ typedef struct SSortMergeJoinPhysiNode {
SNode* pMergeCondition;
SNode* pOnConditions;
SNodeList* pTargets;
EOrder inputTsOrder;
SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode;
@ -460,8 +460,6 @@ typedef struct SWindowPhysiNode {
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
EOrder inputTsOrder;
EOrder outputTsOrder;
bool mergeDataBlock;
} SWindowPhysiNode;
@ -488,7 +486,6 @@ typedef struct SFillPhysiNode {
SNode* pWStartTs; // SColumnNode
SNode* pValues; // SNodeListNode
STimeWindow timeRange;
EOrder inputTsOrder;
} SFillPhysiNode;
typedef SFillPhysiNode SStreamFillPhysiNode;
@ -527,6 +524,7 @@ typedef struct SSortPhysiNode {
  SNodeList* pExprs;  // expression list of the order_by_clause and parameter expressions of aggregate functions
SNodeList* pSortKeys; // element is SOrderByExprNode, and SOrderByExprNode::pExpr is SColumnNode
SNodeList* pTargets;
int64_t maxRows;
} SSortPhysiNode;
typedef SSortPhysiNode SGroupSortPhysiNode;
@ -617,6 +615,7 @@ typedef struct SQueryPlan {
int32_t numOfSubplans;
SNodeList* pSubplans; // Element is SNodeListNode. The execution level of subplan, starting from 0.
SExplainInfo explainInfo;
void* pPostPlan;
} SQueryPlan;
const char* dataOrderStr(EDataOrderLevel order);

View File

@ -52,6 +52,7 @@ typedef struct SExprNode {
SArray* pAssociation;
bool orderAlias;
bool asAlias;
bool asParam;
} SExprNode;
typedef enum EColumnType {
@ -69,6 +70,7 @@ typedef struct SColumnNode {
uint64_t tableId;
int8_t tableType;
col_id_t colId;
  uint16_t projIdx;  // the idx in the projection list, starting from 1
EColumnType colType; // column or tag
bool hasIndex;
char dbName[TSDB_DB_NAME_LEN];
@ -241,6 +243,12 @@ typedef enum EFillMode {
FILL_MODE_NEXT
} EFillMode;
typedef enum ETimeLineMode {
TIME_LINE_NONE = 1,
TIME_LINE_MULTI,
TIME_LINE_GLOBAL,
} ETimeLineMode;
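// Assumed semantics (not documented at the declaration): TIME_LINE_NONE means
// the result carries no timestamp-order guarantee, TIME_LINE_MULTI means rows
// are ordered within each group/partition, and TIME_LINE_GLOBAL means the
// whole result set forms a single ordered timeline.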
typedef struct SFillNode {
ENodeType type; // QUERY_NODE_FILL
EFillMode mode;
@ -263,50 +271,50 @@ typedef struct SCaseWhenNode {
} SCaseWhenNode;
typedef struct SSelectStmt {
ENodeType type; // QUERY_NODE_SELECT_STMT
bool isDistinct;
SNodeList* pProjectionList;
SNode* pFromTable;
SNode* pWhere;
SNodeList* pPartitionByList;
SNodeList* pTags; // for create stream
SNode* pSubtable; // for create stream
SNode* pWindow;
SNodeList* pGroupByList; // SGroupingSetNode
SNode* pHaving;
SNode* pRange;
SNode* pEvery;
SNode* pFill;
SNodeList* pOrderByList; // SOrderByExprNode
SLimitNode* pLimit;
SLimitNode* pSlimit;
STimeWindow timeRange;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
int32_t selectFuncNum;
int32_t returnRows; // EFuncReturnRows
bool isEmptyResult;
bool isTimeLineResult;
bool isSubquery;
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool hasIndefiniteRowsFunc;
bool hasMultiRowsFunc;
bool hasSelectFunc;
bool hasSelectValFunc;
bool hasOtherVectorFunc;
bool hasUniqueFunc;
bool hasTailFunc;
bool hasInterpFunc;
bool hasInterpPseudoColFunc;
bool hasLastRowFunc;
bool hasLastFunc;
bool hasTimeLineFunc;
bool hasUdaf;
bool hasStateKey;
bool onlyHasKeepOrderFunc;
bool groupSort;
bool tagScan;
ENodeType type; // QUERY_NODE_SELECT_STMT
bool isDistinct;
SNodeList* pProjectionList;
SNode* pFromTable;
SNode* pWhere;
SNodeList* pPartitionByList;
SNodeList* pTags; // for create stream
SNode* pSubtable; // for create stream
SNode* pWindow;
SNodeList* pGroupByList; // SGroupingSetNode
SNode* pHaving;
SNode* pRange;
SNode* pEvery;
SNode* pFill;
SNodeList* pOrderByList; // SOrderByExprNode
SLimitNode* pLimit;
SLimitNode* pSlimit;
STimeWindow timeRange;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
int32_t selectFuncNum;
int32_t returnRows; // EFuncReturnRows
ETimeLineMode timeLineResMode;
bool isEmptyResult;
bool isSubquery;
bool hasAggFuncs;
bool hasRepeatScanFuncs;
bool hasIndefiniteRowsFunc;
bool hasMultiRowsFunc;
bool hasSelectFunc;
bool hasSelectValFunc;
bool hasOtherVectorFunc;
bool hasUniqueFunc;
bool hasTailFunc;
bool hasInterpFunc;
bool hasInterpPseudoColFunc;
bool hasLastRowFunc;
bool hasLastFunc;
bool hasTimeLineFunc;
bool hasUdaf;
bool hasStateKey;
bool onlyHasKeepOrderFunc;
bool groupSort;
bool tagScan;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@ -321,6 +329,7 @@ typedef struct SSetOperator {
SNode* pLimit;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
ETimeLineMode timeLineResMode;
} SSetOperator;
typedef enum ESqlClause {
@ -434,7 +443,9 @@ typedef struct SQuery {
EQueryExecStage execStage;
EQueryExecMode execMode;
bool haveResultSet;
SNode* pPrevRoot;
SNode* pRoot;
SNode* pPostRoot;
int32_t numOfResCols;
SSchema* pResSchema;
int8_t precision;

View File

@ -74,6 +74,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
const struct SMetaData* pMetaData, SQuery* pQuery);
int32_t qContinueParseSql(SParseContext* pCxt, struct SCatalogReq* pCatalogReq, const struct SMetaData* pMetaData,
SQuery* pQuery);
int32_t qContinueParsePostQuery(SParseContext* pCxt, SQuery* pQuery, void** pResRow);
void qDestroyParseContext(SParseContext* pCxt);

View File

@ -52,6 +52,7 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
// @groupId id of a group of datasource subplans of this @pSubplan
// @pSource one execution location of this group of datasource subplans
int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstreamSourceNode* pSource);
int32_t qContinuePlanPostQuery(void *pPostPlan);
void qClearSubplanExecutionNode(SSubplan* pSubplan);

View File

@ -281,7 +281,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
(_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID))
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) \
((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER || (_code) == TSDB_CODE_MND_INVALID_SCHEMA_VER)
#define NEED_CLIENT_HANDLE_ERROR(_code) \
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))

View File

@ -327,6 +327,7 @@ struct SStreamTask {
int64_t checkpointingId;
int32_t checkpointAlignCnt;
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
};
// meta

View File

@ -154,14 +154,14 @@ typedef struct SSnapshotMeta {
typedef struct SSyncFSM {
void* data;
int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
SyncIndex (*FpAppliedIndexCb)(const struct SSyncFSM* pFsm);
int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
void (*FpRestoreFinishCb)(const struct SSyncFSM* pFsm, const SyncIndex commitIdx);
void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SReConfigCbMeta* pMeta);
void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SReConfigCbMeta* pMeta);
void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, SFsmCbMeta* pMeta);
bool (*FpApplyQueueEmptyCb)(const struct SSyncFSM* pFsm);
int32_t (*FpApplyQueueItems)(const struct SSyncFSM* pFsm);

View File

@ -66,8 +66,8 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RPC_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0018) //
#define TSDB_CODE_RPC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x0019) //
#define TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED TAOS_DEF_ERROR_CODE(0, 0x0020) // "Vgroup could not be connected"
#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) //
#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) //
#define TSDB_CODE_RPC_SOMENODE_BROKEN_LINK TAOS_DEF_ERROR_CODE(0, 0x0021) //
#define TSDB_CODE_RPC_MAX_SESSIONS TAOS_DEF_ERROR_CODE(0, 0x0022) //
@ -277,7 +277,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_INVALID_FUNC_COMMENT TAOS_DEF_ERROR_CODE(0, 0x0378)
#define TSDB_CODE_MND_INVALID_FUNC_RETRIEVE TAOS_DEF_ERROR_CODE(0, 0x0379)
// mnode-db
#define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380)
@ -288,9 +288,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_TOO_MANY_DATABASES TAOS_DEF_ERROR_CODE(0, 0x0385)
#define TSDB_CODE_MND_DB_IN_DROPPING TAOS_DEF_ERROR_CODE(0, 0x0386) //
// #define TSDB_CODE_MND_VGROUP_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0387) // 2.x
#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) //
#define TSDB_CODE_MND_DB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0388) //
#define TSDB_CODE_MND_INVALID_DB_ACCT TAOS_DEF_ERROR_CODE(0, 0x0389) // internal
#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) //
#define TSDB_CODE_MND_DB_OPTION_UNCHANGED TAOS_DEF_ERROR_CODE(0, 0x038A) //
#define TSDB_CODE_MND_DB_INDEX_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x038B)
#define TSDB_CODE_MND_DB_RETENTION_PERIOD_ZERO TAOS_DEF_ERROR_CODE(0, 0x038C)
// #define TSDB_CODE_MND_INVALID_DB_OPTION_DAYS TAOS_DEF_ERROR_CODE(0, 0x0390) // 2.x
@ -345,7 +345,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_TRANS_CLOG_IS_NULL TAOS_DEF_ERROR_CODE(0, 0x03D4)
#define TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL TAOS_DEF_ERROR_CODE(0, 0x03D5)
#define TSDB_CODE_MND_LAST_TRANS_NOT_FINISHED TAOS_DEF_ERROR_CODE(0, 0x03D6) //internal
#define TSDB_CODE_MND_TRNAS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7)
#define TSDB_CODE_MND_TRANS_SYNC_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x03D7)
#define TSDB_CODE_MND_TRANS_UNKNOW_ERROR TAOS_DEF_ERROR_CODE(0, 0x03DF)
// mnode-mq
@ -516,6 +516,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x072E)
#define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x072F)
#define TSDB_CODE_QRY_QWORKER_QUIT TAOS_DEF_ERROR_CODE(0, 0x0730)
#define TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x0731)
// grant
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
@ -780,7 +781,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x5101)
// UTIL
#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000)
#define TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x6000)
#ifdef __cplusplus
}

View File

@ -244,7 +244,7 @@ int32_t taosArraySearchIdx(const SArray* pArray, const void* key, __compar_fn_t
void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void* param);
int32_t taosEncodeArray(void** buf, const SArray* pArray, FEncode encode);
void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver);
void* taosDecodeArray(const void* buf, SArray** pArray, FDecode decode, int32_t dataSz, int8_t sver);
#ifdef __cplusplus
}

View File

@ -79,6 +79,7 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight);
int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight);
int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight);
int32_t compareLenBinaryVal(const void *pLeft, const void *pRight);
int32_t comparestrRegexMatch(const void *pLeft, const void *pRight);
int32_t comparestrRegexNMatch(const void *pLeft, const void *pRight);

View File

@ -32,7 +32,7 @@ extern "C" {
#define TD_VER_MAX UINT64_MAX // TODO: use the real max version from query handle
// Bytes for each type.
extern const int32_t TYPE_BYTES[17];
extern const int32_t TYPE_BYTES[21];
// TODO: replace and remove code below
#define CHAR_BYTES sizeof(char)
@ -195,6 +195,7 @@ typedef enum ELogicConditionType {
#define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string
#define TSDB_TOPIC_NAME_LEN 193 // it is a null-terminated string
#define TSDB_CGROUP_LEN 193 // it is a null-terminated string
#define TSDB_OFFSET_LEN 64 // it is a null-terminated string
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 65

View File

@ -17,6 +17,7 @@
#define _TD_UTIL_HEAP_H_
#include "os.h"
#include "tarray.h"
#ifdef __cplusplus
extern "C" {
@ -58,6 +59,48 @@ void heapDequeue(Heap* heap);
size_t heapSize(Heap* heap);
typedef bool (*pq_comp_fn)(void* l, void* r, void* param);
typedef struct PriorityQueueNode {
void* data;
} PriorityQueueNode;
typedef struct PriorityQueue PriorityQueue;
PriorityQueue* createPriorityQueue(pq_comp_fn fn, FDelete deleteFn, void* param);
void taosPQSetFn(PriorityQueue* pq, pq_comp_fn fn);
void destroyPriorityQueue(PriorityQueue* pq);
PriorityQueueNode* taosPQTop(PriorityQueue* pq);
size_t taosPQSize(PriorityQueue* pq);
void taosPQPush(PriorityQueue* pq, const PriorityQueueNode* node);
void taosPQPop(PriorityQueue* pq);
typedef struct BoundedQueue BoundedQueue;
BoundedQueue* createBoundedQueue(uint32_t maxSize, pq_comp_fn fn, FDelete deleteFn, void* param);
void taosBQSetFn(BoundedQueue* q, pq_comp_fn fn);
void destroyBoundedQueue(BoundedQueue* q);
void taosBQPush(BoundedQueue* q, PriorityQueueNode* n);
PriorityQueueNode* taosBQTop(BoundedQueue* q);
size_t taosBQSize(BoundedQueue* q);
size_t taosBQMaxSize(BoundedQueue* q);
void taosBQBuildHeap(BoundedQueue* q);
void taosBQPop(BoundedQueue* q);
#ifdef __cplusplus
}
#endif

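The BoundedQueue declared above is a fixed-capacity heap, which makes it a natural fit for top-k selection. A minimal sketch follows; the comparator contract (returning true to keep l above r) and the int payloads are assumptions for illustration, not documented behavior:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
// #include "theap.h"  /* assumed header for the queue API above */

static bool cmpIntAsc(void* l, void* r, void* param) {
  (void)param;
  return *(int*)l < *(int*)r;  /* assumed: true keeps l above r */
}

static void printTopK(int* values, int n, uint32_t k) {
  /* NULL FDelete: the queue does not own the int payloads */
  BoundedQueue* q = createBoundedQueue(k, cmpIntAsc, NULL, NULL);
  for (int i = 0; i < n; ++i) {
    PriorityQueueNode node = {.data = &values[i]};
    taosBQPush(q, &node);  /* at most k nodes are retained */
  }
  while (taosBQSize(q) > 0) {
    printf("%d\n", *(int*)taosBQTop(q)->data);
    taosBQPop(q);
  }
  destroyBoundedQueue(q);
}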
View File

@ -63,7 +63,7 @@ typedef struct {
// statistics
int32_t reportCnt;
int32_t connKeyCnt;
int32_t passKeyCnt; // with passVer call back
int8_t connHbFlag; // 0 init, 1 send req, 2 get resp
int64_t reportBytes; // not implemented
int64_t startTime;
// ctl
@ -83,8 +83,9 @@ typedef struct {
int8_t threadStop;
int8_t quitByKill;
TdThread thread;
TdThreadMutex lock; // used when app init and cleanup
TdThreadMutex lock; // used when app init and cleanup
SHashObj* appSummary;
SHashObj* appHbHash; // key: clusterId
SArray* appHbMgrs; // SArray<SAppHbMgr*> one for each cluster
FHbReqHandle reqHandle[CONN_TYPE__MAX];
FHbRspHandle rspHandle[CONN_TYPE__MAX];
@ -146,6 +147,7 @@ typedef struct STscObj {
int64_t id; // ref ID returned by taosAddRef
TdThreadMutex mutex; // used to protect the operation on db
int32_t numOfReqs; // number of sqlObj bound to this connection
int32_t authVer;
SAppInstInfo* pAppInfo;
SHashObj* pRequests;
SPassInfo passInfo;
@ -227,6 +229,12 @@ typedef struct {
STaosxRsp rsp;
} SMqTaosxRspObj;
typedef struct SReqRelInfo {
uint64_t userRefId;
uint64_t prevRefId;
uint64_t nextRefId;
} SReqRelInfo;
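// The three ref ids above chain related requests into a doubly linked list:
// prevRefId/nextRefId point at the neighbouring sub-requests, while userRefId
// identifies the request the application actually issued; destroySubRequests()
// and stopAllQueries() below walk this chain in both directions.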
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@ -250,10 +258,14 @@ typedef struct SRequestObj {
bool validateOnly; // todo refactor
bool killed;
bool inRetry;
bool isSubReq;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
int64_t allocatorRefId;
SQuery* pQuery;
void* pPostPlan;
SReqRelInfo relation;
void* pWrapper;
} SRequestObj;
typedef struct SSyncQueryParam {
@ -279,6 +291,7 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly,
int64_t reqid);
void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param);
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
@ -368,6 +381,7 @@ typedef struct SSqlCallbackWrapper {
SParseContext* pParseCtx;
SCatalogReq* pCatalogReq;
SRequestObj* pRequest;
void* pPlanInfo;
} SSqlCallbackWrapper;
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res);
@ -382,6 +396,12 @@ int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
void continueInsertFromCsv(SSqlCallbackWrapper* pWrapper, SRequestObj* pRequest);
void destorySqlCallbackWrapper(SSqlCallbackWrapper* pWrapper);
void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code);
void restartAsyncQuery(SRequestObj *pRequest, int32_t code);
int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest);
int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce);
void returnToUser(SRequestObj* pRequest);
void stopAllQueries(SRequestObj *pRequest);
#ifdef __cplusplus
}

View File

@ -358,6 +358,49 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri
int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); }
void destroySubRequests(SRequestObj *pRequest) {
int32_t reqIdx = -1;
SRequestObj *pReqList[16] = {NULL};
uint64_t tmpRefId = 0;
if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) {
return;
}
SRequestObj* pTmp = pRequest;
while (pTmp->relation.prevRefId) {
tmpRefId = pTmp->relation.prevRefId;
pTmp = acquireRequest(tmpRefId);
if (pTmp) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self,
tmpRefId, pTmp->requestId);
break;
}
}
for (int32_t i = reqIdx; i >= 0; i--) {
removeRequest(pReqList[i]->self);
}
tmpRefId = pRequest->relation.nextRefId;
while (tmpRefId) {
pTmp = acquireRequest(tmpRefId);
if (pTmp) {
tmpRefId = pTmp->relation.nextRefId;
removeRequest(pTmp->self);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
break;
}
}
}
void doDestroyRequest(void *p) {
if (NULL == p) {
return;
@ -368,10 +411,14 @@ void doDestroyRequest(void *p) {
uint64_t reqId = pRequest->requestId;
tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, pRequest);
destroySubRequests(pRequest);
taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
schedulerFreeJob(&pRequest->body.queryJob, 0);
destorySqlCallbackWrapper(pRequest->pWrapper);
taosMemoryFreeClear(pRequest->msgBuf);
taosMemoryFreeClear(pRequest->pDb);
@ -412,6 +459,63 @@ void destroyRequest(SRequestObj *pRequest) {
removeRequest(pRequest->self);
}
void taosStopQueryImpl(SRequestObj *pRequest) {
pRequest->killed = true;
// It is not a query, no need to stop.
if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
return;
}
schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
tscDebug("request %" PRIx64 " killed", pRequest->requestId);
}
void stopAllQueries(SRequestObj *pRequest) {
int32_t reqIdx = -1;
SRequestObj *pReqList[16] = {NULL};
uint64_t tmpRefId = 0;
if (pRequest->relation.userRefId && pRequest->relation.userRefId != pRequest->self) {
return;
}
SRequestObj* pTmp = pRequest;
while (pTmp->relation.prevRefId) {
tmpRefId = pTmp->relation.prevRefId;
pTmp = acquireRequest(tmpRefId);
if (pTmp) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self,
tmpRefId, pTmp->requestId);
break;
}
}
for (int32_t i = reqIdx; i >= 0; i--) {
taosStopQueryImpl(pReqList[i]);
}
taosStopQueryImpl(pRequest);
tmpRefId = pRequest->relation.nextRefId;
while (tmpRefId) {
pTmp = acquireRequest(tmpRefId);
if (pTmp) {
tmpRefId = pTmp->relation.nextRefId;
taosStopQueryImpl(pTmp);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
break;
}
}
}
void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }
static void *tscCrashReportThreadFp(void *param) {

View File

@ -22,10 +22,10 @@
typedef struct {
union {
struct {
int64_t clusterId;
int32_t passKeyCnt;
int32_t passVer;
int32_t reqCnt;
SAppHbMgr *pAppHbMgr;
int64_t clusterId;
int32_t reqCnt;
int8_t connHbFlag;
};
};
} SHbParam;
@ -34,12 +34,14 @@ static SClientHbMgr clientHbMgr = {0};
static int32_t hbCreateThread();
static void hbStopThread();
static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *batchRsp);
static int32_t hbMqHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req) { return 0; }
static int32_t hbMqHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { return 0; }
static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog) {
static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SCatalog *pCatalog,
SAppHbMgr *pAppHbMgr) {
int32_t code = 0;
SUserAuthBatchRsp batchRsp = {0};
@ -56,54 +58,68 @@ static int32_t hbProcessUserAuthInfoRsp(void *value, int32_t valueLen, struct SC
catalogUpdateUserAuthInfo(pCatalog, rsp);
}
if (numOfBatchs > 0) hbUpdateUserAuthInfo(pAppHbMgr, &batchRsp);
atomic_val_compare_exchange_8(&pAppHbMgr->connHbFlag, 1, 2);
taosArrayDestroy(batchRsp.pArray);
return TSDB_CODE_SUCCESS;
}
static int32_t hbProcessUserPassInfoRsp(void *value, int32_t valueLen, SClientHbKey *connKey, SAppHbMgr *pAppHbMgr) {
int32_t code = 0;
int32_t numOfBatchs = 0;
SUserPassBatchRsp batchRsp = {0};
if (tDeserializeSUserPassBatchRsp(value, valueLen, &batchRsp) != 0) {
code = TSDB_CODE_INVALID_MSG;
return code;
}
numOfBatchs = taosArrayGetSize(batchRsp.pArray);
SClientHbReq *pReq = NULL;
while ((pReq = taosHashIterate(pAppHbMgr->activeInfo, pReq))) {
STscObj *pTscObj = (STscObj *)acquireTscObj(pReq->connKey.tscRid);
if (!pTscObj) {
continue;
}
SPassInfo *passInfo = &pTscObj->passInfo;
if (!passInfo->fp) {
releaseTscObj(pReq->connKey.tscRid);
static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *batchRsp) {
uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId;
for (int i = 0; i < TARRAY_SIZE(clientHbMgr.appHbMgrs); ++i) {
SAppHbMgr *hbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
if (!hbMgr || hbMgr->pAppInstInfo->clusterId != clusterId) {
continue;
}
for (int32_t i = 0; i < numOfBatchs; ++i) {
SGetUserPassRsp *rsp = taosArrayGet(batchRsp.pArray, i);
if (0 == strncmp(rsp->user, pTscObj->user, TSDB_USER_LEN)) {
int32_t oldVer = atomic_load_32(&passInfo->ver);
if (oldVer < rsp->version) {
atomic_store_32(&passInfo->ver, rsp->version);
if (passInfo->fp) {
(*passInfo->fp)(passInfo->param, &passInfo->ver, TAOS_NOTIFY_PASSVER);
SClientHbReq *pReq = NULL;
SGetUserAuthRsp *pRsp = NULL;
while ((pReq = taosHashIterate(hbMgr->activeInfo, pReq))) {
STscObj *pTscObj = (STscObj *)acquireTscObj(pReq->connKey.tscRid);
if (!pTscObj) {
continue;
}
if (!pRsp) {
for (int32_t j = 0; j < TARRAY_SIZE(batchRsp->pArray); ++j) {
SGetUserAuthRsp *rsp = TARRAY_GET_ELEM(batchRsp->pArray, j);
if (0 == strncmp(rsp->user, pTscObj->user, TSDB_USER_LEN)) {
pRsp = rsp;
break;
}
tscDebug("update passVer of user %s from %d to %d, tscRid:%" PRIi64, rsp->user, oldVer,
}
if (!pRsp) {
releaseTscObj(pReq->connKey.tscRid);
break;
}
}
pTscObj->authVer = pRsp->version;
if (pTscObj->sysInfo != pRsp->sysInfo) {
tscDebug("update sysInfo of user %s from %" PRIi8 " to %" PRIi8 ", tscRid:%" PRIi64, pRsp->user,
pTscObj->sysInfo, pRsp->sysInfo, pTscObj->id);
pTscObj->sysInfo = pRsp->sysInfo;
}
if (pTscObj->passInfo.fp) {
SPassInfo *passInfo = &pTscObj->passInfo;
int32_t oldVer = atomic_load_32(&passInfo->ver);
if (oldVer < pRsp->passVer) {
atomic_store_32(&passInfo->ver, pRsp->passVer);
if (passInfo->fp) {
(*passInfo->fp)(passInfo->param, &pRsp->passVer, TAOS_NOTIFY_PASSVER);
}
tscDebug("update passVer of user %s from %d to %d, tscRid:%" PRIi64, pRsp->user, oldVer,
atomic_load_32(&passInfo->ver), pTscObj->id);
}
break;
}
releaseTscObj(pReq->connKey.tscRid);
}
releaseTscObj(pReq->connKey.tscRid);
}
taosArrayDestroy(batchRsp.pArray);
return code;
return 0;
}
static int32_t hbGenerateVgInfoFromRsp(SDBVgInfo **pInfo, SUseDbRsp *rsp) {
@ -316,7 +332,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
break;
}
hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog);
hbProcessUserAuthInfoRsp(kv->value, kv->valueLen, pCatalog, pAppHbMgr);
break;
}
case HEARTBEAT_KEY_DBINFO: {
@ -353,15 +369,6 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
hbProcessStbInfoRsp(kv->value, kv->valueLen, pCatalog);
break;
}
case HEARTBEAT_KEY_USER_PASSINFO: {
if (kv->valueLen <= 0 || NULL == kv->value) {
tscError("invalid hb user pass info, len:%d, value:%p", kv->valueLen, kv->value);
break;
}
hbProcessUserPassInfoRsp(kv->value, kv->valueLen, &pRsp->connKey, pAppHbMgr);
break;
}
default:
tscError("invalid hb key type:%d", kv->key);
break;
@ -464,6 +471,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
desc.useconds = now - pRequest->metric.start;
desc.reqRid = pRequest->self;
desc.stableQuery = pRequest->stableQuery;
desc.isSubQuery = pRequest->isSubReq;
taosGetFqdn(desc.fqdn);
desc.subPlanNum = pRequest->body.subplanNum;
@ -542,7 +550,7 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
return TSDB_CODE_SUCCESS;
}
static int32_t hbGetUserBasicInfo(SClientHbKey *connKey, SHbParam *param, SClientHbReq *req) {
static int32_t hbGetUserAuthInfo(SClientHbKey *connKey, SHbParam *param, SClientHbReq *req) {
STscObj *pTscObj = (STscObj *)acquireTscObj(connKey->tscRid);
if (!pTscObj) {
tscWarn("tscObj rid %" PRIx64 " not exist", connKey->tscRid);
@ -551,46 +559,61 @@ static int32_t hbGetUserBasicInfo(SClientHbKey *connKey, SHbParam *param, SClien
int32_t code = 0;
if (param && (param->passVer != INT32_MIN) && (param->passVer <= pTscObj->passInfo.ver)) {
tscDebug("hb got user basic info, no need since passVer %d <= %d", param->passVer, pTscObj->passInfo.ver);
SKv kv = {.key = HEARTBEAT_KEY_USER_AUTHINFO};
SKv *pKv = NULL;
if ((pKv = taosHashGet(req->info, &kv.key, sizeof(kv.key)))) {
int32_t userNum = pKv->valueLen / sizeof(SUserAuthVersion);
SUserAuthVersion *userAuths = (SUserAuthVersion *)pKv->value;
for (int32_t i = 0; i < userNum; ++i) {
SUserAuthVersion *pUserAuth = userAuths + i;
// both key and user exist, update version
if (strncmp(pUserAuth->user, pTscObj->user, TSDB_USER_LEN) == 0) {
pUserAuth->version = htonl(-1); // force get userAuthInfo
goto _return;
}
}
// key exists, user not exist, append user
SUserAuthVersion *qUserAuth =
(SUserAuthVersion *)taosMemoryRealloc(pKv->value, (userNum + 1) * sizeof(SUserAuthVersion));
if (qUserAuth) {
strncpy((qUserAuth + userNum)->user, pTscObj->user, TSDB_USER_LEN);
(qUserAuth + userNum)->version = htonl(-1); // force get userAuthInfo
pKv->value = qUserAuth;
pKv->valueLen += sizeof(SUserAuthVersion);
} else {
code = TSDB_CODE_OUT_OF_MEMORY;
}
goto _return;
}
SUserPassVersion *user = taosMemoryMalloc(sizeof(SUserPassVersion));
// key/user not exist, add user
SUserAuthVersion *user = taosMemoryMalloc(sizeof(SUserAuthVersion));
if (!user) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _return;
}
strncpy(user->user, pTscObj->user, TSDB_USER_LEN);
user->version = htonl(pTscObj->passInfo.ver);
user->version = htonl(-1); // force get userAuthInfo
kv.valueLen = sizeof(SUserAuthVersion);
kv.value = user;
SKv kv = {
.key = HEARTBEAT_KEY_USER_PASSINFO,
.valueLen = sizeof(SUserPassVersion),
.value = user,
};
tscDebug("hb got user basic info, valueLen:%d, user:%s, passVer:%d, tscRid:%" PRIi64, kv.valueLen, user->user,
pTscObj->passInfo.ver, connKey->tscRid);
tscDebug("hb got user auth info, valueLen:%d, user:%s, authVer:%d, tscRid:%" PRIi64, kv.valueLen, user->user,
pTscObj->authVer, connKey->tscRid);
if (!req->info) {
req->info = taosHashInit(64, hbKeyHashFunc, 1, HASH_ENTRY_LOCK);
}
if (taosHashPut(req->info, &kv.key, sizeof(kv.key), &kv, sizeof(kv)) < 0) {
taosMemoryFree(user);
code = terrno ? terrno : TSDB_CODE_APP_ERROR;
goto _return;
}
// assign the passVer
if (param) {
param->passVer = pTscObj->passInfo.ver;
}
_return:
releaseTscObj(connKey->tscRid);
if (code) {
tscError("hb got user basic info failed since %s", terrstr(code));
tscError("hb got user auth info failed since %s", terrstr(code));
}
return code;
@ -748,14 +771,21 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
hbGetQueryBasicInfo(connKey, req);
if (hbParam->passKeyCnt > 0) {
hbGetUserBasicInfo(connKey, hbParam, req);
}
if (hbParam->reqCnt == 0) {
code = hbGetExpiredUserInfo(connKey, pCatalog, req);
if (TSDB_CODE_SUCCESS != code) {
return code;
if (!taosHashGet(clientHbMgr.appHbHash, &hbParam->clusterId, sizeof(hbParam->clusterId))) {
code = hbGetExpiredUserInfo(connKey, pCatalog, req);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
}
// invoke after hbGetExpiredUserInfo
if (2 != atomic_load_8(&hbParam->pAppHbMgr->connHbFlag)) {
code = hbGetUserAuthInfo(connKey, hbParam, req);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
atomic_store_8(&hbParam->pAppHbMgr->connHbFlag, 1);
}
code = hbGetExpiredDBInfo(connKey, pCatalog, req);
@ -769,7 +799,7 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
}
}
++hbParam->reqCnt; // success to get catalog info
++hbParam->reqCnt; // success to get catalog info
return TSDB_CODE_SUCCESS;
}
@ -814,9 +844,9 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
if (param.clusterId == 0) {
// init
param.clusterId = pOneReq->clusterId;
param.passVer = INT32_MIN;
param.pAppHbMgr = pAppHbMgr;
param.connHbFlag = atomic_load_8(&pAppHbMgr->connHbFlag);
}
param.passKeyCnt = atomic_load_32(&pAppHbMgr->passKeyCnt);
break;
}
default:
@ -900,6 +930,10 @@ static void *hbThreadFunc(void *param) {
int sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
if (sz > 0) {
hbGatherAppInfo();
if (sz > 1 && !clientHbMgr.appHbHash) {
clientHbMgr.appHbHash = taosHashInit(0, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
}
taosHashClear(clientHbMgr.appHbHash);
}
for (int i = 0; i < sz; i++) {
@ -952,7 +986,7 @@ static void *hbThreadFunc(void *param) {
asyncSendMsgToServer(pAppInstInfo->pTransporter, &epSet, &transporterId, pInfo);
tFreeClientHbBatchReq(pReq);
// hbClearReqInfo(pAppHbMgr);
taosHashPut(clientHbMgr.appHbHash, &pAppHbMgr->pAppInstInfo->clusterId, sizeof(uint64_t), NULL, 0);
atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1);
}
@ -960,6 +994,7 @@ static void *hbThreadFunc(void *param) {
taosMsleep(HEARTBEAT_INTERVAL);
}
taosHashCleanup(clientHbMgr.appHbHash);
return NULL;
}
@ -1008,7 +1043,7 @@ SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
// init stat
pAppHbMgr->startTime = taosGetTimestampMs();
pAppHbMgr->connKeyCnt = 0;
pAppHbMgr->passKeyCnt = 0;
pAppHbMgr->connHbFlag = 0;
pAppHbMgr->reportCnt = 0;
pAppHbMgr->reportBytes = 0;
pAppHbMgr->key = taosStrdup(key);
@ -1126,7 +1161,6 @@ void hbMgrCleanUp() {
appHbMgrCleanup();
taosArrayDestroy(clientHbMgr.appHbMgrs);
taosThreadMutexUnlock(&clientHbMgr.lock);
clientHbMgr.appHbMgrs = NULL;
}
@ -1179,12 +1213,6 @@ void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
}
atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
taosThreadMutexLock(&pTscObj->mutex);
if (pTscObj->passInfo.fp) {
atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1);
}
taosThreadMutexUnlock(&pTscObj->mutex);
}
// set heartbeat thread quit mode: if quitByKill is 1, kill the thread; otherwise quit from inside

View File

@ -237,6 +237,17 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
return TSDB_CODE_SUCCESS;
}
int32_t buildPreviousRequest(SRequestObj *pRequest, const char* sql, SRequestObj** pNewRequest) {
int32_t code = buildRequest(pRequest->pTscObj->id, sql, strlen(sql), pRequest, pRequest->validateOnly, pNewRequest, 0);
if (TSDB_CODE_SUCCESS == code) {
pRequest->relation.prevRefId = (*pNewRequest)->self;
(*pNewRequest)->relation.nextRefId = pRequest->self;
(*pNewRequest)->relation.userRefId = pRequest->self;
(*pNewRequest)->isSubReq = true;
}
return code;
}
int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtCallback* pStmtCb) {
STscObj* pTscObj = pRequest->pTscObj;
@ -878,6 +889,81 @@ static bool incompletaFileParsing(SNode* pStmt) {
return QUERY_NODE_VNODE_MODIFY_STMT != nodeType(pStmt) ? false : ((SVnodeModifyOpStmt*)pStmt)->fileProcessing;
}
void continuePostSubQuery(SRequestObj* pRequest, TAOS_ROW row) {
SSqlCallbackWrapper* pWrapper = pRequest->pWrapper;
int32_t code = nodesAcquireAllocator(pWrapper->pParseCtx->allocatorId);
if (TSDB_CODE_SUCCESS == code) {
int64_t analyseStart = taosGetTimestampUs();
code = qContinueParsePostQuery(pWrapper->pParseCtx, pRequest->pQuery, (void**)row);
pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
}
if (TSDB_CODE_SUCCESS == code) {
code = qContinuePlanPostQuery(pRequest->pPostPlan);
}
nodesReleaseAllocator(pWrapper->pParseCtx->allocatorId);
handleQueryAnslyseRes(pWrapper, NULL, code);
}
void returnToUser(SRequestObj* pRequest) {
if (pRequest->relation.userRefId == pRequest->self || 0 == pRequest->relation.userRefId) {
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
return;
}
SRequestObj* pUserReq = acquireRequest(pRequest->relation.userRefId);
if (pUserReq) {
pUserReq->code = pRequest->code;
// return to client
pUserReq->body.queryFp(pUserReq->body.param, pUserReq, pUserReq->code);
releaseRequest(pRequest->relation.userRefId);
return;
} else {
tscError("0x%" PRIx64 ", user ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
pRequest->relation.userRefId, pRequest->requestId);
}
}
void postSubQueryFetchCb(void* param, TAOS_RES* res, int32_t rowNum) {
SRequestObj* pRequest = (SRequestObj*)res;
if (pRequest->code) {
returnToUser(pRequest);
return;
}
TAOS_ROW row = NULL;
if (rowNum > 0) {
row = taos_fetch_row(res); // for single row only now
}
SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
if (pNextReq) {
continuePostSubQuery(pNextReq, row);
releaseRequest(pRequest->relation.nextRefId);
} else {
tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
pRequest->relation.nextRefId, pRequest->requestId);
}
}
void handlePostSubQuery(SSqlCallbackWrapper* pWrapper) {
SRequestObj* pRequest = pWrapper->pRequest;
if (TD_RES_QUERY(pRequest)) {
taosAsyncFetchImpl(pRequest, postSubQueryFetchCb, pWrapper);
return;
}
SRequestObj* pNextReq = acquireRequest(pRequest->relation.nextRefId);
if (pNextReq) {
continuePostSubQuery(pNextReq, NULL);
releaseRequest(pRequest->relation.nextRefId);
} else {
tscError("0x%" PRIx64 ", next req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pRequest->self,
pRequest->relation.nextRefId, pRequest->requestId);
}
}
// todo: refactor the error code mgmt
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SSqlCallbackWrapper* pWrapper = param;
@ -912,12 +998,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->retry, pRequest->requestId);
pRequest->prevCode = code;
schedulerFreeJob(&pRequest->body.queryJob, 0);
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
destorySqlCallbackWrapper(pWrapper);
doAsyncQuery(pRequest, true);
restartAsyncQuery(pRequest, code);
return;
}
@ -938,10 +1019,15 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
return;
}
destorySqlCallbackWrapper(pWrapper);
if (pRequest->relation.nextRefId) {
handlePostSubQuery(pWrapper);
} else {
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
}
}
SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQuery, void** res) {
@ -1049,6 +1135,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
pRequest->requestId);
} else {
pRequest->body.subplanNum = pDag->numOfSubplans;
TSWAP(pRequest->pPostPlan, pDag->pPostPlan);
}
pRequest->metric.execStart = taosGetTimestampUs();
@ -1084,6 +1171,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
tscDebug("0x%" PRIx64 " plan not executed, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
if (TSDB_CODE_SUCCESS != code) {
pRequest->code = terrno;
}
@ -1103,6 +1191,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.execMode = pQuery->execMode;
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
}
if (pQuery->pRoot && !pRequest->inRetry) {
@ -2404,3 +2493,90 @@ TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly,
return pRequest;
}
static void fetchCallback(void *pResult, void *param, int32_t code) {
SRequestObj *pRequest = (SRequestObj *)param;
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);
pResultInfo->pData = pResult;
pResultInfo->numOfRows = 0;
if (code != TSDB_CODE_SUCCESS) {
pRequest->code = code;
taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
return;
}
pRequest->code =
setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
pRequest->code = code;
tscError("0x%" PRIx64 " fetch results failed, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
} else {
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
pRequest->requestId);
STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
}
pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
}
void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param) {
pRequest->body.fetchFp = fp;
pRequest->body.param = param;
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
  // this query has no result set or an error occurred, return directly
if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
return;
}
  // all data has already been returned to the app, no need to try again
if (pResultInfo->completed) {
    // it is a locally executed query, no need to do an async fetch
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
if (pResultInfo->localResultFetched) {
pResultInfo->numOfRows = 0;
pResultInfo->current = 0;
} else {
pResultInfo->localResultFetched = true;
}
} else {
pResultInfo->numOfRows = 0;
}
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
return;
}
SSchedulerReq req = {
.syncReq = false,
.fetchFp = fetchCallback,
.cbParam = pRequest,
};
schedulerFetchRows(pRequest->body.queryJob, &req);
}

View File

@ -135,11 +135,6 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)
switch (type) {
case TAOS_NOTIFY_PASSVER: {
taosThreadMutexLock(&pObj->mutex);
if (fp && !pObj->passInfo.fp) {
atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
} else if (!fp && pObj->passInfo.fp) {
atomic_sub_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
}
pObj->passInfo.fp = fp;
pObj->passInfo.param = param;
taosThreadMutexUnlock(&pObj->mutex);
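A minimal registration sketch for this notification path; the callback signature (param, ext, type) is inferred from how passInfo->fp is invoked in the heartbeat code above, so treat it as an assumption:

#include <stdint.h>
#include <stdio.h>
// #include "taos.h"  /* assumed public header exposing taos_set_notify_cb() */

/* assumed callback shape: (user param, event payload, notify type) */
static void onPassVerChanged(void *param, void *ext, int type) {
  (void)param;
  if (type == TAOS_NOTIFY_PASSVER) {
    /* assumed: ext points at the new int32_t password version */
    printf("password version is now %d\n", (int)*(int32_t *)ext);
  }
}

/* after taos_connect():
   taos_set_notify_cb(taos, onPassVerChanged, NULL, TAOS_NOTIFY_PASSVER); */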
@ -563,22 +558,13 @@ int taos_select_db(TAOS *taos, const char *db) {
return code;
}
void taos_stop_query(TAOS_RES *res) {
if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res) || TD_RES_TMQ_METADATA(res)) {
return;
}
SRequestObj *pRequest = (SRequestObj *)res;
pRequest->killed = true;
// It is not a query, no need to stop.
if (NULL == pRequest->pQuery || QUERY_EXEC_MODE_SCHEDULE != pRequest->pQuery->execMode) {
tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId);
return;
}
schedulerFreeJob(&pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
tscDebug("request %" PRIx64 " killed", pRequest->requestId);
stopAllQueries((SRequestObj*)res);
}
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
@ -774,8 +760,13 @@ static void destoryCatalogReq(SCatalogReq *pCatalogReq) {
taosArrayDestroy(pCatalogReq->pDbVgroup);
taosArrayDestroy(pCatalogReq->pDbCfg);
taosArrayDestroy(pCatalogReq->pDbInfo);
taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
if (pCatalogReq->cloned) {
taosArrayDestroy(pCatalogReq->pTableMeta);
taosArrayDestroy(pCatalogReq->pTableHash);
} else {
taosArrayDestroyEx(pCatalogReq->pTableMeta, destoryTablesReq);
taosArrayDestroyEx(pCatalogReq->pTableHash, destoryTablesReq);
}
taosArrayDestroy(pCatalogReq->pUdf);
taosArrayDestroy(pCatalogReq->pIndex);
taosArrayDestroy(pCatalogReq->pUser);
@ -794,26 +785,108 @@ void destorySqlCallbackWrapper(SSqlCallbackWrapper *pWrapper) {
taosMemoryFree(pWrapper);
}
void destroyCtxInRequest(SRequestObj* pRequest) {
schedulerFreeJob(&pRequest->body.queryJob, 0);
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
destorySqlCallbackWrapper(pRequest->pWrapper);
pRequest->pWrapper = NULL;
}
static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t code) {
SSqlCallbackWrapper *pWrapper = (SSqlCallbackWrapper *)param;
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
int64_t analyseStart = taosGetTimestampUs();
pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
if (code == TSDB_CODE_SUCCESS) {
int64_t analyseStart = taosGetTimestampUs();
pRequest->metric.ctgCostUs = analyseStart - pRequest->metric.ctgStart;
if (TSDB_CODE_SUCCESS == code) {
code = qAnalyseSqlSemantic(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
}
pRequest->metric.analyseCostUs += taosGetTimestampUs() - analyseStart;
handleQueryAnslyseRes(pWrapper, pResultMeta, code);
}
int32_t cloneCatalogReq(SCatalogReq* * ppTarget, SCatalogReq* pSrc) {
int32_t code = TSDB_CODE_SUCCESS;
SCatalogReq* pTarget = taosMemoryCalloc(1, sizeof(SCatalogReq));
if (pTarget == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
pTarget->pDbVgroup = taosArrayDup(pSrc->pDbVgroup, NULL);
pTarget->pDbCfg = taosArrayDup(pSrc->pDbCfg, NULL);
pTarget->pDbInfo = taosArrayDup(pSrc->pDbInfo, NULL);
pTarget->pTableMeta = taosArrayDup(pSrc->pTableMeta, NULL);
pTarget->pTableHash = taosArrayDup(pSrc->pTableHash, NULL);
pTarget->pUdf = taosArrayDup(pSrc->pUdf, NULL);
pTarget->pIndex = taosArrayDup(pSrc->pIndex, NULL);
pTarget->pUser = taosArrayDup(pSrc->pUser, NULL);
pTarget->pTableIndex = taosArrayDup(pSrc->pTableIndex, NULL);
pTarget->pTableCfg = taosArrayDup(pSrc->pTableCfg, NULL);
pTarget->pTableTag = taosArrayDup(pSrc->pTableTag, NULL);
pTarget->qNodeRequired = pSrc->qNodeRequired;
pTarget->dNodeRequired = pSrc->dNodeRequired;
pTarget->svrVerRequired = pSrc->svrVerRequired;
pTarget->forceUpdate = pSrc->forceUpdate;
pTarget->cloned = true;
*ppTarget = pTarget;
}
return code;
}
void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, SNode* pRoot) {
SRequestObj* pNewRequest = NULL;
SSqlCallbackWrapper* pNewWrapper = NULL;
int32_t code = buildPreviousRequest(pWrapper->pRequest, pWrapper->pRequest->sqlstr, &pNewRequest);
if (code) {
handleQueryAnslyseRes(pWrapper, pResultMeta, code);
return;
}
pNewRequest->pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
if (NULL == pNewRequest->pQuery) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
pNewRequest->pQuery->pRoot = pRoot;
pRoot = NULL;
pNewRequest->pQuery->execStage = QUERY_EXEC_STAGE_ANALYSE;
}
if (TSDB_CODE_SUCCESS == code) {
code = prepareAndParseSqlSyntax(&pNewWrapper, pNewRequest, false);
}
if (TSDB_CODE_SUCCESS == code) {
code = cloneCatalogReq(&pNewWrapper->pCatalogReq, pWrapper->pCatalogReq);
}
doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
nodesDestroyNode(pRoot);
}
void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code) {
SRequestObj *pRequest = pWrapper->pRequest;
SQuery *pQuery = pRequest->pQuery;
if (code == TSDB_CODE_SUCCESS && pQuery->pPrevRoot) {
SNode* prevRoot = pQuery->pPrevRoot;
pQuery->pPrevRoot = NULL;
handleSubQueryFromAnalyse(pWrapper, pResultMeta, prevRoot);
return;
}
if (code == TSDB_CODE_SUCCESS) {
pRequest->stableQuery = pQuery->stableQuery;
if (pQuery->pRoot) {
pRequest->stmtType = pQuery->pRoot->type;
}
}
pRequest->metric.analyseCostUs = taosGetTimestampUs() - analyseStart;
if (code == TSDB_CODE_SUCCESS) {
if (pQuery->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);
setResPrecision(&pRequest->body.resInfo, pQuery->precision);
@ -826,14 +899,14 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
launchAsyncQuery(pRequest, pQuery, pResultMeta, pWrapper);
} else {
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
pRequest->prevCode = code;
doAsyncQuery(pRequest, true);
restartAsyncQuery(pRequest, code);
return;
}
@ -841,7 +914,7 @@ static void doAsyncQueryFromAnalyse(SMetaData *pResultMeta, void *param, int32_t
tscError("0x%" PRIx64 " error occurs, code:%s, return to user app, reqId:0x%" PRIx64, pRequest->self,
tstrerror(code), pRequest->requestId);
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
returnToUser(pRequest);
}
}
@ -904,6 +977,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@ -920,6 +994,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pWrapper->pRequest->self, code,
tstrerror(code), pWrapper->pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
terrno = code;
pRequest->code = code;
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@ -967,27 +1042,16 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
return TSDB_CODE_SUCCESS;
}
void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
int32_t prepareAndParseSqlSyntax(SSqlCallbackWrapper **ppWrapper, SRequestObj *pRequest, bool updateMetaForce) {
int32_t code = TSDB_CODE_SUCCESS;
STscObj *pTscObj = pRequest->pTscObj;
SSqlCallbackWrapper *pWrapper = NULL;
int32_t code = TSDB_CODE_SUCCESS;
if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
code = pRequest->prevCode;
terrno = code;
pRequest->code = code;
tscDebug("call sync query cb with code: %s", tstrerror(code));
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
return;
}
if (TSDB_CODE_SUCCESS == code) {
pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
if (pWrapper == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
pWrapper->pRequest = pRequest;
}
SSqlCallbackWrapper *pWrapper = taosMemoryCalloc(1, sizeof(SSqlCallbackWrapper));
if (pWrapper == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
} else {
pWrapper->pRequest = pRequest;
pRequest->pWrapper = pWrapper;
*ppWrapper = pWrapper;
}
if (TSDB_CODE_SUCCESS == code) {
@ -999,7 +1063,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pWrapper->pParseCtx->pCatalog);
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && NULL == pRequest->pQuery) {
int64_t syntaxStart = taosGetTimestampUs();
pWrapper->pCatalogReq = taosMemoryCalloc(1, sizeof(SCatalogReq));
@ -1014,6 +1078,27 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
pRequest->metric.parseCostUs += taosGetTimestampUs() - syntaxStart;
}
return code;
}
void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
SSqlCallbackWrapper *pWrapper = NULL;
int32_t code = TSDB_CODE_SUCCESS;
if (pRequest->retry++ > REQUEST_TOTAL_EXEC_TIMES) {
code = pRequest->prevCode;
terrno = code;
pRequest->code = code;
tscDebug("call sync query cb with code: %s", tstrerror(code));
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
return;
}
if (TSDB_CODE_SUCCESS == code) {
code = prepareAndParseSqlSyntax(&pWrapper, pRequest, updateMetaForce);
}
if (TSDB_CODE_SUCCESS == code) {
pRequest->stmtType = pRequest->pQuery->pRoot->type;
code = phaseAsyncQuery(pWrapper);
@ -1023,12 +1108,14 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
tscError("0x%" PRIx64 " error happens, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code, tstrerror(code),
pRequest->requestId);
destorySqlCallbackWrapper(pWrapper);
pRequest->pWrapper = NULL;
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
refreshMeta(pRequest->pTscObj, pRequest);
pRequest->prevCode = code;
doAsyncQuery(pRequest, true);
return;
@ -1040,48 +1127,57 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
}
static void fetchCallback(void *pResult, void *param, int32_t code) {
SRequestObj *pRequest = (SRequestObj *)param;
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);
pResultInfo->pData = pResult;
pResultInfo->numOfRows = 0;
if (code != TSDB_CODE_SUCCESS) {
pRequest->code = code;
taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
return;
void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
int32_t reqIdx = 0;
SRequestObj *pReqList[16] = {NULL};
SRequestObj *pUserReq = NULL;
pReqList[0] = pRequest;
uint64_t tmpRefId = 0;
SRequestObj* pTmp = pRequest;
while (pTmp->relation.prevRefId) {
tmpRefId = pTmp->relation.prevRefId;
pTmp = acquireRequest(tmpRefId);
if (pTmp) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self,
tmpRefId, pTmp->requestId);
break;
}
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
return;
tmpRefId = pRequest->relation.nextRefId;
while (tmpRefId) {
pTmp = acquireRequest(tmpRefId);
if (pTmp) {
tmpRefId = pTmp->relation.nextRefId;
removeRequest(pTmp->self);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
break;
}
}
pRequest->code =
setQueryResultFromRsp(pResultInfo, (const SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
pRequest->code = code;
tscError("0x%" PRIx64 " fetch results failed, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
for (int32_t i = reqIdx; i >= 0; i--) {
destroyCtxInRequest(pReqList[i]);
if (pReqList[i]->relation.userRefId == pReqList[i]->self || 0 == pReqList[i]->relation.userRefId) {
pUserReq = pReqList[i];
} else {
removeRequest(pReqList[i]->self);
}
}
if (pUserReq) {
pUserReq->prevCode = code;
memset(&pUserReq->relation, 0, sizeof(pUserReq->relation));
} else {
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%" PRId64 " total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
pRequest->self, pResultInfo->numOfRows, pResultInfo->totalRows, pResultInfo->completed,
pRequest->requestId);
STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
tscError("user req is missing");
return;
}
pRequest->body.fetchFp(pRequest->body.param, pRequest, pResultInfo->numOfRows);
doAsyncQuery(pUserReq, true);
}
void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
@ -1095,43 +1191,8 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}
SRequestObj *pRequest = res;
pRequest->body.fetchFp = fp;
pRequest->body.param = param;
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
// this query has no results or error exists, return directly
if (taos_num_fields(pRequest) == 0 || pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
return;
}
// all data has returned to App already, no need to try again
if (pResultInfo->completed) {
// it is a local executed query, no need to do async fetch
if (QUERY_EXEC_MODE_SCHEDULE != pRequest->body.execMode) {
if (pResultInfo->localResultFetched) {
pResultInfo->numOfRows = 0;
pResultInfo->current = 0;
} else {
pResultInfo->localResultFetched = true;
}
} else {
pResultInfo->numOfRows = 0;
}
pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
return;
}
SSchedulerReq req = {
.syncReq = false,
.fetchFp = fetchCallback,
.cbParam = pRequest,
};
schedulerFetchRows(pRequest->body.queryJob, &req);
taosAsyncFetchImpl(pRequest, fp, param);
}
void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {

View File

@ -77,6 +77,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
goto End;
@ -130,6 +131,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
pTscObj->connType = connectRsp.connType;
pTscObj->passInfo.ver = connectRsp.passVer;
pTscObj->authVer = connectRsp.authVer;
hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);

View File

@ -749,6 +749,9 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? "sml_add_column" : "sml_modify_column_size";
} else{
uError("SML:0x%" PRIx64 " invalid action:%d", info->id, action);
goto end;
}
code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0);

View File

@ -202,7 +202,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
bool keyEscaped = false;
size_t keyLenEscaped = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
@ -410,7 +410,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
bool keyEscaped = false;
size_t keyLenEscaped = 0;
while (*sql < sqlEnd) {
if (unlikely(IS_COMMA(*sql))) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
return TSDB_CODE_SML_INVALID_DATA;
}
@ -436,19 +436,20 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
size_t valueLen = 0;
bool valueEscaped = false;
size_t valueLenEscaped = 0;
bool isInQuote = false;
int quoteNum = 0;
const char *escapeChar = NULL;
while (*sql < sqlEnd) {
// parse value
if (unlikely(*(*sql) == QUOTE && (*(*sql - 1) != SLASH || (*sql - 1) == escapeChar))) {
isInQuote = !isInQuote;
quoteNum++;
(*sql)++;
continue;
}
if (!isInQuote) {
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
if(quoteNum > 2){
break;
}
continue;
}
if (quoteNum % 2 == 0 && (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql)))) {
break;
}
if (IS_SLASH_LETTER_IN_FIELD_VALUE(*sql) && (*sql - 1) != escapeChar) {
escapeChar = *sql;
@ -460,8 +461,8 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
}
valueLen = *sql - value;
if (unlikely(isInQuote)) {
smlBuildInvalidDataMsg(&info->msgBuf, "only one quote", value);
if (unlikely(quoteNum != 0 && quoteNum != 2)) {
smlBuildInvalidDataMsg(&info->msgBuf, "unbalanced quotes", value);
return TSDB_CODE_SML_INVALID_DATA;
}
if (unlikely(valueLen == 0)) {

View File

@ -939,8 +939,6 @@ int stmtClose(TAOS_STMT* stmt) {
stmtCleanSQLInfo(pStmt);
taosMemoryFree(stmt);
STMT_DLOG_E("stmt freed");
return TSDB_CODE_SUCCESS;
}

View File

@ -82,7 +82,7 @@ struct tmq_t {
int8_t useSnapshot;
int8_t autoCommit;
int32_t autoCommitInterval;
int32_t resetOffsetCfg;
int8_t resetOffsetCfg;
uint64_t consumerId;
bool hbBgEnable;
tmq_commit_cb* commitCb;
@ -99,6 +99,7 @@ struct tmq_t {
// poll info
int64_t pollCnt;
int64_t totalRows;
// bool needReportOffsetRows;
// timer
tmr_h hbLiveTimer;
@ -264,7 +265,7 @@ tmq_conf_t* tmq_conf_new() {
conf->withTbName = false;
conf->autoCommit = true;
conf->autoCommitInterval = DEFAULT_AUTO_COMMIT_INTERVAL;
conf->resetOffset = TMQ_OFFSET__RESET_EARLIEAST;
conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
conf->hbBgEnable = true;
return conf;
@ -318,7 +319,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
conf->resetOffset = TMQ_OFFSET__RESET_NONE;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "earliest") == 0) {
conf->resetOffset = TMQ_OFFSET__RESET_EARLIEAST;
conf->resetOffset = TMQ_OFFSET__RESET_EARLIEST;
return TMQ_CONF_OK;
} else if (strcasecmp(value, "latest") == 0) {
conf->resetOffset = TMQ_OFFSET__RESET_LATEST;
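
For reference, a hedged consumer-config sketch exercising the three reset values parsed above; the key name "auto.offset.reset" is assumed from the TMQ documentation rather than shown in this hunk.

tmq_conf_t *conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "cg1");                // illustrative group id
tmq_conf_set(conf, "auto.offset.reset", "earliest");  // or "latest" / "none"
tmq_t *consumer = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);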
@ -357,7 +358,7 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}
if (strcasecmp(key, "enable.heartbeat.background") == 0) {
// if (strcasecmp(key, "enable.heartbeat.background") == 0) {
// if (strcasecmp(value, "true") == 0) {
// conf->hbBgEnable = true;
// return TMQ_CONF_OK;
@ -365,10 +366,10 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
// conf->hbBgEnable = false;
// return TMQ_CONF_OK;
// } else {
tscError("the default value of enable.heartbeat.background is true, can not be seted");
return TMQ_CONF_INVALID;
// tscError("the default value of enable.heartbeat.background is true, can not be seted");
// return TMQ_CONF_INVALID;
// }
}
// }
if (strcasecmp(key, "td.connect.ip") == 0) {
conf->ip = taosStrdup(value);
@ -422,30 +423,30 @@ char** tmq_list_to_c_array(const tmq_list_t* list) {
return container->pData;
}
static SMqClientVg* foundClientVg(SArray* pTopicList, const char* pName, int32_t vgId, int32_t* index,
int32_t* numOfVgroups) {
int32_t numOfTopics = taosArrayGetSize(pTopicList);
*index = -1;
*numOfVgroups = 0;
for (int32_t i = 0; i < numOfTopics; ++i) {
SMqClientTopic* pTopic = taosArrayGet(pTopicList, i);
if (strcmp(pTopic->topicName, pName) != 0) {
continue;
}
*numOfVgroups = taosArrayGetSize(pTopic->vgs);
for (int32_t j = 0; j < (*numOfVgroups); ++j) {
SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, j);
if (pClientVg->vgId == vgId) {
*index = j;
return pClientVg;
}
}
}
return NULL;
}
//static SMqClientVg* foundClientVg(SArray* pTopicList, const char* pName, int32_t vgId, int32_t* index,
// int32_t* numOfVgroups) {
// int32_t numOfTopics = taosArrayGetSize(pTopicList);
// *index = -1;
// *numOfVgroups = 0;
//
// for (int32_t i = 0; i < numOfTopics; ++i) {
// SMqClientTopic* pTopic = taosArrayGet(pTopicList, i);
// if (strcmp(pTopic->topicName, pName) != 0) {
// continue;
// }
//
// *numOfVgroups = taosArrayGetSize(pTopic->vgs);
// for (int32_t j = 0; j < (*numOfVgroups); ++j) {
// SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, j);
// if (pClientVg->vgId == vgId) {
// *index = j;
// return pClientVg;
// }
// }
// }
//
// return NULL;
//}
// Two problems do not need to be addressed here
// 1. update of the epset. the response of the poll request will automatically handle this problem
@ -567,12 +568,12 @@ static int32_t doSendCommitMsg(tmq_t* tmq, SMqClientVg* pVg, const char* pTopicN
atomic_add_fetch_32(&pParamSet->totalRspNum, 1);
SEp* pEp = GET_ACTIVE_EP(&pVg->epSet);
char offsetBuf[80] = {0};
char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffset->offset.val);
char commitBuf[80] = {0};
char commitBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(commitBuf, tListLen(commitBuf), &pVg->offsetInfo.committedOffset);
tscDebug("consumer:0x%" PRIx64 " topic:%s on vgId:%d send offset:%s prev:%s, ep:%s:%d, ordinal:%d/%d, req:0x%" PRIx64,
tscInfo("consumer:0x%" PRIx64 " topic:%s on vgId:%d send offset:%s prev:%s, ep:%s:%d, ordinal:%d/%d, req:0x%" PRIx64,
tmq->consumerId, pOffset->offset.subKey, pVg->vgId, offsetBuf, commitBuf, pEp->fqdn, pEp->port, index + 1,
totalVgroups, pMsgSendInfo->requestId);
@ -635,6 +636,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm
pParamSet->callbackFn = pCommitFp;
pParamSet->userParam = userParam;
taosRLockLatch(&tmq->lock);
int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics);
tscDebug("consumer:0x%" PRIx64 " do manual commit offset for %s, vgId:%d", tmq->consumerId, pTopicName, vgId);
@ -645,13 +647,14 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm
pTopicName, numOfTopics);
taosMemoryFree(pParamSet);
pCommitFp(tmq, TSDB_CODE_SUCCESS, userParam);
taosRUnLockLatch(&tmq->lock);
return;
}
int32_t j = 0;
int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs);
for (j = 0; j < numOfVgroups; j++) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j);
if (pVg->vgId == vgId) {
break;
}
@ -662,10 +665,11 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm
vgId, numOfVgroups, pTopicName);
taosMemoryFree(pParamSet);
pCommitFp(tmq, TSDB_CODE_SUCCESS, userParam);
taosRUnLockLatch(&tmq->lock);
return;
}
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
SMqClientVg* pVg = (SMqClientVg*)taosArrayGet(pTopic->vgs, j);
if (pVg->offsetInfo.currentOffset.type > 0 && !tOffsetEqual(&pVg->offsetInfo.currentOffset, &pVg->offsetInfo.committedOffset)) {
code = doSendCommitMsg(tmq, pVg, pTopic->topicName, pParamSet, j, numOfVgroups, type);
@ -678,6 +682,7 @@ static void asyncCommitOffset(tmq_t* tmq, const TAOS_RES* pRes, int32_t type, tm
taosMemoryFree(pParamSet);
pCommitFp(tmq, code, userParam);
}
taosRUnLockLatch(&tmq->lock);
}
static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* userParam) {
@ -695,6 +700,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
// init as 1 to prevent concurrency issue
pParamSet->waitingRspNum = 1;
taosRLockLatch(&tmq->lock);
int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics);
tscDebug("consumer:0x%" PRIx64 " start to commit offset for %d topics", tmq->consumerId, numOfTopics);
@ -724,6 +730,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
}
}
}
taosRUnLockLatch(&tmq->lock);
tscDebug("consumer:0x%" PRIx64 " total commit:%d for %d topics", tmq->consumerId, pParamSet->waitingRspNum - 1,
numOfTopics);
@ -741,13 +748,15 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
static void generateTimedTask(int64_t refId, int32_t type) {
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
if (tmq != NULL) {
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
*pTaskType = type;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);
taosReleaseRef(tmqMgmt.rsetId, refId);
}
if(tmq == NULL) return;
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
if(pTaskType == NULL) return;
*pTaskType = type;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);
taosReleaseRef(tmqMgmt.rsetId, refId);
}
void tmqAssignAskEpTask(void* param, void* tmrId) {
@ -762,19 +771,19 @@ void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
taosMemoryFree(param);
}
void tmqAssignDelayedReportTask(void* param, void* tmrId) {
int64_t refId = *(int64_t*)param;
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
if (tmq != NULL) {
int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
*pTaskType = TMQ_DELAYED_TASK__REPORT;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);
}
taosReleaseRef(tmqMgmt.rsetId, refId);
taosMemoryFree(param);
}
//void tmqAssignDelayedReportTask(void* param, void* tmrId) {
// int64_t refId = *(int64_t*)param;
// tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
// if (tmq != NULL) {
// int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM, 0);
// *pTaskType = TMQ_DELAYED_TASK__REPORT;
// taosWriteQitem(tmq->delayedTask, pTaskType);
// tsem_post(&tmq->rspSem);
// }
//
// taosReleaseRef(tmqMgmt.rsetId, refId);
// taosMemoryFree(param);
//}
int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
if (pMsg) {
@ -796,6 +805,29 @@ void tmqSendHbReq(void* param, void* tmrId) {
SMqHbReq req = {0};
req.consumerId = tmq->consumerId;
req.epoch = tmq->epoch;
taosRLockLatch(&tmq->lock);
// if(tmq->needReportOffsetRows){
req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows));
for(int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++){
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
int32_t numOfVgroups = taosArrayGetSize(pTopic->vgs);
TopicOffsetRows* data = taosArrayReserve(req.topics, 1);
strcpy(data->topicName, pTopic->topicName);
data->offsetRows = taosArrayInit(numOfVgroups, sizeof(OffsetRows));
for(int j = 0; j < numOfVgroups; j++){
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
OffsetRows* offRows = taosArrayReserve(data->offsetRows, 1);
offRows->vgId = pVg->vgId;
offRows->rows = pVg->numOfRows;
offRows->offset = pVg->offsetInfo.currentOffset;
char buf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(buf, TSDB_OFFSET_LEN, &offRows->offset);
tscInfo("consumer:0x%" PRIx64 ",report offset: vgId:%d, offset:%s, rows:%"PRId64, tmq->consumerId, offRows->vgId, buf, offRows->rows);
}
}
// tmq->needReportOffsetRows = false;
// }
taosRUnLockLatch(&tmq->lock);
int32_t tlen = tSerializeSMqHbReq(NULL, 0, &req);
if (tlen < 0) {
@ -835,13 +867,14 @@ void tmqSendHbReq(void* param, void* tmrId) {
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
OVER:
tDeatroySMqHbReq(&req);
taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
taosReleaseRef(tmqMgmt.rsetId, refId);
}
static void defaultCommitCbFn(tmq_t* pTmq, int32_t code, void* param) {
if (code != 0) {
tscDebug("consumer:0x%" PRIx64 ", failed to commit offset, code:%s", pTmq->consumerId, tstrerror(code));
tscError("consumer:0x%" PRIx64 ", failed to commit offset, code:%s", pTmq->consumerId, tstrerror(code));
}
}
@ -961,14 +994,24 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
if (*topics == NULL) {
*topics = tmq_list_new();
}
taosRLockLatch(&tmq->lock);
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* topic = taosArrayGet(tmq->clientTopics, i);
tmq_list_append(*topics, strchr(topic->topicName, '.') + 1);
}
taosRUnLockLatch(&tmq->lock);
return 0;
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
if (tmq->autoCommit) {
int32_t rsp = tmq_commit_sync(tmq, NULL);
if (rsp != 0) {
return rsp;
}
}
taosSsleep(2); // sleep 2s for hb to send offset and rows to server
int32_t rsp;
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
@ -1063,6 +1106,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->status = TMQ_CONSUMER_STATUS__INIT;
pTmq->pollCnt = 0;
pTmq->epoch = 0;
// pTmq->needReportOffsetRows = true;
// set conf
strcpy(pTmq->clientId, conf->clientId);
@ -1107,7 +1151,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
char buf[80] = {0};
char buf[TSDB_OFFSET_LEN] = {0};
STqOffsetVal offset = {.type = pTmq->resetOffsetCfg};
tFormatOffset(buf, tListLen(buf), &offset);
tscInfo("consumer:0x%" PRIx64 " is setup, refId:%" PRId64
@ -1123,7 +1167,7 @@ _failed:
}
int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
const int32_t MAX_RETRY_COUNT = 120 * 60; // let's wait for 2 mins at most
const int32_t MAX_RETRY_COUNT = 120 * 2; // let's wait for 2 mins at most
const SArray* container = &topic_list->container;
int32_t sz = taosArrayGetSize(container);
void* buf = NULL;
@ -1131,7 +1175,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
SCMSubscribeReq req = {0};
int32_t code = 0;
tscDebug("consumer:0x%" PRIx64 " cgroup:%s, subscribe %d topics", tmq->consumerId, tmq->groupId, sz);
tscInfo("consumer:0x%" PRIx64 " cgroup:%s, subscribe %d topics", tmq->consumerId, tmq->groupId, sz);
req.consumerId = tmq->consumerId;
tstrncpy(req.clientId, tmq->clientId, 256);
@ -1143,6 +1187,11 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
goto FAIL;
}
req.withTbName = tmq->withTbName;
req.autoCommit = tmq->autoCommit;
req.autoCommitInterval = tmq->autoCommitInterval;
req.resetOffsetCfg = tmq->resetOffsetCfg;
for (int32_t i = 0; i < sz; i++) {
char* topic = taosArrayGetP(container, i);
@ -1154,7 +1203,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
}
tNameExtractFullName(&name, topicFName);
tscDebug("consumer:0x%" PRIx64 " subscribe topic:%s", tmq->consumerId, topicFName);
tscInfo("consumer:0x%" PRIx64 " subscribe topic:%s", tmq->consumerId, topicFName);
taosArrayPush(req.topicNames, &topicFName);
}
@ -1215,7 +1264,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
goto FAIL;
}
tscDebug("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
tscInfo("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
taosMsleep(500);
}
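
A usage sketch for the subscribe path above, with an illustrative topic name and error handling elided:

tmq_list_t *topics = tmq_list_new();
tmq_list_append(topics, "topic_meters");         // hypothetical topic
int32_t code = tmq_subscribe(consumer, topics);  // retries internally while the mnode is not ready
tmq_list_destroy(topics);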
@ -1375,8 +1424,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
char buf[80];
tFormatOffset(buf, 80, &pRspWrapper->dataRsp.rspOffset);
char buf[TSDB_OFFSET_LEN];
tFormatOffset(buf, TSDB_OFFSET_LEN, &pRspWrapper->dataRsp.rspOffset);
tscDebug("consumer:0x%" PRIx64 " recv poll rsp, vgId:%d, req ver:%" PRId64 ", rsp:%s type %d, reqId:0x%" PRIx64,
tmq->consumerId, vgId, pRspWrapper->dataRsp.reqOffset.version, buf, rspType, requestId);
} else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
@ -1426,7 +1475,8 @@ CREATE_MSG_FAIL:
}
typedef struct SVgroupSaveInfo {
STqOffsetVal offset;
STqOffsetVal currentOffset;
STqOffsetVal commitOffset;
int64_t numOfRows;
} SVgroupSaveInfo;
@ -1442,7 +1492,7 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
tstrncpy(pTopic->topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(pTopic->db, pTopicEp->db, TSDB_DB_FNAME_LEN);
tscDebug("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d", tmq->consumerId, pTopic->topicName, vgNumGet);
tscInfo("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d", tmq->consumerId, pTopic->topicName, vgNumGet);
pTopic->vgs = taosArrayInit(vgNumGet, sizeof(SMqClientVg));
for (int32_t j = 0; j < vgNumGet; j++) {
@ -1451,12 +1501,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
makeTopicVgroupKey(vgKey, pTopic->topicName, pVgEp->vgId);
SVgroupSaveInfo* pInfo = taosHashGet(pVgOffsetHashMap, vgKey, strlen(vgKey));
int64_t numOfRows = 0;
STqOffsetVal offsetNew = {.type = tmq->resetOffsetCfg};
if (pInfo != NULL) {
offsetNew = pInfo->offset;
numOfRows = pInfo->numOfRows;
}
STqOffsetVal offsetNew = {0};
offsetNew.type = tmq->resetOffsetCfg;
SMqClientVg clientVg = {
.pollCnt = 0,
@ -1465,11 +1511,11 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
.vgStatus = TMQ_VG_STATUS__IDLE,
.vgSkipCnt = 0,
.emptyBlockReceiveTs = 0,
.numOfRows = numOfRows,
.numOfRows = pInfo ? pInfo->numOfRows : 0,
};
clientVg.offsetInfo.currentOffset = offsetNew;
clientVg.offsetInfo.committedOffset = offsetNew;
clientVg.offsetInfo.currentOffset = pInfo ? pInfo->currentOffset : offsetNew;
clientVg.offsetInfo.committedOffset = pInfo ? pInfo->commitOffset : offsetNew;
clientVg.offsetInfo.walVerBegin = -1;
clientVg.offsetInfo.walVerEnd = -1;
clientVg.seekUpdated = false;
@ -1491,12 +1537,7 @@ static void freeClientVgInfo(void* param) {
static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
bool set = false;
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
tscDebug("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d",
tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur);
if (epoch <= tmq->epoch) {
return false;
}
@ -1512,23 +1553,29 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
return false;
}
taosWLockLatch(&tmq->lock);
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
tscInfo("consumer:0x%" PRIx64 " update ep epoch from %d to epoch %d, incoming topics:%d, existed topics:%d",
tmq->consumerId, tmq->epoch, epoch, topicNumGet, topicNumCur);
// todo extract method
for (int32_t i = 0; i < topicNumCur; i++) {
// find old topic
SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
if (pTopicCur->vgs) {
int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
tscDebug("consumer:0x%" PRIx64 ", current vg num: %d", tmq->consumerId, vgNumCur);
tscInfo("consumer:0x%" PRIx64 ", current vg num: %d", tmq->consumerId, vgNumCur);
for (int32_t j = 0; j < vgNumCur; j++) {
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
makeTopicVgroupKey(vgKey, pTopicCur->topicName, pVgCur->vgId);
char buf[80];
tFormatOffset(buf, 80, &pVgCur->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
char buf[TSDB_OFFSET_LEN];
tFormatOffset(buf, TSDB_OFFSET_LEN, &pVgCur->offsetInfo.currentOffset);
tscInfo("consumer:0x%" PRIx64 ", epoch:%d vgId:%d vgKey:%s, offset:%s", tmq->consumerId, epoch, pVgCur->vgId,
vgKey, buf);
SVgroupSaveInfo info = {.offset = pVgCur->offsetInfo.currentOffset, .numOfRows = pVgCur->numOfRows};
SVgroupSaveInfo info = {.currentOffset = pVgCur->offsetInfo.currentOffset, .commitOffset = pVgCur->offsetInfo.committedOffset, .numOfRows = pVgCur->numOfRows};
taosHashPut(pVgOffsetHashMap, vgKey, strlen(vgKey), &info, sizeof(SVgroupSaveInfo));
}
}
@ -1543,7 +1590,6 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
taosHashCleanup(pVgOffsetHashMap);
taosWLockLatch(&tmq->lock);
// destroy current buffered existed topics info
if (tmq->clientTopics) {
taosArrayDestroyEx(tmq->clientTopics, freeClientVgInfo);
@ -1555,7 +1601,7 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
atomic_store_8(&tmq->status, flag);
atomic_store_32(&tmq->epoch, epoch);
tscDebug("consumer:0x%" PRIx64 " update topic info completed", tmq->consumerId);
tscInfo("consumer:0x%" PRIx64 " update topic info completed", tmq->consumerId);
return set;
}
@ -1591,7 +1637,7 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
SMqRspHead* head = pMsg->pData;
int32_t epoch = atomic_load_32(&tmq->epoch);
if (head->epoch <= epoch) {
tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep",
tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, no need to update local ep",
tmq->consumerId, head->epoch, epoch);
if (tmq->status == TMQ_CONSUMER_STATUS__RECOVER) {
@ -1603,7 +1649,7 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
}
} else {
tscDebug("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId,
tscInfo("consumer:0x%" PRIx64 ", recv ep, msg epoch %d, current epoch %d, update local ep", tmq->consumerId,
head->epoch, epoch);
}
@ -1673,7 +1719,7 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg,
return pRspObj;
}
SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper, SMqClientVg* pVg, int64_t* numOfRows) {
SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
pRspObj->resType = RES_TYPE__TMQ_METADATA;
tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
@ -1688,6 +1734,13 @@ SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
}
// extract the rows in this data packet
for (int32_t i = 0; i < pRspObj->rsp.blockNum; ++i) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(pRspObj->rsp.blockData, i);
int64_t rows = htobe64(pRetrieve->numOfRows);
pVg->numOfRows += rows;
(*numOfRows) += rows;
}
return pRspObj;
}
@ -1745,7 +1798,7 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
sendInfo->msgType = TDMT_VND_TMQ_CONSUME;
int64_t transporterId = 0;
char offsetFormatBuf[80];
char offsetFormatBuf[TSDB_OFFSET_LEN];
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " send poll to %s vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64, pTmq->consumerId,
@ -1764,6 +1817,9 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
if(atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__RECOVER){
return 0;
}
int32_t code = 0;
taosWLockLatch(&tmq->lock);
int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics);
tscDebug("consumer:0x%" PRIx64 " start to poll data, numOfTopics:%d", tmq->consumerId, numOfTopics);
@ -1773,7 +1829,7 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
for (int j = 0; j < numOfVg; j++) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
if (taosGetTimestampMs() - pVg->emptyBlockReceiveTs < EMPTY_BLOCK_POLL_IDLE_DURATION) { // less than 100ms
if (taosGetTimestampMs() - pVg->emptyBlockReceiveTs < EMPTY_BLOCK_POLL_IDLE_DURATION) { // less than 10ms
tscTrace("consumer:0x%" PRIx64 " epoch %d, vgId:%d idle for 10ms before start next poll", tmq->consumerId,
tmq->epoch, pVg->vgId);
continue;
@ -1788,15 +1844,17 @@ static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
}
atomic_store_32(&pVg->vgSkipCnt, 0);
int32_t code = doTmqPollImpl(tmq, pTopic, pVg, timeout);
code = doTmqPollImpl(tmq, pTopic, pVg, timeout);
if (code != TSDB_CODE_SUCCESS) {
return code;
goto end;
}
}
}
tscDebug("consumer:0x%" PRIx64 " end to poll data", tmq->consumerId);
return 0;
end:
taosWUnLockLatch(&tmq->lock);
tscDebug("consumer:0x%" PRIx64 " end to poll data, code:%d", tmq->consumerId, code);
return code;
}
static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset) {
@ -1848,12 +1906,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
SMqDataRsp* pDataRsp = &pollRspWrapper->dataRsp;
if (pDataRsp->head.epoch == consumerEpoch) {
taosWLockLatch(&tmq->lock);
SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
pollRspWrapper->vgHandle = pVg;
pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName);
if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){
tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId,
pollRspWrapper->topicName, pollRspWrapper->vgId);
taosWUnLockLatch(&tmq->lock);
return NULL;
}
// update the epset
@ -1882,11 +1942,11 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pVg->offsetInfo.walVerEnd = pDataRsp->head.walever;
pVg->receivedInfoFromVnode = true;
char buf[80];
tFormatOffset(buf, 80, &pDataRsp->rspOffset);
char buf[TSDB_OFFSET_LEN];
tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset);
if (pDataRsp->blockNum == 0) {
tscDebug("consumer:0x%" PRIx64 " empty block received, vgId:%d, offset:%s, vg total:%" PRId64
" total:%" PRId64 " reqId:0x%" PRIx64,
", total:%" PRId64 ", reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pVg->numOfRows, tmq->totalRows, pollRspWrapper->reqId);
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
pVg->emptyBlockReceiveTs = taosGetTimestampMs();
@ -1897,12 +1957,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
tmq->totalRows += numOfRows;
pVg->emptyBlockReceiveTs = 0;
tscDebug("consumer:0x%" PRIx64 " process poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
" vg total:%" PRId64 " total:%" PRId64 ", reqId:0x%" PRIx64,
", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pDataRsp->blockNum, numOfRows, pVg->numOfRows, tmq->totalRows,
pollRspWrapper->reqId);
taosFreeQitem(pollRspWrapper);
taosWUnLockLatch(&tmq->lock);
return pRsp;
}
taosWUnLockLatch(&tmq->lock);
} else {
tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->vgId, pDataRsp->head.epoch, consumerEpoch);
@ -1917,12 +1979,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
tscDebug("consumer:0x%" PRIx64 " process meta rsp", tmq->consumerId);
if (pollRspWrapper->metaRsp.head.epoch == consumerEpoch) {
taosWLockLatch(&tmq->lock);
SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
pollRspWrapper->vgHandle = pVg;
pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName);
if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){
tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId,
pollRspWrapper->topicName, pollRspWrapper->vgId);
taosWUnLockLatch(&tmq->lock);
return NULL;
}
@ -1934,6 +1998,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
// build rsp
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
taosFreeQitem(pollRspWrapper);
taosWUnLockLatch(&tmq->lock);
return pRsp;
} else {
tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
@ -1946,12 +2011,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
taosWLockLatch(&tmq->lock);
SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
pollRspWrapper->vgHandle = pVg;
pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName);
if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){
tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId,
pollRspWrapper->topicName, pollRspWrapper->vgId);
taosWUnLockLatch(&tmq->lock);
return NULL;
}
@ -1969,37 +2036,36 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
if (pollRspWrapper->taosxRsp.blockNum == 0) {
tscDebug("consumer:0x%" PRIx64 " taosx empty block received, vgId:%d, vg total:%" PRId64 " reqId:0x%" PRIx64,
tscDebug("consumer:0x%" PRIx64 " taosx empty block received, vgId:%d, vg total:%" PRId64 ", reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, pVg->numOfRows, pollRspWrapper->reqId);
pVg->emptyBlockReceiveTs = taosGetTimestampMs();
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
taosFreeQitem(pollRspWrapper);
continue;
} else {
pVg->emptyBlockReceiveTs = 0; // reset the ts
// build rsp
void* pRsp = NULL;
int64_t numOfRows = 0;
if (pollRspWrapper->taosxRsp.createTableNum == 0) {
pRsp = tmqBuildRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
} else {
pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
}
tmq->totalRows += numOfRows;
char buf[TSDB_OFFSET_LEN];
tFormatOffset(buf, TSDB_OFFSET_LEN, &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
", vg total:%" PRId64 ", total:%" PRId64 ", reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows,
tmq->totalRows, pollRspWrapper->reqId);
taosFreeQitem(pollRspWrapper);
taosWUnLockLatch(&tmq->lock);
return pRsp;
}
// build rsp
void* pRsp = NULL;
int64_t numOfRows = 0;
if (pollRspWrapper->taosxRsp.createTableNum == 0) {
pRsp = tmqBuildRspFromWrapper(pollRspWrapper, pVg, &numOfRows);
} else {
pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
}
tmq->totalRows += numOfRows;
char buf[80];
tFormatOffset(buf, 80, &pVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " process taosx poll rsp, vgId:%d, offset:%s, blocks:%d, rows:%" PRId64
", vg total:%" PRId64 " total:%" PRId64 " reqId:0x%" PRIx64,
tmq->consumerId, pVg->vgId, buf, pollRspWrapper->dataRsp.blockNum, numOfRows, pVg->numOfRows,
tmq->totalRows, pollRspWrapper->reqId);
taosFreeQitem(pollRspWrapper);
return pRsp;
taosWUnLockLatch(&tmq->lock);
} else {
tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
@ -2024,12 +2090,12 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
void* rspObj;
int64_t startTime = taosGetTimestampMs();
tscDebug("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
tscInfo("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
timeout);
// in no-topic status, delayed tasks also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
tscDebug("consumer:0x%" PRIx64 " poll return since consumer is init", tmq->consumerId);
tscInfo("consumer:0x%" PRIx64 " poll return since consumer is init", tmq->consumerId);
taosMsleep(500); // sleep for a while
return NULL;
}
@ -2041,7 +2107,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
return NULL;
}
tscDebug("consumer:0x%" PRIx64 " not ready, retry:%d/40 in 500ms", tmq->consumerId, retryCnt);
tscInfo("consumer:0x%" PRIx64 " not ready, retry:%d/40 in 500ms", tmq->consumerId, retryCnt);
taosMsleep(500);
}
}
@ -2050,7 +2116,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
tmqHandleAllDelayedTask(tmq);
if (tmqPollImpl(tmq, timeout) < 0) {
tscDebug("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId);
tscError("consumer:0x%" PRIx64 " return due to poll error", tmq->consumerId);
}
rspObj = tmqHandleAllRsp(tmq, timeout, false);
@ -2058,7 +2124,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
tscDebug("consumer:0x%" PRIx64 " return rsp %p", tmq->consumerId, rspObj);
return (TAOS_RES*)rspObj;
} else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
tscDebug("consumer:0x%" PRIx64 " return null since no committed offset", tmq->consumerId);
tscInfo("consumer:0x%" PRIx64 " return null since no committed offset", tmq->consumerId);
return NULL;
}
@ -2066,7 +2132,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
int64_t currentTime = taosGetTimestampMs();
int64_t elapsedTime = currentTime - startTime;
if (elapsedTime > timeout) {
tscDebug("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
tscInfo("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
tmq->consumerId, tmq->epoch, startTime, currentTime);
return NULL;
}
@ -2078,7 +2144,8 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
}
}
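
A minimal poll loop, assuming the consumer was subscribed as above; a NULL return covers both the timeout and the error paths now logged at info/error level:

while (running) {                                     // 'running' is an assumed application flag
  TAOS_RES *msg = tmq_consumer_poll(consumer, 1000);  // timeout in ms
  if (msg == NULL) continue;
  // ... drain rows, e.g. with taos_fetch_row(msg) ...
  taos_free_result(msg);
}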
static void displayConsumeStatistics(const tmq_t* pTmq) {
static void displayConsumeStatistics(tmq_t* pTmq) {
taosRLockLatch(&pTmq->lock);
int32_t numOfTopics = taosArrayGetSize(pTmq->clientTopics);
tscDebug("consumer:0x%" PRIx64 " closing poll:%" PRId64 " rows:%" PRId64 " topics:%d, final epoch:%d",
pTmq->consumerId, pTmq->pollCnt, pTmq->totalRows, numOfTopics, pTmq->epoch);
@ -2094,12 +2161,12 @@ static void displayConsumeStatistics(const tmq_t* pTmq) {
tscDebug("topic:%s, %d. vgId:%d rows:%" PRId64, pTopics->topicName, j, pVg->vgId, pVg->numOfRows);
}
}
taosRUnLockLatch(&pTmq->lock);
tscDebug("consumer:0x%" PRIx64 " rows dist end", pTmq->consumerId);
}
int32_t tmq_consumer_close(tmq_t* tmq) {
tscDebug("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status);
tscInfo("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status);
displayConsumeStatistics(tmq);
if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
@ -2110,6 +2177,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
return rsp;
}
}
taosSsleep(2); // sleep 2s for hb to send offset and rows to server
int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
@ -2125,7 +2193,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
tmq_list_destroy(lst);
} else {
tscWarn("consumer:0x%" PRIx64 " not in ready state, close it directly", tmq->consumerId);
tscInfo("consumer:0x%" PRIx64 " not in ready state, close it directly", tmq->consumerId);
}
taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
@ -2388,7 +2456,7 @@ void asyncAskEp(tmq_t* pTmq, __tmq_askep_fn_t askEpFn, void* param) {
sendInfo->msgType = TDMT_MND_TMQ_ASK_EP;
SEpSet epSet = getEpSet_s(&pTmq->pTscObj->pAppInfo->mgmtEp);
tscDebug("consumer:0x%" PRIx64 " ask ep from mnode, reqId:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
tscInfo("consumer:0x%" PRIx64 " ask ep from mnode, reqId:0x%" PRIx64, pTmq->consumerId, sendInfo->requestId);
int64_t transporterId = 0;
asyncSendMsgToServer(pTmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
@ -2411,6 +2479,7 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
// if no more waiting rsp
pParamSet->callbackFn(tmq, pParamSet->code, pParamSet->userParam);
taosMemoryFree(pParamSet);
// tmq->needReportOffsetRows = true;
taosReleaseRef(tmqMgmt.rsetId, refId);
return 0;
@ -2502,14 +2571,18 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
int32_t* numOfAssignment) {
*numOfAssignment = 0;
*assignment = NULL;
SMqVgCommon* pCommon = NULL;
int32_t accId = tmq->pTscObj->acctId;
char tname[128] = {0};
sprintf(tname, "%d.%s", accId, pTopicName);
int32_t code = TSDB_CODE_SUCCESS;
taosWLockLatch(&tmq->lock);
SMqClientTopic* pTopic = getTopicByName(tmq, tname);
if (pTopic == NULL) {
return TSDB_CODE_INVALID_PARA;
code = TSDB_CODE_INVALID_PARA;
goto end;
}
// in case of snapshot is opened, no valid offset will return
@ -2519,7 +2592,8 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
if (*assignment == NULL) {
tscError("consumer:0x%" PRIx64 " failed to malloc buffer, size:%" PRIzu, tmq->consumerId,
(*numOfAssignment) * sizeof(tmq_topic_assignment));
return TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
bool needFetch = false;
@ -2544,10 +2618,11 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
}
if (needFetch) {
SMqVgCommon* pCommon = taosMemoryCalloc(1, sizeof(SMqVgCommon));
pCommon = taosMemoryCalloc(1, sizeof(SMqVgCommon));
if (pCommon == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
code = terrno;
goto end;
}
pCommon->pList= taosArrayInit(4, sizeof(tmq_topic_assignment));
@ -2562,8 +2637,8 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
SMqVgWalInfoParam* pParam = taosMemoryMalloc(sizeof(SMqVgWalInfoParam));
if (pParam == NULL) {
destroyCommonInfo(pCommon);
return terrno;
code = terrno;
goto end;
}
pParam->epoch = tmq->epoch;
@ -2577,30 +2652,30 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
int32_t msgSize = tSerializeSMqPollReq(NULL, 0, &req);
if (msgSize < 0) {
taosMemoryFree(pParam);
destroyCommonInfo(pCommon);
return terrno;
code = terrno;
goto end;
}
char* msg = taosMemoryCalloc(1, msgSize);
if (NULL == msg) {
taosMemoryFree(pParam);
destroyCommonInfo(pCommon);
return terrno;
code = terrno;
goto end;
}
if (tSerializeSMqPollReq(msg, msgSize, &req) < 0) {
taosMemoryFree(msg);
taosMemoryFree(pParam);
destroyCommonInfo(pCommon);
return terrno;
code = terrno;
goto end;
}
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
taosMemoryFree(pParam);
taosMemoryFree(msg);
destroyCommonInfo(pCommon);
return terrno;
code = terrno;
goto end;
}
sendInfo->msgInfo = (SDataBuf){.pData = msg, .len = msgSize, .handle = NULL};
@ -2611,29 +2686,26 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
sendInfo->msgType = TDMT_VND_TMQ_VG_WALINFO;
int64_t transporterId = 0;
char offsetFormatBuf[80];
char offsetFormatBuf[TSDB_OFFSET_LEN];
tFormatOffset(offsetFormatBuf, tListLen(offsetFormatBuf), &pClientVg->offsetInfo.currentOffset);
tscDebug("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
tscInfo("consumer:0x%" PRIx64 " %s retrieve wal info vgId:%d, epoch %d, req:%s, reqId:0x%" PRIx64,
tmq->consumerId, pTopic->topicName, pClientVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pClientVg->epSet, &transporterId, sendInfo);
}
tsem_wait(&pCommon->rsp);
int32_t code = pCommon->code;
code = pCommon->code;
terrno = code;
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(*assignment);
*assignment = NULL;
*numOfAssignment = 0;
} else {
int32_t num = taosArrayGetSize(pCommon->pList);
for(int32_t i = 0; i < num; ++i) {
(*assignment)[i] = *(tmq_topic_assignment*)taosArrayGet(pCommon->pList, i);
}
*numOfAssignment = num;
goto end;
}
int32_t num = taosArrayGetSize(pCommon->pList);
for(int32_t i = 0; i < num; ++i) {
(*assignment)[i] = *(tmq_topic_assignment*)taosArrayGet(pCommon->pList, i);
}
*numOfAssignment = num;
for (int32_t j = 0; j < (*numOfAssignment); ++j) {
tmq_topic_assignment* p = &(*assignment)[j];
@ -2648,10 +2720,10 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
pOffsetInfo->currentOffset.type = TMQ_OFFSET__LOG;
char offsetBuf[80] = {0};
char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffsetInfo->currentOffset);
tscDebug("vgId:%d offset is update to:%s", p->vgId, offsetBuf);
tscInfo("vgId:%d offset is update to:%s", p->vgId, offsetBuf);
pOffsetInfo->walVerBegin = p->begin;
pOffsetInfo->walVerEnd = p->end;
@ -2659,12 +2731,17 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
pOffsetInfo->committedOffset.version = p->currentOffset;
}
}
destroyCommonInfo(pCommon);
return code;
} else {
return TSDB_CODE_SUCCESS;
}
end:
if(code != TSDB_CODE_SUCCESS){
taosMemoryFree(*assignment);
*assignment = NULL;
*numOfAssignment = 0;
}
destroyCommonInfo(pCommon);
taosWUnLockLatch(&tmq->lock);
return code;
}
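
A sketch of the assignment/seek pairing that these two functions now guard with tmq->lock; the field names follow the tmq_topic_assignment usage visible above:

tmq_topic_assignment *pAssign = NULL;
int32_t numOfAssign = 0;
if (tmq_get_topic_assignment(consumer, "topic_meters", &pAssign, &numOfAssign) == 0) {
  for (int32_t i = 0; i < numOfAssign; ++i) {
    // rewind each vgroup to the start of its readable WAL range
    tmq_offset_seek(consumer, "topic_meters", pAssign[i].vgId, pAssign[i].begin);
  }
  tmq_free_assignment(pAssign);
}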
void tmq_free_assignment(tmq_topic_assignment* pAssignment) {
@ -2685,9 +2762,11 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
char tname[128] = {0};
sprintf(tname, "%d.%s", accId, pTopicName);
taosWLockLatch(&tmq->lock);
SMqClientTopic* pTopic = getTopicByName(tmq, tname);
if (pTopic == NULL) {
tscError("consumer:0x%" PRIx64 " invalid topic name:%s", tmq->consumerId, pTopicName);
taosWUnLockLatch(&tmq->lock);
return TSDB_CODE_INVALID_PARA;
}
@ -2703,6 +2782,7 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
if (pVg == NULL) {
tscError("consumer:0x%" PRIx64 " invalid vgroup id:%d", tmq->consumerId, vgId);
taosWUnLockLatch(&tmq->lock);
return TSDB_CODE_INVALID_PARA;
}
@ -2711,12 +2791,14 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
int32_t type = pOffsetInfo->currentOffset.type;
if (type != TMQ_OFFSET__LOG && !OFFSET_IS_RESET_OFFSET(type)) {
tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, seek not allowed", tmq->consumerId, type);
taosWUnLockLatch(&tmq->lock);
return TSDB_CODE_INVALID_PARA;
}
if (type == TMQ_OFFSET__LOG && (offset < pOffsetInfo->walVerBegin || offset > pOffsetInfo->walVerEnd)) {
tscError("consumer:0x%" PRIx64 " invalid seek params, offset:%" PRId64 ", valid range:[%" PRId64 ", %" PRId64 "]",
tmq->consumerId, offset, pOffsetInfo->walVerBegin, pOffsetInfo->walVerEnd);
taosWUnLockLatch(&tmq->lock);
return TSDB_CODE_INVALID_PARA;
}
@ -2730,7 +2812,8 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
SMqRspObj rspObj = {.resType = RES_TYPE__TMQ, .vgId = pVg->vgId};
tstrncpy(rspObj.topic, tname, tListLen(rspObj.topic));
tscDebug("consumer:0x%" PRIx64 " seek to %" PRId64 " on vgId:%d", tmq->consumerId, offset, pVg->vgId);
tscInfo("consumer:0x%" PRIx64 " seek to %" PRId64 " on vgId:%d", tmq->consumerId, offset, pVg->vgId);
taosWUnLockLatch(&tmq->lock);
SSyncCommitInfo* pInfo = taosMemoryMalloc(sizeof(SSyncCommitInfo));
if (pInfo == NULL) {

View File

@ -224,6 +224,8 @@ TEST(testCase, smlParseCols_Error_Test) {
"st,tt=aa c 1=2 1626006833639000000,",
//field value double quote,slash
"st,tt=aa c=\"a\"a\" 1626006833639000000,",
"escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" co l0=\"col0_value\",col1=\"col1_value\" 1680918783010000000",
"escape_test,tag1=\"tag1_value\",tag2=\"tag2_value\" col0=\"co\"l\"0_value\",col1=\"col1_value\" 1680918783010000000"
};
SSmlHandle *info = smlBuildSmlInfo(NULL);

View File

@ -160,9 +160,9 @@ static const SSysDbTableSchema streamSchema[] = {
static const SSysDbTableSchema streamTaskSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "task_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "task_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "node_id", .bytes = 8, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
@ -291,6 +291,8 @@ static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "consumer_id", .bytes = 32, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "offset", .bytes = TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysDbTableSchema vnodesSchema[] = {
@ -359,6 +361,7 @@ static const SSysDbTableSchema consumerSchema[] = {
{.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "parameters", .bytes = 64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};
static const SSysDbTableSchema offsetSchema[] = {
@ -381,6 +384,7 @@ static const SSysDbTableSchema querySchema[] = {
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
{.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
{.name = "sub_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
{.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},


@ -47,6 +47,17 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo
}
}
int32_t colDataGetRowLength(const SColumnInfoData* pColumnInfoData, int32_t rowIdx) {
if (colDataIsNull_s(pColumnInfoData, rowIdx)) return 0;
if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) return pColumnInfoData->info.bytes;
if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON)
return getJsonValueLen(colDataGetData(pColumnInfoData, rowIdx));
else
return varDataTLen(colDataGetData(pColumnInfoData, rowIdx));
}
int32_t colDataGetFullLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return pColumnInfoData->varmeta.length + sizeof(int32_t) * numOfRows;
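The new colDataGetRowLength returns the stored size of a single cell: zero for NULL, the fixed type width for scalar types, and for variable types the length prefix plus payload (JSON measured via getJsonValueLen, other var types via varDataTLen). A minimal sketch of the var-data case, assuming the usual [16-bit length][payload] cell layout:

#include <stdint.h>
#include <string.h>

/* Total stored size of one var-type cell: 2-byte length prefix + payload
   (sketch of what varDataTLen computes; the layout is an assumption here). */
static int32_t demoVarCellTLen(const uint8_t *cell) {
  uint16_t len;
  memcpy(&len, cell, sizeof(len));
  return (int32_t)(sizeof(len) + len);
}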
@ -791,8 +802,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
* @return
*/
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
// | version | total length | total rows | total columns | flag seg| block group id | column schema
// | each column length |
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}
@ -2483,19 +2494,31 @@ _end:
}
char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
if (stbFullName[0] == 0) {
char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (!pBuf) {
return NULL;
}
int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(pBuf);
return NULL;
}
return pBuf;
}
int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) {
if (stbFullName[0] == 0) {
return TSDB_CODE_FAILED;
}
SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
if (tags == NULL) {
return NULL;
return TSDB_CODE_FAILED;
}
void* cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (cname == NULL) {
taosArrayDestroy(tags);
return NULL;
return TSDB_CODE_FAILED;
}
SSmlKv pTag = {.key = "group_id",
@ -2517,9 +2540,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
taosArrayDestroy(tags);
if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {
return NULL;
return TSDB_CODE_FAILED;
}
return rname.ctbShortName;
return TSDB_CODE_SUCCESS;
}
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
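The refactor above is the classic allocate-in-wrapper pattern: buildCtbNameByGroupIdImpl fills a caller-owned buffer and reports a status code, while the original buildCtbNameByGroupId survives as a thin allocating wrapper, so callers that already own a buffer can skip the heap round-trip. Reduced to its shape (hypothetical names and buffer size):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_NAME_LEN 193 /* stand-in for TSDB_TABLE_NAME_LEN + 1 */

/* Core: writes into a caller-provided buffer, returns 0 on success. */
static int demoBuildNameImpl(unsigned long long gid, char *out) {
  return snprintf(out, DEMO_NAME_LEN, "t_%llu", gid) > 0 ? 0 : -1;
}

/* Wrapper kept for existing callers: allocates, delegates, frees on error. */
static char *demoBuildName(unsigned long long gid) {
  char *buf = calloc(1, DEMO_NAME_LEN);
  if (buf == NULL) return NULL;
  if (demoBuildNameImpl(gid, buf) != 0) {
    free(buf);
    return NULL;
  }
  return buf;
}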


@ -62,6 +62,7 @@ int32_t tsNumOfQnodeFetchThreads = 1;
int32_t tsNumOfSnodeStreamThreads = 4;
int32_t tsNumOfSnodeWriteThreads = 1;
int32_t tsMaxStreamBackendCache = 128; // M
int32_t tsPQSortMemThreshold = 16; // M
// sync raft
int32_t tsElectInterval = 25 * 1000;
@ -75,6 +76,7 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000;
int64_t tsMndSdbWriteDelta = 200;
int64_t tsMndLogRetention = 2000;
int8_t tsGrant = 1;
bool tsMndSkipGrant = false;
// monitor
bool tsEnableMonitor = true;
@ -111,6 +113,7 @@ int32_t tsQueryRspPolicy = 0;
int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT
bool tsEnableQueryHb = false;
bool tsEnableScience = false; // on taos-cli, show float and double in scientific notation if true
bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true
int32_t tsQuerySmaOptimize = 0;
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
bool tsQueryPlannerTrace = false;
@ -495,6 +498,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, 0) != 0) return -1;
if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "skipGrant", tsMndSkipGrant, 0) != 0) return -1;
if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1;
@ -509,10 +513,13 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "tmqMaxTopicNum", tmqMaxTopicNum, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "ttlChangeOnWrite", tsTtlChangeOnWrite, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, 0) != 0) return -1;
@ -531,6 +538,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, 0) != 0) return -1;
GRANT_CFG_ADD;
return 0;
@ -779,7 +787,6 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
// tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32;
// tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
tsMaxInsertBatchRows = cfgGetItem(pCfg, "maxInsertBatchRows")->i32;
@ -874,10 +881,13 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval;
tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32;
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;
tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32;
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
@ -895,6 +905,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64;
tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;
tsMndSkipGrant = cfgGetItem(pCfg, "skipGrant")->bval;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
@ -911,6 +922,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval;
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
GRANT_CFG_GET;
return 0;
@ -978,6 +990,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
taosSetCoreDump(enableCore);
} else if (strcasecmp("enableQueryHb", name) == 0) {
tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
} else if (strcasecmp("ttlChangeOnWrite", name) == 0) {
tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval;
}
break;
}
@ -1199,8 +1213,6 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
cfgSetItem(pCfg, "secondEp", tsSecond, pSecondpItem->stype);
} else if (strcasecmp("smlChildTableName", name) == 0) {
tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN);
} else if (strcasecmp("tmqMaxTopicNum", name) == 0) {
tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32;
} else if (strcasecmp("smlTagName", name) == 0) {
tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
// } else if (strcasecmp("smlDataFormat", name) == 0) {
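Several of the knobs wired up above are ordinary taosd options; assuming the usual taos.cfg key-value syntax, the new server-side settings would look like this (values illustrative, ranges as registered above):

# taos.cfg fragment (illustrative)
ttlChangeOnWrite    1       # refresh a table's TTL clock on every write, not just at create
skipGrant           1       # mnode skips grant checks (test setups)
pqSortMemThreshold  16      # MB held in memory before priority-queue sort spills (1-10240)
tmqMaxTopicNum      20      # cap on TMQ topics (1-10000)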


@ -30,6 +30,9 @@
#include "tlog.h"
static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq);
static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq);
int32_t tInitSubmitMsgIter(const SSubmitReq *pMsg, SSubmitMsgIter *pIter) {
if (pMsg == NULL) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
@ -224,6 +227,7 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
if (tEncodeI64(pEncoder, desc->stime) < 0) return -1;
if (tEncodeI64(pEncoder, desc->reqRid) < 0) return -1;
if (tEncodeI8(pEncoder, desc->stableQuery) < 0) return -1;
if (tEncodeI8(pEncoder, desc->isSubQuery) < 0) return -1;
if (tEncodeCStr(pEncoder, desc->fqdn) < 0) return -1;
if (tEncodeI32(pEncoder, desc->subPlanNum) < 0) return -1;
@ -291,6 +295,7 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
if (tDecodeI64(pDecoder, &desc.stime) < 0) return -1;
if (tDecodeI64(pDecoder, &desc.reqRid) < 0) return -1;
if (tDecodeI8(pDecoder, (int8_t *)&desc.stableQuery) < 0) return -1;
if (tDecodeI8(pDecoder, (int8_t *)&desc.isSubQuery) < 0) return -1;
if (tDecodeCStrTo(pDecoder, desc.fqdn) < 0) return -1;
if (tDecodeI32(pDecoder, &desc.subPlanNum) < 0) return -1;
@ -1518,6 +1523,9 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp)
useDb = taosHashIterate(pRsp->useDbs, useDb);
}
// since 3.0.7.0
if (tEncodeI32(pEncoder, pRsp->passVer) < 0) return -1;
return 0;
}
@ -1639,6 +1647,12 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
taosHashPut(pRsp->useDbs, key, strlen(key), &ref, sizeof(ref));
taosMemoryFree(key);
}
// since 3.0.7.0
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI32(pDecoder, &pRsp->passVer) < 0) return -1;
} else {
pRsp->passVer = 0;
}
}
return 0;
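The passVer hunks above follow the codebase's forward-compatibility idiom: new fields are appended at the tail of the encoded payload, marked with a version comment, and the decoder reads them only when tDecodeIsEnd reports leftover bytes, defaulting otherwise so messages from older peers still parse. A self-contained sketch of that idiom (plain C, toy buffer type rather than the real SEncoder/SDecoder):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

typedef struct { const uint8_t *data; size_t pos, size; } DemoDec;

static int demoDecIsEnd(const DemoDec *d) { return d->pos >= d->size; }

static int demoDecI32(DemoDec *d, int32_t *v) {
  if (d->size - d->pos < sizeof(*v)) return -1;
  memcpy(v, d->data + d->pos, sizeof(*v));
  d->pos += sizeof(*v);
  return 0;
}

/* v2 appended passVer at the tail; v1 messages simply stop earlier. */
static int demoDecodeRsp(DemoDec *d, int32_t *acctId, int32_t *passVer) {
  if (demoDecI32(d, acctId) < 0) return -1;    /* present since v1 */
  if (!demoDecIsEnd(d)) {
    if (demoDecI32(d, passVer) < 0) return -1; /* appended in v2 */
  } else {
    *passVer = 0;                              /* default for old peers */
  }
  return 0;
}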
@ -1723,7 +1737,7 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq
} else {
pReq->unsafe = false;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -3024,59 +3038,6 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp *pRsp) {
taosArrayDestroy(pRsp->pArray);
}
int32_t tSerializeSUserPassBatchRsp(void *buf, int32_t bufLen, SUserPassBatchRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
int32_t numOfBatch = taosArrayGetSize(pRsp->pArray);
if (tEncodeI32(&encoder, numOfBatch) < 0) return -1;
for (int32_t i = 0; i < numOfBatch; ++i) {
SGetUserPassRsp *pUserPassRsp = taosArrayGet(pRsp->pArray, i);
if (tEncodeCStr(&encoder, pUserPassRsp->user) < 0) return -1;
if (tEncodeI32(&encoder, pUserPassRsp->version) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSUserPassBatchRsp(void *buf, int32_t bufLen, SUserPassBatchRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
int32_t numOfBatch = taosArrayGetSize(pRsp->pArray);
if (tDecodeI32(&decoder, &numOfBatch) < 0) return -1;
pRsp->pArray = taosArrayInit(numOfBatch, sizeof(SGetUserPassRsp));
if (pRsp->pArray == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
for (int32_t i = 0; i < numOfBatch; ++i) {
SGetUserPassRsp rsp = {0};
if (tDecodeCStrTo(&decoder, rsp.user) < 0) return -1;
if (tDecodeI32(&decoder, &rsp.version) < 0) return -1;
taosArrayPush(pRsp->pArray, &rsp);
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSUserPassBatchRsp(SUserPassBatchRsp *pRsp) {
if(pRsp) {
taosArrayDestroy(pRsp->pArray);
}
}
int32_t tSerializeSDbCfgReq(void *buf, int32_t bufLen, SDbCfgReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -3159,7 +3120,7 @@ int32_t tSerializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableReq
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->timestamp) < 0) return -1;
if (tEncodeI32(&encoder, pReq->timestampSec) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -3172,7 +3133,7 @@ int32_t tDeserializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableR
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->timestamp) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->timestampSec) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -3740,8 +3701,8 @@ int32_t tSerializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
if (tEncodeI32(&encoder, pIndexRsp->indexSize) < 0) return -1;
int32_t num = taosArrayGetSize(pIndexRsp->pIndex);
if (tEncodeI32(&encoder, num) < 0) return -1;
for (int32_t i = 0; i < num; ++i) {
STableIndexInfo *pInfo = (STableIndexInfo *)taosArrayGet(pIndexRsp->pIndex, i);
for (int32_t j = 0; j < num; ++j) {
STableIndexInfo *pInfo = (STableIndexInfo *)taosArrayGet(pIndexRsp->pIndex, j);
if (tSerializeSTableIndexInfo(&encoder, pInfo) < 0) return -1;
}
}
@ -3807,7 +3768,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
tableIndexRsp.pIndex = taosArrayInit(num, sizeof(STableIndexInfo));
if (NULL == tableIndexRsp.pIndex) return -1;
STableIndexInfo info;
for (int32_t i = 0; i < num; ++i) {
for (int32_t j = 0; j < num; ++j) {
if (tDeserializeSTableIndexInfo(&decoder, &info) < 0) return -1;
if (NULL == taosArrayPush(tableIndexRsp.pIndex, &info)) {
taosMemoryFree(info.expr);
@ -4154,6 +4115,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tEncodeCStr(&encoder, pRsp->sVer) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->sDetailVer) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->passVer) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->authVer) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -4183,6 +4145,12 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
} else {
pRsp->passVer = 0;
}
// since 3.0.7.0
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI32(&decoder, &pRsp->authVer) < 0) return -1;
} else {
pRsp->authVer = 0;
}
tEndDecode(&decoder);
@ -4669,7 +4637,7 @@ int32_t tDeserializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnode
if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@ -5338,6 +5306,15 @@ int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
return 0;
}
int32_t tDeatroySMqHbReq(SMqHbReq* pReq){
for(int i = 0; i < taosArrayGetSize(pReq->topics); i++){
TopicOffsetRows* vgs = taosArrayGet(pReq->topics, i);
if(vgs) taosArrayDestroy(vgs->offsetRows);
}
taosArrayDestroy(pReq->topics);
return 0;
}
int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -5346,6 +5323,21 @@ int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->epoch) < 0) return -1;
int32_t sz = taosArrayGetSize(pReq->topics);
if (tEncodeI32(&encoder, sz) < 0) return -1;
for (int32_t i = 0; i < sz; ++i) {
TopicOffsetRows* vgs = (TopicOffsetRows*)taosArrayGet(pReq->topics, i);
if (tEncodeCStr(&encoder, vgs->topicName) < 0) return -1;
int32_t szVgs = taosArrayGetSize(vgs->offsetRows);
if (tEncodeI32(&encoder, szVgs) < 0) return -1;
for (int32_t j = 0; j < szVgs; ++j) {
OffsetRows *offRows = taosArrayGet(vgs->offsetRows, j);
if (tEncodeI32(&encoder, offRows->vgId) < 0) return -1;
if (tEncodeI64(&encoder, offRows->rows) < 0) return -1;
if (tEncodeSTqOffsetVal(&encoder, &offRows->offset) < 0) return -1;
}
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -5362,7 +5354,28 @@ int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
if (tDecodeI64(&decoder, &pReq->consumerId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->epoch) < 0) return -1;
int32_t sz = 0;
if (tDecodeI32(&decoder, &sz) < 0) return -1;
if(sz > 0){
pReq->topics = taosArrayInit(sz, sizeof(TopicOffsetRows));
if (NULL == pReq->topics) return -1;
for (int32_t i = 0; i < sz; ++i) {
TopicOffsetRows* data = taosArrayReserve(pReq->topics, 1);
tDecodeCStrTo(&decoder, data->topicName);
int32_t szVgs = 0;
if (tDecodeI32(&decoder, &szVgs) < 0) return -1;
if(szVgs > 0){
data->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows));
if (NULL == data->offsetRows) return -1;
for (int32_t j= 0; j < szVgs; ++j) {
OffsetRows* offRows = taosArrayReserve(data->offsetRows, 1);
if (tDecodeI32(&decoder, &offRows->vgId) < 0) return -1;
if (tDecodeI64(&decoder, &offRows->rows) < 0) return -1;
if (tDecodeSTqOffsetVal(&decoder, &offRows->offset) < 0) return -1;
}
}
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
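The heartbeat request now nests two length-prefixed arrays: a topic count, then per topic its name and a vgroup count, then per vgroup the id, row total, and offset. Decode mirrors encode field for field, reserving one array slot per element as the counts arrive. The decoded shape, sketched with toy structs (real types differ):

#include <stdint.h>

typedef struct { int32_t vgId; int64_t rows; /* offset omitted */ } DemoOffsetRows;
typedef struct { char topicName[256]; int32_t nVgs; DemoOffsetRows *vgs; } DemoTopic;
typedef struct { int64_t consumerId; int32_t epoch; int32_t nTopics; DemoTopic *topics; } DemoHbReq;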
@ -6122,6 +6135,7 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
}
if (tEncodeI64(&encoder, pReq->deleteMark) < 0) return -1;
if (tEncodeI8(&encoder, pReq->igUpdate) < 0) return -1;
if (tEncodeI64(&encoder, pReq->lastTs) < 0) return -1;
tEndEncode(&encoder);
@ -6207,6 +6221,7 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (tDecodeI64(&decoder, &pReq->deleteMark) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->igUpdate) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->lastTs) < 0) return -1;
tEndDecode(&decoder);
@ -6273,6 +6288,9 @@ int32_t tDeserializeSMRecoverStreamReq(void *buf, int32_t bufLen, SMRecoverStrea
}
void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
if (NULL == pReq) {
return;
}
taosArrayDestroy(pReq->pTags);
taosMemoryFreeClear(pReq->sql);
taosMemoryFreeClear(pReq->ast);
@ -6357,7 +6375,7 @@ int tEncodeSVCreateTbReq(SEncoder *pCoder, const SVCreateTbReq *pReq) {
if (tEncodeI32v(pCoder, pReq->flags) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
if (tEncodeI64(pCoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pCoder, pReq->ctime) < 0) return -1;
if (tEncodeI64(pCoder, pReq->btime) < 0) return -1;
if (tEncodeI32(pCoder, pReq->ttl) < 0) return -1;
if (tEncodeI8(pCoder, pReq->type) < 0) return -1;
if (tEncodeI32(pCoder, pReq->commentLen) < 0) return -1;
@ -6392,7 +6410,7 @@ int tDecodeSVCreateTbReq(SDecoder *pCoder, SVCreateTbReq *pReq) {
if (tDecodeI32v(pCoder, &pReq->flags) < 0) return -1;
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->uid) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->ctime) < 0) return -1;
if (tDecodeI64(pCoder, &pReq->btime) < 0) return -1;
if (tDecodeI32(pCoder, &pReq->ttl) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->type) < 0) return -1;
if (tDecodeI32(pCoder, &pReq->commentLen) < 0) return -1;
@ -6857,14 +6875,13 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
default:
break;
}
if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1;
tEndEncode(pEncoder);
return 0;
}
int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->colId) < 0) return -1;
@ -6908,6 +6925,31 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
default:
break;
}
return 0;
}
int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1;
pReq->ctimeMs = 0;
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
}
int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeSVAlterTbReqCommon(pDecoder, pReq) < 0) return -1;
pReq->ctimeMs = 0;
if (!tDecodeIsEnd(pDecoder)) {
*(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs;
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
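tDecodeSVAlterTbReqSetCtime does something less common: before decoding the trailing ctimeMs it overwrites those eight bytes inside the decoder's buffer with a server-chosen timestamp, so the patched value lands both in the struct and in the raw message should it be re-read or forwarded. The patch-then-decode step in isolation (toy types; assumes the tail field is a host-order int64 at the cursor):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

typedef struct { uint8_t *data; size_t pos, size; } DemoMutDec;

/* Overwrite the still-undecoded trailing int64, then decode it normally,
   keeping the buffer and the struct consistent. */
static int demoDecI64Patched(DemoMutDec *d, int64_t *out, int64_t patch) {
  if (d->size - d->pos < sizeof(patch)) return -1;
  memcpy(d->data + d->pos, &patch, sizeof(patch)); /* patch in place */
  memcpy(out, d->data + d->pos, sizeof(*out));
  d->pos += sizeof(*out);
  return 0;
}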
@ -7086,15 +7128,15 @@ int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
if (pVal->type == TMQ_OFFSET__RESET_NONE) {
snprintf(buf, maxLen, "offset(reset to none)");
} else if (pVal->type == TMQ_OFFSET__RESET_EARLIEAST) {
snprintf(buf, maxLen, "offset(reset to earlieast)");
snprintf(buf, maxLen, "none");
} else if (pVal->type == TMQ_OFFSET__RESET_EARLIEST) {
snprintf(buf, maxLen, "earliest");
} else if (pVal->type == TMQ_OFFSET__RESET_LATEST) {
snprintf(buf, maxLen, "offset(reset to latest)");
snprintf(buf, maxLen, "latest");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
snprintf(buf, maxLen, "wal:%" PRId64, pVal->version);
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(snapshot) uid:%" PRId64 " ts:%" PRId64, pVal->uid, pVal->ts);
snprintf(buf, maxLen, "tsdb:%" PRId64 "|%" PRId64, pVal->uid, pVal->ts);
} else {
return TSDB_CODE_INVALID_PARA;
}
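The reworked tFormatOffset strings are deliberately terse because they now have to fit the offset column added to subscriptionSchema above: depending on the offset type a position renders as none, earliest, latest, wal:<version>, or tsdb:<uid>|<ts>. A runnable illustration of the two parameterized forms (values made up):

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  char buf[64];
  snprintf(buf, sizeof(buf), "wal:%" PRId64, (int64_t)12345);
  puts(buf);                 /* wal:12345 */
  snprintf(buf, sizeof(buf), "tsdb:%" PRId64 "|%" PRId64,
           (int64_t)7788, (int64_t)1688601600000LL);
  puts(buf);                 /* tsdb:7788|1688601600000 */
  return 0;
}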
@ -7112,7 +7154,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
return pLeft->uid == pRight->uid;
} else {
ASSERT(0);
/*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
/*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEST ||*/
/*pLeft->type == TMQ_OFFSET__RESET_LATEST);*/
/*return true;*/
}
@ -7186,6 +7228,7 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1;
if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1;
if (tEncodeI64(pCoder, pRes->ctimeMs) < 0) return -1;
return 0;
}
@ -7205,6 +7248,11 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeCStrTo(pCoder, pRes->tableFName) < 0) return -1;
if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
pRes->ctimeMs = 0;
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI64(pCoder, &pRes->ctimeMs) < 0) return -1;
}
return 0;
}
@ -7428,10 +7476,11 @@ int32_t tEncodeSBatchDeleteReq(SEncoder *pEncoder, const SBatchDeleteReq *pReq)
SSingleDeleteReq *pOneReq = taosArrayGet(pReq->deleteReqs, i);
if (tEncodeSSingleDeleteReq(pEncoder, pOneReq) < 0) return -1;
}
if (tEncodeI64(pEncoder, pReq->ctimeMs) < 0) return -1;
return 0;
}
int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
static int32_t tDecodeSBatchDeleteReqCommon(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
if (tDecodeI64(pDecoder, &pReq->suid) < 0) return -1;
int32_t sz;
if (tDecodeI32(pDecoder, &sz) < 0) return -1;
@ -7445,6 +7494,27 @@ int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
return 0;
}
int32_t tDecodeSBatchDeleteReq(SDecoder *pDecoder, SBatchDeleteReq *pReq) {
if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1;
pReq->ctimeMs = 0;
if (!tDecodeIsEnd(pDecoder)) {
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
return 0;
}
int32_t tDecodeSBatchDeleteReqSetCtime(SDecoder *pDecoder, SBatchDeleteReq *pReq, int64_t ctimeMs) {
if (tDecodeSBatchDeleteReqCommon(pDecoder, pReq)) return -1;
pReq->ctimeMs = 0;
if (!tDecodeIsEnd(pDecoder)) {
*(int64_t *)(pDecoder->data + pDecoder->pos) = ctimeMs;
if (tDecodeI64(pDecoder, &pReq->ctimeMs) < 0) return -1;
}
return 0;
}
static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubmitTbData) {
if (tStartEncode(pCoder) < 0) return -1;
@ -7479,6 +7549,7 @@ static int32_t tEncodeSSubmitTbData(SEncoder *pCoder, const SSubmitTbData *pSubm
pCoder->pos += rows[iRow]->len;
}
}
if (tEncodeI64(pCoder, pSubmitTbData->ctimeMs) < 0) return -1;
tEndEncode(pCoder);
return 0;
@ -7559,6 +7630,14 @@ static int32_t tDecodeSSubmitTbData(SDecoder *pCoder, SSubmitTbData *pSubmitTbDa
}
}
pSubmitTbData->ctimeMs = 0;
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI64(pCoder, &pSubmitTbData->ctimeMs) < 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
}
tEndDecode(pCoder);
_exit:


@ -969,7 +969,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision)
default:
fractionLen = 0;
ASSERT(false);
return;
}
if (taosLocalTime(&quot, &ptm, buf) == NULL) {


@ -17,7 +17,7 @@
#include "ttypes.h"
#include "tcompression.h"
const int32_t TYPE_BYTES[17] = {
const int32_t TYPE_BYTES[21] = {
-1, // TSDB_DATA_TYPE_NULL
CHAR_BYTES, // TSDB_DATA_TYPE_BOOL
CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT
@ -34,6 +34,10 @@ const int32_t TYPE_BYTES[17] = {
INT_BYTES, // TSDB_DATA_TYPE_UINT
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
TSDB_MAX_JSON_TAG_LEN, // TSDB_DATA_TYPE_JSON
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_VARBINARY: placeholder, not implemented
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_DECIMAL: placeholder, not implemented
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_BLOB: placeholder, not implemented
TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_MEDIUMBLOB: placeholder, not implemented
sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_GEOMETRY
};
@ -57,6 +61,10 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
{TSDB_DATA_TYPE_VARBINARY, 9, 1, "VARBINARY", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_DECIMAL, 7, 1, "DECIMAL", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_BLOB, 4, 1, "BLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_MEDIUMBLOB, 10, 1, "MEDIUMBLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_GEOMETRY, 8, 1, "GEOMETRY", 0, 0, tsCompressString, tsDecompressString},
};
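TYPE_BYTES is indexed directly by TSDB_DATA_TYPE_* values, so adding GEOMETRY (plus placeholder rows for the unimplemented VARBINARY, DECIMAL, BLOB, and MEDIUMBLOB) forces the array length from 17 to 21: the table and the enum must grow in lockstep. A compile-time guard expressing that invariant (hypothetical enum, C11 _Static_assert):

enum DemoType { DEMO_NULL, DEMO_BOOL, DEMO_TINYINT, DEMO_TYPE_MAX };

static const int DEMO_TYPE_BYTES[] = {
  -1, /* DEMO_NULL    */
   1, /* DEMO_BOOL    */
   1, /* DEMO_TINYINT */
};

/* Compilation fails if the byte table and the type enum drift apart. */
_Static_assert(sizeof(DEMO_TYPE_BYTES) / sizeof(DEMO_TYPE_BYTES[0]) == DEMO_TYPE_MAX,
               "DEMO_TYPE_BYTES must cover every DemoType value");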


@ -163,7 +163,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_TOPIC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_SUBSCRIBE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_ASK_EP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_HB, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_HB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_CGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TMQ_DROP_CGROUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_TRANS, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
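The one-line change above reroutes TDMT_MND_TMQ_HB from the read queue to the write queue, presumably because the heartbeat now carries the offset/rows progress seen earlier and handling it updates mnode state. The registration itself is a plain message-type-to-queue dispatch table; its shape, with toy types:

#include <stddef.h>

typedef enum { DEMO_Q_READ, DEMO_Q_WRITE } DemoQueue;
typedef struct { int msgType; DemoQueue queue; } DemoHandle;

/* Read-only requests stay on the read queue; state-changing ones go to write. */
static const DemoHandle kDemoHandles[] = {
  { /* TMQ_SUBSCRIBE */ 1, DEMO_Q_WRITE },
  { /* TMQ_ASK_EP    */ 2, DEMO_Q_READ  },
  { /* TMQ_HB        */ 3, DEMO_Q_WRITE },
};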

Some files were not shown because too many files have changed in this diff.