Merge remote-tracking branch 'origin/3.0' into feat/TD-22746

This commit is contained in:
dapan1121 2023-05-10 17:47:11 +08:00
commit daedb3618b
145 changed files with 4009 additions and 914 deletions

View File

@ -120,8 +120,14 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
MESSAGE(STATUS "XXXXXXXXXXXXXX Clang/AppleClang" ${TD_DARWIN})
IF (${TD_DARWIN})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
ENDIF ()
# disable all assert

View File

@ -109,7 +109,7 @@ option(
option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
OFF
ON
)
option(

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.4.0")
SET(TD_VER_NUMBER "3.0.4.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG e02ddb2
GIT_TAG ae8d51c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -223,17 +223,31 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if(${BUILD_WITH_ROCKSDB})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
option(FAIL_ON_WARNINGS "" OFF)
#option(WITH_JEMALLOC "" ON)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
IF (${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
IF (${TD_DARWIN})
target_compile_options(
rocksdb
PRIVATE -Wno-unused-private-field
)
endif(${TD_DARWIN})
endif(${BUILD_WITH_ROCKSDB})
# lucene

View File

@ -1,6 +1,8 @@
message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS})
add_executable(rocksdbTest "")
target_sources(rocksdbTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
target_link_libraries(rocksdbTest rocksdb)
target_link_libraries(rocksdbTest rocksdb)

View File

@ -25,10 +25,12 @@ int main(int argc, char const *argv[]) {
// Read
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
//rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
char buf[256] = {0};
size_t vallen = 0;
char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
printf("val:%s\n", val);
snprintf(buf, vallen+5, "val:%s", val);
printf("%ld %ld %s\n", strlen(val), vallen, buf);
// Update
// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
@ -43,4 +45,4 @@ int main(int argc, char const *argv[]) {
rocksdb_close(db);
return 0;
}
}

View File

@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.
## Competitive Advantages
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.
@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
## Comparison with other databases
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
## More readings
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)

View File

@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain
This document describes how to install TDengine in a Docker container and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).

View File

@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
@ -208,6 +208,8 @@ The following `launchctl` commands can help you manage TDengine service:
- Check TDengine Server status: `sudo launchctl list | grep taosd`
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
- The administrator privilege is required for service management to enhance security.

View File

@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>
:::tip
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).
:::

View File

@ -0,0 +1,3 @@
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```

View File

@ -222,7 +222,7 @@ A database including one supertable and two subtables is created as follows:
```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");

View File

@ -6,10 +6,12 @@ description: This document describes how to create user-defined functions (UDF),
The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
TDengine supports user-defined functions written in C or C++. This document describes the usage of user-defined functions.
User-defined functions can be scalar functions or aggregate functions. Scalar functions, such as `abs`, `sin`, and `concat`, output a value for every row of data. Aggregate functions, such as `avg` and `max`, output one value for multiple rows of data.
TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.
## Implement a UDF in C
When you create a user-defined function, you must implement standard interface functions:
- For scalar functions, implement the `scalarfn` interface function.
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
@ -17,7 +19,7 @@ When you create a user-defined function, you must implement standard interface f
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
## Implementing a Scalar Function
### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
```c
#include "taos.h"
@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
```
Replace `scalarfn` with the name of your function.
## Implementing an Aggregate Function
### Implementing an Aggregate Function in C
The implementation of an aggregate function is described as follows:
```c
@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
```
Replace `aggfn` with the name of your function.
## Interface Functions
### UDF Interface Definition in C
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
@ -108,8 +110,7 @@ Interface functions return a value that indicates whether the operation was succ
For information about the parameters for interface functions, see Data Model.
### Interfaces for Scalar Functions
#### Scalar Interface
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
@ -118,7 +119,7 @@ The parameters in the function are defined as follows:
- inputDataBlock: The data block to input.
- resultColumn: The column to output.
### Interfaces for Aggregate Functions
#### Aggregate Interface
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
@ -126,7 +127,7 @@ The parameters in the function are defined as follows:
`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and aggfn is called on each block to update the result. Finally, aggfn_finish is called to generate final results from the intermediate results. The final result contains only one or zero data points.
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and the `aggfn` function is called on each block to update the result. Finally, aggfn_finish is called to generate the final results from the intermediate results. The final result contains only one or zero data points.
The parameters in the function are defined as follows:
- interBuf: The intermediate result buffer.
@ -135,15 +136,15 @@ The parameters in the function are defined as follows:
- result: The final result.
### Initializing and Terminating User-Defined Functions
#### Initialization and Cleanup Interface
`int32_t udf_init()`
`int32_t udf_destroy()`
Replace `udf`with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
Replace `udf` with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
## Data Structure of User-Defined Functions
### Data Structures for UDF in C
```c
typedef struct SUdfColumnMeta {
int16_t type;
@ -193,7 +194,7 @@ typedef struct SUdfInterBuf {
```
The data structure is described as follows:
- The SUdfDataBlock block includes the number of rows (numOfRows) and number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
@ -201,9 +202,9 @@ The data structure is described as follows:
Additional functions are defined in `taosudf.h` to make it easier to work with these structures.
## Compile UDF
### Compiling C UDF
To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).
To use your user-defined function in TDengine, first compile it to a shared library.
For example, the sample UDF `bit_and.c` can be compiled into a DLL as follows:
@ -213,12 +214,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
The generated DLL file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
## Manage and Use User-Defined Functions
After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
### UDF Sample Code in C
## Sample Code
### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
#### Scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
The bit_and function implements bitwise AND across multiple columns. If there is only one column, the column is returned. The bit_and function ignores null values.
@ -231,7 +229,7 @@ The bit_and function implements bitwise addition for multiple columns. If there
</details>
### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
#### Aggregate function 1: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
@ -243,3 +241,151 @@ The l2norm function finds the second-order norm for all data in the input column
```
</details>
#### Aggregate function 2: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
Given several voltage columns as input, the max_vol function returns a string concatenating the deviceId column, the row and column numbers of the maximum voltage, and the maximum voltage itself.
Create the table:
```sql
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
```
Create the UDF:
```sql
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the UDF in a query:
```sql
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```
<details>
<summary>max_vol.c</summary>
```c
{{#include tests/script/sh/max_vol.c}}
```
</details>
## Implement a UDF in Python
To implement a UDF in Python, implement the following interface functions:
- implement the `process` function for a scalar UDF.
- implement the `start`, `reduce`, and `finish` functions for an aggregate UDF.
- implement `init` for initialization and `destroy` for termination.
### Implement a Scalar UDF in Python
The implementation of a scalar UDF is described as follows:
```Python
def init():
    # initialization
    pass

def destroy():
    # clean up
    pass

def process(input: datablock) -> tuple[output_type]:
    # process the input data block;
    # datablock.data(row, col) accesses the Python object at (row, col);
    # return a tuple of objects of type output_type
    ...
```
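For instance, the following minimal sketch returns the absolute value of a single numeric input column. The function behavior, the one-column layout, and the `shape()` helper are illustrative assumptions, not part of the interface definition above.

```Python
# Hypothetical scalar UDF sketch: absolute value of one numeric input column.
# Assumes the data block exposes shape() -> (rows, cols) alongside data(row, col),
# and that the function is created with a numeric OUTPUTTYPE such as BIGINT.
def init():
    pass

def destroy():
    pass

def process(block):
    rows, _ = block.shape()
    # one output item per input row; SQL NULL arrives (and is returned) as None
    return tuple(None if block.data(i, 0) is None else abs(block.data(i, 0))
                 for i in range(rows))
```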
### Implement an Aggregate UDF in Python
The implementation of an aggregate function is described as follows:
```Python
def init():
    # initialization
    pass

def destroy():
    # clean up
    pass

def start() -> bytes:
    # return serialize(init_state)
    ...

def reduce(inputs: datablock, buf: bytes) -> bytes:
    # deserialize buf into the current state,
    # reduce the inputs and state into new_state,
    # use inputs.data(i, j) to access the Python object at (i, j),
    # then serialize new_state into new_state_bytes
    return new_state_bytes

def finish(buf: bytes) -> output_type:
    # return an object of type output_type
    ...
```
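As a concrete illustration, the sketch below implements a sum-style aggregate over one numeric column, using `pickle` to serialize the intermediate state. The single-column layout and the `shape()` helper are illustrative assumptions.

```Python
import pickle

# Hypothetical aggregate UDF sketch: sum over one numeric input column.
def init():
    pass

def destroy():
    pass

def start() -> bytes:
    # initial state: a running sum of 0, serialized to bytes
    return pickle.dumps(0)

def reduce(block, buf: bytes) -> bytes:
    total = pickle.loads(buf)   # deserialize the current state
    rows, _ = block.shape()     # shape() is an assumed helper returning (rows, cols)
    for i in range(rows):
        v = block.data(i, 0)
        if v is not None:       # SQL NULL arrives as None
            total += v
    return pickle.dumps(total)  # serialize the new state

def finish(buf: bytes) -> float:
    return pickle.loads(buf)
```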
### Python UDF Interface Definition
#### Scalar Interface
```Python
def process(input: datablock) -> tuple[output_type]:
```
- `input` is a data block, a two-dimensional matrix-like object whose method `data(row, col)` returns the Python object at position (`row`, `col`)
- the return value is a Python tuple whose items are Python objects of type `output_type`
#### Aggregate Interface
```Python
def start() -> bytes:
def reduce(input: datablock, buf: bytes) -> bytes:
def finish(buf: bytes) -> output_type:
```
- first, `start()` is called to return the initial result as `bytes`
- then the input data is divided into multiple data blocks; for each block `input`, `reduce` is called with the block and the current intermediate result `buf` to produce a new intermediate result
- finally, the `finish` function is called on the intermediate result `buf` and outputs zero or one value of type `output_type`; see the sketch after this list
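To make the call order concrete, the engine can be pictured as driving these callbacks roughly as follows (a mental-model sketch only, not an API the server exposes):

```Python
# Pseudocode sketch of the callback protocol described above; `blocks` stands
# for the sequence of data blocks the server feeds in, and start/reduce/finish
# are the user-defined callbacks.
def run_aggregate(blocks):
    buf = start()                 # initial serialized state
    for block in blocks:
        buf = reduce(block, buf)  # fold each block into the state
    return finish(buf)            # zero or one value of output_type
```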
#### Initialization and Cleanup Interface
```Python
def init():
def destroy():
```
Implement `init` for initialization and `destroy` for termination.
### Data Mapping between TDengine SQL and Python UDF
The following table describes the mapping between TDengine SQL data types and Python data types. The `NULL` value of all TDengine SQL types is mapped to the `None` value in Python (see the sketch after the table).
| **TDengine SQL Data Type** | **Python Data Type** |
| :-----------------------: | ------------ |
|TINYINT / SMALLINT / INT / BIGINT | int |
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|FLOAT / DOUBLE | float |
|BOOL | bool |
|BINARY / VARCHAR / NCHAR | bytes|
|TIMESTAMP | int |
|JSON and other types | Not Supported |
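As a hedged sketch of how this mapping surfaces inside `process` (assuming a single VARCHAR input column, a VARCHAR output type, and the `shape()` helper):

```Python
# Sketch: a VARCHAR column arrives as Python bytes, SQL NULL arrives as None.
def process(block):
    rows, _ = block.shape()   # shape() is an assumed helper
    out = []
    for i in range(rows):
        v = block.data(i, 0)  # bytes or None for a VARCHAR input column
        out.append(None if v is None else v.upper())  # bytes.upper() keeps bytes
    return tuple(out)
```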
### Installing Python UDF
1. Install the Python package `taospyudf`, which executes Python UDFs:
```bash
sudo pip install taospyudf
ldconfig
```
2. If a PYTHONPATH is needed to locate Python packages when the Python UDF executes, include its contents in the `udfdLdLibPath` variable of the `taos.cfg` configuration file (for example, a hypothetical entry would be `udfdLdLibPath /usr/local/lib/python3.9/site-packages`)
### Python UDF Sample Code
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
The `pybitand` function implements bitwise AND across multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.
<details>
<summary>pybitand.py</summary>
```Python
{{#include tests/script/sh/pybitand.py}}
```
</details>
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
<details>
<summary>pyl2norm.py</summary>
```Python
{{#include tests/script/sh/pyl2norm.py}}
```
</details>
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).

View File

@ -72,8 +72,8 @@ database_option: {
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of file merges triggered by flushed files. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, it is recommended to use the default configuration or a smaller value for this parameter; for multi-table low-frequency scenarios, it is recommended to configure this parameter with a larger value.
- TABLE_PREFIX: The prefix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_SUFFIX: The suffix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_PREFIX: When positive, the prefix of this length in the table name is ignored when distributing a table to a vgroup; when negative, only the prefix of this length is used. The default value is 0. For example, for the table name v30001, "0001" is used if TABLE_PREFIX is set to 2, but "v3" is used if TABLE_PREFIX is set to -2. This helps you control the distribution of tables.
- TABLE_SUFFIX: When positive, the suffix of this length in the table name is ignored when distributing a table to a vgroup; when negative, only the suffix of this length is used. The default value is 0. For example, for the table name v30001, "v300" is used if TABLE_SUFFIX is set to 2, but "01" is used if TABLE_SUFFIX is set to -2. This helps you control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept for consumption. Set it to a proper value before creating topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to be kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that the total size of WAL files to keep for consumption has no upper limit.

View File

@ -6,7 +6,7 @@ description: Use Tag Index to Improve Query Performance
## Introduction
Prior to TDengine 3.0.3.0 (excluded), only one index is created by default on the first tag of each super talbe, and it is not allowed to dynamically create an index on any other tag. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
Prior to TDengine 3.0.3.0 (excluded), only one index is created by default on the first tag of each super table, and it is not allowed to dynamically create an index on any other tag. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
## Syntax
@ -48,4 +48,4 @@ You can also add filter conditions to limit the results.
6. You can't create an index on a normal table or a child table.
7. If the unique values of a tag column are too few, it's better not to create an index on such a tag column; the benefit would be very small.
7. If the unique values of a tag column are too few, it's better not to create an index on such a tag column; the benefit would be very small.

View File

@ -5,9 +5,9 @@ description: This document describes the standard SQL functions available in TDe
toc_max_heading_level: 4
---
## Single Row Functions
## Scalar Functions
Single row functions return a result for each row.
Scalar functions return one result for each row.
### Mathematical Functions

View File

@ -13,8 +13,11 @@ Because stream processing is built in to TDengine, you are no longer reliant on
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
}
```
@ -141,3 +144,27 @@ The data in expired windows is tagged as expired. TDengine stream processing pro
2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned.
In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated).
## Supported functions
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [Aggregate functions](../function/#aggregate-functions) and [Selection functions](../function/#selection-functions) are available in stream processing, except the following:
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
- [top](../function/#top)
- [bottom](../function/#bottom)
- [elapsed](../function/#elapsed)
- [interp](../function/#interp)
- [derivative](../function/#derivative)
- [irate](../function/#irate)
- [twa](../function/#twa)
- [histogram](../function/#histogram)
- [diff](../function/#diff)
- [statecount](../function/#statecount)
- [stateduration](../function/#stateduration)
- [csum](../function/#csum)
- [mavg](../function/#mavg)
- [sample](../function/#sample)
- [tail](../function/#tail)
- [unique](../function/#unique)
- [mode](../function/#mode)

View File

@ -120,6 +120,9 @@ Provides information about user-defined functions.
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 10 | func_version | INT | UDF function version, starting from 0 and increasing by 1 each time it is updated |
## INS_INDEXES

View File

@ -7,17 +7,18 @@ description: This document describes the SQL statements related to user-defined
You can create user-defined functions and import them into TDengine.
## Create UDF
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF is stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
When creating a UDF, its type, i.e. scalar function or aggregate function, must be specified. If the specified type is wrong, the SQL statements using the function would fail with errors. The input data type and output data type must be consistent with the UDF definition.
- Create Scalar Function
```sql
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
```
- function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file).
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
- OR REPLACE: if the UDF exists, its properties are modified
- function_name: The scalar function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of the UDF. Currently, C and Python are supported. If this clause is omitted, C is assumed.
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so); for the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The data type of the results of the UDF.
For example, the following SQL statement can be used to create a UDF from `libbitand.so`.
@ -25,14 +26,20 @@ CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
```sql
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
```
For example, the following SQL statement can be used to modify the existing function `bit_and`. The output type is changed to BIGINT and the programming language is changed to Python.
```sql
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
```
- Create Aggregate Function
```sql
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
```
- function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file).
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
- OR REPLACE: if the UDF exists, its properties are modified
- function_name: The aggregate function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of the UDF. Currently, C and Python are supported. If this clause is omitted, C is assumed.
- library_path: For the C programming language, the absolute path of the DLL file including the name of the shared object file (.so); for the Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The output data type, the value is the literal string of the supported TDengine data type.
- buffer_size: The size of the intermediate buffer in bytes. This parameter is optional.
@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```
For example, the following SQL statement modifies the buffer size of the existing UDF `l2norm` to 64:
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
## Manage UDF
@ -61,9 +73,9 @@ SHOW FUNCTIONS;
## Call UDF
The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example:
The function name specified when creating UDF can be used directly in SQL statements, just like built-in functions. For example:
```sql
SELECT bit_and(c1,c2) FROM table;
```
The above SQL statement invokes function X for column c1 and c2 on table. You can use query keywords like WHERE with user-defined functions.
The above SQL statement invokes function X for columns c1 and c2 on the table. You can use query keywords like WHERE with user-defined functions.

View File

@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated| This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consitency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>

View File

@ -423,6 +423,6 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Description**
- The above seven interfaces are extension interfaces, which are mainly used to pass ttl and reqid parameters, and can be used as needed.
- Withing _raw interfaces represent data through the passed parameters lines and len. In order to solve the problem that the original interface data contains '\0' and is truncated. The totalRows pointer returns the number of parsed data rows.
- Withing _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
- Withing _reqid interfaces can track the entire call chain by passing the reqid parameter.
- The _raw interfaces pass data through the lines and len parameters, to solve the problem that data containing '\0' is truncated by the original interface. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
- The _reqid interfaces can track the entire call chain by passing the reqid parameter.

View File

@ -36,23 +36,121 @@ REST connection supports all platforms that can run Java.
Please refer to [version support list](/reference/connector#version-support)
## Recent update logs
| taos-jdbcdriver version | major changes |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: |
| 3.2.1 | JDBC REST connection supports schemaless/prepareStatement over WebSocket |
| 3.2.0 | This version has been deprecated |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
| 3.0.1 - 3.0.4 | fix the issue that resultSet data is sometimes parsed incorrectly. 3.0.1 is compiled on JDK 11; you are advised to use another version in a JDK 8 environment |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |
**Note**: adding the `batchfetch` parameter to the REST connection URL and setting it to true enables the WebSocket connection.
### Handling exceptions
After an error is reported, the error message and error code can be obtained through SQLException.
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
There are four types of error codes that the JDBC connector can report:
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350),
- Error code of the native connection method (error code between 0x2351 and 0x2360)
- Error code of the consumer method (error code between 0x2371 and 0x2380)
- Error code of other TDengine function modules.
For specific error codes, please refer to the table below:
| Error Code | Description | Suggested Actions |
| ---------- | --------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
| 0x2301 | connection already closed | The connection has been closed, check the connection status, or recreate the connection to execute the relevant instructions. |
| 0x2302 | this operation is NOT supported currently! | The current interface does not support the connection. You can use another connection mode. |
| 0x2303 | invalid variables | The parameter is invalid. Check the interface specification and adjust the parameter type and size. |
| 0x2304 | statement is closed | The statement is closed. Check whether the statement is closed and used again, or whether the connection is normal. |
| 0x2305 | resultSet is closed | The result set has been released. Check whether the result set is released and then used again. |
| 0x2306 | Batch is empty! | Add parameters to the prepared statement and then execute batch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | The update operation should use executeUpdate(), not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | The query operation should use executeQuery(), not executeUpdate(). |
| 0x2309 | invalid sql for executeQuery: (?) | - |
| 0x230a | Database not specified or available | - |
| 0x230b | invalid sql for executeUpdate: (?) | - |
| 0x230c | invalid sql for execute: (?) | - |
| 0x230d | parameter index out of range | The parameter is out of bounds. Check the proper range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Please check whether the connection is closed and used again, or whether the connection is normal. |
| 0x230f | unknown sql type in tdengine | Check the data type supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | The native driver cannot be registered. Please check whether the url is correct. |
| 0x2311 | can't register JDBC-RESTful driver | - |
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
| 0x2313 | invalid sql | - |
| 0x2314 | numeric value out of range | Check that the correct interface is used for the numeric types in the obtained result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type is specified when converting the TDengine data type to the JDBC data type. |
| 0x2316 | unknown timestamp precision | - |
| 0x2317 | | wrong request type was used in the REST connection. |
| 0x2318 | | data transmission exception occurred during the REST connection. Please check the network status and try again. |
| 0x2319 | user is required | The user name information is missing when creating the connection |
| 0x231a | password is required | Password information is missing when creating a connection |
| 0x231b | invalid json format | - |
| 0x231c | httpEntity is null, sql: | Execution exception occurred during the REST connection |
| 0x2350 | unknown error | Unknown exception; please report it to the developers on GitHub. |
| 0x2351 | failed to create subscription | - |
| 0x2352 | Unsupported encoding | An unsupported character encoding set is specified under the native Connection. |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurs when the prepare statement is executed on the native connection. Check the taos log to locate the fault. |
| 0x2354 | JNI connection is NULL | When the command is executed, the native Connection is closed. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The result set is abnormal. Please check the connection status and try again. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained by the native connection does not match. |
| 0x2357 | empty sql string | Fill in the correct SQL for execution. |
| 0x2358 | fetch to the end of resultSet | - |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation for the native connection failed. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameter is empty when you create a subscription. Please fill in the correct parameter. |
| 0x2372 | configs contain empty key, failed to set consumer property | The parameter key contains a null value. Please enter the correct parameter. |
| 0x2373 | failed to set consumer property, | The parameter value contains a null value. Please enter the correct parameter. |
| 0x2374 | consumer config error | - |
| 0x2375 | topic reference has been destroyed | The topic reference is released during the creation of the data subscription. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | During data subscription creation, the subscription topic name is empty. Check that the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Please check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create a data subscription. Check the taos log according to the error message to locate the fault. |
| - | can't create connection with server within: | Increase the connection time by adding the httpConnectTimeout parameter, or check the connection to the taos adapter. |
| - | failed to complete the task within the specified time : | Increase the execution time by adding the messageWaitTimeout parameter, or check the connection to the taos adapter. |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
## TDengine DataType vs. Java DataType
TDengine currently supports timestamp, number, character, and Boolean types; the corresponding type conversions with Java are as follows:
| TDengine DataType | JDBCType |
| ----------------- | ---------------------------------- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
| TDengine DataType | JDBCType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
**Note**: Only TAG columns support the JSON type
@ -82,7 +180,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.1</version>
</dependency>
```
@ -97,7 +195,7 @@ cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.0.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
After you have compiled taos-jdbcdriver, the `taos-jdbcdriver-3.2.*-dist.jar` file is created in the target directory. The compiled JAR file is automatically stored in your local Maven repository.
</TabItem>
</Tabs>
@ -227,7 +325,7 @@ In addition to getting the connection from the specified URL, you can use Proper
Note:
- The client parameter set in the application is process-level. If you want to update the parameters of the client, you need to restart the application. This is because the client parameter is a global parameter that takes effect only the first time it is set by the application.
- The following sample code is based on taos-jdbcdriver-3.0.0.
- The following sample code is based on taos-jdbcdriver-3.1.0.
```java
public Connection getConn() throws Exception{
@ -333,30 +431,6 @@ while(resultSet.next()){
> The query is consistent with operating a relational database. When using subscripts to get the contents of the returned fields, you have to start from 1. However, we recommend using the field names to get the values of the fields in the result set.
### Handling exceptions
After an error is reported, the error message and error code can be obtained through SQLException.
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
There are three types of error codes that the JDBC connector can report:
- Error code of the JDBC driver itself (error code between 0x2301 and 0x2350),
- Error code of the native connection method (error code between 0x2351 and 0x2400), and
- Error code of other TDengine function modules.
For specific error codes, please refer to:
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
### Writing data via parameter binding
TDengine has significantly improved the bind APIs to support data writing (INSERT) scenarios. Writing data in this way avoids the resource consumption of SQL syntax parsing, resulting in significant write performance improvements in many cases.
@ -364,9 +438,12 @@ TDengine has significantly improved the bind APIs to support data writing (INSER
**Note:**
- JDBC REST connections do not currently support the bind interface
- The following sample code is based on taos-jdbcdriver-3.0.0
- The following sample code is based on taos-jdbcdriver-3.2.1
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
- both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
- Do not use `db.?` in prepareStatement when specifying the database with the table name; directly use `?`, then specify the database in setTableName, for example: `prepareStatement.setTableName("db.t1")`.
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```java
public class ParameterBindingDemo {
@ -594,21 +671,7 @@ public class ParameterBindingDemo {
}
```
The methods to set TAGS values:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
**Note**: both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
The methods to set VALUES columns:
@ -625,17 +688,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
</TabItem>
<TabItem value="ws" label="WebSocket connection">
```java
public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 30;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
};
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_ws_parabind");
stmt.execute("create database if not exists test_ws_parabind");
stmt.execute("use test_ws_parabind");
for (int i = 0; i < schemaList.length; i++) {
stmt.execute(schemaList[i]);
}
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t1_" + i);
// set tags
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
pstmt.setTagLong(4, random.nextLong());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
pstmt.setLong(5, random.nextLong());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindFloat(Connection conn) throws SQLException {
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t2_" + i);
// set tags
pstmt.setTagFloat(1, random.nextFloat());
pstmt.setTagDouble(2, random.nextDouble());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setFloat(2, random.nextFloat());
pstmt.setDouble(3, random.nextDouble());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBoolean(Connection conn) throws SQLException {
String sql = "insert into ? using stable3 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t3_" + i);
// set tags
pstmt.setTagBoolean(1, random.nextBoolean());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setBoolean(2, random.nextBoolean());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBytes(Connection conn) throws SQLException {
String sql = "insert into ? using stable4 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t4_" + i);
// set tags
pstmt.setTagString(1, new String("abc"));
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setString(2, "abc");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindString(Connection conn) throws SQLException {
String sql = "insert into ? using stable5 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
pstmt.setTagNString(1, "California.SanFrancisco");
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setNString(2, "California.SanFrancisco");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
}
```
</TabItem>
</Tabs>
The methods to set TAGS values:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
### Schemaless Writing
TDengine supports schemaless writing. It is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON format protocol. For more information, see [Schemaless Writing](../../schemaless).
Note:
- JDBC REST connections do not currently support schemaless writes
- The following sample code is based on taos-jdbcdriver-3.0.0
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```java
public class SchemalessInsertTest {
public class SchemalessJniTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
@ -663,6 +912,41 @@ public class SchemalessInsertTest {
}
```
</TabItem>
<TabItem value="ws" label="WebSocket connection">
```java
public class SchemalessWsTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
Connection connection = DriverManager.getConnection(url);
init(connection);
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
System.exit(0);
}
private static void init(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
stmt.executeUpdate("drop database if exists test_ws_schemaless");
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
stmt.executeUpdate("use test_ws_schemaless");
}
}
}
```
</TabItem>
</Tabs>
### Data Subscription
The TDengine Java Connector supports subscription functionality with the following application API.
@ -706,8 +990,9 @@ TaosConsumer consumer = new TaosConsumer<>(config);
```java
while(true) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
```
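For orientation, here is a hedged, minimal end-to-end sketch of the loop above. The property keys follow the consumer parameters described in this chapter, while the topic name `topic_meters`, the `ResultBean` bean, and the `com.example.ResultDeserializer` deserializer class are illustrative assumptions rather than fixed connector names:
```java
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TaosConsumer;

import java.sql.SQLException;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class MinimalSubscribeSketch {
    public static void main(String[] args) throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "ws");            // "jni" for a native connection
        config.setProperty("bootstrap.servers", "localhost:6041");
        config.setProperty("group.id", "group1");
        // assumption: a user-written deserializer that maps rows to ResultBean
        config.setProperty("value.deserializer", "com.example.ResultDeserializer");

        try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config)) {
            consumer.subscribe(Collections.singletonList("topic_meters")); // assumed topic
            for (int i = 0; i < 300; i++) { // bounded loop for the sketch; production code loops on a shutdown flag
                ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<ResultBean> record : records) {
                    System.out.println(record.value()); // process each bean
                }
            }
            consumer.unsubscribe();
        }
    }
}
```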
@ -760,8 +1045,9 @@ public abstract class ConsumerLoop {
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
@ -836,8 +1122,9 @@ public abstract class ConsumerLoop {
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
@ -963,20 +1250,6 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
## Recent update logs
| taos-jdbcdriver version | major changes |
| :---------------------: | :--------------------------------------------: |
| 3.1.0 | JDBC REST connection supports subscription over WebSocket |
| 3.0.1 - 3.0.4 | fixed occasional errors in parsing result set data. 3.0.1 is compiled with JDK 11; use another version in a JDK 8 environment |
| 3.0.0 | Support for TDengine 3.0 |
| 2.0.42 | fix wasNull interface return value in WebSocket connection |
| 2.0.41 | fix decode method of username and password in REST connection |
| 2.0.39 - 2.0.40 | Add REST connection/request timeout parameters |
| 2.0.38 | JDBC REST connections add bulk pull function |
| 2.0.37 | Support json tags |
| 2.0.36 | Support schemaless writing |
## Frequently Asked Questions
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` to perform `batch data writing/update`?
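One common workaround, sketched here as a hedged illustration (the table `test.t1 (ts TIMESTAMP, temperature FLOAT)` and the `conn` variable are assumptions): since batched statements are still sent and executed individually, writing many rows in a single multi-row INSERT is what actually reduces round trips.
```java
// Hedged sketch: build one multi-row INSERT instead of relying on
// addBatch()/executeBatch() to merge statements. Table/columns are illustrative.
StringBuilder sql = new StringBuilder("insert into test.t1 values");
long ts = System.currentTimeMillis();
for (int i = 0; i < 1000; i++) {
    sql.append(" (").append(ts + i).append(", ").append(20.0 + i % 10).append(")");
}
try (Statement stmt = conn.createStatement()) {
    stmt.executeUpdate(sql.toString());
}
```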
@ -999,9 +1272,9 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
4. java.lang.NoSuchMethodError: setByteArray
**Cause**: taos-jdbcdriver 3.* only supports TDengine 3.0 and later.
**Cause**: taos-jdbcdriver 3.\* only supports TDengine 3.0 and later.
**Solution**: Use taos-jdbcdriver 2.* with your TDengine 2.* deployment.
**Solution**: Use taos-jdbcdriver 2.\* with your TDengine 2.\* deployment.
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
View File
@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se
<RustBind />
#### Schemaless Write
<RustSml />
### Query data
<RustQuery />
View File
@ -62,7 +62,7 @@ The different database framework specifications for various programming language
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
View File
@ -76,6 +76,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-A, --all-databases Dump all databases.
-D, --databases=DATABASES Dump listed databases. Use comma to separate
database names.
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump table schemas.
-y, --answer-yes Input yes for prompt. It will skip data file
View File
@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster, plus a [Grafana] server. This dashboard requires TDengine 3.0.1.0 or above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
View File
@ -358,6 +358,17 @@ The charset that takes effect is UTF-8.
| Value Range | 0-4096 |
| Default Value | 2x the CPU cores |
## Performance Tuning
### numOfCommitThreads
| Attribute | Description |
| ------------- | ----------------------------------- |
| Applicable | Server Only |
| Meaning | Maximum number of threads to commit |
| Value Range | 0-1024 |
| Default Value | |
## Log Parameters
### logDir
View File
@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### TDengine
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
View File
@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### Install TDengine
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
View File
@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
4. Install TDengine 3.0.
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/).
### 2. How can I resolve the "Unable to establish connection" error?
View File
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
View File
@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.5.0
<Release type="tools" version="2.5.0" />
## 2.4.12
<Release type="tools" version="2.4.12" />
View File
@ -8,7 +8,7 @@
#include <sys/time.h>
#include <taos.h>
typedef int16_t VarDataLenT;
typedef uint16_t VarDataLenT;
#define TSDB_NCHAR_SIZE sizeof(int32_t)
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
View File
@ -6,7 +6,7 @@
#include <string.h>
#include <taos.h>
typedef int16_t VarDataLenT;
typedef uint16_t VarDataLenT;
#define TSDB_NCHAR_SIZE sizeof(int32_t)
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)

View File
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.1.0</version>
<version>3.2.1</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>
View File
@ -1,5 +1,6 @@
package com.taos.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer;
@ -64,7 +65,8 @@ public class SubscribeDemo {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (Meters meter : meters) {
for (ConsumerRecord<Meters> record : meters) {
Meters meter = record.value();
System.out.println(meter);
}
}
View File
@ -0,0 +1,74 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_json() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_CLOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
// SchemalessProtocol::Json
let data = [
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(300u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.ttl(1000)
.req_id(301u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.req_id(302u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
View File
@ -0,0 +1,78 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_line() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_CLOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"measurement,host=host1 field1=2i,field2=2.0 1577837300000",
"measurement,host=host1 field1=2i,field2=2.0 1577837400000",
"measurement,host=host1 field1=2i,field2=2.0 1577837500000",
"measurement,host=host1 field1=2i,field2=2.0 1577837600000",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(100u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.req_id(101u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl and req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(103u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
View File
@ -0,0 +1,80 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_telnet() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_CLOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco group=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles group=3",
"meters.current 1648432611250 11.3 location=California.LosAngeles group=3",
"meters.voltage 1648432611249 219 location=California.SanFrancisco group=2",
"meters.voltage 1648432611250 218 location=California.SanFrancisco group=2",
"meters.voltage 1648432611249 221 location=California.LosAngeles group=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles group=3",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(200u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.ttl(1000)
.req_id(201u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.req_id(202u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
View File
@ -45,7 +45,7 @@ async fn main() -> anyhow::Result<()> {
taos.exec_many([
format!("DROP TOPIC IF EXISTS tmq_meters"),
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("CREATE DATABASE `{db}` WAL_RETENTION_PERIOD 3600"),
format!("USE `{db}`"),
// create super table
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
View File
@ -100,7 +100,8 @@ sudo apt-get install tdengine
:::tip
The apt-get method is only applicable to Debian or Ubuntu systems.
::::
:::
</TabItem>
<TabItem label="Windows installation" value="windows">
@ -206,6 +207,8 @@ Active: inactive (dead)
- Check the service status: `sudo launchctl list | grep taosd`
- View detailed service information: `launchctl print system/com.tdengine.taosd`
:::info
- Managing `com.tdengine.taosd` with the `launchctl` command requires administrator privileges; always prefix it with `sudo` for safety.
View File
@ -38,7 +38,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- Each data item in the field_set must describe its own data type; for example, 1.2f32 represents the FLOAT value 1.2, and a value without a type suffix is treated as DOUBLE;
- timestamp supports multiple time precisions. The precision must be specified by a parameter when writing data; six precisions, from hours down to nanoseconds, are supported.
- To improve write efficiency, it is assumed by default that the field_set order is the same for all rows of the same supertable (the first row contains all fields and subsequent rows follow that order). If the order differs, the smlDataFormat parameter must be set to false; otherwise the data are written assuming identical order and the stored data will be inconsistent. smlDataFormat defaults to false in versions after 3.0.1.3, and the option is deprecated from 3.0.3.0 on. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
- By default the generated subtable name is a unique ID derived from a rule. Alternatively, the smlChildTableName parameter in taos.cfg can designate a tag value as the subtable name; that tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a subtable named cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
- By default the generated subtable name is a unique ID derived from a rule. Alternatively, the smlChildTableName parameter in the client-side taos.cfg can designate a tag value as the subtable name; that tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a subtable named cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
:::
For more information, see the [official InfluxDB Line Protocol documentation](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and the [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
View File
@ -32,7 +32,7 @@ The OpenTSDB line protocol likewise uses one line of text to represent one row of data. OpenTSDB
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- By default the generated subtable name is a unique ID derived from a rule. Alternatively, the smlChildTableName parameter in taos.cfg can designate a tag value as the subtable name; that tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates a table named cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored
- By default the generated subtable name is a unique ID derived from a rule. Alternatively, the smlChildTableName parameter in the client-side taos.cfg can designate a tag value as the subtable name; that tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates a table named cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored
See the [OpenTSDB Telnet API documentation](http://opentsdb.net/docs/build/html/api_telnet/put.html).
## Sample Code
View File
@ -47,7 +47,7 @@ The OpenTSDB JSON protocol uses a JSON string to represent one or more rows of data
:::note
- For the JSON protocol, TDengine does not automatically convert all tags to NCHAR: strings are converted to NCHAR, while numeric values are converted to DOUBLE.
- By default the generated subtable name is a unique ID derived from a rule. Alternatively, the smlChildTableName parameter in taos.cfg can designate a tag value as the subtable name; that tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates a subtable named cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored
- By default the generated subtable name is a unique ID derived from a rule. Alternatively, the smlChildTableName parameter in the client-side taos.cfg can designate a tag value as the subtable name; that tag value should be globally unique. For example, given a tag named tname and the configuration smlChildTableName=tname, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates a subtable named cpu1. Note that if multiple rows share the same tname but different tag_set values, the tag_set of the first row that auto-created the table is used and the other rows' tag_sets are ignored
:::
## Sample Code
View File
@ -0,0 +1,3 @@
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```
View File
@ -221,7 +221,7 @@ void Close()
```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
View File
@ -6,18 +6,20 @@ description: "Supports user-coded aggregate and scalar functions that can be embedded in queries"
In some application scenarios, the queries an application needs cannot be expressed directly with built-in functions. With the UDF (User Defined Function) capability, TDengine can load user-written processing code and use it in queries, conveniently meeting the needs of special application scenarios. A UDF usually takes one column of a table as input, and it also accepts the result of a nested subquery as input.
TDengine supports defining UDFs in C/C++. The following walks through UDF usage with examples.
Two kinds of functions can be implemented as UDFs: scalar functions and aggregate functions. A scalar function outputs one value per row, e.g. absolute value abs, sine sin, or string concatenation concat. An aggregate function outputs one value over multiple rows, e.g. average avg or maximum max.
When implementing a UDF, the prescribed interface functions must be implemented
TDengine supports defining UDFs in C and Python. The following walks through UDF usage with examples.
## Implementing a UDF in C
When implementing a UDF in C, the prescribed interface functions must be implemented
- A scalar function must implement the scalar interface function scalarfn.
- An aggregate function must implement the aggregate interface functions aggfn_start, aggfn, and aggfn_finish.
- Implement udf_init if initialization is needed, and udf_destroy if cleanup is needed.
The interface function name is the UDF name, or the UDF name joined with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf in the list must be replaced with the UDF function name.
## Implementing a Scalar Function
### Implementing a Scalar Function in C
The scalar function implementation template is as follows
```c
#include "taos.h"
@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
```
scalarfn is a placeholder for the function name; replace it with your function name, e.g. bit_and.
## Implementing an Aggregate Function
### Implementing an Aggregate Function in C
The aggregate function implementation template is as follows
```c
@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
```
aggfn is a placeholder for the function name; change it to your own function name, e.g. l2norm.
## Interface Function Definitions
### C UDF Interface Function Definitions
The interface function name is the udf name, or the udf name joined with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf in the descriptions below must be replaced with the UDF function name.
@ -108,7 +110,7 @@ aggfn为函数名的占位符需要修改为自己的函数名如l2norm。
The parameter types of the interface functions are given in the data structure definitions.
### Scalar Interface Function
#### Scalar Function Interface
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
@ -118,7 +120,7 @@ aggfn为函数名的占位符需要修改为自己的函数名如l2norm。
- inputDataBlock: the input data block
- resultColumn: the output column
### Aggregate Interface Functions
#### Aggregate Function Interface
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
@ -135,7 +137,7 @@ aggfn为函数名的占位符需要修改为自己的函数名如l2norm。
- result: the final result.
### UDF Initialization and Teardown
#### Initialization and Teardown Interface
`int32_t udf_init()`
`int32_t udf_destroy()`
@ -143,7 +145,7 @@ aggfn为函数名的占位符需要修改为自己的函数名如l2norm。
Here udf is a placeholder for the function name. udf_init performs initialization and udf_destroy performs cleanup. If there is no initialization work, udf_init need not be defined; if there is no cleanup work, udf_destroy need not be defined.
## UDF Data Structures
### C UDF Data Structures
```c
typedef struct SUdfColumnMeta {
int16_t type;
@ -201,7 +203,7 @@ typedef struct SUdfInterBuf {
To make these data structures easier to work with, some convenience functions are provided, defined in taosudf.h.
## Compiling a UDF
### Compiling a C UDF
The C source code of a user-defined function cannot be used by TDengine directly; it must first be compiled into a dynamically linked library that can then be loaded into TDengine.
@ -213,12 +215,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
This produces the dynamic library file libbitand.so, ready to be used when creating the UDF later. For reliable operation, GCC 7.5 or later is recommended.
## Managing and Using UDFs
A compiled UDF must still be added to the system before it can be invoked from SQL. For how to manage and use UDFs, see the [UDF usage notes](../12-taos-sql/26-udf.md)
### C UDF Sample Code
## Sample Code
### Scalar function example: [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
#### Scalar function example: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
bit_and implements bitwise AND across multiple columns. If there is only one column, that column is returned. bit_and ignores null values.
@ -231,7 +230,7 @@ bit_and implements bitwise AND across multiple columns. If there is only one column, that column is returned.
</details>
### Aggregate function example 1, numeric return value: [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
#### Aggregate function example 1, numeric return value: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
l2norm computes the L2 norm over all data of the input columns: each value is squared, the squares are summed, and the square root of the sum is taken.
@ -244,7 +243,7 @@ l2norm computes the L2 norm over all data of the input columns: each value is first squared
</details>
### Aggregate function example 2, string return value: [max_vol](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/max_vol.c)
#### Aggregate function example 2, string return value: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
max_vol finds the maximum voltage among several input voltage columns and returns a combined string consisting of the device ID + the (row, column) position of the maximum voltage + the maximum voltage value
@ -268,4 +267,125 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
{{#include tests/script/sh/max_vol.c}}
```
</details>
</details>
## Implementing a UDF in Python
When implementing a UDF in Python, the prescribed interface functions must be implemented
- A scalar function must implement the scalar interface function process.
- An aggregate function must implement the aggregate interface functions start, reduce, and finish.
- Implement init if initialization is needed, and destroy if cleanup is needed.
### Implementing a Scalar Function in Python
The scalar function implementation template is as follows
```Python
def init():
# initialization
def destroy():
# destroy
def process(input: datablock) -> tuple[output_type]:
# process input datablock,
# datablock.data(row, col) is to access the python object in location(row,col)
# return tuple object consisted of object of type outputtype
```
### Implementing an Aggregate Function in Python
The aggregate function implementation template is as follows
```Python
def init():
#initialization
def destroy():
#destroy
def start() -> bytes:
#return serialize(init_state)
def reduce(inputs: datablock, buf: bytes) -> bytes
# deserialize buf to state
# reduce the inputs and state into new_state.
# use inputs.data(i,j) to access python ojbect of location(i,j)
# serialize new_state into new_state_bytes
return new_state_bytes
def finish(buf: bytes) -> output_type:
#return obj of type outputtype
```
### Python UDF Interface Function Definitions
#### Scalar Function Interface
```Python
def process(input: datablock) -> tuple[output_type]:
```
- input: a datablock, similar to a two-dimensional matrix; its member method data(row, col) returns the Python object located at row row, column col
- The return value is a tuple of Python objects, each of the output type.
#### Aggregate Function Interface
```Python
def start() -> bytes:
def reduce(inputs: datablock, buf: bytes) -> bytes
def finish(buf: bytes) -> output_type:
```
First start is called to produce the initial result buffer; the input data are then split into multiple row blocks, and reduce is called on each block inputs together with the current intermediate result buf to obtain a new intermediate result; finally finish is called on the intermediate result buf to produce the final output, which may contain only 0 or 1 row.
#### Initialization and Teardown Interface
```Python
def init()
def destroy()
```
init performs initialization and destroy performs cleanup. If there is no initialization work, init need not be defined; if there is no cleanup work, destroy need not be defined.
### Data Type Mapping between Python and TDengine
The table below describes the mapping between TDengine SQL data types and Python data types. A NULL value of any type maps to Python's None.
| **TDengine SQL data type** | **Python data type** |
| :-----------------------: | ------------ |
|TINYINT / SMALLINT / INT / BIGINT | int |
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|FLOAT / DOUBLE | float |
|BOOL | bool |
|BINARY / VARCHAR / NCHAR | bytes|
|TIMESTAMP | int |
|JSON and other types | not supported |
### Setting up the Python UDF Environment
1. Install the taospyudf package, which executes Python UDF programs.
```bash
sudo pip install taospyudf
ldconfig
```
2. If the Python UDF program references other packages via PYTHONPATH at run time, set the UdfdLdLibPath variable in taos.cfg to the content of PYTHONPATH
### Python UDF Sample Code
#### Scalar function example: [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
pybitand implements bitwise AND across multiple columns. If there is only one column, that column is returned. pybitand ignores null values.
<details>
<summary>pybitand.py</summary>
```Python
{{#include tests/script/sh/pybitand.py}}
```
</details>
#### Aggregate function example: [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
pyl2norm computes the L2 norm over all data of the input columns: each value is squared, the squares are summed, and the square root of the sum is taken.
<details>
<summary>pyl2norm.py</summary>
```Python
{{#include tests/script/sh/pyl2norm.py}}
```
</details>
## Managing and Using UDFs
Before a UDF can be used, it must be added to the TDengine system. For how to manage and use UDFs, see [Managing and Using UDFs](../12-taos-sql/26-udf.md)
View File
@ -36,23 +36,121 @@ The REST connection supports all platforms that can run Java.
Please refer to the [version support list](../#版本支持)
## Recent Update History
| taos-jdbcdriver version | major changes |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: |
| 3.2.1 | New: WebSocket connections support schemaless and prepareStatement writes. Changed: consumer poll now returns result sets as ConsumerRecord, from which the data can be obtained via value(). |
| 3.2.0 | has connection issues; not recommended |
| 3.1.0 | WebSocket connections support subscription |
| 3.0.1 - 3.0.4 | fixed occasional errors in parsing result set data. 3.0.1 is compiled with JDK 11; use another version in a JDK 8 environment |
| 3.0.0 | supports TDengine 3.0 |
| 2.0.42 | fixed the wasNull return value in WebSocket connections |
| 2.0.41 | fixed the encoding of user name and password in REST connections |
| 2.0.39 - 2.0.40 | added REST connection/request timeout settings |
| 2.0.38 | added bulk pulling to JDBC REST connections |
| 2.0.37 | added support for json tags |
| 2.0.36 | added support for schemaless writing |
**Note**: Adding the `batchfetch` parameter to a REST connection and setting it to true enables the WebSocket connection.
## Error Handling
After an error occurs, the error message and error code can be obtained from the SQLException:
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
The error codes that the JDBC connector may report fall into 4 categories:
- errors from the JDBC driver itself (error codes between 0x2301 and 0x2350)
- errors from native connection methods (error codes between 0x2351 and 0x2360)
- errors from data subscription (error codes between 0x2371 and 0x2380)
- errors from other TDengine modules.
For the specific error codes, see:
| Error Code | Description | Suggested Actions |
| ---------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
| 0x2301 | connection already closed | The connection has been closed. Check the connection status, or re-create the connection before executing the command. |
| 0x2302 | this operation is NOT supported currently! | The current interface is not supported; switch to another connection type. |
| 0x2303 | invalid variables | Invalid parameters. Check the corresponding interface specification and adjust the parameter types and sizes. |
| 0x2304 | statement is closed | The statement has been closed. Check whether it was closed before being used again, and whether the connection is healthy. |
| 0x2305 | resultSet is closed | The resultSet has been released. Check whether it was released before being used again. |
| 0x2306 | Batch is empty! | Add parameters to the prepareStatement before calling executeBatch. |
| 0x2307 | Can not issue data manipulation statements with executeQuery() | Use executeUpdate() for update operations, not executeQuery(). |
| 0x2308 | Can not issue SELECT via executeUpdate() | Use executeQuery() for queries, not executeUpdate(). |
| 0x2309 | invalid sql for executeQuery: (?) | - |
| 0x230a | Database not specified or available | - |
| 0x230b | invalid sql for executeUpdate: (?) | - |
| 0x230c | invalid sql for execute: (?) | - |
| 0x230d | parameter index out of range | The parameter index is out of bounds. Check the valid range of the parameter. |
| 0x230e | connection already closed | The connection has been closed. Check whether the Connection was closed before being used again, and whether the connection is healthy. |
| 0x230f | unknown sql type in tdengine | Check the data types supported by TDengine. |
| 0x2310 | can't register JDBC-JNI driver | The JNI driver cannot be registered. Check whether the url is correct. |
| 0x2311 | can't register JDBC-RESTful driver | - |
| 0x2312 | url is not set | Check whether the REST connection url is correct. |
| 0x2313 | invalid sql | - |
| 0x2314 | numeric value out of range | Check whether the correct interface was used for the numeric types in the result set. |
| 0x2315 | unknown taos type in tdengine | Check whether the correct TDengine data type was specified when converting between TDengine and JDBC data types. |
| 0x2316 | unknown timestamp precision | - |
| 0x2317 | | The wrong request type was used in the REST connection. |
| 0x2318 | | A data transfer error occurred in the REST connection. Check the network and retry. |
| 0x2319 | user is required | The user name is missing when creating the connection |
| 0x231a | password is required | The password is missing when creating the connection |
| 0x231b | invalid json format | - |
| 0x231c | httpEntity is null, sql: | An error occurred during execution over the REST connection |
| 0x2350 | unknown error | Unknown error; please report it to the developers on github. |
| 0x2351 | failed to create subscription | - |
| 0x2352 | Unsupported encoding | An unsupported character encoding was specified under the native connection |
| 0x2353 | internal error of database, please see taoslog for more details | An error occurred while executing prepareStatement over the native connection. Check the taos log to locate the problem. |
| 0x2354 | JNI connection is NULL | The Connection was already closed when the command was executed over the native connection. Check the connection to TDengine. |
| 0x2355 | JNI result set is NULL | The native connection obtained an invalid result set. Check the connection and retry. |
| 0x2356 | invalid num of fields | The meta information of the result set obtained over the native connection does not match. |
| 0x2357 | empty sql string | Provide a valid SQL statement to execute. |
| 0x2358 | fetch to the end of resultSet | - |
| 0x2359 | JNI alloc memory failed, please see taoslog for more details | Memory allocation failed under the native connection. Check the taos log to locate the problem. |
| 0x2371 | consumer properties must not be null! | The parameters were empty when creating the subscription. Provide valid parameters. |
| 0x2372 | configs contain empty key, failed to set consumer property | A parameter key contains an empty value. Provide valid parameters. |
| 0x2373 | failed to set consumer property, | A parameter value contains an empty value. Provide valid parameters. |
| 0x2374 | consumer config error | - |
| 0x2375 | topic reference has been destroyed | The topic reference was released while the data subscription was being created. Check the connection to TDengine. |
| 0x2376 | failed to set consumer topic, topic name is empty | The topic name was empty while the data subscription was being created. Check whether the specified topic name is correct. |
| 0x2377 | consumer reference has been destroyed | The subscription data transfer channel has been closed. Check the connection to TDengine. |
| 0x2378 | consumer create error | Failed to create the subscription. Check the taos log based on the error message to locate the problem. |
| - | can't create connection with server within: | Increase the connection time via the httpConnectTimeout parameter, or check the connection to taosAdapter. |
| - | failed to complete the task within the specified time : | Increase the execution time via the messageWaitTimeout parameter, or check the connection to taosAdapter. |
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
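As a hedged illustration of acting on the table above: connection-level codes such as 0x2301/0x230e can be handled by rebuilding the connection, while other codes are usually best propagated. The `reconnect()` helper and the `connection`/`sql` variables below are hypothetical:
```java
// Hypothetical sketch: branch on the error code from the table above.
try (Statement stmt = connection.createStatement()) {
    stmt.executeUpdate(sql);
} catch (SQLException e) {
    int code = e.getErrorCode();
    if (code == 0x2301 || code == 0x230e) { // connection already closed
        connection = reconnect(); // hypothetical helper that re-creates the Connection
    } else {
        throw e; // not recoverable here: propagate to the caller
    }
}
```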
## TDengine DataType and Java DataType
TDengine currently supports timestamp, numeric, character, and boolean types, which map to Java types as follows:
| TDengine DataType | JDBCType |
| ----------------- | ---------------------------------- |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
| TDengine DataType | JDBCType |
| ----------------- | ------------------ |
| TIMESTAMP | java.sql.Timestamp |
| INT | java.lang.Integer |
| BIGINT | java.lang.Long |
| FLOAT | java.lang.Float |
| DOUBLE | java.lang.Double |
| SMALLINT | java.lang.Short |
| TINYINT | java.lang.Byte |
| BOOL | java.lang.Boolean |
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
**Note**: the JSON type is supported only in tags.
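As a small sketch of the mapping above, here is an illustrative read of one row with type-appropriate getters; the `meters` table and its columns follow the examples used elsewhere in these docs, and `connection` is assumed to be an open `java.sql.Connection`:
```java
// Illustrative reads matching the mapping table above.
try (Statement stmt = connection.createStatement();
     ResultSet rs = stmt.executeQuery("select ts, current, voltage, location from meters limit 1")) {
    while (rs.next()) {
        Timestamp ts = rs.getTimestamp("ts");       // TIMESTAMP -> java.sql.Timestamp
        float current = rs.getFloat("current");     // FLOAT     -> java.lang.Float
        int voltage = rs.getInt("voltage");         // INT       -> java.lang.Integer
        String location = rs.getString("location"); // BINARY/NCHAR -> String (BINARY can also be read as byte[])
        System.out.printf("%s %.1f %d %s%n", ts, current, voltage, location);
    }
}
```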
@ -82,7 +180,7 @@ In a Maven project, add the following dependency to pom.xml:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.1</version>
</dependency>
```
@ -97,7 +195,7 @@ cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
After compilation, a taos-jdbcdriver-3.0.*-dist.jar package is produced in the target directory, and the compiled jar file is automatically installed into the local Maven repository.
After compilation, a taos-jdbcdriver-3.2.\*-dist.jar package is produced in the target directory, and the compiled jar file is automatically installed into the local Maven repository.
</TabItem>
</Tabs>
@ -230,7 +328,7 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra
**Note**:
- Client parameters set in the application are process-wide: updating a client parameter requires restarting the application, because client parameters are global and only the first setting in the application takes effect.
- The following sample code is based on taos-jdbcdriver-3.0.0.
- The following sample code is based on taos-jdbcdriver-3.1.0.
```java
public Connection getConn() throws Exception{
@ -272,7 +370,7 @@ The configuration parameters in properties are as follows:
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in ms, default 5000. Effective only for REST connections with batchfetch set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in ms, default 3000. Effective only for REST connections with batchfetch set to true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: whether SSL is used for the connection. Effective only for REST connections.
In addition, for JDBC native connections, other parameters such as the log level and maximum SQL length can be specified via the URL and Properties. For more details, see [client configuration](/reference/config/#仅客户端适用).
In addition, for JDBC native connections, other parameters such as the log level and maximum SQL length can be specified via the URL and Properties. For more details, see [client configuration](/reference/config/#仅客户端适用).
### Priority of Configuration Parameters
@ -336,30 +434,6 @@ while(resultSet.next()){
> Queries work the same as with relational databases: when fetching returned fields by index, indexes start from 1; fetching by field name is recommended.
### Error Handling
After an error occurs, the error message and error code can be obtained from the SQLException:
```java
try (Statement statement = connection.createStatement()) {
// executeQuery
ResultSet resultSet = statement.executeQuery(sql);
// print result
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR Message: " + e.getMessage());
System.out.println("ERROR Code: " + e.getErrorCode());
e.printStackTrace();
}
```
The error codes that the JDBC connector may report fall into 3 categories: errors from the JDBC driver itself (error codes between 0x2301 and 0x2350), errors from native connection methods (error codes between 0x2351 and 0x2400), and errors from other TDengine modules.
For the specific error codes, see:
- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
### Writing Data via Parameter Binding
TDengine's JDBC native connection implementation has greatly improved support for parameter binding in data writing (INSERT) scenarios. Writing data this way avoids the overhead of SQL parsing and can significantly improve write performance in many cases.
@ -367,9 +441,12 @@ TDengine's JDBC native connection implementation has greatly improved support for parameter binding
**Note**:
- JDBC REST connections do not currently support parameter binding
- The following sample code is based on taos-jdbcdriver-3.0.0
- The following sample code is based on taos-jdbcdriver-3.2.1
- binary data requires the setString method, and nchar data requires the setNString method
- both setString and setNString require the column width of the corresponding column in the table definition to be declared in the size parameter
- When specifying the database and subtable name in a prepared statement, do not use `db.?`; use `?` directly and specify the database in setTableName, e.g. `prepareStatement.setTableName("db.t1")`.
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```java
public class ParameterBindingDemo {
@ -597,21 +674,7 @@ public class ParameterBindingDemo {
}
```
The methods for setting TAGS values are:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
**Note**: both setString and setNString require the column width of the corresponding column in the table definition to be declared in the size parameter
The methods for setting VALUES column data are:
@ -628,17 +691,203 @@ public void setString(int columnIndex, ArrayList<String> list, int size) throws
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
```
</TabItem>
<TabItem value="rest" label="WebSocket connection">
```java
public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 30;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
};
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_ws_parabind");
stmt.execute("create database if not exists test_ws_parabind");
stmt.execute("use test_ws_parabind");
for (int i = 0; i < schemaList.length; i++) {
stmt.execute(schemaList[i]);
}
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t1_" + i);
// set tags
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
pstmt.setTagLong(4, random.nextLong());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
pstmt.setLong(5, random.nextLong());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindFloat(Connection conn) throws SQLException {
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t2_" + i);
// set tags
pstmt.setTagFloat(1, random.nextFloat());
pstmt.setTagDouble(2, random.nextDouble());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setFloat(2, random.nextFloat());
pstmt.setDouble(3, random.nextDouble());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBoolean(Connection conn) throws SQLException {
String sql = "insert into ? using stable3 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t3_" + i);
// set tags
pstmt.setTagBoolean(1, random.nextBoolean());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setBoolean(2, random.nextBoolean());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBytes(Connection conn) throws SQLException {
String sql = "insert into ? using stable4 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t4_" + i);
// set tags
pstmt.setTagString(1, "abc");
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setString(2, "abc");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindString(Connection conn) throws SQLException {
String sql = "insert into ? using stable5 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
pstmt.setTagNString(1, "California.SanFrancisco");
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setNString(2, "California.SanFrancisco");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
}
```
</TabItem>
</Tabs>
The methods for setting TAGS values are:
```java
public void setTagNull(int index, int type)
public void setTagBoolean(int index, boolean value)
public void setTagInt(int index, int value)
public void setTagByte(int index, byte value)
public void setTagShort(int index, short value)
public void setTagLong(int index, long value)
public void setTagTimestamp(int index, long value)
public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
```
### Schemaless Writing
TDengine supports schemaless writing, which is compatible with InfluxDB's Line Protocol, OpenTSDB's telnet line protocol, and OpenTSDB's JSON protocol. For details, see [Schemaless Writing](../../reference/schemaless/).
**Note**:
- JDBC REST connections do not currently support schemaless writing
- The following sample code is based on taos-jdbcdriver-3.0.0
<Tabs defaultValue="native">
<TabItem value="native" label="native connection">
```java
public class SchemalessInsertTest {
public class SchemalessJniTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
@ -666,6 +915,41 @@ public class SchemalessInsertTest {
}
```
</TabItem>
<TabItem value="ws" label="WebSocket connection">
```java
public class SchemalessWsTest {
private static final String host = "127.0.0.1";
private static final String lineDemo = "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000";
private static final String telnetDemo = "stb0_0 1626006833 4 host=host0 interface=eth0";
private static final String jsonDemo = "{\"metric\": \"meter_current\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
public static void main(String[] args) throws SQLException {
final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&batchfetch=true";
Connection connection = DriverManager.getConnection(url);
init(connection);
SchemalessWriter writer = new SchemalessWriter(connection, "test_ws_schemaless");
writer.write(lineDemo, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
writer.write(telnetDemo, SchemalessProtocolType.TELNET, SchemalessTimestampType.MILLI_SECONDS);
writer.write(jsonDemo, SchemalessProtocolType.JSON, SchemalessTimestampType.SECONDS);
System.exit(0);
}
private static void init(Connection connection) throws SQLException {
try (Statement stmt = connection.createStatement()) {
stmt.executeUpdate("drop database if exists test_ws_schemaless");
stmt.executeUpdate("create database if not exists test_ws_schemaless keep 36500");
stmt.executeUpdate("use test_ws_schemaless");
}
}
}
```
</TabItem>
</Tabs>
### Data Subscription
The TDengine Java connector supports subscription, with the following application API:
@ -702,15 +986,16 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- td.connect.type: connection type. jni means connecting via the dynamic library; ws/WebSocket means communicating over WebSocket. Defaults to jni.
- httpConnectTimeout: connection creation timeout in ms, default 5000 ms. Effective only for WebSocket connections.
- messageWaitTimeout: data transfer timeout in ms, default 10000 ms. Effective only for WebSocket connections.
For other parameters, see: [Consumer parameter list](../../../develop/tmq#创建-consumer-以及consumer-group)
For other parameters, see: [Consumer parameter list](../../../develop/tmq#创建-consumer-以及consumer-group)
#### Consuming Subscribed Data
```java
while(true) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
```
@ -761,8 +1046,9 @@ public abstract class ConsumerLoop {
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
@ -839,8 +1125,9 @@ public abstract class ConsumerLoop {
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ResultBean record : records) {
process(record);
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
@ -966,20 +1253,6 @@ public static void main(String[] args) throws Exception {
See: [JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
## Recent Update History
| taos-jdbcdriver version | major changes |
| :------------------: | :----------------------------: |
| 3.1.0 | WebSocket connections support subscription |
| 3.0.1 - 3.0.4 | fixed occasional errors in parsing result set data. 3.0.1 is compiled with JDK 11; use another version in a JDK 8 environment |
| 3.0.0 | supports TDengine 3.0 |
| 2.0.42 | fixed the wasNull return value in WebSocket connections |
| 2.0.41 | fixed the encoding of user name and password in REST connections |
| 2.0.39 - 2.0.40 | added REST connection/request timeout settings |
| 2.0.38 | added bulk pulling to JDBC REST connections |
| 2.0.37 | added support for json tags |
| 2.0.36 | added support for schemaless writing |
## Frequently Asked Questions
1. Why is there no performance improvement when using Statement's `addBatch()` and `executeBatch()` for batch writing/updating?
@ -1002,9 +1275,9 @@ public static void main(String[] args) throws Exception {
4. java.lang.NoSuchMethodError: setByteArray
**Cause**: taos-jdbcdriver 3.* only supports TDengine 3.0 and later.
**Cause**: taos-jdbcdriver 3.\* only supports TDengine 3.0 and later.
**Solution**: use taos-jdbcdriver 2.* to connect to a TDengine 2.* deployment.
**Solution**: use taos-jdbcdriver 2.\* to connect to a TDengine 2.\* deployment.
5. java.lang.NoSuchMethodError: java.nio.ByteBuffer.position(I)Ljava/nio/ByteBuffer; ... taos-jdbcdriver-3.0.1.jar
View File
@ -10,6 +10,7 @@ import TabItem from '@theme/TabItem';
import Preparation from "./_preparation.mdx"
import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
@ -230,6 +231,10 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
<RustBind />
#### Schemaless Writing
<RustSml />
### Query Data
<RustQuery />
View File
@ -61,7 +61,7 @@ TDengine version updates often add new features; the connectors in the list
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Support | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Support | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
View File
@ -71,8 +71,8 @@ database_option: {
- 0: multiple supertables can be created.
- 1: only one supertable can be created.
- STT_TRIGGER: the number of on-disk files that triggers file merging. Default 1, range 1 to 16. For few-table, high-frequency scenarios the default or a small value is recommended; for many-table, low-frequency scenarios a larger value is recommended.
- TABLE_PREFIX: the length of the prefix that the internal storage engine ignores in the table name when assigning the VNODE that stores the table's data.
- TABLE_SUFFIX: the length of the suffix that the internal storage engine ignores in the table name when assigning the VNODE that stores the table's data.
- TABLE_PREFIX: when positive, a prefix of the given length of the table name is ignored when deciding which vgroup the table is assigned to; when negative, only a prefix of the given length of the table name is used. For example, for a table named "v30001", with TSDB_PREFIX = 2 "0001" is used to decide the vgroup, and with TSDB_PREFIX = -2 "v3" is used.
- TABLE_SUFFIX: when positive, a suffix of the given length of the table name is ignored when deciding which vgroup the table is assigned to; when negative, only a suffix of the given length of the table name is used. For example, for a table named "v30001", with TSDB_SUFFIX = 2 "v300" is used to decide the vgroup, and with TSDB_SUFFIX = -2 "01" is used.
- TSDB_PAGESIZE: the page size of a VNODE's time-series storage engine, in KB. Default 4 KB; range 1 to 16384 (1 KB to 16 MB).
- WAL_RETENTION_PERIOD: the maximum extra retention time of WAL log files, kept for data subscription. WAL cleanup is not affected by the consumption state of subscribing clients. Unit: s. Default 0, meaning no extra retention for subscriptions. Set an appropriate retention policy before creating subscriptions.
- WAL_RETENTION_SIZE: the maximum extra cumulative size of WAL log files kept for data subscription. Unit: KB. Default 0, meaning no upper limit on cumulative size.
View File
@ -15,6 +15,7 @@ stream_options: {
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
IGNORE UPDATE [0|1]
}
```
@ -169,7 +170,7 @@ At time T3 the latest event arrives and T moves back past the point at which the second window closes
In window_close or max_delay mode, window closing directly affects the pushed results. In at_once mode, window closing only affects memory usage.
## Stream Processing Policy for Expired Data
## How Stream Processing Handles Expired Data
Data that falls into an already-closed window is marked as expired data.
@ -177,11 +178,20 @@ TDengine provides two ways of handling expired data, specified by the IGNORE EXPIRED option
1. Recalculate, i.e. IGNORE EXPIRED 0: re-read all data of the corresponding window from the TSDB and recalculate to obtain the latest result
2. Drop directly, i.e. IGNORE EXPIRED 1 (default): ignore the expired data
2. Drop directly, i.e. IGNORE EXPIRED 1 (default): ignore the expired data
In either mode, the watermark should be set properly to obtain correct results (drop mode) or to avoid the performance overhead of frequently triggered recalculation (recalculate mode).
## How Stream Processing Handles Modified Data
TDengine provides two ways of handling modified data, specified by the IGNORE UPDATE option:
1. Check whether data have been modified, i.e. IGNORE UPDATE 0 (default): if modified, recalculate the corresponding window.
2. Do not check whether data have been modified and compute everything as incremental data, i.e. IGNORE UPDATE 1.
## Writing to an Existing Supertable
```sql
[field1_name,...]
@ -216,26 +226,26 @@ T = latest event time - DELETE_MARK
## Functions Supported in Stream Processing
1. All single-row functions can be used in stream processing
2. The following 19 aggregate functions cannot be used in the SQL statement that creates a stream
```
leastsquares
percentile
top
bottom
elapsed
interp
derivative
irate
twa
histogram
diff
statecount
stateduration
csum
mavg
sample
tail
unique
mode
```
1. All [single-row functions](../function/#单行函数) can be used in stream processing.
2. The following 19 aggregate/selection functions <b>cannot</b> be used in the SQL statement that creates a stream. All other types of functions can be used in stream processing.
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
- [top](../function/#top)
- [bottom](../function/#bottom)
- [elapsed](../function/#elapsed)
- [interp](../function/#interp)
- [derivative](../function/#derivative)
- [irate](../function/#irate)
- [twa](../function/#twa)
- [histogram](../function/#histogram)
- [diff](../function/#diff)
- [statecount](../function/#statecount)
- [stateduration](../function/#stateduration)
- [csum](../function/#csum)
- [mavg](../function/#mavg)
- [sample](../function/#sample)
- [tail](../function/#tail)
- [unique](../function/#unique)
- [mode](../function/#mode)
View File
@ -120,6 +120,10 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides access to data
| 5 | create_time | TIMESTAMP | creation time |
| 6 | code_len | INT | code length |
| 7 | bufsize | INT | buffer size |
| 8 | func_language | BINARY(31) | programming language of the user-defined function |
| 9 | func_body | BINARY(16384) | function body definition |
| 10 | func_version | INT | function version number; the initial version is 0, and the version increases by 1 on each replacement update |
## INS_INDEXES
View File
@ -13,27 +13,34 @@ description: A detailed guide to using UDFs
- Create a scalar function
```sql
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
```
- function_name: the function name used when the scalar function is later invoked in SQL; it must match the actual udf name in the function implementation;
- library_path: the absolute path of the library file of the dynamic library containing the UDF implementation (that is, the path where the library file is saved on the host of the current client, usually pointing to a .so file); the path must be enclosed in single or double quotes;
- OR REPLACE: if the function already exists, the existing function's properties are modified.
- function_name: the function name used when the scalar function is later invoked in SQL;
- LANGUAGE 'C|Python': the programming language of the function; currently C and Python are supported. If this clause is omitted, the language is C
- library_path: if the language is C, the absolute path of the library file of the dynamic library containing the UDF implementation (that is, the path where the library file is saved on the host of the current client, usually pointing to a .so file); if the language is Python, the path of the Python file containing the UDF implementation. The path must be enclosed in single or double quotes
- output_type: the name of the data type of the function's result;
For example, the following statement creates libbitand.so as a UDF available in the system:
For example, the following statement creates libbitand.so as a UDF available in the system:
```sql
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
```
For example, the following statement modifies the already-defined bit_and function so that its output type is BIGINT and it is implemented in Python.
```sql
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
```
- Create an aggregate function:
```sql
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ] [LANGUAGE 'C|Python'];
```
- OR REPLACE: if the function already exists, the existing function's properties are modified.
- function_name: the function name used when the aggregate function is later invoked in SQL; it must match the actual udfNormalFunc name in the function implementation;
- library_path: the absolute path of the library file of the dynamic library containing the UDF implementation (that is, the path where the library file is saved on the host of the current client, usually pointing to a .so file); the path must be enclosed in single or double quotes;
- output_type: the data type of the function's result; unlike the itype parameter of udfNormalFunc above, it is written as the type name rather than a numeric code;
- LANGUAGE 'C|Python': the programming language of the function; currently C and Python are supported.
- library_path: if the language is C, the absolute path of the library file of the dynamic library containing the UDF implementation (that is, the path where the library file is saved on the host of the current client, usually pointing to a .so file); if the language is Python, the path of the Python file containing the UDF implementation. The path must be enclosed in single or double quotes
- output_type: the name of the data type of the function's result;
- buffer_size: the buffer size for intermediate results, in bytes. Optional if not used.
For example, the following statement creates libl2norm.so as a UDF available in the system:
@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```
For example, the following statement changes the buffer size of the already defined l2norm function to 64.
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
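Once registered, UDFs are invoked like built-in functions; a sketch, assuming a hypothetical table `t1` with INT columns `c1`, `c2` and a DOUBLE column `c3`:
```sql
SELECT bit_and(c1, c2) FROM t1;  -- scalar UDF
SELECT l2norm(c3) FROM t1;       -- aggregate UDF
```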
For details on how to develop user-defined functions, see the [UDF usage guide](/develop/udf).
## Managing UDFs

View File

@ -27,13 +27,13 @@ description: "Syntax changes in TDengine 3.0"
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Removed | An enterprise-edition feature in 2.x, no longer supported in 3.0. The syntax is kept for now; executing it reports the error "This statement is no longer supported".
| 2 | ALTER ALL DNODES | Added | Modify the parameters of all DNODEs.
| 3 | ALTER DATABASE | Changed | Removed<ul><li>QUORUM: the number of replica confirmations required for a write. The default behavior in 3.0 is strong consistency, and changing to weak consistency is not supported.</li><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER to express the size of a VNODE's write memory pool.</li><li>UPDATE: the supported mode of update operations. In 3.0 all databases support partial-column updates.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>COMP: cannot be modified in 3.0 for now.</li><br/>Added<li>CACHEMODEL: whether the most recent subtable data is cached in memory.</li><li>CACHESIZE: the amount of memory used for caching the most recent subtable data.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.</li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0, an extra retention policy for wal files, used for data subscription.</li><li>WAL_RETENTION_SIZE: added in 3.0.4.0, an extra retention policy for wal files, used for data subscription.<br/>Changed</li><li>REPLICA: cannot be modified in 3.0.0 for now.</li><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 3 | ALTER DATABASE | Changed | <p>Removed</p><ul><li>QUORUM: the number of replica confirmations required for a write. The default behavior in 3.0 is strong consistency, and changing to weak consistency is not supported.</li><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER to express the size of a VNODE's write memory pool.</li><li>UPDATE: the supported mode of update operations. In 3.0 all databases support partial-column updates.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>COMP: cannot be modified in 3.0 for now.</li></ul><p>Added</p><ul><li>CACHEMODEL: whether the most recent subtable data is cached in memory.</li><li>CACHESIZE: the amount of memory used for caching the most recent subtable data.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.</li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0, an extra retention policy for wal files, used for data subscription.</li><li>WAL_RETENTION_SIZE: added in 3.0.4.0, an extra retention policy for wal files, used for data subscription.</li></ul><p>Changed</p><ul><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 4 | ALTER STABLE | Changed | Removed<ul><li>CHANGE TAG: rename a tag column. Replaced by RENAME TAG in 3.0.<br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause.</li><li>COMMENT: modify the supertable's comment.</li></ul>
| 5 | ALTER TABLE | Changed | Removed<ul><li>CHANGE TAG: rename a tag column. Replaced by RENAME TAG in 3.0.<br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause.</li><li>COMMENT: modify the table's comment.</li><li>TTL: modify the table's time to live.</li></ul>
| 6 | ALTER USER | Changed | Removed<ul><li>PRIVILEGE: modify user privileges. 3.0 uses GRANT and REVOKE to grant and revoke privileges.<br/>Added</li><li>ENABLE: enable or disable the user.</li><li>SYSINFO: modify whether the user may view system information.</li></ul>
| 7 | COMPACT VNODES | Not yet supported | Defragment the data of the specified VNODEs. Not supported in 3.0.0 for now.
| 8 | CREATE ACCOUNT | Removed | An enterprise-edition feature in 2.x, no longer supported in 3.0. The syntax is kept for now; executing it reports the error "This statement is no longer supported".
| 9 | CREATE DATABASE | Changed | Removed<ul><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER to express the size of a VNODE's write memory pool.</li><li>CACHE: the size of the memory blocks used by a VNODE. 3.0 uses BUFFER to express the size of a VNODE's write memory pool.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>DAYS: the time span of the data stored in one data file. Replaced by DURATION in 3.0.</li><li>FSYNC: the fsync interval when WAL is set to 2. Replaced by WAL_FSYNC_PERIOD in 3.0.</li><li>QUORUM: the number of replica confirmations required for a write. 3.0 uses STRICT to choose between strong and weak consistency.</li><li>UPDATE: the supported mode of update operations. In 3.0 all databases support partial-column updates.</li><li>WAL: the WAL level. Replaced by WAL_LEVEL in 3.0.<br/>Added</li><li>BUFFER: the size of a VNODE's write memory pool.</li><li>CACHEMODEL: whether the most recent subtable data is cached in memory.</li><li>CACHESIZE: the amount of memory used for caching the most recent subtable data.</li><li>DURATION: replaces the former DAYS parameter, adding support for values with units.</li><li>PAGES: the number of cache pages of the metadata storage engine in a VNODE.</li><li>PAGESIZE: the page size of the metadata storage engine in a VNODE.</li><li>RETENTIONS: the aggregation periods and retention durations of the data.</li><li>STRICT: the consistency requirement for data replication.</li><li>SINGLE_STABLE: whether only a single supertable may be created in this database.</li><li>VGROUPS: the initial number of VGROUPs in the database.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.</li><li>WAL_RETENTION_PERIOD: an extra retention policy for wal files, used for data subscription.</li><li>WAL_RETENTION_SIZE: an extra retention policy for wal files, used for data subscription.</li><li>WAL_ROLL_PERIOD: the wal file rotation interval.</li><li>WAL_SEGMENT_SIZE: the size of a single wal file.<br/>Changed</li><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 9 | CREATE DATABASE | Changed | <p>Removed</p><ul><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER to express the size of a VNODE's write memory pool.</li><li>CACHE: the size of the memory blocks used by a VNODE. 3.0 uses BUFFER to express the size of a VNODE's write memory pool.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>DAYS: the time span of the data stored in one data file. Replaced by DURATION in 3.0.</li><li>FSYNC: the fsync interval when WAL is set to 2. Replaced by WAL_FSYNC_PERIOD in 3.0.</li><li>QUORUM: the number of replica confirmations required for a write. 3.0 uses STRICT to choose between strong and weak consistency.</li><li>UPDATE: the supported mode of update operations. In 3.0 all databases support partial-column updates.</li><li>WAL: the WAL level. Replaced by WAL_LEVEL in 3.0.</li></ul><p>Added</p><ul><li>BUFFER: the size of a VNODE's write memory pool.</li><li>CACHEMODEL: whether the most recent subtable data is cached in memory.</li><li>CACHESIZE: the amount of memory used for caching the most recent subtable data.</li><li>DURATION: replaces the former DAYS parameter, adding support for values with units.</li><li>PAGES: the number of cache pages of the metadata storage engine in a VNODE.</li><li>PAGESIZE: the page size of the metadata storage engine in a VNODE.</li><li>RETENTIONS: the aggregation periods and retention durations of the data.</li><li>STRICT: the consistency requirement for data replication.</li><li>SINGLE_STABLE: whether only a single supertable may be created in this database.</li><li>VGROUPS: the initial number of VGROUPs in the database.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.</li><li>WAL_RETENTION_PERIOD: an extra retention policy for wal files, used for data subscription.</li><li>WAL_RETENTION_SIZE: an extra retention policy for wal files, used for data subscription.</li><li>WAL_ROLL_PERIOD: the wal file rotation interval.</li><li>WAL_SEGMENT_SIZE: the size of a single wal file.</li></ul><p>Changed</p><ul><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 10 | CREATE DNODE | Changed | Added syntax for specifying the host name and port separately<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
| 11 | CREATE INDEX | Added | Create an SMA index.
| 12 | CREATE MNODE | Added | Create a management node.

View File

@ -79,6 +79,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-A, --all-databases Dump all databases.
-D, --databases=DATABASES Dump inputted databases. Use comma to separate
databases' name.
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump tables' schema.
-y, --answer-yes Input yes for prompt. It will skip data file

View File

@ -356,7 +356,7 @@ The valid value of charset is UTF-8.
| Applicable | Server only |
| Meaning | Maximum number of vnodes a dnode supports |
| Range | 0-4096 |
| Default | Twice the number of CPU cores |
## Performance Tuning
@ -366,6 +366,7 @@ The valid value of charset is UTF-8.
| -------- | ---------------------- |
| Applicable | Server only |
| Meaning | Maximum number of write threads |
| Range | 0-1024 |
| Default | |
## Logging

View File

@ -10,6 +10,10 @@ For installation packages of TDengine 2.x releases, visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />

View File

@ -10,6 +10,14 @@ Download links for all taosTools releases are as follows:
import Release from "/components/ReleaseV3";
## 2.5.0
<Release type="tools" version="2.5.0" />
## 2.4.12
<Release type="tools" version="2.4.12" />

View File

@ -24,6 +24,12 @@
extern "C" {
#endif
#define SLOW_LOG_TYPE_QUERY 0x1
#define SLOW_LOG_TYPE_INSERT 0x2
#define SLOW_LOG_TYPE_OTHERS 0x4
#define SLOW_LOG_TYPE_ALL 0xFFFFFFFF
// cluster
extern char tsFirst[];
extern char tsSecond[];
@ -119,6 +125,8 @@ extern int32_t tsRedirectMaxPeriod;
extern int32_t tsMaxRetryWaitTime;
extern bool tsUseAdapter;
extern int32_t tsMetaCacheMaxSize;
extern int32_t tsSlowLogThreshold;
extern int32_t tsSlowLogScope;
// client
extern int32_t tsMinSlidingTime;

View File

@ -225,7 +225,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_COMMIT, "vnode-commit", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_INDEX, "vnode-create-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_INDEX, "vnode-drop-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DISABLE_WRITE, "vnode-disable-write", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)

View File

@ -99,11 +99,11 @@ typedef struct SSubsidiaryResInfo {
} SSubsidiaryResInfo;
typedef struct SResultDataInfo {
int16_t precision;
int16_t scale;
int16_t type;
int16_t bytes;
int32_t interBufSize;
int16_t precision;
int16_t scale;
int16_t type;
uint16_t bytes;
int32_t interBufSize;
} SResultDataInfo;
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)

View File

@ -185,6 +185,7 @@ typedef struct SMergeLogicNode {
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
bool ignoreGroupId;
} SMergeLogicNode;
typedef enum EWindowType {
@ -444,6 +445,7 @@ typedef struct SMergePhysiNode {
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
bool ignoreGroupId;
} SMergePhysiNode;
typedef struct SWinodwPhysiNode {

View File

@ -114,7 +114,7 @@ STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int16_t msgBufLen);
char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);

View File

@ -103,7 +103,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_CHECKSUM_ERROR TAOS_DEF_ERROR_CODE(0, 0x011F) // internal
#define TSDB_CODE_COMPRESS_ERROR TAOS_DEF_ERROR_CODE(0, 0x0120)
#define TSDB_CODE_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0121) //
#define TSDB_CODE_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0121)
#define TSDB_CODE_CFG_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0122)
#define TSDB_CODE_REPEAT_INIT TAOS_DEF_ERROR_CODE(0, 0x0123)
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0124)
@ -118,9 +118,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MSG_ENCODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x012D)
#define TSDB_CODE_NO_ENOUGH_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x012E)
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) //
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) //
#define TSDB_CODE_IVLD_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132) //
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130)
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131)
#define TSDB_CODE_INVALID_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132)
#define TSDB_CODE_INVALID_CFG_VALUE TAOS_DEF_ERROR_CODE(0, 0x0133)
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
@ -538,20 +539,20 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) // 2.x
#define TSDB_CODE_SYN_IS_LEADER TAOS_DEF_ERROR_CODE(0, 0x090B)
// #define TSDB_CODE_SYN_IS_LEADER TAOS_DEF_ERROR_CODE(0, 0x090B) // unused
#define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x090C)
#define TSDB_CODE_SYN_ONE_REPLICA TAOS_DEF_ERROR_CODE(0, 0x090D)
#define TSDB_CODE_SYN_NOT_IN_NEW_CONFIG TAOS_DEF_ERROR_CODE(0, 0x090E)
#define TSDB_CODE_SYN_NEW_CONFIG_ERROR TAOS_DEF_ERROR_CODE(0, 0x090F) // internal
#define TSDB_CODE_SYN_RECONFIG_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0910)
// #define TSDB_CODE_SYN_ONE_REPLICA TAOS_DEF_ERROR_CODE(0, 0x090D) // unused
// #define TSDB_CODE_SYN_NOT_IN_NEW_CONFIG TAOS_DEF_ERROR_CODE(0, 0x090E) // unused
#define TSDB_CODE_SYN_NEW_CONFIG_ERROR TAOS_DEF_ERROR_CODE(0, 0x090F) // internal
// #define TSDB_CODE_SYN_RECONFIG_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0910) // unused
#define TSDB_CODE_SYN_PROPOSE_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0911)
#define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912)
#define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913)
// #define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912) // unused
// #define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913) // unused
#define TSDB_CODE_SYN_RESTORING TAOS_DEF_ERROR_CODE(0, 0x0914)
#define TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG TAOS_DEF_ERROR_CODE(0, 0x0915) // internal
#define TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG TAOS_DEF_ERROR_CODE(0, 0x0915) // internal
#define TSDB_CODE_SYN_BUFFER_FULL TAOS_DEF_ERROR_CODE(0, 0x0916)
#define TSDB_CODE_SYN_WRITE_STALL TAOS_DEF_ERROR_CODE(0, 0x0917)
#define TSDB_CODE_SYN_NEGO_WIN_EXCEEDED TAOS_DEF_ERROR_CODE(0, 0X0918)
#define TSDB_CODE_SYN_NEGOTIATION_WIN_FULL TAOS_DEF_ERROR_CODE(0, 0x0918)
#define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)
// tq
@ -572,7 +573,7 @@ int32_t* taosGetErrno();
// wal
// #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) // 2.x
#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001)
#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002)
// #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) // unused
#define TSDB_CODE_WAL_INVALID_VER TAOS_DEF_ERROR_CODE(0, 0x1003)
// #define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1004) // 2.x
#define TSDB_CODE_WAL_LOG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x1005)

View File

@ -232,13 +232,7 @@ typedef enum ELogicConditionType {
#define TSDB_QUERY_ID_LEN 26
#define TSDB_TRANS_OPER_LEN 16
/**
* In some scenarios uint16_t (0~65535) is used to store the row len.
* - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains 4 bits header.
* - Secondly, if all cols are VarDataT type except primary key, we need 4 bits to store the offset, thus
* the final value is 65531-(4096-1)*4 = 49151.
*/
#define TSDB_MAX_BYTES_PER_ROW 49151
#define TSDB_MAX_BYTES_PER_ROW 65531 // 49151:65531
#define TSDB_MAX_TAGS_LEN 16384
#define TSDB_MAX_TAGS 128
@ -410,9 +404,9 @@ typedef enum ELogicConditionType {
#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
#define TSDB_MAX_FIELD_LEN 16384
#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE) // keep 16384
#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE) // keep 16384
#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519
#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))

View File

@ -83,6 +83,12 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
#endif
;
void taosPrintSlowLog(const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 1, 2)))
#endif
;
bool taosAssertDebug(bool condition, const char *file, int32_t line, const char *format, ...);
bool taosAssertRelease(bool condition);

View File

@ -341,7 +341,7 @@ if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
tmp_pwd=`pwd`
cd ${install_dir}/connector
if [ ! -d taos-connector-jdbc ];then
git clone -b main --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
git clone -b 3.1.0 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
fi
cd taos-connector-jdbc
mvn clean package -Dmaven.test.skip=true

View File

@ -361,7 +361,7 @@ void stopAllRequests(SHashObj* pRequests);
// conn level
int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType);
void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey, void* param);
void hbDeregisterConn(STscObj* pTscObj, SClientHbKey connKey);
typedef struct SSqlCallbackWrapper {
SParseContext* pParseCtx;

View File

@ -42,7 +42,7 @@ SAppInfo appInfo;
int64_t lastClusterId = 0;
int32_t clientReqRefPool = -1;
int32_t clientConnRefPool = -1;
int32_t clientStop = 0;
int32_t clientStop = -1;
int32_t timestampDeltaLimit = 900; // s
@ -69,7 +69,6 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {
}
static void deregisterRequest(SRequestObj *pRequest) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable
if (pRequest == NULL) {
tscError("pRequest == NULL");
return;
@ -80,6 +79,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
int32_t reqType = SLOW_LOG_TYPE_OTHERS;
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64
@ -95,6 +95,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs,
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
reqType = SLOW_LOG_TYPE_INSERT;
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
tscDebug("query duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64
"us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
@ -102,12 +103,16 @@ static void deregisterRequest(SRequestObj *pRequest) {
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
reqType = SLOW_LOG_TYPE_QUERY;
}
}
if (duration >= SLOW_QUERY_INTERVAL) {
if (duration >= (tsSlowLogThreshold * 1000000UL)) {
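// tsSlowLogThreshold is configured in seconds while duration is measured in
// microseconds, hence the 1000000UL scaling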
atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
tscWarnL("slow query: %s, duration:%" PRId64, pRequest->sqlstr, duration);
if (tsSlowLogScope & reqType) {
taosPrintSlowLog("PID:%d, Conn:%u, QID:0x%" PRIx64 ", Start:%" PRId64 ", Duration:%" PRId64 "us, SQL:%s",
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, pRequest->sqlstr);
}
}
releaseTscObj(pTscObj->id);
@ -239,7 +244,7 @@ void destroyTscObj(void *pObj) {
tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj);
SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey, pTscObj->passInfo.fp);
hbDeregisterConn(pTscObj, connKey);
destroyAllRequests(pTscObj->pRequests);
taosHashCleanup(pTscObj->pRequests);
@ -427,8 +432,12 @@ static void *tscCrashReportThreadFp(void *param) {
}
#endif
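// clientStop states: -1 not started, 0 running, >0 stop requested, -2 exited;
// the compare-and-swap lets only the first crash-report thread proceed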
if (-1 != atomic_val_compare_exchange_32(&clientStop, -1, 0)) {
return NULL;
}
while (1) {
if (clientStop) break;
if (clientStop > 0) break;
if (loopTimes++ < reportPeriodNum) {
taosMsleep(sleepTime);
continue;
@ -466,7 +475,7 @@ static void *tscCrashReportThreadFp(void *param) {
loopTimes = 0;
}
clientStop = -1;
clientStop = -2;
return NULL;
}

View File

@ -994,6 +994,7 @@ SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
// init stat
pAppHbMgr->startTime = taosGetTimestampMs();
pAppHbMgr->connKeyCnt = 0;
pAppHbMgr->passKeyCnt = 0;
pAppHbMgr->reportCnt = 0;
pAppHbMgr->reportBytes = 0;
pAppHbMgr->key = taosStrdup(key);
@ -1154,7 +1155,8 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) {
void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
SAppHbMgr *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr;
SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
@ -1167,7 +1169,10 @@ void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) {
}
atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
if (param) {
taosThreadMutexLock(&pTscObj->mutex);
if (pTscObj->passInfo.fp) {
atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1);
}
}
taosThreadMutexUnlock(&pTscObj->mutex);
}

View File

@ -1248,6 +1248,11 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
return NULL;
}
pRequest->sqlstr = taosStrdup("taos_connect");
if (pRequest->sqlstr) {
pRequest->sqlLen = strlen(pRequest->sqlstr);
}
SMsgSendInfo* body = buildConnectMsg(pRequest);
int64_t transporterId = 0;
@ -1257,7 +1262,7 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
if (pRequest->code != TSDB_CODE_SUCCESS) {
const char* errorMsg =
(pRequest->code == TSDB_CODE_RPC_FQDN_ERROR) ? taos_errstr(pRequest) : tstrerror(pRequest->code);
fprintf(stderr, "failed to connect to server, reason: %s\n\n", errorMsg);
tscError("failed to connect to server, reason: %s", errorMsg);
terrno = pRequest->code;
destroyRequest(pRequest);

View File

@ -134,11 +134,15 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)
switch (type) {
case TAOS_NOTIFY_PASSVER: {
taosThreadMutexLock(&pObj->mutex);
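// passKeyCnt counts connections with a password-version callback installed;
// adjust it only when the callback flips between set and unset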
if (fp && !pObj->passInfo.fp) {
atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
} else if (!fp && pObj->passInfo.fp) {
atomic_sub_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
}
pObj->passInfo.fp = fp;
pObj->passInfo.param = param;
if (fp) {
atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
}
taosThreadMutexUnlock(&pObj->mutex);
break;
}
default: {
@ -1338,6 +1342,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
goto _return;
}
pRequest->syncQuery = true;
STscObj *pTscObj = pRequest->pTscObj;
code = transferTableNameList(tableNameList, pTscObj->acctId, pTscObj->db, &catalogReq.pTableMeta);
if (code) {
@ -1364,7 +1370,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
tsem_wait(&pParam->sem);
_return:
taosArrayDestroy(catalogReq.pTableMeta);
taosArrayDestroyEx(catalogReq.pTableMeta, destoryTablesReq);
destroyRequest(pRequest);
return code;
}

View File

@ -558,15 +558,21 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
return 0;
}
#define BOUNDARY 1024
static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
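// round short lengths up to the next power of two to damp repeated schema
// changes; lengths at or above BOUNDARY are kept as-is, then capped by the
// per-type maximum column width below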
int32_t result = 1;
while (result <= length) {
result *= 2;
if (length >= BOUNDARY){
result = length;
}else{
while (result <= length) {
result <<= 1;
}
}
if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
result = (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
}
if (type == TSDB_DATA_TYPE_NCHAR) {
@ -649,6 +655,17 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO
field->bytes = getBytes(kv->type, kv->length);
}
}
int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW;
int32_t len = 0;
for (int j = 0; j < taosArrayGetSize(results); ++j) {
SField *field = taosArrayGet(results, j);
len += field->bytes;
}
if (len > maxLen) {
return isTag ? TSDB_CODE_PAR_INVALID_TAGS_LENGTH : TSDB_CODE_PAR_INVALID_ROW_LENGTH;
}
return TSDB_CODE_SUCCESS;
}
@ -660,6 +677,7 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
SMCreateStbReq pReq = {0};
int32_t code = TSDB_CODE_SUCCESS;
SCmdMsgInfo pCmdMsg = {0};
char *pSql = NULL;
// put front for free
pReq.numOfColumns = taosArrayGetSize(pColumns);
@ -667,7 +685,27 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
pReq.numOfTags = taosArrayGetSize(pTags);
pReq.pTags = pTags;
code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest, 0);
if (action == SCHEMA_ACTION_CREATE_STABLE) {
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
pSql = "sml_create_stable";
} else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) {
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
pSql = (action == SCHEMA_ACTION_ADD_TAG) ? "sml_add_tag" : "sml_modify_tag_size";
} else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE) {
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? "sml_add_column" : "sml_modify_column_size";
}
code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
@ -678,23 +716,6 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
goto end;
}
if (action == SCHEMA_ACTION_CREATE_STABLE) {
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
} else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) {
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
} else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE) {
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
}
if (pReq.numOfTags == 0) {
pReq.numOfTags = 1;
SField field = {0};
@ -777,11 +798,15 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList tag1 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
code = smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList col1 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
@ -833,6 +858,23 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
pTableMeta->tableInfo.numOfColumns, true);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList tag2 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
if (taosArrayGetSize(pTags) + pTableMeta->tableInfo.numOfColumns > TSDB_MAX_COLUMNS) {
uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
if (taosArrayGetSize(pTags) > TSDB_MAX_TAGS) {
uError("SML:0x%" PRIx64 " too many tags than 128", info->id);
code = TSDB_CODE_PAR_INVALID_TAGS_NUM;
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
@ -887,6 +929,16 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
pTableMeta->tableInfo.numOfColumns, false);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList col2 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
if (taosArrayGetSize(pColumns) + pTableMeta->tableInfo.numOfTags > TSDB_MAX_COLUMNS) {
uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
@ -1167,6 +1219,7 @@ static int32_t smlPushCols(SArray *colsArray, SArray *cols) {
SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i);
taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
if (terrno == TSDB_CODE_DUP_KEY) {
taosHashCleanup(kvHash);
return terrno;
}
}
@ -1240,12 +1293,12 @@ static int32_t smlParseLineBottom(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " smlParseLineBottom add meta, format:%d, linenum:%d", info->id, info->dataFormat,
info->lineNum);
SSmlSTableMeta *meta = smlBuildSTableMeta(info->dataFormat);
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags);
if (terrno == TSDB_CODE_DUP_KEY) {
return terrno;
}
smlInsertMeta(meta->colHash, meta->cols, elements->colArray);
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
}
}
uDebug("SML:0x%" PRIx64 " smlParseLineBottom end, format:%d, linenum:%d", info->id, info->dataFormat, info->lineNum);
@ -1494,8 +1547,11 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
do {
code = smlModifyDBSchemas(info);
if (code == 0) break;
taosMsleep(500);
if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS ||
code == TSDB_CODE_PAR_INVALID_TAGS_NUM || code == TSDB_CODE_PAR_INVALID_ROW_LENGTH ||
code == TSDB_CODE_PAR_INVALID_TAGS_LENGTH)
break;
taosMsleep(100);
uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum);
} while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES);
@ -1514,6 +1570,44 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
return code;
}
void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd) {
if (tsSlowLogScope & SLOW_LOG_TYPE_INSERT) {
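// record the first schemaless line (truncated to TSDB_MAX_ALLOWED_SQL_LEN)
// as the request's SQL text so it can appear in the slow-query log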
int32_t len = 0;
int32_t rlen = 0;
char* p = NULL;
if (lines && lines[0]) {
len = strlen(lines[0]);
p = lines[0];
} else if (rawLine) {
if (rawLineEnd) {
len = rawLineEnd - rawLine;
} else {
len = strlen(rawLine);
}
p = rawLine;
}
if (NULL == p) {
return;
}
rlen = TMIN(len, TSDB_MAX_ALLOWED_SQL_LEN);
rlen = TMAX(rlen, 0);
char *sql = taosMemoryMalloc(rlen + 1);
if (NULL == sql) {
uError("malloc %d for sml sql failed", rlen + 1);
return;
}
memcpy(sql, p, rlen);
sql[rlen] = 0;
request->sqlstr = sql;
request->sqlLen = rlen;
}
}
TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, char *rawLineEnd, int numLines,
int protocol, int precision, int32_t ttl, int64_t reqid) {
int32_t code = TSDB_CODE_SUCCESS;
@ -1546,6 +1640,8 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
info->msgBuf.len = ERROR_MSG_BUF_DEFAULT_SIZE;
info->lineNum = numLines;
smlSetReqSQL(request, lines, rawLine, rawLineEnd);
SSmlMsgBuf msg = {ERROR_MSG_BUF_DEFAULT_SIZE, request->msgBuf};
if (request->pDb == NULL) {
request->code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;

View File

@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
uError("OTD:invalid type(%s) for JSON String", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE;
}
pVal->length = (int16_t)strlen(value->valuestring);
pVal->length = strlen(value->valuestring);
if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;

View File

@ -236,7 +236,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
PROCESS_SLASH(value, valueLen)
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}

View File

@ -158,7 +158,7 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}

View File

@ -500,7 +500,7 @@ int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal)
break;
default:
ASSERTS(0, "invalid row format");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
if (bv == BIT_FLG_NONE) {
@ -755,7 +755,7 @@ SColVal *tRowIterNext(SRowIter *pIter) {
}
if (pIter->pRow->flag == HAS_NULL) {
pIter->cv = COL_VAL_NULL(pTColumn->type, pTColumn->colId);
pIter->cv = COL_VAL_NULL(pTColumn->colId, pTColumn->type);
goto _exit;
}
@ -938,7 +938,7 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *
break;
default:
ASSERTS(0, "Invalid row flag");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
while (pColData) {
@ -963,7 +963,7 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *
break;
default:
ASSERTS(0, "Invalid row flag");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
if (bv == BIT_FLG_NONE) {
@ -1054,7 +1054,7 @@ static int32_t tRowKVUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aCo
pData = pv + ((uint32_t *)pKVIdx->idx)[iCol];
} else {
ASSERTS(0, "Invalid KV row format");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
int16_t cid;
@ -2439,7 +2439,7 @@ _exit:
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes, int32_t nRows, char *lengthOrbitmap,
char *data) {
int32_t code = 0;
if(data == NULL){
if (data == NULL) {
for (int32_t i = 0; i < nRows; ++i) {
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NONE](pColData, NULL, 0);
}
@ -2453,8 +2453,9 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
if (code) goto _exit;
} else {
if(ASSERT(varDataTLen(data + offset) <= bytes)){
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset), bytes);
if (ASSERT(varDataTLen(data + offset) <= bytes)) {
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset),
bytes);
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}

View File

@ -118,7 +118,8 @@ int32_t tsRedirectMaxPeriod = 1000;
int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
int32_t tsMetaCacheMaxSize = -1; // MB
int32_t tsSlowLogThreshold = 3; // seconds
int32_t tsSlowLogScope = SLOW_LOG_TYPE_ALL;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -347,6 +348,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, true) != 0) return -1;
if (cfgAddString(pCfg, "slowLogScope", "", true) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS);
@ -694,6 +697,42 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
metaDebugFlag = cfgGetItem(pCfg, "metaDebugFlag")->i32;
}
static int32_t taosSetSlowLogScope(char *pScope) {
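// translate the slowLogScope config string into a SLOW_LOG_TYPE_* bitmask;
// an empty value falls back to logging all request types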
if (NULL == pScope || 0 == strlen(pScope)) {
tsSlowLogScope = SLOW_LOG_TYPE_ALL;
return 0;
}
if (0 == strcasecmp(pScope, "all")) {
tsSlowLogScope = SLOW_LOG_TYPE_ALL;
return 0;
}
if (0 == strcasecmp(pScope, "query")) {
tsSlowLogScope = SLOW_LOG_TYPE_QUERY;
return 0;
}
if (0 == strcasecmp(pScope, "insert")) {
tsSlowLogScope = SLOW_LOG_TYPE_INSERT;
return 0;
}
if (0 == strcasecmp(pScope, "others")) {
tsSlowLogScope = SLOW_LOG_TYPE_OTHERS;
return 0;
}
if (0 == strcasecmp(pScope, "none")) {
tsSlowLogScope = 0;
return 0;
}
uError("Invalid slowLog scope value:%s", pScope);
terrno = TSDB_CODE_INVALID_CFG_VALUE;
return -1;
}
static int32_t taosSetClientCfg(SConfig *pCfg) {
tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
@ -745,6 +784,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
tsQueryMaxConcurrentTables = cfgGetItem(pCfg, "queryMaxConcurrentTables")->i64;
tsMetaCacheMaxSize = cfgGetItem(pCfg, "metaCacheMaxSize")->i32;
tsSlowLogThreshold = cfgGetItem(pCfg, "slowLogThreshold")->i32;
if (taosSetSlowLogScope(cfgGetItem(pCfg, "slowLogScope")->str)) {
return -1;
}
tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
@ -1165,6 +1208,12 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32;
} else if (strcasecmp("smaDebugFlag", name) == 0) {
smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
} else if (strcasecmp("slowLogThreshold", name) == 0) {
tsSlowLogThreshold = cfgGetItem(pCfg, "slowLogThreshold")->i32;
} else if (strcasecmp("slowLogScope", name) == 0) {
if (taosSetSlowLogScope(cfgGetItem(pCfg, "slowLogScope")->str)) {
return -1;
}
}
break;
}

View File

@ -60,7 +60,7 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (IsReq(pMsg)) {
if (code != 0) {
if (terrno != 0) code = terrno;
dGError("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(code), TMSG_INFO(pMsg->msgType));
dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
}
vmSendRsp(pMsg, code);
}

View File

@ -118,7 +118,7 @@ typedef enum {
} ETrnPolicy;
typedef enum {
TRN_EXEC_PRARLLEL = 0,
TRN_EXEC_PARALLEL = 0,
TRN_EXEC_SERIAL = 1,
} ETrnExec;
@ -177,6 +177,7 @@ typedef struct {
SArray* pRpcArray;
SRWLatch lockRpcArray;
int64_t mTraceId;
TdThreadMutex mutex;
} STrans;
typedef struct {

View File

@ -76,6 +76,7 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname);
void mndTransSetSerial(STrans *pTrans);
void mndTransSetParallel(STrans *pTrans);
void mndTransSetOper(STrans *pTrans, EOperType oper);
int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans);

View File

@ -50,6 +50,8 @@ void *mndBuildCreateVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *p
void *mndBuildDropVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid);
int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup);
#ifdef __cplusplus
}
#endif

View File

@ -556,7 +556,7 @@ RETRIEVE_FUNC_OVER:
return code;
}
static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int16_t len) {
static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int32_t len) {
char *msg = "unknown";
if (type >= sizeof(tDataTypes) / sizeof(tDataTypes[0])) {
return msg;

View File

@ -546,6 +546,7 @@ static void mndTransDropData(STrans *pTrans) {
pTrans->param = NULL;
pTrans->paramLen = 0;
}
(void)taosThreadMutexDestroy(&pTrans->mutex);
}
static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
@ -643,7 +644,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->stage = TRN_STAGE_PREPARE;
pTrans->policy = policy;
pTrans->conflict = conflict;
pTrans->exec = TRN_EXEC_PRARLLEL;
pTrans->exec = TRN_EXEC_PARALLEL;
pTrans->createdTime = taosGetTimestampMs();
pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
@ -651,6 +652,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
pTrans->mTraceId = pReq ? TRACE_GET_ROOTID(&pReq->info.traceId) : 0;
taosInitRWLatch(&pTrans->lockRpcArray);
taosThreadMutexInit(&pTrans->mutex, NULL);
if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL ||
pTrans->pRpcArray == NULL) {
@ -793,6 +795,8 @@ void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname)
void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; }
void mndTransSetParallel(STrans *pTrans) { pTrans->exec = TRN_EXEC_PARALLEL; }
void mndTransSetOper(STrans *pTrans, EOperType oper) { pTrans->oper = oper; }
static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
@ -1307,7 +1311,13 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
int32_t code = 0;
int32_t numOfActions = taosArrayGetSize(pTrans->redoActions);
if (numOfActions == 0) return code;
if (pTrans->redoActionPos >= numOfActions) return code;
taosThreadMutexLock(&pTrans->mutex);
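// hold pTrans->mutex so redoActionPos is advanced by only one thread at a time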
if (pTrans->redoActionPos >= numOfActions) {
taosThreadMutexUnlock(&pTrans->mutex);
return code;
}
mInfo("trans:%d, execute %d actions serial, current redoAction:%d", pTrans->id, numOfActions, pTrans->redoActionPos);
@ -1377,6 +1387,8 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
}
}
taosThreadMutexUnlock(&pTrans->mutex);
return code;
}

View File

@ -1917,7 +1917,7 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
int32_t vgid = pVgroup->vgId;
int8_t replica = pVgroup->replica;
if(pVgroup->replica <= 1) {
if(pVgroup->replica <= 1) {
mInfo("trans:%d, vgid:%d no need to balance, replica:%d", pTrans->id, vgid, replica);
return -1;
}
@ -1951,6 +1951,19 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
return -1;
}
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
if (pDb == NULL) {
mError("trans:%d, vgid:%d failed to be balanced to dnode:%d, because db not exist", pTrans->id, vgid, dnodeId);
return -1;
}
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) {
mError("trans:%d, vgid:%d failed to be balanced to dnode:%d", pTrans->id, vgid, dnodeId);
return -1;
}
mndReleaseDb(pMnode, pDb);
SSdbRaw *pRaw = mndVgroupActionEncode(pVgroup);
if (pRaw == NULL) {
mError("trans:%d, vgid:%d failed to encode action to dnode:%d", pTrans->id, vgid, dnodeId);
@ -1965,7 +1978,8 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
}
else
{
mInfo("trans:%d, vgid:%d cant be balanced to dnode:%d, exist:%d, online:%d", pTrans->id, vgid, dnodeId, exist, online);
mInfo("trans:%d, vgid:%d cant be balanced to dnode:%d, exist:%d, online:%d",
pTrans->id, vgid, dnodeId, exist, online);
}
return 0;
@ -2084,6 +2098,8 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
return -1;
}
mndSortVnodeGid(&newVgroup);
{
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
@ -2101,7 +2117,19 @@ static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans,
return 0;
}
static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
static int32_t mndTransCommitVgStatus(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus) {
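// append a commit log entry that moves the vgroup to vgStatus, freeing the
// encoded raw object on failure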
SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
if (pRaw == NULL) goto _err;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _err;
(void)sdbSetRawStatus(pRaw, vgStatus);
pRaw = NULL;
return 0;
_err:
sdbFreeRaw(pRaw);
return -1;
}
int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
int32_t code = -1;
STrans *pTrans = NULL;
SSdbRaw *pRaw = NULL;
@ -2162,6 +2190,7 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
mInfo("vgId:%d, vnode:%d dnode:%d", newVg2.vgId, i, newVg2.vnodeGid[i].dnodeId);
}
// alter vgId and hash range
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
newVg1.vgId = maxVgId;
@ -2170,31 +2199,24 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg2, maxVgId) != 0) goto _OVER;
newVg2.vgId = maxVgId;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg2) != 0) goto _OVER;
// adjust vgroup replica
if (pDb->cfg.replications != newVg1.replica) {
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
} else {
if (mndTransCommitVgStatus(pTrans, &newVg1, SDB_STATUS_READY) < 0) goto _OVER;
}
if (pDb->cfg.replications != newVg2.replica) {
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
} else {
if (mndTransCommitVgStatus(pTrans, &newVg2, SDB_STATUS_READY) < 0) goto _OVER;
}
pRaw = mndVgroupActionEncode(&newVg1);
if (pRaw == NULL) goto _OVER;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
pRaw = NULL;
pRaw = mndVgroupActionEncode(&newVg2);
if (pRaw == NULL) goto _OVER;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
pRaw = NULL;
pRaw = mndVgroupActionEncode(pVgroup);
if (pRaw == NULL) goto _OVER;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_DROPPED);
pRaw = NULL;
if (mndTransCommitVgStatus(pTrans, pVgroup, SDB_STATUS_DROPPED) < 0) goto _OVER;
memcpy(&dbObj, pDb, sizeof(SDbObj));
if (dbObj.cfg.pRetensions != NULL) {
@ -2221,37 +2243,13 @@ _OVER:
return code;
}
static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
SVgObj *pVgroup = NULL;
SDbObj *pDb = NULL;
extern int32_t mndProcessSplitVgroupMsgImp(SRpcMsg *pReq);
SSplitVgroupReq req = {0};
if (tDeserializeSSplitVgroupReq(pReq->pCont, pReq->contLen, &req) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) { return mndProcessSplitVgroupMsgImp(pReq); }
mInfo("vgId:%d, start to split", req.vgId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_SPLIT_VGROUP) != 0) {
goto _OVER;
}
pVgroup = mndAcquireVgroup(pMnode, req.vgId);
if (pVgroup == NULL) goto _OVER;
pDb = mndAcquireDb(pMnode, pVgroup->dbName);
if (pDb == NULL) goto _OVER;
code = mndSplitVgroup(pMnode, pReq, pDb, pVgroup);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
_OVER:
mndReleaseVgroup(pMnode, pVgroup);
mndReleaseDb(pMnode, pDb);
return code;
}
#ifndef TD_ENTERPRISE
int32_t mndProcessSplitVgroupMsgImp(SRpcMsg *pReq) { return 0; }
#endif
static int32_t mndSetBalanceVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
SDnodeObj *pSrc, SDnodeObj *pDst) {

View File

@ -84,6 +84,7 @@ target_include_directories(
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
target_link_libraries(
vnode
@ -100,6 +101,7 @@ target_link_libraries(
# PUBLIC bdb
# PUBLIC scalar
PUBLIC rocksdb
PUBLIC transport
PUBLIC stream
PUBLIC index

View File

@ -182,7 +182,7 @@ int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t n
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly);
void tsdbReaderSetId(STsdbReader* pReader, const char* idstr);
void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
void tsdbReaderClose(STsdbReader *pReader);
int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
@ -196,8 +196,9 @@ void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
SArray *pTableUids);
void *tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
@ -343,6 +344,7 @@ struct SVnodeCfg {
SVnodeStats vndStats;
uint32_t hashBegin;
uint32_t hashEnd;
bool hashChange;
int16_t sttTrigger;
int16_t hashPrefix;
int16_t hashSuffix;

View File

@ -343,6 +343,16 @@ struct STsdbFS {
SArray *aDFileSet; // SArray<SDFileSet>
};
typedef struct {
rocksdb_t *db;
rocksdb_options_t *options;
rocksdb_flushoptions_t *flushoptions;
rocksdb_writeoptions_t *writeoptions;
rocksdb_readoptions_t *readoptions;
rocksdb_writebatch_t *writebatch;
TdThreadMutex rMutex;
} SRocksCache;
struct STsdb {
char *path;
SVnode *pVnode;
@ -355,6 +365,7 @@ struct STsdb {
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SRocksCache rCache;
};
struct TSDBKEY {
@ -777,6 +788,8 @@ typedef struct SCacheRowsReader {
uint64_t suid;
char **transferBuf; // todo remove it soon
int32_t numOfCols;
SArray *pCidList;
int32_t *pSlotIds;
int32_t type;
int32_t tableIndex; // currently returned result tables
STableKeyInfo *pTableList; // table id list
@ -796,6 +809,10 @@ typedef struct {
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype);
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);

View File

@ -19,6 +19,7 @@
#include "executor.h"
#include "filter.h"
#include "qworker.h"
#include "rocksdb/c.h"
#include "sync.h"
#include "tRealloc.h"
#include "tchecksum.h"
@ -177,6 +178,7 @@ int tsdbClose(STsdb** pTsdb);
int32_t tsdbBegin(STsdb* pTsdb);
int32_t tsdbPrepareCommit(STsdb* pTsdb);
int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
int32_t tsdbCacheCommit(STsdb* pTsdb);
int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo);
int32_t tsdbFinishCommit(STsdb* pTsdb);
int32_t tsdbRollbackCommit(STsdb* pTsdb);
@ -193,9 +195,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t type);
int32_t type);
int tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);

View File

@ -12,7 +12,6 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdb.h"
static int32_t tsdbOpenBICache(STsdb *pTsdb) {
@ -47,6 +46,512 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
}
}
#define ROCKS_KEY_LEN 64
static void tsdbGetRocksPath(STsdb *pTsdb, char *path) {
SVnode *pVnode = pTsdb->pVnode;
if (pVnode->pTfs) {
if (path) {
snprintf(path, TSDB_FILENAME_LEN, "%s%s%s%scache.rdb", tfsGetPrimaryPath(pTsdb->pVnode->pTfs), TD_DIRSEP,
pTsdb->path, TD_DIRSEP);
}
} else {
if (path) {
snprintf(path, TSDB_FILENAME_LEN, "%s%scache.rdb", pTsdb->path, TD_DIRSEP);
}
}
}
static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
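// open the per-vnode RocksDB instance at <tsdb path>/cache.rdb together with
// its option/read/write/flush handles, unwinding on any failure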
int32_t code = 0;
rocksdb_options_t *options = rocksdb_options_create();
if (NULL == options) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
rocksdb_options_set_create_if_missing(options, 1);
// rocksdb_options_set_inplace_update_support(options, 1);
// rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create();
if (NULL == writeoptions) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err2;
}
// rocksdb_writeoptions_disable_WAL(writeoptions, 1);
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
if (NULL == readoptions) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err2;
}
char *err = NULL;
char cachePath[TSDB_FILENAME_LEN] = {0};
tsdbGetRocksPath(pTsdb, cachePath);
rocksdb_t *db = rocksdb_open(options, cachePath, &err);
if (NULL == db) {
code = -1;
goto _err3;
}
rocksdb_flushoptions_t *flushoptions = rocksdb_flushoptions_create();
if (NULL == flushoptions) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err4;
}
rocksdb_writebatch_t *writebatch = rocksdb_writebatch_create();
pTsdb->rCache.writebatch = writebatch;
pTsdb->rCache.options = options;
pTsdb->rCache.writeoptions = writeoptions;
pTsdb->rCache.readoptions = readoptions;
pTsdb->rCache.flushoptions = flushoptions;
pTsdb->rCache.db = db;
taosThreadMutexInit(&pTsdb->rCache.rMutex, NULL);
return code;
_err4:
rocksdb_readoptions_destroy(readoptions);
_err3:
rocksdb_writeoptions_destroy(writeoptions);
_err2:
rocksdb_options_destroy(options);
_err:
return code;
}
static void tsdbCloseRocksCache(STsdb *pTsdb) {
rocksdb_close(pTsdb->rCache.db);
rocksdb_flushoptions_destroy(pTsdb->rCache.flushoptions);
rocksdb_writebatch_destroy(pTsdb->rCache.writebatch);
rocksdb_readoptions_destroy(pTsdb->rCache.readoptions);
rocksdb_writeoptions_destroy(pTsdb->rCache.writeoptions);
rocksdb_options_destroy(pTsdb->rCache.options);
taosThreadMutexDestroy(&pTsdb->rCache.rMutex);
}
int32_t tsdbCacheCommit(STsdb *pTsdb) {
int32_t code = 0;
char *err = NULL;
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
code = -1;
}
return code;
}
SLastCol *tsdbCacheDeserialize(char const *value) {
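// a serialized value is a raw SLastCol struct followed, for var-data types,
// by the payload bytes; re-point pData at that inline payload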
if (!value) {
return NULL;
}
SLastCol *pLastCol = (SLastCol *)value;
SColVal *pColVal = &pLastCol->colVal;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
if (pColVal->value.nData > 0) {
pColVal->value.pData = (char *)value + sizeof(*pLastCol);
} else {
pColVal->value.pData = NULL;
}
}
return pLastCol;
}
void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
SColVal *pColVal = &pLastCol->colVal;
size_t length = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pColVal->type)) {
length += pColVal->value.nData;
}
*value = taosMemoryMalloc(length);
*(SLastCol *)(*value) = *pLastCol;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
SColVal *pDColVal = &((SLastCol *)(*value))->colVal;
pDColVal->value.pData = *value + sizeof(*pLastCol);
if (pColVal->value.nData > 0) {
memcpy(pDColVal->value.pData, pVal, pColVal->value.nData);
} else {
pDColVal->value.pData = NULL;
}
}
*size = length;
}
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, char const *lstring) {
SLastCol *pLastCol = NULL;
char *err = NULL;
size_t vlen = 0;
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, cid, lstring);
char *value = NULL;
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, key, klen, &vlen, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
pLastCol = tsdbCacheDeserialize(value);
return pLastCol;
}
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow) {
int32_t code = 0;
// 1, fetch schema
STSchema *pTSchema = NULL;
int32_t sver = TSDBROW_SVERSION(pRow);
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return -1;
}
// 2, iterate col values into array
SArray *aColVal = taosArrayInit(32, sizeof(SColVal));
STSDBRowIter iter = {0};
tsdbRowIterOpen(&iter, pRow, pTSchema);
for (SColVal *pColVal = tsdbRowIterNext(&iter); pColVal; pColVal = tsdbRowIterNext(&iter)) {
/*
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
pColVal->value.pData = NULL;
code = tRealloc(&pColVal->value.pData, pColVal->value.nData);
if (code) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
}
}
*/
taosArrayPush(aColVal, pColVal);
}
tsdbRowClose(&iter);
// 3, build keys & multi get from rocks
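// each column yields two keys, "uid:cid:last" and "uid:cid:last_row"; slots
// [0, num_keys) address the former, [num_keys, 2*num_keys) the latter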
int num_keys = TARRAY_SIZE(aColVal);
char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
for (int i = 0; i < num_keys; ++i) {
SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
int16_t cid = pColVal->cid;
char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, cid);
if (last_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
int lr_key_len = snprintf(keys + ROCKS_KEY_LEN, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, cid);
if (lr_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
keys_list[i] = keys;
keys_list[num_keys + i] = keys + ROCKS_KEY_LEN;
keys_list_sizes[i] = last_key_len;
keys_list_sizes[num_keys + i] = lr_key_len;
}
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
taosMemoryFree(keys_list[i]);
}
for (int i = 0; i < num_keys * 2; ++i) {
rocksdb_free(errs[i]);
}
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
TSKEY keyTs = TSDBROW_TS(pRow);
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
if (COL_VAL_IS_VALUE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, pColVal->cid);
rocksdb_writebatch_put(wb, key, klen, value, vlen);
taosMemoryFree(value);
}
}
if (!COL_VAL_IS_NONE(pColVal)) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
if (NULL == pLastCol || pLastCol->ts <= keyTs) {
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(&(SLastCol){.ts = keyTs, .colVal = *pColVal}, &value, &vlen);
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, pColVal->cid);
rocksdb_writebatch_put(wb, key, klen, value, vlen);
taosMemoryFree(value);
}
}
rocksdb_free(values_list[i]);
rocksdb_free(values_list[i + num_keys]);
}
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
rocksdb_writebatch_clear(wb);
_exit:
taosArrayDestroy(aColVal);
taosMemoryFree(pTSchema);
return code;
}
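
The update path above follows the standard RocksDB write-batch lifecycle: accumulate puts while holding rMutex, apply them atomically with a single rocksdb_write, then clear the batch for reuse. A self-contained sketch of that pattern against a bare rocksdb_t *db; keys, values and the wopts handle are illustrative only:

#include <rocksdb/c.h>

rocksdb_writeoptions_t *wopts = rocksdb_writeoptions_create();
rocksdb_writebatch_t   *wb    = rocksdb_writebatch_create();
rocksdb_writebatch_put(wb, "k1", 2, "v1", 2);
rocksdb_writebatch_put(wb, "k2", 2, "v2", 2);
char *err = NULL;
rocksdb_write(db, wopts, wb, &err);  // applies both puts atomically
if (err != NULL) rocksdb_free(err);
rocksdb_writebatch_clear(wb);        // the batch may now be reused
rocksdb_writebatch_destroy(wb);
rocksdb_writeoptions_destroy(wopts);
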
static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds);
static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype) {
static char const *alstring[2] = {"last_row", "last"};
char const *lstring = alstring[ltype];
int32_t code = 0;
SArray *pCidList = pr->pCidList;
int num_keys = TARRAY_SIZE(pCidList);
char **keys_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
for (int i = 0; i < num_keys; ++i) {
int16_t cid = *(int16_t *)taosArrayGet(pCidList, i);
char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, cid, lstring);
if (last_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
keys_list[i] = keys;
keys_list_sizes[i] = last_key_len;
}
char **values_list = taosMemoryCalloc(num_keys, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys, sizeof(char *));
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
taosMemoryFree(keys_list[i]);
rocksdb_free(errs[i]);
}
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
for (int i = 0; i < num_keys; ++i) {
bool freeCol = true;
SArray *pTmpColArray = NULL;
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
int16_t cid = *(int16_t *)taosArrayGet(pCidList, i);
SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pr->pSchema->columns[pr->pSlotIds[i]].type)};
if (pLastCol) {
SColVal *pColVal = &pLastCol->colVal;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
}
}
} else {
taosThreadMutexLock(&pTsdb->rCache.rMutex);
pLastCol = tsdbCacheLookup(pTsdb, uid, cid, lstring);
if (!pLastCol) {
// recalc: load from tsdb
int16_t aCols[1] = {cid};
int16_t slotIds[1] = {pr->pSlotIds[i]};
pTmpColArray = NULL;
if (ltype) {
mergeLastCid(uid, pTsdb, &pTmpColArray, pr, aCols, 1, slotIds);
} else {
mergeLastRowCid(uid, pTsdb, &pTmpColArray, pr, aCols, 1, slotIds);
}
if (pTmpColArray && TARRAY_SIZE(pTmpColArray) >= 1) {
pLastCol = taosArrayGet(pTmpColArray, 0);
freeCol = false;
}
// still NULL: fall back to a NONE column value
if (!pLastCol) {
pLastCol = &noneCol;
freeCol = false;
}
// store result back to rocks cache
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
char *value = NULL;
size_t vlen = 0;
tsdbCacheSerialize(pLastCol, &value, &vlen);
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":%s", uid, pLastCol->colVal.cid, lstring);
rocksdb_writebatch_put(wb, key, klen, value, vlen);
taosMemoryFree(value);
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
} else {
SColVal *pColVal = &pLastCol->colVal;
if (IS_VAR_DATA_TYPE(pColVal->type)) {
uint8_t *pVal = pColVal->value.pData;
pColVal->value.pData = taosMemoryMalloc(pColVal->value.nData);
if (pColVal->value.nData) {
memcpy(pColVal->value.pData, pVal, pColVal->value.nData);
}
}
}
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
}
taosArrayPush(pLastArray, pLastCol);
taosArrayDestroy(pTmpColArray);
if (freeCol) {
taosMemoryFree(pLastCol);
}
}
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
return code;
}
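
Both tsdbCacheUpdate and tsdbCacheGet lean on rocksdb_multi_get, whose C binding transfers ownership of every returned value buffer and error string to the caller. A small sketch of the required cleanup discipline, assuming the same hypothetical db/ropts handles; a NULL value entry simply means the key was not found:

#include <string.h>  // strlen
#include <rocksdb/c.h>

const char *keys[2]          = {"100001:2:last", "100001:2:last_row"};
size_t      key_sizes[2]     = {strlen(keys[0]), strlen(keys[1])};
char       *values[2]        = {NULL, NULL};
size_t      value_sizes[2]   = {0, 0};
char       *errs[2]          = {NULL, NULL};
rocksdb_multi_get(db, ropts, 2, (const char *const *)keys, key_sizes, values, value_sizes, errs);
for (int i = 0; i < 2; ++i) {
  if (errs[i] != NULL) rocksdb_free(errs[i]);
  if (values[i] != NULL) rocksdb_free(values[i]);  // each buffer is caller-owned
}
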
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) {
int32_t code = 0;
// 1, fetch schema
STSchema *pTSchema = NULL;
int32_t sver = -1;
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, sver, &pTSchema);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return -1;
}
// 3, build keys & multi get from rocks
int num_keys = pTSchema->numOfCols;
char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
for (int i = 0; i < num_keys; ++i) {
int16_t cid = pTSchema->columns[i].colId;
char *keys = taosMemoryCalloc(2, ROCKS_KEY_LEN);
int last_key_len = snprintf(keys, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, cid);
if (last_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
int lr_key_len = snprintf(keys + ROCKS_KEY_LEN, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, cid);
if (lr_key_len >= ROCKS_KEY_LEN) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code));
}
keys_list[i] = keys;
keys_list[num_keys + i] = keys + ROCKS_KEY_LEN;
keys_list_sizes[i] = last_key_len;
keys_list_sizes[num_keys + i] = lr_key_len;
}
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
for (int i = 0; i < num_keys; ++i) {
taosMemoryFree(keys_list[i]);
}
for (int i = 0; i < num_keys * 2; ++i) {
rocksdb_free(errs[i]);
}
taosMemoryFree(keys_list);
taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last", uid, pLastCol->colVal.cid);
rocksdb_writebatch_delete(wb, key, klen);
}
pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
char key[ROCKS_KEY_LEN];
size_t klen = snprintf(key, ROCKS_KEY_LEN, "%" PRIi64 ":%" PRIi16 ":last_row", uid, pLastCol->colVal.cid);
rocksdb_writebatch_delete(wb, key, klen);
}
rocksdb_free(values_list[i]);
rocksdb_free(values_list[i + num_keys]);
}
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
char *err = NULL;
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
if (NULL != err) {
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
rocksdb_free(err);
}
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
rocksdb_writebatch_clear(wb);
_exit:
taosMemoryFree(pTSchema);
return code;
}
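
The delete path reuses the same batch mechanism via rocksdb_writebatch_delete; deleting a key that is not present is a no-op, so the ts range check above is the only guard required. A minimal sketch under the same assumed db/wopts handles, with a hypothetical key:

#include <string.h>  // strlen
#include <rocksdb/c.h>

const char *key = "100001:2:last";
rocksdb_writebatch_t *wb = rocksdb_writebatch_create();
rocksdb_writebatch_delete(wb, key, strlen(key));  // no-op if the key is absent
char *err = NULL;
rocksdb_write(db, wopts, wb, &err);
if (err != NULL) rocksdb_free(err);
rocksdb_writebatch_destroy(wb);
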
int32_t tsdbOpenCache(STsdb *pTsdb) {
int32_t code = 0;
SLRUCache *pCache = NULL;
@ -64,6 +569,12 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
goto _err;
}
code = tsdbOpenRocksCache(pTsdb);
if (code != TSDB_CODE_SUCCESS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
taosLRUCacheSetStrictCapacity(pCache, false);
taosThreadMutexInit(&pTsdb->lruMutex, NULL);
@ -84,6 +595,7 @@ void tsdbCloseCache(STsdb *pTsdb) {
}
tsdbCloseBICache(pTsdb);
tsdbCloseRocksCache(pTsdb);
}
static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) {
@ -170,7 +682,7 @@ int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
return code;
}
/*
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t code = 0;
char key[32] = {0};
@ -225,7 +737,7 @@ int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
return code;
}
*/
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup) {
int32_t code = 0;
STSRow *cacheRow = NULL;
@ -879,7 +1391,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
*pIgnoreEarlierTs = false;
tBlockDataReset(state->pBlockData);
TABLEID tid = {.suid = state->suid, .uid = state->uid};
code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, aCols, nCols);
int nTmpCols = nCols;
if (aCols[0] == PRIMARYKEY_TIMESTAMP_COL_ID && nCols == 1) {
nTmpCols = 0;
skipBlock = false;
}
code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, aCols, nTmpCols);
if (code) goto _err;
code = tsdbReadDataBlock(*state->pDataFReader, &block, state->pBlockData);
@ -1423,6 +1940,21 @@ static int32_t initLastColArray(STSchema *pTSchema, SArray **ppColArray) {
return TSDB_CODE_SUCCESS;
}
static int32_t initLastColArrayPartial(STSchema *pTSchema, SArray **ppColArray, int16_t *slotIds, int nCols) {
SArray *pColArray = taosArrayInit(nCols, sizeof(SLastCol));
if (NULL == pColArray) {
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < nCols; ++i) {
int16_t slotId = slotIds[i];
SLastCol col = {.ts = 0, .colVal = COL_VAL_NULL(pTSchema->columns[slotId].colId, pTSchema->columns[slotId].type)};
taosArrayPush(pColArray, &col);
}
*ppColArray = pColArray;
return TSDB_CODE_SUCCESS;
}
static int32_t cloneTSchema(STSchema *pSrc, STSchema **ppDst) {
int32_t len = sizeof(STSchema) + sizeof(STColumn) * pSrc->numOfCols;
*ppDst = taosMemoryMalloc(len);
@ -1761,6 +2293,342 @@ _err:
return code;
}
static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds) {
STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1);
int16_t nLastCol = nCols;
int16_t noneCol = 0;
bool setNoneCol = false;
bool hasRow = false;
bool ignoreEarlierTs = false;
SArray *pColArray = NULL;
SColVal *pColVal = &(SColVal){0};
int32_t code = initLastColArrayPartial(pTSchema, &pColArray, slotIds, nCols);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
SArray *aColArray = taosArrayInit(nCols, sizeof(int16_t));
if (NULL == aColArray) {
taosArrayDestroy(pColArray);
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int i = 0; i < nCols; ++i) {
taosArrayPush(aColArray, &aCols[i]);
}
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader,
&pr->pDataFReaderLast, pr->lastTs);
do {
TSDBROW *pRow = NULL;
nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray));
if (!pRow) {
break;
}
hasRow = true;
int32_t sversion = TSDBROW_SVERSION(pRow);
if (sversion != -1) {
code = updateTSchema(sversion, pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
}
pTSchema = pr->pCurrSchema;
}
// int16_t nCol = pTSchema->numOfCols;
TSKEY rowTs = TSDBROW_TS(pRow);
if (lastRowTs == TSKEY_MAX) {
lastRowTs = rowTs;
for (int16_t iCol = noneCol; iCol < nCols; ++iCol) {
if (iCol >= nLastCol) {
break;
}
SLastCol *pCol = taosArrayGet(pColArray, iCol);
if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) {
continue;
}
if (slotIds[iCol] == 0) {
STColumn *pTColumn = &pTSchema->columns[0];
*pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = rowTs});
taosArraySet(pColArray, 0, &(SLastCol){.ts = rowTs, .colVal = *pColVal});
continue;
}
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
if (!COL_VAL_IS_VALUE(pColVal)) {
if (!setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
} else {
int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ);
if (aColIndex >= 0) {
taosArrayRemove(aColArray, aColIndex);
}
}
}
if (!setNoneCol) {
// done, break out and return pColArray
break;
} else {
continue;
}
}
// merge into pColArray
setNoneCol = false;
for (int16_t iCol = noneCol; iCol < nCols; ++iCol) {
if (iCol >= nLastCol) {
break;
}
// the higher version's column value
SLastCol *lastColVal = (SLastCol *)taosArrayGet(pColArray, iCol);
if (lastColVal->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) {
continue;
}
SColVal *tColVal = &lastColVal->colVal;
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
if (!COL_VAL_IS_VALUE(tColVal) && COL_VAL_IS_VALUE(pColVal)) {
SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
taosArraySet(pColArray, iCol, &lastCol);
int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ);
taosArrayRemove(aColArray, aColIndex);
} else if (!COL_VAL_IS_VALUE(tColVal) && !COL_VAL_IS_VALUE(pColVal) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
}
} while (setNoneCol);
if (!hasRow) {
if (ignoreEarlierTs) {
taosArrayDestroy(pColArray);
pColArray = NULL;
} else {
taosArrayClear(pColArray);
}
}
*ppLastArray = pColArray;
nextRowIterClose(&iter);
taosArrayDestroy(aColArray);
return code;
_err:
nextRowIterClose(&iter);
// taosMemoryFreeClear(pTSchema);
*ppLastArray = NULL;
taosArrayDestroy(pColArray);
taosArrayDestroy(aColArray);
return code;
}
static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds) {
STSchema *pTSchema = pr->pSchema; // metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1, 1);
int16_t nLastCol = nCols;
int16_t noneCol = 0;
bool setNoneCol = false;
bool hasRow = false;
bool ignoreEarlierTs = false;
SArray *pColArray = NULL;
SColVal *pColVal = &(SColVal){0};
int32_t code = initLastColArrayPartial(pTSchema, &pColArray, slotIds, nCols);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
SArray *aColArray = taosArrayInit(nCols, sizeof(int16_t));
if (NULL == aColArray) {
taosArrayDestroy(pColArray);
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int i = 0; i < nCols; ++i) {
taosArrayPush(aColArray, &aCols[i]);
}
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
nextRowIterOpen(&iter, uid, pTsdb, pTSchema, pr->suid, pr->pLoadInfo, pr->pReadSnap, &pr->pDataFReader,
&pr->pDataFReaderLast, pr->lastTs);
do {
TSDBROW *pRow = NULL;
nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray));
if (!pRow) {
break;
}
hasRow = true;
int32_t sversion = TSDBROW_SVERSION(pRow);
if (sversion != -1) {
code = updateTSchema(sversion, pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
}
pTSchema = pr->pCurrSchema;
}
// int16_t nCol = pTSchema->numOfCols;
TSKEY rowTs = TSDBROW_TS(pRow);
if (lastRowTs == TSKEY_MAX) {
lastRowTs = rowTs;
for (int16_t iCol = noneCol; iCol < nCols; ++iCol) {
if (iCol >= nLastCol) {
break;
}
SLastCol *pCol = taosArrayGet(pColArray, iCol);
if (pCol->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) {
continue;
}
if (slotIds[iCol] == 0) {
STColumn *pTColumn = &pTSchema->columns[0];
*pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.val = rowTs});
taosArraySet(pColArray, 0, &(SLastCol){.ts = rowTs, .colVal = *pColVal});
continue;
}
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
*pCol = (SLastCol){.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
pCol->colVal.value.pData = taosMemoryMalloc(pCol->colVal.value.nData);
if (pCol->colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(pCol->colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
if (COL_VAL_IS_NONE(pColVal)) {
if (!setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
} else {
int32_t aColIndex = taosArraySearchIdx(aColArray, &pColVal->cid, compareInt16Val, TD_EQ);
if (aColIndex >= 0) {
taosArrayRemove(aColArray, aColIndex);
}
}
}
if (!setNoneCol) {
// done, break out and return pColArray
break;
} else {
continue;
}
}
// merge into pColArray
setNoneCol = false;
for (int16_t iCol = noneCol; iCol < nCols; ++iCol) {
if (iCol >= nLastCol) {
break;
}
// the higher version's column value
SLastCol *lastColVal = (SLastCol *)taosArrayGet(pColArray, iCol);
if (lastColVal->colVal.cid != pTSchema->columns[slotIds[iCol]].colId) {
continue;
}
SColVal *tColVal = &lastColVal->colVal;
tsdbRowGetColVal(pRow, pTSchema, slotIds[iCol], pColVal);
if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
SLastCol lastCol = {.ts = rowTs, .colVal = *pColVal};
if (IS_VAR_DATA_TYPE(pColVal->type) && pColVal->value.nData > 0) {
SLastCol *pLastCol = (SLastCol *)taosArrayGet(pColArray, iCol);
taosMemoryFree(pLastCol->colVal.value.pData);
lastCol.colVal.value.pData = taosMemoryMalloc(lastCol.colVal.value.nData);
if (lastCol.colVal.value.pData == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
memcpy(lastCol.colVal.value.pData, pColVal->value.pData, pColVal->value.nData);
}
taosArraySet(pColArray, iCol, &lastCol);
int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ);
taosArrayRemove(aColArray, aColIndex);
} else if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
}
} while (setNoneCol);
if (!hasRow) {
if (ignoreEarlierTs) {
taosArrayDestroy(pColArray);
pColArray = NULL;
} else {
taosArrayClear(pColArray);
}
}
*ppLastArray = pColArray;
nextRowIterClose(&iter);
taosArrayDestroy(aColArray);
return code;
_err:
nextRowIterClose(&iter);
*ppLastArray = NULL;
taosArrayDestroy(pColArray);
taosArrayDestroy(aColArray);
return code;
}
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **handle) {
int32_t code = 0;
char key[32] = {0};

@ -21,47 +21,30 @@
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds,
void** pRes, const char* idStr) {
const int32_t* dstSlotIds, void** pRes, const char* idStr) {
int32_t numOfRows = pBlock->info.rows;
bool allNullRow = true;
if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) {
bool allNullRow = true;
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[i]);
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
if (slotIds[i] == -1) { // the primary timestamp
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
p->ts = pColVal->ts;
p->bytes = TSDB_KEYSIZE;
*(int64_t*)p->buf = pColVal->ts;
allNullRow = false;
} else {
int32_t slotId = slotIds[i];
// add check for null value, caused by the modification of table schema (new column added).
if (slotId >= taosArrayGetSize(pRow)) {
p->ts = 0;
p->isNull = true;
colDataSetNULL(pColInfoData, numOfRows);
continue;
}
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
if (!p->isNull) {
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
if (!p->isNull) {
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs the VARSTR header size added
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
}
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs the VARSTR header size added
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
}
}
@ -74,36 +57,31 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
pBlock->info.rows += allNullRow ? 0 : 1;
} else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
if (slotIds[i] == -1) {
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
colDataSetVal(pColInfoData, numOfRows, (const char*)&pColVal->ts, false);
} else {
int32_t slotId = slotIds[i];
// add check for null value, caused by the modification of table schema (new column added).
if (slotId >= taosArrayGetSize(pRow)) {
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
SColVal* pVal = &pColVal->colVal;
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
continue;
}
allNullRow = false;
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
SColVal* pVal = &pColVal->colVal;
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
} else {
colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
} else {
colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
}
}
pBlock->info.rows += 1;
pBlock->info.rows += allNullRow ? 0 : 1;
} else {
tsdbError("invalid retrieve type:%d, %s", pReader->type, idStr);
return TSDB_CODE_INVALID_PARA;
@ -143,7 +121,7 @@ static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* id
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void** pReader, const char* idstr) {
SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr) {
*pReader = NULL;
SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
@ -155,6 +133,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
p->pTsdb = p->pVnode->pTsdb;
p->verRange = (SVersionRange){.minVer = 0, .maxVer = UINT64_MAX};
p->numOfCols = numOfCols;
p->pCidList = pCidList;
p->pSlotIds = pSlotIds;
p->suid = suid;
if (numOfTables == 0) {
@ -226,32 +206,9 @@ void* tsdbCacherowsReaderClose(void* pReader) {
return NULL;
}
static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, SArray** pRow,
LRUHandle** h) {
int32_t code = TSDB_CODE_SUCCESS;
*pRow = NULL;
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
code = tsdbCacheGetLastrowH(lruCache, uid, pr, h);
} else {
code = tsdbCacheGetLastH(lruCache, uid, pr, h);
}
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// no data in the table of Uid
if (*h != NULL) {
*pRow = (SArray*)taosLRUCacheValue(lruCache, *h);
}
return code;
}
static void freeItem(void* pItem) {
SLastCol* pCol = (SLastCol*)pItem;
if (IS_VAR_DATA_TYPE(pCol->colVal.type)) {
if (IS_VAR_DATA_TYPE(pCol->colVal.type) && pCol->colVal.value.pData) {
taosMemoryFree(pCol->colVal.value.pData);
}
}
@ -277,19 +234,17 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) {
}
}
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, const int32_t* dstSlotIds,
SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SCacheRowsReader* pr = pReader;
int32_t code = TSDB_CODE_SUCCESS;
SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache;
LRUHandle* h = NULL;
SArray* pRow = NULL;
bool hasRes = false;
SArray* pLastCols = NULL;
int32_t code = TSDB_CODE_SUCCESS;
SArray* pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol));
bool hasRes = false;
SArray* pLastCols = NULL;
void** pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES);
if (pRes == NULL) {
@ -298,20 +253,22 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
for (int32_t j = 0; j < pr->numOfCols; ++j) {
pRes[j] = taosMemoryCalloc(
1, sizeof(SFirstLastRes) + pr->pSchema->columns[-1 == slotIds[j] ? 0 : slotIds[j]].bytes + VARSTR_HEADER_SIZE);
pRes[j] =
taosMemoryCalloc(1, sizeof(SFirstLastRes) + pr->pSchema->columns[/*-1 == slotIds[j] ? 0 : */ slotIds[j]].bytes +
VARSTR_HEADER_SIZE);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]);
p->ts = INT64_MIN;
}
pLastCols = taosArrayInit(pr->pSchema->numOfCols, sizeof(SLastCol));
pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
if (pLastCols == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
for (int32_t i = 0; i < pr->pSchema->numOfCols; ++i) {
struct STColumn* pCol = &pr->pSchema->columns[i];
for (int32_t i = 0; i < pr->numOfCols; ++i) {
int32_t slotId = slotIds[i];
struct STColumn* pCol = &pr->pSchema->columns[slotId];
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type, .colVal.flag = CV_FLAG_NULL};
if (IS_VAR_DATA_TYPE(pCol->type)) {
@ -328,6 +285,8 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
pr->pDataFReader = NULL;
pr->pDataFReaderLast = NULL;
int32_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
// retrieve a single last row across all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
int64_t st = taosGetTimestampUs();
@ -335,16 +294,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
for (int32_t i = 0; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
if (h == NULL) {
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
continue;
}
if (taosArrayGetSize(pRow) <= 0) {
tsdbCacheRelease(lruCache, h);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
continue;
}
@ -352,47 +309,34 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
bool hasNotNullRow = true;
int64_t singleTableLastTs = INT64_MAX;
for (int32_t k = 0; k < pr->numOfCols; ++k) {
int32_t slotId = slotIds[k];
SLastCol* p = taosArrayGet(pLastCols, k);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, k);
if (slotId == -1) { // the primary timestamp
SLastCol* p = taosArrayGet(pLastCols, 0);
SLastCol* pCol = (SLastCol*)taosArrayGet(pRow, 0);
if (pCol->ts > p->ts) {
hasRes = true;
p->ts = pCol->ts;
p->colVal = pCol->colVal;
singleTableLastTs = pCol->ts;
if (pColVal->ts > p->ts) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
if (!COL_VAL_IS_VALUE(&p->colVal)) {
hasNotNullRow = false;
}
continue;
}
} else {
SLastCol* p = taosArrayGet(pLastCols, slotId);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
if (pColVal->ts > p->ts) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
if (!COL_VAL_IS_VALUE(&p->colVal)) {
hasNotNullRow = false;
}
continue;
hasRes = true;
p->ts = pColVal->ts;
if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
singleTableLastTs = pColVal->ts;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}
hasRes = true;
p->ts = pColVal->ts;
if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
singleTableLastTs = pColVal->ts;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
}
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
}
}
}
@ -414,33 +358,31 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
}
tsdbCacheRelease(lruCache, h);
taosArrayClearEx(pRow, freeItem);
}
if (hasRes) {
saveOneRow(pLastCols, pResBlock, pr, slotIds, pRes, pr->idstr);
saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
}
} else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) {
for (int32_t i = pr->tableIndex; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
if (h == NULL) {
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
continue;
}
if (taosArrayGetSize(pRow) <= 0) {
tsdbCacheRelease(lruCache, h);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
continue;
}
saveOneRow(pRow, pResBlock, pr, slotIds, pRes, pr->idstr);
// TODO reset the pRes
saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
taosArrayClearEx(pRow, freeItem);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
tsdbCacheRelease(lruCache, h);
pr->tableIndex += 1;
if (pResBlock->info.rows >= pResBlock->info.capacity) {
@ -466,6 +408,7 @@ _end:
}
taosMemoryFree(pRes);
taosArrayDestroyEx(pRow, freeItem);
taosArrayDestroyEx(pLastCols, freeItem);
return code;
}

@ -140,7 +140,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
SMemTable *pMemTable = pTsdb->mem;
STbData *pTbData = NULL;
SVBufPool *pPool = pTsdb->pVnode->inUse;
TSDBKEY lastKey = {.version = version, .ts = eKey};
// check if table exists
SMetaInfo info;
@ -181,7 +180,7 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
pMemTable->nDel++;
pMemTable->minVer = TMIN(pMemTable->minVer, version);
pMemTable->maxVer = TMAX(pMemTable->maxVer, version);
/*
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
tsdbCacheDeleteLastrow(pTsdb->lruCache, pTbData->uid, eKey);
}
@ -189,6 +188,10 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
*/
if (eKey >= pTbData->maxKey && sKey <= pTbData->maxKey) {
tsdbCacheDel(pTsdb, suid, uid, sKey, eKey);
}
tsdbTrace("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
" at version %" PRId64,
@ -284,8 +287,8 @@ bool tsdbTbDataIterNext(STbDataIter *pIter) {
int64_t tsdbCountTbDataRows(STbData *pTbData) {
SMemSkipListNode *pNode = pTbData->sl.pHead;
int64_t rowsNum = 0;
int64_t rowsNum = 0;
while (NULL != pNode) {
pNode = SL_GET_NODE_FORWARD(pNode, 0);
if (pNode == pTbData->sl.pTail) {
@ -298,17 +301,17 @@ int64_t tsdbCountTbDataRows(STbData *pTbData) {
return rowsNum;
}
void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj* pTableMap, int64_t *rowsNum) {
void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj *pTableMap, int64_t *rowsNum) {
taosRLockLatch(&pMemTable->latch);
for (int32_t i = 0; i < pMemTable->nBucket; ++i) {
STbData *pTbData = pMemTable->aBucket[i];
while (pTbData) {
void* p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
void *p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
if (p == NULL) {
pTbData = pTbData->next;
continue;
}
*rowsNum += tsdbCountTbDataRows(pTbData);
pTbData = pTbData->next;
}
@ -668,15 +671,8 @@ static int32_t tsdbInsertColDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true);
}
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb);
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
@ -736,15 +732,8 @@ static int32_t tsdbInsertRowDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true);
}
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb);
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);

@ -60,19 +60,19 @@ int vnodeCheckCfg(const SVnodeCfg *pCfg) {
const char* vnodeRoleToStr(ESyncRole role) {
switch (role) {
case TAOS_SYNC_ROLE_VOTER:
return "voter";
return "true";
case TAOS_SYNC_ROLE_LEARNER:
return "learner";
return "false";
default:
return "unknown";
}
}
const ESyncRole vnodeStrToRole(char* str) {
if(strcmp(str, "voter") == 0){
if(strcmp(str, "true") == 0){
return TAOS_SYNC_ROLE_VOTER;
}
if(strcmp(str, "learner") == 0){
if(strcmp(str, "false") == 0){
return TAOS_SYNC_ROLE_LEARNER;
}
@ -134,12 +134,12 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "sstTrigger", pCfg->sttTrigger) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashBegin", pCfg->hashBegin) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashChange", pCfg->hashChange) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashPrefix", pCfg->hashPrefix) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashSuffix", pCfg->hashSuffix) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.totalReplicaNum", pCfg->syncCfg.totalReplicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "vndStats.stables", pCfg->vndStats.numOfSTables) < 0) return -1;
@ -161,7 +161,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddStringToObject(info, "nodeFqdn", pNode->nodeFqdn) < 0) return -1;
if (tjsonAddIntegerToObject(info, "nodeId", pNode->nodeId) < 0) return -1;
if (tjsonAddIntegerToObject(info, "clusterId", pNode->clusterId) < 0) return -1;
if (tjsonAddStringToObject(info, "nodeRole", vnodeRoleToStr(pNode->nodeRole)) < 0) return -1;
if (tjsonAddStringToObject(info, "isReplica", vnodeRoleToStr(pNode->nodeRole)) < 0) return -1;
if (tjsonAddItemToArray(nodeInfo, info) < 0) return -1;
vDebug("vgId:%d, encode config, replica:%d ep:%s:%u dnode:%d", pCfg->vgId, i, pNode->nodeFqdn, pNode->nodePort,
pNode->nodeId);
@ -250,6 +250,8 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashChange", pCfg->hashChange, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashPrefix", pCfg->hashPrefix, code);
@ -259,8 +261,6 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "syncCfg.totalReplicaNum", pCfg->syncCfg.totalReplicaNum, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex, code);
if (code < 0) return -1;
@ -277,10 +277,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
SJson *nodeInfo = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo");
int arraySize = tjsonGetArraySize(nodeInfo);
if(pCfg->syncCfg.totalReplicaNum == 0 && pCfg->syncCfg.replicaNum > 0){
pCfg->syncCfg.totalReplicaNum = pCfg->syncCfg.replicaNum;
}
if (arraySize != pCfg->syncCfg.totalReplicaNum) return -1;
pCfg->syncCfg.totalReplicaNum = arraySize;
vDebug("vgId:%d, decode config, replicas:%d totalReplicas:%d selfIndex:%d", pCfg->vgId, pCfg->syncCfg.replicaNum,
pCfg->syncCfg.totalReplicaNum, pCfg->syncCfg.myIndex);
@ -296,7 +293,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
tjsonGetNumberValue(info, "clusterId", pNode->clusterId, code);
if (code < 0) return -1;
char role[10] = {0};
code = tjsonGetStringValue(info, "nodeRole", role);
code = tjsonGetStringValue(info, "isReplica", role);
if (code < 0) return -1;
if(strlen(role) != 0){
pNode->nodeRole = vnodeStrToRole(role);

@ -144,8 +144,8 @@ _exit:
}
int vnodeShouldCommit(SVnode *pVnode, bool atExit) {
bool diskAvail = osDataSpaceAvailable();
bool needCommit = false;
bool diskAvail = osDataSpaceAvailable();
bool needCommit = false;
taosThreadMutexLock(&pVnode->mutex);
if (pVnode->inUse && diskAvail) {
@ -439,6 +439,9 @@ static int vnodeCommitImpl(SCommitInfo *pInfo) {
code = tsdbCommit(pVnode->pTsdb, pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
code = tsdbCacheCommit(pVnode->pTsdb);
TSDB_CHECK_CODE(code, lino, _exit);
if (VND_IS_RSMA(pVnode)) {
code = smaCommit(pVnode->pSma, pInfo);
TSDB_CHECK_CODE(code, lino, _exit);

@ -198,6 +198,7 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
info.config.vgId = pReq->dstVgId;
info.config.hashBegin = pReq->hashBegin;
info.config.hashEnd = pReq->hashEnd;
info.config.hashChange = true;
info.config.walCfg.vgId = pReq->dstVgId;
SSyncCfg *pCfg = &info.config.syncCfg;
@ -235,8 +236,6 @@ int32_t vnodeAlterHashRange(const char *srcPath, const char *dstPath, SAlterVnod
return -1;
}
// todo vnode compact here
vInfo("vgId:%d, vnode hashrange is altered", info.config.vgId);
return 0;
}

@ -425,7 +425,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
}
} break;
case TDMT_VND_ALTER_CONFIRM:
vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp);
needCommit = pVnode->config.hashChange;
if (vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp) < 0) {
goto _err;
}
break;
case TDMT_VND_ALTER_CONFIG:
vnodeProcessAlterConfigReq(pVnode, version, pReq, len, pRsp);
@ -1453,15 +1456,39 @@ int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen) {
return vnodeProcessCreateTSmaReq(pVnode, 1, pCont, contLen, NULL);
}
static int32_t vnodeConsolidateAlterHashRange(SVnode *pVnode, int64_t version) {
int32_t code = TSDB_CODE_SUCCESS;
vInfo("vgId:%d, trim meta of tables per hash range [%" PRIu32 ", %" PRIu32 "]. apply-index:%" PRId64, TD_VID(pVnode),
pVnode->config.hashBegin, pVnode->config.hashEnd, version);
// TODO: trim meta of tables from TDB per hash range [pVnode->config.hashBegin, pVnode->config.hashEnd]
return code;
}
static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
vInfo("vgId:%d, vnode management handle msgType:alter-confirm, alter replica confim msg is processed",
TD_VID(pVnode));
vInfo("vgId:%d, vnode handle msgType:alter-confirm, alter confim msg is processed", TD_VID(pVnode));
int32_t code = TSDB_CODE_SUCCESS;
if (!pVnode->config.hashChange) {
goto _exit;
}
code = vnodeConsolidateAlterHashRange(pVnode, version);
if (code < 0) {
vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(),
version);
goto _exit;
}
pVnode->config.hashChange = false;
_exit:
pRsp->msgType = TDMT_VND_ALTER_CONFIRM_RSP;
pRsp->code = TSDB_CODE_SUCCESS;
pRsp->code = code;
pRsp->pCont = NULL;
pRsp->contLen = 0;
return 0;
return code;
}
static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {

@ -1128,6 +1128,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_IGNORE_GROUPID_FORMAT, pMergeNode->ignoreGroupId ? "true" : "false");
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
if (pMergeNode->groupSort) {
EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, "_group_id asc");

@ -366,6 +366,7 @@ typedef struct STagScanInfo {
int32_t curPos;
SReadHandle readHandle;
STableListInfo* pTableListInfo;
SLimitNode* pSlimit;
} STagScanInfo;
typedef enum EStreamScanMode {
