Merge branch '3.0' of github.com:taosdata/TDengine into szhou/ip-whitelist
This commit is contained in:
commit
f280da26da
|
@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1
|
||||||
BUILD_IN_SOURCE TRUE
|
BUILD_IN_SOURCE TRUE
|
||||||
BUILD_ALWAYS 1
|
BUILD_ALWAYS 1
|
||||||
#UPDATE_COMMAND ""
|
#UPDATE_COMMAND ""
|
||||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1 --without-expat
|
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
|
||||||
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
|
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
|
||||||
BUILD_COMMAND make
|
BUILD_COMMAND make
|
||||||
INSTALL_COMMAND make install
|
INSTALL_COMMAND make install
|
||||||
|
|
|
@ -113,7 +113,19 @@ Set<String> subscription() throws SQLException;
|
||||||
|
|
||||||
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
||||||
|
|
||||||
|
Set<TopicPartition> assignment() throws SQLException;
|
||||||
|
long position(TopicPartition partition) throws SQLException;
|
||||||
|
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||||
|
|
||||||
|
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||||
|
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
|
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
|
|
||||||
void commitSync() throws SQLException;
|
void commitSync() throws SQLException;
|
||||||
|
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||||
|
|
||||||
void close() throws SQLException;
|
void close() throws SQLException;
|
||||||
```
|
```
|
||||||
|
|
|
@ -7,9 +7,9 @@ description: This document describes how to query data in TDengine.
|
||||||
## Syntax
|
## Syntax
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
|
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
|
||||||
|
|
||||||
SELECT [DISTINCT] select_list
|
SELECT [hints] [DISTINCT] [TAGS] select_list
|
||||||
from_clause
|
from_clause
|
||||||
[WHERE condition]
|
[WHERE condition]
|
||||||
[partition_by_clause]
|
[partition_by_clause]
|
||||||
|
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
|
||||||
[LIMIT limit_val [OFFSET offset_val]]
|
[LIMIT limit_val [OFFSET offset_val]]
|
||||||
[>> export_file]
|
[>> export_file]
|
||||||
|
|
||||||
|
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
|
||||||
|
|
||||||
|
hint:
|
||||||
|
BATCH_SCAN | NO_BATCH_SCAN
|
||||||
|
|
||||||
select_list:
|
select_list:
|
||||||
select_expr [, select_expr] ...
|
select_expr [, select_expr] ...
|
||||||
|
|
||||||
|
@ -70,6 +75,29 @@ order_expr:
|
||||||
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Hints
|
||||||
|
|
||||||
|
Hints are a means of user control over query optimization for individual statements. Hints will be ignore automatically if they are not applicable to the current query statement. The specific instructions are as follows:
|
||||||
|
|
||||||
|
- Hints syntax starts with `/*+` and ends with `*/`, spaces are allowed before or after.
|
||||||
|
- Hints syntax can only follow the SELECT keyword.
|
||||||
|
- Each hints can contain multiple hint, separated by spaces. When multiple hints conflict or are identical, whichever comes first takes effect.
|
||||||
|
- When an error occurs with a hint in hints, the effective hint before the error is still valid, and the current and subsequent hints are ignored.
|
||||||
|
- hint_param_list are arguments to each hint, which varies according to each hint.
|
||||||
|
|
||||||
|
The list of currently supported Hints is as follows:
|
||||||
|
|
||||||
|
| **Hint** | **Params** | **Comment** | **Scopt** |
|
||||||
|
| :-----------: | -------------- | -------------------------- | -------------------------- |
|
||||||
|
| BATCH_SCAN | None | Batch table scan | JOIN statment for stable |
|
||||||
|
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statment for stable |
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
|
||||||
|
```
|
||||||
|
|
||||||
## Lists
|
## Lists
|
||||||
|
|
||||||
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
|
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
|
||||||
|
@ -167,7 +195,7 @@ The following SQL statement returns the number of subtables within the meters su
|
||||||
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
||||||
```
|
```
|
||||||
|
|
||||||
In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause. For example:
|
In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause.
|
||||||
|
|
||||||
**\_QSTART and \_QEND**
|
**\_QSTART and \_QEND**
|
||||||
|
|
||||||
|
@ -197,6 +225,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
|
||||||
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
|
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### TAGS Query
|
||||||
|
|
||||||
|
The TAGS keyword returns only tag columns from all child tables when only tag columns are specified. One row containing tag columns is returned for each child table.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
|
||||||
|
```
|
||||||
|
|
||||||
## Query Objects
|
## Query Objects
|
||||||
|
|
||||||
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
|
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
|
||||||
|
@ -209,8 +245,7 @@ You can perform INNER JOIN statements based on the primary key. The following co
|
||||||
3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition.
|
3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition.
|
||||||
4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable).
|
4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable).
|
||||||
5. You can include subqueries before and after the JOIN keyword.
|
5. You can include subqueries before and after the JOIN keyword.
|
||||||
6. You cannot include more than ten tables in a JOIN clause.
|
6. You cannot include a FILL clause and a JOIN clause in the same statement.
|
||||||
7. You cannot include a FILL clause and a JOIN clause in the same statement.
|
|
||||||
|
|
||||||
## GROUP BY
|
## GROUP BY
|
||||||
|
|
||||||
|
@ -301,6 +336,12 @@ SELECT TODAY();
|
||||||
SELECT TIMEZONE();
|
SELECT TIMEZONE();
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Obtain Current User
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT CURRENT_USER();
|
||||||
|
```
|
||||||
|
|
||||||
## Regular Expression
|
## Regular Expression
|
||||||
|
|
||||||
### Syntax
|
### Syntax
|
||||||
|
@ -355,7 +396,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
||||||
|
|
||||||
## JOIN
|
## JOIN
|
||||||
|
|
||||||
TDengine supports the `INTER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INTER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables.
|
TDengine supports the `INTER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of timestamp-based primary key is met, `INTER JOIN` can be made between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables, primary key and other conditions must be combined with `AND` operator.
|
||||||
|
|
||||||
For standard tables:
|
For standard tables:
|
||||||
|
|
||||||
|
|
|
@ -1275,6 +1275,14 @@ SELECT SERVER_STATUS();
|
||||||
|
|
||||||
**Description**: The server status.
|
**Description**: The server status.
|
||||||
|
|
||||||
|
### CURRENT_USER
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT CURRENT_USER();
|
||||||
|
```
|
||||||
|
|
||||||
|
**Description**: get current user.
|
||||||
|
|
||||||
|
|
||||||
## Geometry Functions
|
## Geometry Functions
|
||||||
|
|
||||||
|
|
|
@ -178,7 +178,7 @@ The following list shows all reserved keywords:
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
- MAX_DELAY
|
- MAX_DELAY
|
||||||
- MAX_SPEED
|
- BWLIMIT
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
- MERGE
|
- MERGE
|
||||||
- META
|
- META
|
||||||
|
|
|
@ -22,6 +22,14 @@ SHOW CLUSTER;
|
||||||
|
|
||||||
Shows information about the current cluster.
|
Shows information about the current cluster.
|
||||||
|
|
||||||
|
## SHOW CLUSTER ALIVE
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW CLUSTER ALIVE;
|
||||||
|
```
|
||||||
|
|
||||||
|
It is used to check whether the cluster is available or not. Return value: 0 means unavailable, 1 means available, 2 means partially available (some dnodes are offline, the other dnodes are available)
|
||||||
|
|
||||||
## SHOW CONNECTIONS
|
## SHOW CONNECTIONS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -17,7 +17,7 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
|
||||||
## Terminate a Query
|
## Terminate a Query
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
KILL QUERY kill_id;
|
KILL QUERY 'kill_id';
|
||||||
```
|
```
|
||||||
|
|
||||||
You can use the SHOW QUERIES statement to find the kill_id.
|
You can use the SHOW QUERIES statement to find the kill_id.
|
||||||
|
|
|
@ -168,6 +168,12 @@ The base API is used to do things like create database connections and provide a
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
|
||||||
|
|
||||||
|
The function is the same as taos_connect. Except that the pass parameter is replaced by auth, other parameters are the same as taos_connect.
|
||||||
|
|
||||||
|
- auth: the 32-bit lowercase md5 of the raw password
|
||||||
|
|
||||||
- `char *taos_get_server_info(TAOS *taos)`
|
- `char *taos_get_server_info(TAOS *taos)`
|
||||||
|
|
||||||
Get server-side version information.
|
Get server-side version information.
|
||||||
|
@ -184,6 +190,14 @@ The base API is used to do things like create database connections and provide a
|
||||||
- If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned in the database ends with '\0'.
|
- If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned in the database ends with '\0'.
|
||||||
- If len is greater than or equal to the space required to store the db (including the last '\0'), return normal 0, and assign the db name ending with '\0' in the database.
|
- If len is greater than or equal to the space required to store the db (including the last '\0'), return normal 0, and assign the db name ending with '\0' in the database.
|
||||||
|
|
||||||
|
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
|
||||||
|
|
||||||
|
Set the event callback function.
|
||||||
|
|
||||||
|
- fp: event callback function pointer. Declaration:typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type);Param is a user-defined parameter, ext is an extended parameter (depending on the event type, and returns the user password version for TAOS_NOTIFY_PASSVER), and type is the event type
|
||||||
|
- param: user-defined parameter
|
||||||
|
- type: event type. Value range: 1) TAOS_NOTIFY_PASSVER: User password changed
|
||||||
|
|
||||||
- `void taos_close(TAOS *taos)`
|
- `void taos_close(TAOS *taos)`
|
||||||
|
|
||||||
Closes the connection, where `taos` is the handle returned by `taos_connect()`.
|
Closes the connection, where `taos` is the handle returned by `taos_connect()`.
|
||||||
|
@ -307,21 +321,20 @@ The specific functions related to the interface are as follows (see also the [pr
|
||||||
|
|
||||||
Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically.
|
Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically.
|
||||||
|
|
||||||
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
|
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
|
||||||
|
|
||||||
Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT type SQL statements.
|
Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT type SQL statements.
|
||||||
To bind parameters, bind points to an array (representing the row of data to be bound), making sure that the number and order of the elements in this array are the same as the parameters in the SQL statement. taos_bind is used similarly to MYSQL_BIND in MySQL, as defined below.
|
To bind parameters, bind points to an array (representing the row of data to be bound), making sure that the number and order of the elements in this array are the same as the parameters in the SQL statement. taos_bind is used similarly to MYSQL_BIND in MySQL, as defined below.
|
||||||
|
|
||||||
```c
|
```c
|
||||||
typedef struct TAOS_BIND {
|
typedef struct TAOS_MULTI_BIND {
|
||||||
int buffer_type;
|
int buffer_type;
|
||||||
void * buffer;
|
void *buffer;
|
||||||
uintptr_t buffer_length; // not in use
|
uintptr_t buffer_length;
|
||||||
uintptr_t * length;
|
uint32_t *length;
|
||||||
int * is_null;
|
char *is_null;
|
||||||
int is_unsigned; // not in use
|
int num;
|
||||||
int * error; // not in use
|
} TAOS_MULTI_BIND;
|
||||||
} TAOS_BIND;
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
|
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
|
||||||
|
@ -329,7 +342,7 @@ The specific functions related to the interface are as follows (see also the [pr
|
||||||
(Available in 2.1.1.0 and later versions, only supported for replacing parameter values in INSERT statements)
|
(Available in 2.1.1.0 and later versions, only supported for replacing parameter values in INSERT statements)
|
||||||
When the table name in the SQL command uses `? ` placeholder, you can use this function to bind a specific table name.
|
When the table name in the SQL command uses `? ` placeholder, you can use this function to bind a specific table name.
|
||||||
|
|
||||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
|
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
|
||||||
|
|
||||||
(Available in 2.1.2.0 and later versions, only supported for replacing parameter values in INSERT statements)
|
(Available in 2.1.2.0 and later versions, only supported for replacing parameter values in INSERT statements)
|
||||||
When the table name and TAGS in the SQL command both use `? `, you can use this function to bind the specific table name and the specific TAGS value. The most typical usage scenario is an INSERT statement that uses the automatic table building function (the current version does not support specifying specific TAGS columns.) The number of columns in the TAGS parameter needs to be the same as the number of TAGS requested in the SQL command.
|
When the table name and TAGS in the SQL command both use `? `, you can use this function to bind the specific table name and the specific TAGS value. The most typical usage scenario is an INSERT statement that uses the automatic table building function (the current version does not support specifying specific TAGS columns.) The number of columns in the TAGS parameter needs to be the same as the number of TAGS requested in the SQL command.
|
||||||
|
@ -358,6 +371,14 @@ The specific functions related to the interface are as follows (see also the [pr
|
||||||
|
|
||||||
Execute the prepared statement. Currently, a statement can only be executed once.
|
Execute the prepared statement. Currently, a statement can only be executed once.
|
||||||
|
|
||||||
|
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
|
Gets the number of rows affected by executing bind statements multiple times.
|
||||||
|
|
||||||
|
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
|
Gets the number of rows affected by executing a bind statement once.
|
||||||
|
|
||||||
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources.
|
Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources.
|
||||||
|
|
|
@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
|
||||||
|
|
||||||
| taos-jdbcdriver version | major changes | TDengine version |
|
| taos-jdbcdriver version | major changes | TDengine version |
|
||||||
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
|
||||||
|
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
|
||||||
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
|
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
|
||||||
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
|
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
|
||||||
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
|
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
|
||||||
|
@ -1019,14 +1020,19 @@ while(true) {
|
||||||
#### Assignment subscription Offset
|
#### Assignment subscription Offset
|
||||||
|
|
||||||
```java
|
```java
|
||||||
|
// get topicPartition
|
||||||
|
Set<TopicPartition> assignment() throws SQLException;
|
||||||
// get offset
|
// get offset
|
||||||
long position(TopicPartition partition) throws SQLException;
|
long position(TopicPartition partition) throws SQLException;
|
||||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||||
|
|
||||||
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
|
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
|
||||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||||
|
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
|
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
```
|
```
|
||||||
|
|
||||||
Example usage is as follows.
|
Example usage is as follows.
|
||||||
|
@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Commit offset
|
||||||
|
|
||||||
|
If `enable.auto.commit` is false, offset can be submitted manually.
|
||||||
|
|
||||||
|
```java
|
||||||
|
void commitSync() throws SQLException;
|
||||||
|
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||||
|
// async commit only support jni connection
|
||||||
|
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
|
||||||
|
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
|
||||||
|
```
|
||||||
|
|
||||||
#### Close subscriptions
|
#### Close subscriptions
|
||||||
|
|
||||||
```java
|
```java
|
||||||
|
|
|
@ -30,6 +30,10 @@ The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com
|
||||||
|
|
||||||
The supported platforms are the same as those supported by the TDengine client driver.
|
The supported platforms are the same as those supported by the TDengine client driver.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
Please note TDengine does not support 32bit Windows any more.
|
||||||
|
:::
|
||||||
|
|
||||||
## Version support
|
## Version support
|
||||||
|
|
||||||
Please refer to [version support list](/reference/connector#version-support)
|
Please refer to [version support list](/reference/connector#version-support)
|
||||||
|
|
|
@ -102,6 +102,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
||||||
-L, --loose-mode Use loose mode if the table name and column name
|
-L, --loose-mode Use loose mode if the table name and column name
|
||||||
use letter and number only. Default is NOT.
|
use letter and number only. Default is NOT.
|
||||||
-n, --no-escape No escape char '`'. Default is using it.
|
-n, --no-escape No escape char '`'. Default is using it.
|
||||||
|
-Q, --dot-replace Repalce dot character with underline character in
|
||||||
|
the table name.
|
||||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||||
8.
|
8.
|
||||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||||
|
|
|
@ -74,7 +74,7 @@ grafana-cli plugins install tdengine-datasource
|
||||||
sudo -u grafana grafana-cli plugins install tdengine-datasource
|
sudo -u grafana grafana-cli plugins install tdengine-datasource
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
|
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
GF_VERSION=3.3.1
|
GF_VERSION=3.3.1
|
||||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.1.0.3
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.1.0.3" />
|
||||||
|
|
||||||
## 3.1.0.2
|
## 3.1.0.2
|
||||||
|
|
||||||
<Release type="tdengine" version="3.1.0.2" />
|
<Release type="tdengine" version="3.1.0.2" />
|
||||||
|
|
|
@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
|
|
|
@ -51,7 +51,7 @@ void insertData(TAOS *taos) {
|
||||||
int code = taos_stmt_prepare(stmt, sql, 0);
|
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||||
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
|
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
|
||||||
// bind table name and tags
|
// bind table name and tags
|
||||||
TAOS_BIND tags[2];
|
TAOS_MULTI_BIND tags[2];
|
||||||
char *location = "California.SanFrancisco";
|
char *location = "California.SanFrancisco";
|
||||||
int groupId = 2;
|
int groupId = 2;
|
||||||
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
|
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
|
||||||
|
@ -144,4 +144,4 @@ int main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// output:
|
// output:
|
||||||
// successfully inserted 2 rows
|
// successfully inserted 2 rows
|
||||||
|
|
|
@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
|
|
|
@ -58,7 +58,7 @@ void insertData(TAOS *taos) {
|
||||||
int code = taos_stmt_prepare(stmt, sql, 0);
|
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||||
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
|
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
|
||||||
// bind table name and tags
|
// bind table name and tags
|
||||||
TAOS_BIND tags[2];
|
TAOS_MULTI_BIND tags[2];
|
||||||
char* location = "California.SanFrancisco";
|
char* location = "California.SanFrancisco";
|
||||||
int groupId = 2;
|
int groupId = 2;
|
||||||
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
|
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
|
||||||
|
@ -82,7 +82,7 @@ void insertData(TAOS *taos) {
|
||||||
{1648432611749, 12.6, 218, 0.33},
|
{1648432611749, 12.6, 218, 0.33},
|
||||||
};
|
};
|
||||||
|
|
||||||
TAOS_BIND values[4];
|
TAOS_MULTI_BIND values[4];
|
||||||
values[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
values[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||||
values[0].buffer_length = sizeof(int64_t);
|
values[0].buffer_length = sizeof(int64_t);
|
||||||
values[0].length = &values[0].buffer_length;
|
values[0].length = &values[0].buffer_length;
|
||||||
|
@ -138,4 +138,4 @@ int main() {
|
||||||
|
|
||||||
|
|
||||||
// output:
|
// output:
|
||||||
// successfully inserted 2 rows
|
// successfully inserted 2 rows
|
||||||
|
|
|
@ -6,7 +6,14 @@ toc_max_heading_level: 2
|
||||||
|
|
||||||
TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
|
TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
|
||||||
|
|
||||||
本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
|
本章节介绍 TDengine 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
|
||||||
|
|
||||||
|
## 主要产品
|
||||||
|
|
||||||
|
TDengine 有三个主要产品:TDengine Pro (即 TDengine 企业版),TDengine Cloud,和 TDengine OSS,关于它们的具体定义请参考
|
||||||
|
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
|
||||||
|
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||||
|
- [TDengine 开源版](https://www.taosdata.com/tdengine-oss)
|
||||||
|
|
||||||
## 主要功能
|
## 主要功能
|
||||||
|
|
||||||
|
|
|
@ -115,7 +115,19 @@ Set<String> subscription() throws SQLException;
|
||||||
|
|
||||||
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
|
||||||
|
|
||||||
|
Set<TopicPartition> assignment() throws SQLException;
|
||||||
|
long position(TopicPartition partition) throws SQLException;
|
||||||
|
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||||
|
|
||||||
|
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||||
|
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
|
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
|
|
||||||
void commitSync() throws SQLException;
|
void commitSync() throws SQLException;
|
||||||
|
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||||
|
|
||||||
void close() throws SQLException;
|
void close() throws SQLException;
|
||||||
```
|
```
|
||||||
|
|
|
@ -256,6 +256,12 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
|
||||||
|
|
||||||
|
功能同 taos_connect。除 pass 参数替换为 auth 外,其他参数同 taos_connect。
|
||||||
|
|
||||||
|
- auth: 原始密码取 32 位小写 md5
|
||||||
|
|
||||||
- `char *taos_get_server_info(TAOS *taos)`
|
- `char *taos_get_server_info(TAOS *taos)`
|
||||||
|
|
||||||
获取服务端版本信息。
|
获取服务端版本信息。
|
||||||
|
@ -272,6 +278,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
||||||
- 如果,len 小于 存储db需要的空间(包含最后的'\0'),返回错误,database里赋值截断的数据,以'\0'结尾。
|
- 如果,len 小于 存储db需要的空间(包含最后的'\0'),返回错误,database里赋值截断的数据,以'\0'结尾。
|
||||||
- 如果,len 大于等于 存储db需要的空间(包含最后的'\0'),返回正常0,database里赋值以'\0‘结尾的db名。
|
- 如果,len 大于等于 存储db需要的空间(包含最后的'\0'),返回正常0,database里赋值以'\0‘结尾的db名。
|
||||||
|
|
||||||
|
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
|
||||||
|
|
||||||
|
设置事件回调函数。
|
||||||
|
|
||||||
|
- fp 事件回调函数指针。函数声明:typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type);其中, param 为用户自定义参数,ext 为扩展参数(依赖事件类型,针对 TAOS_NOTIFY_PASSVER 返回用户密码版本),type 为事件类型
|
||||||
|
- param 用户自定义参数
|
||||||
|
- type 事件类型。取值范围:1)TAOS_NOTIFY_PASSVER: 用户密码改变
|
||||||
|
|
||||||
- `void taos_close(TAOS *taos)`
|
- `void taos_close(TAOS *taos)`
|
||||||
|
|
||||||
关闭连接,其中`taos`是 `taos_connect()` 返回的句柄。
|
关闭连接,其中`taos`是 `taos_connect()` 返回的句柄。
|
||||||
|
@ -396,21 +410,20 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
||||||
|
|
||||||
解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。
|
解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。
|
||||||
|
|
||||||
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
|
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
|
||||||
|
|
||||||
不如 `taos_stmt_bind_param_batch()` 效率高,但可以支持非 INSERT 类型的 SQL 语句。
|
不如 `taos_stmt_bind_param_batch()` 效率高,但可以支持非 INSERT 类型的 SQL 语句。
|
||||||
进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 类似,具体定义如下:
|
进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_MULTI_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 类似,具体定义如下:
|
||||||
|
|
||||||
```c
|
```c
|
||||||
typedef struct TAOS_BIND {
|
typedef struct TAOS_MULTI_BIND {
|
||||||
int buffer_type;
|
int buffer_type;
|
||||||
void * buffer;
|
void *buffer;
|
||||||
uintptr_t buffer_length; // not in use
|
uintptr_t buffer_length;
|
||||||
uintptr_t * length;
|
uint32_t *length;
|
||||||
int * is_null;
|
char *is_null;
|
||||||
int is_unsigned; // not in use
|
int num; // the number of columns
|
||||||
int * error; // not in use
|
} TAOS_MULTI_BIND;
|
||||||
} TAOS_BIND;
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
|
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
|
||||||
|
@ -418,7 +431,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
||||||
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||||
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
|
当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。
|
||||||
|
|
||||||
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
|
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
|
||||||
|
|
||||||
(2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
(2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||||
当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。TAGS 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。
|
当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。TAGS 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。
|
||||||
|
@ -428,17 +441,6 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
||||||
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
(2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值)
|
||||||
以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下:
|
以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下:
|
||||||
|
|
||||||
```c
|
|
||||||
typedef struct TAOS_MULTI_BIND {
|
|
||||||
int buffer_type;
|
|
||||||
void * buffer;
|
|
||||||
uintptr_t buffer_length;
|
|
||||||
uintptr_t * length;
|
|
||||||
char * is_null;
|
|
||||||
int num; // the number of columns
|
|
||||||
} TAOS_MULTI_BIND;
|
|
||||||
```
|
|
||||||
|
|
||||||
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
|
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param()` 或 `taos_stmt_bind_param_batch()` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。
|
将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param()` 或 `taos_stmt_bind_param_batch()` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。
|
||||||
|
@ -447,6 +449,14 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
||||||
|
|
||||||
执行准备好的语句。目前,一条语句只能执行一次。
|
执行准备好的语句。目前,一条语句只能执行一次。
|
||||||
|
|
||||||
|
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
|
获取执行多次绑定语句影响的行数。
|
||||||
|
|
||||||
|
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
|
获取执行一次绑定语句影响的行数。
|
||||||
|
|
||||||
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
|
||||||
|
|
||||||
获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result()` 以释放资源。
|
获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result()` 以释放资源。
|
||||||
|
|
|
@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
|
||||||
|
|
||||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||||
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
|
||||||
|
| 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 |
|
||||||
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
|
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
|
||||||
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
|
||||||
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
|
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
|
||||||
|
@ -1022,14 +1023,19 @@ while(true) {
|
||||||
#### 指定订阅 Offset
|
#### 指定订阅 Offset
|
||||||
|
|
||||||
```java
|
```java
|
||||||
|
// 获取订阅的 topicPartition
|
||||||
|
Set<TopicPartition> assignment() throws SQLException;
|
||||||
// 获取 offset
|
// 获取 offset
|
||||||
long position(TopicPartition partition) throws SQLException;
|
long position(TopicPartition partition) throws SQLException;
|
||||||
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
Map<TopicPartition, Long> position(String topic) throws SQLException;
|
||||||
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
|
||||||
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
|
||||||
|
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
|
||||||
|
|
||||||
// 指定下一次 poll 中使用的 offset
|
// 指定下一次 poll 中使用的 offset
|
||||||
void seek(TopicPartition partition, long offset) throws SQLException;
|
void seek(TopicPartition partition, long offset) throws SQLException;
|
||||||
|
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
|
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
|
||||||
```
|
```
|
||||||
|
|
||||||
示例代码:
|
示例代码:
|
||||||
|
@ -1055,6 +1061,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### 提交 Offset
|
||||||
|
|
||||||
|
当`enable.auto.commit`为 false 时,可以手动提交 offset。
|
||||||
|
|
||||||
|
```java
|
||||||
|
void commitSync() throws SQLException;
|
||||||
|
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
|
||||||
|
// 异步提交仅在 native 连接下有效
|
||||||
|
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
|
||||||
|
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
|
||||||
|
```
|
||||||
|
|
||||||
#### 关闭订阅
|
#### 关闭订阅
|
||||||
|
|
||||||
```java
|
```java
|
||||||
|
|
|
@ -29,6 +29,10 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
|
||||||
|
|
||||||
支持的平台和 TDengine 客户端驱动支持的平台一致。
|
支持的平台和 TDengine 客户端驱动支持的平台一致。
|
||||||
|
|
||||||
|
:::note
|
||||||
|
注意 TDengine 不再支持 32 位 Windows 平台。
|
||||||
|
:::
|
||||||
|
|
||||||
## 版本支持
|
## 版本支持
|
||||||
|
|
||||||
请参考[版本支持列表](../#版本支持)
|
请参考[版本支持列表](../#版本支持)
|
||||||
|
|
|
@ -143,6 +143,7 @@ phpize && ./configure --enable-swoole && make -j && make install
|
||||||
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
|
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
|
||||||
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
|
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
|
||||||
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
|
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
|
||||||
|
| `TDengine\TSDB_DATA_TYPE_VARBINARY` | varbinary |
|
||||||
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
|
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
|
||||||
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
|
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
|
||||||
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
|
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
|
||||||
|
|
|
@ -7,9 +7,9 @@ description: 查询数据的详细语法
|
||||||
## 查询语法
|
## 查询语法
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
|
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
|
||||||
|
|
||||||
SELECT [DISTINCT] select_list
|
SELECT [hints] [DISTINCT] [TAGS] select_list
|
||||||
from_clause
|
from_clause
|
||||||
[WHERE condition]
|
[WHERE condition]
|
||||||
[partition_by_clause]
|
[partition_by_clause]
|
||||||
|
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
|
||||||
[LIMIT limit_val [OFFSET offset_val]]
|
[LIMIT limit_val [OFFSET offset_val]]
|
||||||
[>> export_file]
|
[>> export_file]
|
||||||
|
|
||||||
|
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
|
||||||
|
|
||||||
|
hint:
|
||||||
|
BATCH_SCAN | NO_BATCH_SCAN
|
||||||
|
|
||||||
select_list:
|
select_list:
|
||||||
select_expr [, select_expr] ...
|
select_expr [, select_expr] ...
|
||||||
|
|
||||||
|
@ -70,6 +75,29 @@ order_expr:
|
||||||
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Hints
|
||||||
|
|
||||||
|
Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适用于当前的查询语句时会被自动忽略,具体说明如下:
|
||||||
|
|
||||||
|
- Hints 语法以`/*+`开始,终于`*/`,前后可有空格。
|
||||||
|
- Hints 语法只能跟随在 SELECT 关键字后。
|
||||||
|
- 每个 Hints 可以包含多个 Hint,Hint 间以空格分开,当多个 Hint 冲突或相同时以先出现的为准。
|
||||||
|
- 当 Hints 中某个 Hint 出现错误时,错误出现之前的有效 Hint 仍然有效,当前及之后的 Hint 被忽略。
|
||||||
|
- hint_param_list 是每个 Hint 的参数,根据每个 Hint 的不同而不同。
|
||||||
|
|
||||||
|
目前支持的 Hints 列表如下:
|
||||||
|
|
||||||
|
| **Hint** | **参数** | **说明** | **适用范围** |
|
||||||
|
| :-----------: | -------------- | -------------------------- | -------------------------- |
|
||||||
|
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
|
||||||
|
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
|
||||||
|
|
||||||
|
举例:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
|
||||||
|
```
|
||||||
|
|
||||||
## 列表
|
## 列表
|
||||||
|
|
||||||
查询语句可以指定部分或全部列作为返回结果。数据列和标签列都可以出现在列表中。
|
查询语句可以指定部分或全部列作为返回结果。数据列和标签列都可以出现在列表中。
|
||||||
|
@ -132,6 +160,16 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
### 标签查询
|
||||||
|
|
||||||
|
当查询的列只有标签列时,`TAGS` 关键字可以指定返回所有子表的标签列。每个子表只返回一行标签列。
|
||||||
|
|
||||||
|
返回所有子表的标签列:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
|
||||||
|
```
|
||||||
|
|
||||||
### 结果集列名
|
### 结果集列名
|
||||||
|
|
||||||
`SELECT`子句中,如果不指定返回结果集合的列名,结果集列名称默认使用`SELECT`子句中的表达式名称作为列名称。此外,用户可使用`AS`来重命名返回结果集合中列的名称。例如:
|
`SELECT`子句中,如果不指定返回结果集合的列名,结果集列名称默认使用`SELECT`子句中的表达式名称作为列名称。此外,用户可使用`AS`来重命名返回结果集合中列的名称。例如:
|
||||||
|
@ -167,7 +205,7 @@ SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tag
|
||||||
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
||||||
```
|
```
|
||||||
|
|
||||||
以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。例如:
|
以上两个查询均只支持在 WHERE 条件子句中添加针对标签(TAGS)的过滤条件。
|
||||||
|
|
||||||
**\_QSTART/\_QEND**
|
**\_QSTART/\_QEND**
|
||||||
|
|
||||||
|
@ -209,8 +247,7 @@ TDengine 支持基于时间戳主键的 INNER JOIN,规则如下:
|
||||||
3. 对于超级表,ON 条件在时间戳主键的等值条件之外,还要求有可以一一对应的标签列等值条件,不支持 OR 条件。
|
3. 对于超级表,ON 条件在时间戳主键的等值条件之外,还要求有可以一一对应的标签列等值条件,不支持 OR 条件。
|
||||||
4. 参与 JOIN 计算的表只能是同一种类型,即只能都是超级表,或都是子表,或都是普通表。
|
4. 参与 JOIN 计算的表只能是同一种类型,即只能都是超级表,或都是子表,或都是普通表。
|
||||||
5. JOIN 两侧均支持子查询。
|
5. JOIN 两侧均支持子查询。
|
||||||
6. 参与 JOIN 的表个数上限为 10 个。
|
6. 不支持与 FILL 子句混合使用。
|
||||||
7. 不支持与 FILL 子句混合使用。
|
|
||||||
|
|
||||||
## GROUP BY
|
## GROUP BY
|
||||||
|
|
||||||
|
@ -301,6 +338,12 @@ SELECT TODAY();
|
||||||
SELECT TIMEZONE();
|
SELECT TIMEZONE();
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### 获取当前用户
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT CURRENT_USER();
|
||||||
|
```
|
||||||
|
|
||||||
## 正则表达式过滤
|
## 正则表达式过滤
|
||||||
|
|
||||||
### 语法
|
### 语法
|
||||||
|
@ -354,7 +397,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
||||||
|
|
||||||
## JOIN 子句
|
## JOIN 子句
|
||||||
|
|
||||||
TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制。
|
TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制,其它连接条件与主键间必须是 AND 操作。
|
||||||
|
|
||||||
普通表与普通表之间的 JOIN 操作:
|
普通表与普通表之间的 JOIN 操作:
|
||||||
|
|
||||||
|
|
|
@ -1266,6 +1266,14 @@ SELECT SERVER_STATUS();
|
||||||
|
|
||||||
**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。
|
**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。
|
||||||
|
|
||||||
|
### CURRENT_USER
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT CURRENT_USER();
|
||||||
|
```
|
||||||
|
|
||||||
|
**说明**:获取当前用户。
|
||||||
|
|
||||||
|
|
||||||
## Geometry 函数
|
## Geometry 函数
|
||||||
|
|
||||||
|
|
|
@ -31,7 +31,7 @@ select max(current) from meters partition by location interval(10m)
|
||||||
|
|
||||||
## 窗口切分查询
|
## 窗口切分查询
|
||||||
|
|
||||||
TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)、条件窗口(event window)四种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。
|
TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)、事件窗口(event window)四种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。
|
||||||
|
|
||||||
窗口子句语法如下:
|
窗口子句语法如下:
|
||||||
|
|
||||||
|
|
|
@ -178,7 +178,7 @@ description: TDengine 保留关键字的详细列表
|
||||||
|
|
||||||
- MATCH
|
- MATCH
|
||||||
- MAX_DELAY
|
- MAX_DELAY
|
||||||
- MAX_SPEED
|
- BWLIMIT
|
||||||
- MAXROWS
|
- MAXROWS
|
||||||
- MERGE
|
- MERGE
|
||||||
- META
|
- META
|
||||||
|
|
|
@ -22,6 +22,14 @@ SHOW CLUSTER;
|
||||||
|
|
||||||
显示当前集群的信息
|
显示当前集群的信息
|
||||||
|
|
||||||
|
## SHOW CLUSTER ALIVE
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW CLUSTER ALIVE;
|
||||||
|
```
|
||||||
|
|
||||||
|
查询当前集群的状态是否可用,返回值: 0:不可用 1:完全可用 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||||
|
|
||||||
## SHOW CONNECTIONS
|
## SHOW CONNECTIONS
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
|
@ -4,87 +4,85 @@ title: 权限管理
|
||||||
description: 企业版中才具有的权限管理功能
|
description: 企业版中才具有的权限管理功能
|
||||||
---
|
---
|
||||||
|
|
||||||
本节讲述如何在 TDengine 中进行权限管理的相关操作。权限管理是 TDengine 企业版的特有功能,本节只列举了一些基本的权限管理功能作为示例,更丰富的权限管理请联系 TDengine 销售或市场团队。
|
本节讲述如何在 TDengine 中进行权限管理的相关操作。权限管理是 TDengine 企业版的特有功能,欲试用 TDengine 企业版请联系 TDengine 销售或市场团队。
|
||||||
|
|
||||||
## 创建用户
|
TDengine 中的权限管理分为用户管理、数据库授权管理以及消息订阅授权管理。
|
||||||
|
|
||||||
|
当 TDengine 安装并部署成功后,系统中内置有 "root" 用户。持有默认 "root" 用户密码的系统管理员应该第一时间修改 root 用户的密码,并根据业务需要创建普通用户并为这些用户授予适当的权限。在未授权的情况下,普通用户可以创建 DATABASE,并拥有自己创建的 DATABASE 的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他(即非该用户所创建的) DATABASE 的读写权限,使其可以在这些 DATABASE 上读写数据,但不能对其进行删除和修改数据库的操作。超级用户或者 topic 的创建者也可以给其它用户授予对某个 topic 的订阅权限。
|
||||||
|
|
||||||
|
## 用户管理
|
||||||
|
|
||||||
|
用户管理涉及用户的整个生命周期,从创建用户、对用户进行授权、撤销对用户的授权、查看用户信息、直到删除用户。
|
||||||
|
|
||||||
|
### 创建用户
|
||||||
|
|
||||||
|
创建用户的操作只能由 root 用户进行,语法如下
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER user_name PASS 'password' [SYSINFO {1\|0}];
|
||||||
|
```
|
||||||
|
|
||||||
|
说明:
|
||||||
|
|
||||||
|
- user_name 最长为 23 字节。
|
||||||
|
- password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?\$%\^&\*()_–+={[}]:;@\~\#\|\<,\>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
|
||||||
|
- SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
|
||||||
|
|
||||||
|
示例:创建密码为123456且可以查看系统信息的用户 test
|
||||||
|
|
||||||
|
```
|
||||||
|
SQL taos\> create user test pass '123456' sysinfo 1; Query OK, 0 of 0 rows affected (0.001254s)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 查看用户
|
||||||
|
|
||||||
|
查看系统中的用户信息请使用 show users 命令,示例如下
|
||||||
|
|
||||||
|
```sql
|
||||||
|
show users;
|
||||||
|
```
|
||||||
|
|
||||||
|
也可以通过查询系统表 `INFORMATION_SCHEMA.INS_USERS` 获取系统中的用户信息,示例如下
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select * from information_schema.ins_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
### 删除用户
|
||||||
|
|
||||||
|
删除用户请使用
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP USER user_name;
|
||||||
|
```
|
||||||
|
|
||||||
|
### 修改用户信息
|
||||||
|
|
||||||
|
修改用户信息的命令如下
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ALTER USER user_name alter_user_clause alter_user_clause: { PASS 'literal' \| ENABLE value \| SYSINFO value }
|
||||||
|
```
|
||||||
|
|
||||||
|
说明:
|
||||||
|
|
||||||
|
- PASS:修改用户密码。
|
||||||
|
- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
|
||||||
|
- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
|
||||||
|
|
||||||
|
示例:禁用 test 用户
|
||||||
|
|
||||||
|
```sql
|
||||||
|
alter user test enable 0; Query OK, 0 of 0 rows affected (0.001160s)
|
||||||
|
```
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
|
CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
|
||||||
```
|
```
|
||||||
|
|
||||||
创建用户。
|
## 访问控制
|
||||||
|
|
||||||
use_name 最长为 23 字节。
|
在 TDengine 企业版中,系统管理员可以根据业务和数据安全的需要控制任意一个用户对每一个数据库、订阅甚至表级别的访问。
|
||||||
|
|
||||||
password 最长为 31 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
|
|
||||||
|
|
||||||
SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
|
|
||||||
|
|
||||||
例如,创建密码为123456且可以查看系统信息的用户test如下:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
taos> create user test pass '123456' sysinfo 1;
|
|
||||||
Query OK, 0 of 0 rows affected (0.001254s)
|
|
||||||
```
|
|
||||||
|
|
||||||
## 查看用户
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SHOW USERS;
|
|
||||||
```
|
|
||||||
|
|
||||||
查看用户信息。
|
|
||||||
|
|
||||||
```sql
|
|
||||||
taos> show users;
|
|
||||||
name | super | enable | sysinfo | create_time |
|
|
||||||
================================================================================
|
|
||||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
|
||||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
|
||||||
Query OK, 2 rows in database (0.001657s)
|
|
||||||
```
|
|
||||||
|
|
||||||
也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息,例如:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
taos> select * from information_schema.ins_users;
|
|
||||||
name | super | enable | sysinfo | create_time |
|
|
||||||
================================================================================
|
|
||||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
|
||||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
|
||||||
Query OK, 2 rows in database (0.001953s)
|
|
||||||
```
|
|
||||||
|
|
||||||
## 删除用户
|
|
||||||
|
|
||||||
```sql
|
|
||||||
DROP USER user_name;
|
|
||||||
```
|
|
||||||
|
|
||||||
## 修改用户信息
|
|
||||||
|
|
||||||
```sql
|
|
||||||
ALTER USER user_name alter_user_clause
|
|
||||||
|
|
||||||
alter_user_clause: {
|
|
||||||
PASS 'literal'
|
|
||||||
| ENABLE value
|
|
||||||
| SYSINFO value
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- PASS:修改用户密码。
|
|
||||||
- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
|
|
||||||
- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
|
|
||||||
|
|
||||||
例如,禁用 test 用户:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
taos> alter user test enable 0;
|
|
||||||
Query OK, 0 of 0 rows affected (0.001160s)
|
|
||||||
```
|
|
||||||
|
|
||||||
## 授权
|
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
GRANT privileges ON priv_level TO user_name
|
GRANT privileges ON priv_level TO user_name
|
||||||
|
@ -105,14 +103,106 @@ priv_level : {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
对用户授权。授权功能只包含在企业版中。
|
### 数据库权限
|
||||||
|
|
||||||
授权级别支持到DATABASE,权限有READ和WRITE两种。
|
|
||||||
|
|
||||||
TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root,拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下,普通用户可以创建DATABASE,并拥有自己创建的DATABASE的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他DATABASE的读写权限,使其可以在此DATABASE上读写数据,但不能对其进行删除和修改数据库的操作。
|
TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root,拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下,普通用户可以创建 DATABASE,并拥有自己创建的 DATABASE 的所有权限,包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他 DATABASE 的读写权限,使其可以在此 DATABASE 上读写数据,但不能对其进行删除和修改数据库的操作。
|
||||||
|
|
||||||
对于非DATABASE的对象,如USER、DNODE、UDF、QNODE等,普通用户只有读权限(一般为SHOW命令),不能创建和修改。
|
对于非DATABASE的对象,如USER、DNODE、UDF、QNODE等,普通用户只有读权限(一般为SHOW命令),不能创建和修改。
|
||||||
|
|
||||||
|
对数据库的访问权限包含读和写两种权限,它们可以被分别授予,也可以被同时授予。
|
||||||
|
|
||||||
|
补充说明
|
||||||
|
|
||||||
|
- priv_level 格式中 "." 之前为数据库名称, "." 之后为表名称
|
||||||
|
- "dbname.\*" 意思是名为 "dbname" 的数据库中的所有表
|
||||||
|
- "\*.\*" 意思是所有数据库名中的所有表
|
||||||
|
|
||||||
|
**下表中总结了数据库权限的各种组合**
|
||||||
|
|
||||||
|
对 root 用户和普通用户的权限的说明如下表
|
||||||
|
|
||||||
|
| 用户 | 描述 | 权限说明 |
|
||||||
|
|----------|------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| 超级用户 | 只有 root 是超级用户 | DB 外部 所有操作权限,例如user、dnode、udf、qnode等的CRUD DB 权限,包括 创建 删除 更新,例如修改 Option,移动 Vgruop等 读 写 Enable/Disable 用户 |
|
||||||
|
| 普通用户 | 除 root 以外的其它用户均为普通用户 | 在可读的 DB 中,普通用户可以进行读操作 select describe show subscribe 在可写 DB 的内部,用户可以进行写操作: 创建、删除、修改 超级表 创建、删除、修改 子表 创建、删除、修改 topic 写入数据 被限制系统信息时,不可进行如下操作 show dnode、mnode、vgroups、qnode、snode 修改用户包括自身密码 show db时只能看到自己的db,并且不能看到vgroups、副本、cache等信息 无论是否被限制系统信息,都可以 管理 udf 可以创建 DB 自己创建的 DB 具备所有权限 非自己创建的 DB ,参照读、写列表中的权限 |
|
||||||
|
|
||||||
|
### 消息订阅授权
|
||||||
|
|
||||||
|
任意用户都可以在自己拥有读权限的数据库上创建 topic。超级用户 root 可以在任意数据库上创建 topic。每个 topic 的订阅权限都可以被独立授权给任何用户,不管该用户是否拥有该数据库的访问权限。删除 topic 只能由 root 用户或者该 topic 的创建者进行。topic 只能由超级用户、topic的创建者或者被显式授予 subscribe 权限的用户订阅。
|
||||||
|
|
||||||
|
授予订阅权限的语法如下:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT privileges ON priv_level TO user_name privileges : { ALL | priv_type [, priv_type] ... } priv_type : { SUBSCRIBE } priv_level : { topic_name }
|
||||||
|
```
|
||||||
|
|
||||||
|
### 基于标签的授权(表级授权)
|
||||||
|
|
||||||
|
从 TDengine 3.0.5.0 开始,我们支持按标签授权某个超级表中部分特定的子表。具体的 SQL 语法如下。
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT privileges ON priv_level [WITH tag_condition] TO user_name
|
||||||
|
|
||||||
|
privileges : {
|
||||||
|
ALL
|
||||||
|
| SUBSCRIBE
|
||||||
|
| priv_type [, priv_type] ...
|
||||||
|
}
|
||||||
|
|
||||||
|
priv_type : {
|
||||||
|
READ
|
||||||
|
| WRITE
|
||||||
|
}
|
||||||
|
|
||||||
|
priv_level : {
|
||||||
|
dbname.tbname
|
||||||
|
| dbname.*
|
||||||
|
| *.*
|
||||||
|
| topic_name
|
||||||
|
}
|
||||||
|
|
||||||
|
REVOKE privileges ON priv_level [WITH tag_condition] FROM user_name
|
||||||
|
|
||||||
|
privileges : {
|
||||||
|
ALL
|
||||||
|
| priv_type [, priv_type] ...
|
||||||
|
}
|
||||||
|
|
||||||
|
priv_type : {
|
||||||
|
READ
|
||||||
|
| WRITE
|
||||||
|
}
|
||||||
|
|
||||||
|
priv_level : {
|
||||||
|
dbname.tbname
|
||||||
|
| dbname.*
|
||||||
|
| *.*
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
上面 SQL 的语义为:
|
||||||
|
|
||||||
|
- 用户可以通过 dbname.tbname 来为指定的表(包括超级表和普通表)授予或回收其读写权限,不支持直接对子表授予或回收权限。
|
||||||
|
- 用户可以通过 dbname.tbname 和 WITH 子句来为符合条件的所有子表授予或回收其读写权限。使用 WITH 子句时,权限级别必须为超级表。
|
||||||
|
|
||||||
|
**表级权限和数据库权限的关系**
|
||||||
|
|
||||||
|
下表列出了在不同的数据库授权和表级授权的组合下产生的实际权限。
|
||||||
|
|
||||||
|
| |**表无授权** | **表读授权** | **表读授权有标签条件** | **表写授权** | **表写授权有标签条件** |
|
||||||
|
| -------------- | ---------------- | -------- | ---------- | ------ | ----------- |
|
||||||
|
| **数据库无授权** | 无授权 | 对此表有读权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表无权限 | 对此表有写权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有写权限,对数据库下的其他表无权限 |
|
||||||
|
| **数据库读授权** | 对所有表有读权限 | 对所有表有读权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表有读权限 | 对此表有写权限,对所有表有读权限 | 对此表符合标签权限的子表有写权限,所有表有读权限 |
|
||||||
|
| **数据库写授权** | 对所有表有写权限 | 对此表有读权限,对所有表有写权限 | 对此表符合标签权限的子表有读权限,对所有表有写权限 | 对所有表有写权限 | 对此表符合标签权限的子表有写权限,数据库下的其他表有写权限 |
|
||||||
|
|
||||||
|
### 查看用户授权
|
||||||
|
|
||||||
|
使用下面的命令可以显示一个用户所拥有的授权:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
show user privileges
|
||||||
|
```
|
||||||
## 撤销授权
|
## 撤销授权
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -135,4 +225,15 @@ priv_level : {
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
收回对用户的授权。授权功能只包含在企业版中。
|
### 撤销授权
|
||||||
|
|
||||||
|
1. 撤销数据库访问的授权
|
||||||
|
|
||||||
|
```sql
|
||||||
|
REVOKE privileges ON priv_level FROM user_name privileges : { ALL \| priv_type [, priv_type] ... } priv_type : { READ \| WRITE } priv_level : { dbname.\* \| \*.\* }
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 撤销数据订阅的授权
|
||||||
|
|
||||||
|
```sql
|
||||||
|
REVOKE privileges ON priv_level FROM user_name privileges : { ALL \| priv_type [, priv_type] ... } priv_type : { SUBSCRIBE } priv_level : { topi_name }
|
||||||
|
|
|
@ -17,7 +17,7 @@ conn_id 可以通过 `SHOW CONNECTIONS` 获取。
|
||||||
## 终止查询
|
## 终止查询
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
KILL QUERY kill_id;
|
KILL QUERY 'kill_id';
|
||||||
```
|
```
|
||||||
|
|
||||||
kill_id 可以通过 `SHOW QUERIES` 获取。
|
kill_id 可以通过 `SHOW QUERIES` 获取。
|
||||||
|
|
|
@ -105,6 +105,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
||||||
-L, --loose-mode Using loose mode if the table name and column name
|
-L, --loose-mode Using loose mode if the table name and column name
|
||||||
use letter and number only. Default is NOT.
|
use letter and number only. Default is NOT.
|
||||||
-n, --no-escape No escape char '`'. Default is using it.
|
-n, --no-escape No escape char '`'. Default is using it.
|
||||||
|
-Q, --dot-replace Repalce dot character with underline character in
|
||||||
|
the table name.
|
||||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||||
8.
|
8.
|
||||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||||
|
|
|
@ -0,0 +1,80 @@
|
||||||
|
---
|
||||||
|
title: 集群运维
|
||||||
|
description: TDengine 提供了多种集群运维手段以使集群运行更健康更高效
|
||||||
|
---
|
||||||
|
|
||||||
|
为了使集群运行更健康更高效,TDengine 企业版提供了一些运维手段来帮助系统管理员更好地运维集群。
|
||||||
|
|
||||||
|
## 数据重整
|
||||||
|
|
||||||
|
TDengine 面向多种写入场景,在有些写入场景下,TDengine 的存储会导致数据存储的放大或数据文件的空洞等。这一方面影响数据的存储效率,另一方面也会影响查询效率。为了解决上述问题,TDengine 企业版提供了对数据的重整功能,即 DATA COMPACT 功能,将存储的数据文件重新整理,删除文件空洞和无效数据,提高数据的组织度,从而提高存储和查询的效率。
|
||||||
|
|
||||||
|
**语法**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||||
|
```
|
||||||
|
|
||||||
|
**效果**
|
||||||
|
|
||||||
|
- 扫描并压缩指定的 DB 中所有 VGROUP 中 VNODE 的所有数据文件
|
||||||
|
- COMPCAT 会删除被删除数据以及被删除的表的数据
|
||||||
|
- COMPACT 会合并多个 STT 文件
|
||||||
|
- 可通过 start with 关键字指定 COMPACT 数据的起始时间
|
||||||
|
- 可通过 end with 关键字指定 COMPACT 数据的终止时间
|
||||||
|
|
||||||
|
**补充说明**
|
||||||
|
|
||||||
|
- COMPACT 为异步,执行 COMPACT 命令后不会等 COMPACT 结束就会返回。如果上一个 COMPACT 没有完成则再发起一个 COMPACT 任务,则会等上一个任务完成后再返回。
|
||||||
|
- COMPACT 可能阻塞写入,但不阻塞查询
|
||||||
|
- COMPACT 的进度不可观测
|
||||||
|
|
||||||
|
## 集群负载再平衡
|
||||||
|
|
||||||
|
当多副本集群中的一个或多个节点因为升级或其它原因而重启后,有可能出现集群中各个 dnode 负载不均衡的现象,极端情况下会出现所有 vgroup 的 leader 都位于同一个 dnode 的情况。为了解决这个问题,可以使用下面的命令
|
||||||
|
|
||||||
|
```sql
|
||||||
|
balance vgroup leader;
|
||||||
|
```
|
||||||
|
|
||||||
|
**功能**
|
||||||
|
|
||||||
|
让所有的 vgroup 的 leade r在各自的replica节点上均匀分布。这个命令会让 vgroup 强制重新选举,通过重新选举,在选举的过程中,变换 vgroup 的leader,通过这个方式,最终让leader均匀分布。
|
||||||
|
|
||||||
|
**注意**
|
||||||
|
|
||||||
|
Raft选举本身带有随机性,所以通过选举的重新分布产生的均匀分布也是带有一定的概率,不会完全的均匀。**该命令的副作用是影响查询和写入**,在vgroup重新选举时,从开始选举到选举出新的 leader 这段时间,这 个vgroup 无法写入和查询。选举过程一般在秒级完成。所有的vgroup会依次逐个重新选举。
|
||||||
|
|
||||||
|
## 恢复数据节点
|
||||||
|
|
||||||
|
在多节点三副本的集群环境中,如果某个 dnode 的磁盘损坏,该 dnode 会自动退出,但集群中其它的 dnode 仍然能够继续提供写入和查询服务。
|
||||||
|
|
||||||
|
在更换了损坏的磁盘后,如果想要让曾经主动退出的 dnode 重新加入集群提供服务,可以通过 `restore dnode` 命令来恢复该数据节点上的部分或全部逻辑节点,该功能依赖多副本中的其它副本进行数据复制,所以只在集群中 dnode 数量大于等于 3 且副本数为 3 的情况下能够工作。
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
restore dnode <dnode_id>;# 恢复dnode上的mnode,所有vnode和qnode
|
||||||
|
restore mnode on dnode <dnode_id>;# 恢复dnode上的mnode
|
||||||
|
restore vnode on dnode <dnode_id> ;# 恢复dnode上的所有vnode
|
||||||
|
restore qnode on dnode <dnode_id>;# 恢复dnode上的qnode
|
||||||
|
```
|
||||||
|
|
||||||
|
**限制**
|
||||||
|
- 该功能是基于已有的复制功能的恢复,不是灾难恢复或者备份恢复,所以对于要恢复的 mnode 和 vnode来说,使用该命令的前提是还存在该 mnode 或 vnode 的其它两个副本仍然能够正常工作。
|
||||||
|
- 该命令不能修复数据目录中的个别文件的损坏或者丢失。例如,如果某个 mnode 或者 vnode 中的个别文件或数据损坏,无法单独恢复损坏的某个文件或者某块数据。此时,可以选择将该 mnode/vnode 的数据全部清空再进行恢复。
|
||||||
|
|
||||||
|
|
||||||
|
## 虚拟组分裂 (Scale Out)
|
||||||
|
|
||||||
|
当一个 vgroup 因为子表数过多而导致 CPU 或 Disk 资源使用量负载过高时,增加 dnode 节点后,可通过 `split vgroup` 命令把该 vgroup 分裂为两个虚拟组。分裂完成后,新产生的两个 vgroup 承担原来由一个 vgroup 提供的读写服务。这也是 TDengine 为企业版用户提供的 scale out 集群的能力。
|
||||||
|
|
||||||
|
```sql
|
||||||
|
split vgroup <vgroup_id>
|
||||||
|
```
|
||||||
|
|
||||||
|
**注意**
|
||||||
|
- 单副本库虚拟组,在分裂完成后,历史时序数据总磁盘空间使用量,可能会翻倍。所以,在执行该操作之前,通过增加 dnode 节点方式,确保集群中有足够的 CPU 和磁盘资源,避免资源不足现象发生。
|
||||||
|
- 该命令为 DB 级事务;执行过程,当前DB的其它管理事务将会被拒绝。集群中,其它DB不受影响。
|
||||||
|
- 分裂任务执行过程中,可持续提供读写服务;期间,可能存在可感知的短暂的读写业务中断。
|
||||||
|
- 在分裂过程中,不支持流和订阅。分裂结束后,历史 WAL 会清空。
|
||||||
|
- 分裂过程中,可支持节点宕机重启容错;但不支持节点磁盘故障容错。
|
|
@ -0,0 +1,178 @@
|
||||||
|
---
|
||||||
|
title: Web 管理工具
|
||||||
|
description: 基于 Web 的系统管理工具
|
||||||
|
---
|
||||||
|
|
||||||
|
## 简介
|
||||||
|
|
||||||
|
为了让企业版用户更容易使用和管理数据库,TDengine 3.0 企业版提供了一个全新的可视化组件 taosExplorer。用户能够在其中方便地管理数据库管理系统中各元素(数据库、超级表、子表)的生命周期,执行查询,监控系统状态,管理用户和授权,完成数据备份和恢复,与其它集群之间进行数据同步,导出数据,管理主题和流计算。
|
||||||
|
|
||||||
|
**欲体验基于 Web 的 TDengine 系统管理能力,请联系 TDengine 市场或销售团队**
|
||||||
|
|
||||||
|
## 部署服务
|
||||||
|
|
||||||
|
### 准备工作
|
||||||
|
|
||||||
|
1. taosExplorer 没有独立的安装包,请使用 taosX 安装包进行安装。
|
||||||
|
2. 在启动 taosExplorer 之前,请先确认 TDengine 集群已经正确设置并运行(即 taosd 服务),taosAdapter 也已经正确设置和运行并与 TDengine 集群保持连接状态。如果想要使用数据备份和恢复或者数据同步功能,请确保 taosX 服务和 Agent 服务也已经正确设置和运行。
|
||||||
|
|
||||||
|
### 配置
|
||||||
|
|
||||||
|
在启动 taosExplorer 之前,请确保配置文件中的内容正确。
|
||||||
|
|
||||||
|
```TOML
|
||||||
|
listen = "0.0.0.0:6060"
|
||||||
|
log_level = "info"
|
||||||
|
cluster = "http://localhost:6041"
|
||||||
|
x_api = "http://localhost:6050"
|
||||||
|
```
|
||||||
|
|
||||||
|
说明:
|
||||||
|
|
||||||
|
- listen - taosExplorer 对外提供服务的地址
|
||||||
|
- log_level - 日志级别,可选值为 "debug", "info", "warn", "error", "fatal"
|
||||||
|
- cluster - TDengine集群的 taosadapter 地址
|
||||||
|
- x_api - taosX 的服务地址
|
||||||
|
|
||||||
|
### 启动
|
||||||
|
|
||||||
|
然后启动 taosExplorer,可以直接在命令行执行 taos-explorer 或者使用下面的 systemctl 脚本用 systemctl 来启动 taosExplorer 服务
|
||||||
|
|
||||||
|
```shell
|
||||||
|
[Unit]
|
||||||
|
Description=Explorer for TDengine
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/bin/taos-explorer
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
```
|
||||||
|
|
||||||
|
### 问题排查
|
||||||
|
|
||||||
|
1. 当通过浏览器打开taosExplorer站点遇到“无法访问此网站”的错误信息时,请通过命令行登录taosExplorer所在机器,并使用命令systemctl status taos-explorer.service检查服务的状态,如果返回的状态是inactive,请使用命令systemctl start taos-explorer.service启动服务。
|
||||||
|
2. 如果需要获取taosExplorer的详细日志,可通过命令journalctl -u taos-explorer
|
||||||
|
|
||||||
|
## 登录
|
||||||
|
|
||||||
|
在 TDengine 管理系统的登录页面,输入正确的用户名和密码后,点击登录按钮,即可登录。
|
||||||
|
|
||||||
|
说明:
|
||||||
|
- 这里的用户,需要在所连接的 TDengine 中创建,TDengine 默认的用户名和密码为`root/taosdata`;
|
||||||
|
- 在 TDengine 中创建用户时,默认会设置用户的 SYSINFO 属性值为1, 表示该用户可以查看系统信息,只有 SYSINFO 属性为 1 的用户才能正常登录 TDengine 管理系统。
|
||||||
|
|
||||||
|
## 面板
|
||||||
|
|
||||||
|
taosExplorer 内置了一个简单的仪表盘展示以下集群信息,点击左侧功能列表中的 "面板" 可以启用此功能。
|
||||||
|
|
||||||
|
- 默认的仪表盘会返回对应 Grafana 的安装配置向导
|
||||||
|
- 配置过 Grafana 的仪表盘在点击' 面板' 时会跳转到对应的配置地址(该地址来源于 /profile 接口的返回值)
|
||||||
|
|
||||||
|
|
||||||
|
## 数据浏览器
|
||||||
|
|
||||||
|
点击功能列表的“数据浏览器”入口,在“数据浏览器”中可以创建和删除数据库、创建和删除超级表和子表,执行SQL语句,查看SQL语句的执行结果。此外,超级管理员还有对数据库的管理权限,其他用户不提供该功能。
|
||||||
|
|
||||||
|
具体权限有:
|
||||||
|
|
||||||
|
1.查看(提供数据库/超级表/普通表的基本信息)
|
||||||
|
|
||||||
|
2.编辑 (编辑数据库/超级表/普通表的信息)
|
||||||
|
|
||||||
|
3.数据库管理权限 (仅限超级管理员,该操作可以给指定用户配置数据库管理权限)
|
||||||
|
|
||||||
|
4.删除 (删除数据库/超级表/普通表)
|
||||||
|
|
||||||
|
5.追加 (选择对应的数据库/超级表/普通表名称直接追加到右侧sql输入区域,避免了手工输入)
|
||||||
|
|
||||||
|
|
||||||
|
## 系统管理
|
||||||
|
|
||||||
|
点击功能列表中的“系统管理”入口,可以创建用户、对用户进行访问授权、以及删除用户。还能够对当前所管理的集群中的数据进行备份和恢复。也可以配置一个远程 TDengine 的地址进行数据同步。同时也提供了集群信息和许可证的信息以及代理信息以供查看。系统管理 菜单只有 root 用户才有权限看到
|
||||||
|
|
||||||
|
### 用户管理
|
||||||
|
|
||||||
|
点击“系统管理”后,默认会进入“用户”标签页。
|
||||||
|
在用户列表,可以查看系统中已存在的用户及其创建时间,并可以对用户进行启用、禁用,编辑(包括修改密码,数据库的读写权限等),删除等操作。
|
||||||
|
点击用户列表右上方的“+新增”按钮,即可打开“新增用户”对话框:
|
||||||
|
1. 输入新增用户的用户名称,必填
|
||||||
|
2. 输入新增用户的登录密码,必填,密码长度要求为8-16个字符,且至少要满足以下4个条件中的3个:大写字母,小写字母,数字,特殊字符
|
||||||
|
3. 选择新增用户对系统中已存在的数据库的读写权限,非必填,默认情况下,新增用户对所有已存在的数据库无读写权限
|
||||||
|
4. 填写完成后,点击确定按钮,即可新增用户。
|
||||||
|
|
||||||
|
### 系统信息
|
||||||
|
|
||||||
|
点击“集群”标签后,可以查看DNodes, MNodes和QNodes的状态、创建时间等信息,并可以对以上节点进行新增和删除操作。
|
||||||
|
|
||||||
|
### 许可证管理
|
||||||
|
|
||||||
|
点击“许可证”标签后,可以查看系统和各连接器的许可证信息。
|
||||||
|
点击位于“许可证”标签页右上角的“激活许可证”按钮,输入“激活码”和“连接器激活码”后,点击“确定”按钮,即可激活,激活码请联系 TDengine 客户成功团队获取。
|
||||||
|
|
||||||
|
## 数据订阅
|
||||||
|
|
||||||
|
本章节,将介绍如何在 TDengine 集群中,创建主题,并将其分享给其他用户,以及如何查看一个主题的消费者信息。
|
||||||
|
|
||||||
|
通过 Explorer, 您可以轻松地完成对数据订阅的管理,从而更好地利用 TDengine 提供的数据订阅能力。
|
||||||
|
点击左侧导航栏中的“数据订阅”,即可跳转至数据订阅配置管理页面。
|
||||||
|
您可以通过以下两种方式创建主题:使用向导和自定义 SQL 语句。通过自定义 SQL 创建主题时,您需要了解 TDengine 提供的数据订阅 SQL 语句的语法,并保证其正确性。
|
||||||
|
|
||||||
|
注: 对于数据订阅的详细说明,可参考官方文档中关于“数据订阅”章节,创建数据订阅之前需要先准备源数据库(或源数据库包含相应的超级表或者表),其中源数据库需配置wal_retention_period > 0 。
|
||||||
|
|
||||||
|
包括主题,消费者,共享主题和示例代码
|
||||||
|
|
||||||
|
### 创建主题
|
||||||
|
|
||||||
|
1. 在“主题”标签页,点击“新增新主题”按钮以后,选择向导窗格,然后输入“主题名称”;
|
||||||
|
2. 在“数据库”下拉列表中,选择相应的数据库;
|
||||||
|
3. 在“类型”标签下,选择“数据库” 或 “超级表” 或 “子查询”,这里以默认值“数据库”为例;
|
||||||
|
4. 然后点击“创建” 按钮,即可创建对应的主题。
|
||||||
|
|
||||||
|
### 分享主题
|
||||||
|
|
||||||
|
1. 在“共享主题”标签页,在“主题“下拉列表中,选择将要分享的主题;
|
||||||
|
2. 点击“添加可消费该主题的用户”按钮,然后在“用户名”下拉列表中选择相应的用户,然后点击“新增”,即可将该主题分享给此用户。
|
||||||
|
|
||||||
|
|
||||||
|
### 查看消费者信息
|
||||||
|
|
||||||
|
1. 通过执行下一节“示例代码”所述的“完整实例”,即可消费共享主题
|
||||||
|
2. 在“消费者”标签页,可查看到消费者的有关信息
|
||||||
|
|
||||||
|
### 示例代码
|
||||||
|
|
||||||
|
1. 在“示例代码”标签页,在“主题“下拉列表中,选择相应的主题;
|
||||||
|
2. 选择您熟悉的语言,然后您可以阅读以及使用这部分示例代码用来”创建消费“,”订阅主题“,通过执行 “完整实例”中的程序即可消费共享主题
|
||||||
|
|
||||||
|
## 流计算
|
||||||
|
|
||||||
|
通过 Explorer, 您可以轻松地完成对流的管理,从而更好地利用 TDengine 提供的流计算能力。
|
||||||
|
点击左侧导航栏中的“流计算”,即可跳转至流计算配置管理页面。
|
||||||
|
您可以通过以下两种方式创建流:流计算向导和自定义 SQL 语句。当前,通过流计算向导创建流时,暂不支持分组功能。通过自定义 SQL 创建流时,您需要了解 TDengine 提供的流计算 SQL 语句的语法,并保证其正确性。
|
||||||
|
|
||||||
|
注: 对于流计算的详细说明,可参考官方文档中关于“流式计算”章节,创建流计算之前需要先准备源数据库以及相应的超级表或表、输出的数据库。
|
||||||
|
|
||||||
|
### 流计算向导
|
||||||
|
|
||||||
|
1. 点击“创建流计算”按钮以后,选择流计算向导窗格,然后输入“流名称”;
|
||||||
|
2. 在“输出”部分,输入相应的“数据库”,“超级表”以及“子表前缀”;
|
||||||
|
3. 在“源”部分,选择相应的“数据库”,然后根据具体情况,选择使用“超级表”或“表”:
|
||||||
|
1. 如果使用“超级表“,请从“超级表”下拉列表中选择相应的超级表, 并在“字段设置”区域,选择相应的字段
|
||||||
|
2. 如果使用“表“,请从“表”下拉列表中选择相应的表, 并在“字段设置”区域,选择相应的字段
|
||||||
|
4. 对于窗口设置,根据需要选择”SESSION“, "STATE"或"INTERVAL", 并配置相应的值;
|
||||||
|
5. 对于”执行“部分,选择相应的”触发器“类型,并设置“Watermark”, "Ignore Expired", "DELETE_MARK", "FILL_HISTORY", "IGNORE UPDATE";
|
||||||
|
6. 然后点击“创建” 按钮,即可创建对应的流计算。
|
||||||
|
|
||||||
|
### 使用 SQL 语句建流
|
||||||
|
|
||||||
|
1. 点击“创建流计算”按钮以后,选择流计算SQL窗格,然后输入类似如下的SQL语句(反引号内为源数据库以及相应的超级表或表、输出的数据库,请按您的环境更新反引号内的内容)
|
||||||
|
|
||||||
|
```shell
|
||||||
|
CREATE STREAM `test_stream` TRIGGER WINDOW_CLOSE IGNORE EXPIRED 1 INTO `db_name`.`stable1` SUBTABLE(CONCAT('table1',tbname)) AS SELECT count(*) FROM `test_db`.`stable_name` PARTITION BY tbname INTERVAL(1m)
|
||||||
|
```
|
||||||
|
2. 点击“创建”按钮,即可创建对应的流计算。
|
|
@ -0,0 +1,56 @@
|
||||||
|
---
|
||||||
|
title: 多级存储
|
||||||
|
---
|
||||||
|
|
||||||
|
## 多级存储
|
||||||
|
|
||||||
|
说明:多级存储功能仅企业版支持。
|
||||||
|
|
||||||
|
在默认配置下,TDengine 会将所有数据保存在 /var/lib/taos 目录下,而且每个 vnode 的数据文件保存在该目录下的不同目录。为扩大存储空间,尽量减少文件读取的瓶颈,提高数据吞吐率 TDengine 可通过配置系统参数 dataDir 让多个挂载的硬盘被系统同时使用。
|
||||||
|
|
||||||
|
除此之外,TDengine 也提供了数据分级存储的功能,将不同时间段的数据存储在挂载的不同介质上的目录里,从而实现不同“热度”的数据存储在不同的存储介质上,充分利用存储,节约成本。比如,最新采集的数据需要经常访问,对硬盘的读取性能要求高,那么用户可以配置将这些数据存储在 SSD 盘上。超过一定期限的数据,查询需求量没有那么高,那么可以存储在相对便宜的 HDD 盘上。
|
||||||
|
|
||||||
|
多级存储支持 3 级,每级最多可配置 16 个挂载点。
|
||||||
|
|
||||||
|
TDengine 多级存储配置方式如下(在配置文件/etc/taos/taos.cfg 中):
|
||||||
|
|
||||||
|
```
|
||||||
|
dataDir [path] <level> <primary>
|
||||||
|
```
|
||||||
|
|
||||||
|
- path: 挂载点的文件夹路径
|
||||||
|
- level: 介质存储等级,取值为 0,1,2。
|
||||||
|
0 级存储最新的数据,1 级存储次新的数据,2 级存储最老的数据,省略默认为 0。
|
||||||
|
各级存储之间的数据流向:0 级存储 -> 1 级存储 -> 2 级存储。
|
||||||
|
同一存储等级可挂载多个硬盘,同一存储等级上的数据文件分布在该存储等级的所有硬盘上。
|
||||||
|
需要说明的是,数据在不同级别的存储介质上的移动,是由系统自动完成的,用户无需干预。
|
||||||
|
- primary: 是否为主挂载点,0(否)或 1(是),省略默认为 1。
|
||||||
|
|
||||||
|
在配置中,只允许一个主挂载点的存在(level=0,primary=1),例如采用如下的配置方式:
|
||||||
|
|
||||||
|
```
|
||||||
|
dataDir /mnt/data1 0 1
|
||||||
|
dataDir /mnt/data2 0 0
|
||||||
|
dataDir /mnt/data3 1 0
|
||||||
|
dataDir /mnt/data4 1 0
|
||||||
|
dataDir /mnt/data5 2 0
|
||||||
|
dataDir /mnt/data6 2 0
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
|
||||||
|
1. 多级存储不允许跨级配置,合法的配置方案有:仅 0 级,仅 0 级+ 1 级,以及 0 级+ 1 级+ 2 级。而不允许只配置 level=0 和 level=2,而不配置 level=1。
|
||||||
|
2. 禁止手动移除使用中的挂载盘,挂载盘目前不支持非本地的网络盘。
|
||||||
|
3. 多级存储目前不支持删除已经挂载的硬盘的功能。
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
|
## 0 级负载均衡
|
||||||
|
|
||||||
|
在多级存储中,有且只有一个主挂载点,主挂载点承担了系统中最重要的元数据存储,同时各个 vnode 的主目录均存在于当前 dnode 主挂载点上,从而导致该 dnode 的写入性能受限于单个磁盘的 IO 吞吐能力。
|
||||||
|
|
||||||
|
从 TDengine 3.1.0.0 开始,如果一个 dnode 配置了多个 0 级挂载点,我们将该 dnode 上所有 vnode 的主目录均衡分布在所有的 0 级挂载点上,由这些 0 级挂载点共同承担写入负荷。在网络 I/O 及其它处理资源不成为瓶颈的情况下,通过优化集群配置,测试结果证明整个系统的写入能力和 0 级挂载点的数量呈现线性关系,即随着 0 级挂载点数量的增加,整个系统的写入能力也成倍增加。
|
||||||
|
|
||||||
|
## 同级挂载点选择策略
|
||||||
|
|
||||||
|
一般情况下,当 TDengine 要从同级挂载点中选择一个用于生成新的数据文件时,采用 round robin 策略进行选择。但现实中有可能每个磁盘的容量不相同,或者容量相同但写入的数据量不相同,这就导致会出现每个磁盘上的可用空间不均衡,在实际进行选择时有可能会选择到一个剩余空间已经很小的磁盘。为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 `minDiskFreeSize`,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,其值应该大于 2GB,即会跳过可用空间小于 2GB 的挂载点。
|
|
@ -0,0 +1,972 @@
|
||||||
|
---
|
||||||
|
title: 数据接入、同步和备份
|
||||||
|
---
|
||||||
|
|
||||||
|
## 简介
|
||||||
|
|
||||||
|
为了能够方便地将各种数据源中的数据导入 TDengine 3.0,TDengine 3.0 企业版提供了一个全新的工具 taosX 用于帮助用户快速将其它数据源中的数据传输到 TDengine 中。 taosX 定义了自己的集成框架,方便扩展新的数据源。目前支持的数据源有 TDengine 自身(即从一个 TDengine 集群到另一个 TDengine 集群),Pi, OPC UA。除了数据接入外,taosX 还支持数据备份、数据同步、数据迁移以及数据导出功能。
|
||||||
|
|
||||||
|
**欲体验 taosX 的各种数据接入能力,请联系 TDengine 市场或销售团队。**
|
||||||
|
|
||||||
|
## 使用前提
|
||||||
|
|
||||||
|
使用 taosX 需要已经部署好 TDengine 中的 taosd 和 taosAdapter,具体细节请参考 [系统部署](../../deployment/deploy)
|
||||||
|
|
||||||
|
**使用限制**:taosX 只能用于企业版数据库服务端。
|
||||||
|
|
||||||
|
## 安装与配置
|
||||||
|
|
||||||
|
安装 taosX 需要使用独立的 taosX 安装包,其中除了 taosX 之外,还包含 Pi 连接器(限 Windows), OPC 连接器, InfluxDB 连接器, MQTT 连接器,以及必要的 Agent 组件,taosX + Agent + 某个连接器可以用于将相应数据源的数据同步到 TDengine。taosX 安装包中还包含了 taos-explorer 这个可视化管理组件
|
||||||
|
|
||||||
|
### Linux 安装
|
||||||
|
|
||||||
|
下载需要的 taosX 安装包,下文以安装包 `taosx-1.0.0-linux-x64.tar.gz` 为例展示如何安装:
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
# 在任意目录下解压文件
|
||||||
|
tar -zxf taosx-1.0.0-linux-x64.tar.gz
|
||||||
|
cd taosx-1.0.0-linux-x64
|
||||||
|
|
||||||
|
# 安装
|
||||||
|
sudo ./install.sh
|
||||||
|
|
||||||
|
# 验证
|
||||||
|
taosx -V
|
||||||
|
# taosx 1.0.0-494d280c (built linux-x86_64 2023-06-21 11:06:00 +08:00)
|
||||||
|
taosx-agent -V
|
||||||
|
# taosx-agent 1.0.0-494d280c (built linux-x86_64 2023-06-21 11:06:01 +08:00)
|
||||||
|
|
||||||
|
# 卸载
|
||||||
|
cd /usr/local/taosx
|
||||||
|
sudo ./uninstall.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**常见问题:**
|
||||||
|
|
||||||
|
1. 安装后系统中增加了哪些文件?
|
||||||
|
* /usr/bin: taosx, taosx-agent, taos-explorer
|
||||||
|
* /usr/local/taosx/plugins: influxdb, mqtt, opc
|
||||||
|
* /etc/systemd/system:taosx.service, taosx-agent.service, taos-explorer.service
|
||||||
|
* /usr/local/taosx: uninstall.sh
|
||||||
|
* /etc/taosx: agent.toml, explorer.toml
|
||||||
|
|
||||||
|
2. taosx -V 提示 "Command not found" 应该如何解决?
|
||||||
|
* 检验问题1,保证所有的文件都被复制到对应的目录
|
||||||
|
``` bash
|
||||||
|
ls /usr/bin | grep taosx
|
||||||
|
```
|
||||||
|
|
||||||
|
### Windows 安装
|
||||||
|
|
||||||
|
- 下载需要的 taosX 安装包,例如 taosx-1.0.0-Windows-x64-installer.exe,执行安装
|
||||||
|
- 可使用 uninstall_taosx.exe 进行卸载
|
||||||
|
- 命令行执行 ```sc start/stop taosx``` 启动/停止 taosx 服务
|
||||||
|
- 命令行执行 ```sc start/stop taosx-agent``` 启动/停止 taosx-agent 服务
|
||||||
|
- 命令行执行 ```sc start/stop taos-explorer``` 启动/停止 taosx-agent 服务
|
||||||
|
- windows 默认安装在```C:\Program Files\taosX```,目录结构如下:
|
||||||
|
~~~
|
||||||
|
├── bin
|
||||||
|
│ ├── taosx.exe
|
||||||
|
│ ├── taosx-srv.exe
|
||||||
|
│ ├── taosx-srv.xml
|
||||||
|
│ ├── taosx-agent.exe
|
||||||
|
│ ├── taosx-agent-srv.exe
|
||||||
|
│ ├── taosx-agent-srv.xml
|
||||||
|
│ ├── taos-explorer.exe
|
||||||
|
│ ├── taos-explorer-srv.exe
|
||||||
|
│ └── taos-explorer-srv.xml
|
||||||
|
├── plugins
|
||||||
|
│ ├── influxdb
|
||||||
|
│ │ └── taosx-inflxdb.jar
|
||||||
|
│ ├── mqtt
|
||||||
|
│ │ └── taosx-mqtt.exe
|
||||||
|
│ ├── opc
|
||||||
|
│ | └── taosx-opc.exe
|
||||||
|
│ ├── pi
|
||||||
|
│ | └── taosx-pi.exe
|
||||||
|
│ | └── taosx-pi-backfill.exe
|
||||||
|
│ | └── ...
|
||||||
|
└── config
|
||||||
|
│ ├── agent.toml
|
||||||
|
│ ├── explorer.toml
|
||||||
|
├── uninstall_taosx.exe
|
||||||
|
├── uninstall_taosx.dat
|
||||||
|
~~~
|
||||||
|
|
||||||
|
**运行模式**
|
||||||
|
|
||||||
|
taosX 是进行数据同步与复制的核心组件,以下运行模式指 taosX 的运行模式,其它组件的运行模式在 taosX 的不同运行模式下与之适配。
|
||||||
|
|
||||||
|
## 命令行模式
|
||||||
|
|
||||||
|
可以直接在命令行上添加必要的参数直接启动 taosX 即为命令行模式运行。当命令行参数所指定的任务完成后 taosX 会自动停止。taosX 在运行中如果出现错误也会自动停止。也可以在任意时刻使用 ctrl+c 停止 taosX 的运行。本节介绍如何使用 taosX 的各种使用场景下的命令行。
|
||||||
|
|
||||||
|
### 命令行参数说明
|
||||||
|
|
||||||
|
**注意:部分参数暂无法通过 Explorer 设置【见:其他参数说明】,之后会逐步开放**
|
||||||
|
|
||||||
|
命令行执行示例:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx -f <from-DSN> -t <to-DSN> <其他参数>
|
||||||
|
```
|
||||||
|
|
||||||
|
以下参数说明及示例中若无特殊说明 `<content>` 的格式均为占位符,使用时需要使用实际参数进行替换。
|
||||||
|
|
||||||
|
### DSN (Data Source Name)
|
||||||
|
|
||||||
|
taosX 命令行模式使用 DSN 来表示一个数据源(来源或目的源),典型的 DSN 如下:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# url-like
|
||||||
|
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<object>][?<p1>=<v1>[&<p2>=<v2>]]
|
||||||
|
|------|------------|---|-----------|-----------|------|------|----------|-----------------------|
|
||||||
|
|driver| protocol | | username | password | host | port | object | params |
|
||||||
|
|
||||||
|
// url 示例
|
||||||
|
tmq+ws://root:taosdata@localhost:6030/db1?timeout=never
|
||||||
|
```
|
||||||
|
[] 中的数据都为可选参数。
|
||||||
|
|
||||||
|
1. 不同的驱动 (driver) 拥有不同的参数。driver 包含如下选项:
|
||||||
|
|
||||||
|
- taos:使用查询接口从 TDengine 获取数据
|
||||||
|
- tmq:启用数据订阅从 TDengine 获取数据
|
||||||
|
- local:数据备份或恢复
|
||||||
|
- pi: 启用 pi-connector从 pi 数据库中获取数据
|
||||||
|
- opc:启用 opc-connector 从 opc-server 中获取数据
|
||||||
|
- mqtt: 启用 mqtt-connector 获取 mqtt-broker 中的数据
|
||||||
|
- kafka: 启用 Kafka 连接器从 Kafka Topics 中订阅消息写入
|
||||||
|
- influxdb: 启用 influxdb 连接器从 InfluxDB 获取数据
|
||||||
|
- csv:从 CSV 文件解析数据
|
||||||
|
|
||||||
|
2. +protocol 包含如下选项:
|
||||||
|
- +ws: 当 driver 取值为 taos 或 tmq 时使用,表示使用 rest 获取数据。不使用 +ws 则表示使用原生连接获取数据,此时需要 taosx 所在的服务器安装 taosc。
|
||||||
|
- +ua: 当 driver 取值为 opc 时使用,表示采集的数据的 opc-server 为 opc-ua
|
||||||
|
- +da: 当 driver 取值为 opc 时使用,表示采集的数据的 opc-server 为 opc-da
|
||||||
|
|
||||||
|
3. host:port 表示数据源的地址和端口。
|
||||||
|
4. object 表示具体的数据源,可以是TDengine的数据库、超级表、表,也可以是本地备份文件的路径,也可以是对应数据源服务器中的数据库。
|
||||||
|
5. username 和 password 表示该数据源的用户名和密码。
|
||||||
|
6. params 代表了 dsn 的参数。
|
||||||
|
|
||||||
|
### 其它参数说明
|
||||||
|
|
||||||
|
1. parser 通过 --parser 或 -p 设置,设置 transform 的 parser 生效。可以通过 Explorer 在如 CSV,MQTT,KAFKA 数据源的任务配置进行设置。
|
||||||
|
|
||||||
|
配置示例:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
--parser "{\"parse\":{\"ts\":{\"as\":\"timestamp(ms)\"},\"topic\":{\"as\":\"varchar\",\"alias\":\"t\"},\"partition\":{\"as\":\"int\",\"alias\":\"p\"},\"offset\":{\"as\":\"bigint\",\"alias\":\"o\"},\"key\":{\"as\":\"binary\",\"alias\":\"k\"},\"value\":{\"as\":\"binary\",\"alias\":\"v\"}},\"model\":[{\"name\":\"t_{t}\",\"using\":\"kafka_data\",\"tags\":[\"t\",\"p\"],\"columns\":[\"ts\",\"o\",\"k\",\"v\"]}]}"
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
2. transform 通过 --transform 或 -T 设置,配置数据同步(仅支持 2.6 到 3.0 以及 3.0 之间同步)过程中对于表名及表字段的一些操作。暂无法通过 Explorer 进行设置。配置说明如下:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
1.AddTag,为表添加 TAG。设置示例:-T add-tag:<tag1>=<value1>。
|
||||||
|
2.表重命名:
|
||||||
|
2.1 重命名表限定
|
||||||
|
2.1.1 RenameTable:对所有符合条件的表进行重命名。
|
||||||
|
2.1.2 RenameChildTable:对所有符合条件的子表进行重命名。
|
||||||
|
2.1.3 RenameSuperTable:对所有符合条件的超级表进行重命名。
|
||||||
|
2.2 重命名方式
|
||||||
|
2.2.1 Prefix:添加前缀。
|
||||||
|
2.2.2 Suffix:添加后缀。
|
||||||
|
2.2.3 Template:模板方式。
|
||||||
|
2.2.4 ReplaceWithRegex:正则替换。taosx 1.1.0 新增。
|
||||||
|
重命名配置方式:
|
||||||
|
<表限定>:<重命名方式>:<重命名值>
|
||||||
|
使用示例:
|
||||||
|
1.为所有表添加前缀 <prefix>
|
||||||
|
--transform rename-table:prefix:<prefix>
|
||||||
|
2.为符合条件的表替换前缀:prefix1 替换为 prefix2,以下示例中的 <> 为正则表达式的不再是占位符。
|
||||||
|
-T rename-child-table:replace_with_regex:^prefix1(?<old>)::prefix2_$old
|
||||||
|
|
||||||
|
示例说明:^prefix1(?&lt;old&gt;) 为正则表达式,该表达式会匹配表名中以 prefix1 开始的表名并将后缀部分记录为 old,prefix2_$old 则会使用 prefix2_ 与 old 进行替换。注意:两部分使用关键字符 :: 进行分隔,所以需要保证正则表达式中不能包含该字符。
|
||||||
|
若有更复杂的替换需求请参考:https://docs.rs/regex/latest/regex/#example-replacement-with-named-capture-groups 或咨询 taosx 开发人员。
|
||||||
|
```
|
||||||
|
|
||||||
|
3. jobs 指定任务并发数,仅支持 tmq 任务。暂无法通过 Explorer 进行设置。通过 --jobs `<number>` 或 -j `<number>` 进行设置。
|
||||||
|
4. -v 用于指定 taosx 的日志级别,-v 表示启用 info 级别日志,-vv 对应 debug,-vvv 对应 trace。
|
||||||
|
|
||||||
|
|
||||||
|
### 从 TDengine 到 TDengine 的数据同步
|
||||||
|
|
||||||
|
#### TDengine 3.0 -> TDengine 3.0
|
||||||
|
|
||||||
|
在两个相同版本 (都是 3.0.x.y)的 TDengine 集群之间将源集群中的存量及增量数据同步到目标集群中。
|
||||||
|
|
||||||
|
命令行模式下支持的参数如下:
|
||||||
|
|
||||||
|
| 参数名称 | 说明 | 默认值 |
|
||||||
|
|-----------|------------------------------------------------------------------|----------------------------|
|
||||||
|
| group.id | 订阅使用的分组ID | 若为空则使用 hash 生成一个 |
|
||||||
|
| client.id | 订阅使用的客户端ID | taosx |
|
||||||
|
| timeout | 监听数据的超时时间,当设置为 never 表示 taosx 不会停止持续监听。 | 500ms |
|
||||||
|
| offset | 从指定的 offset 开始订阅,格式为 `<vgroup_id>:<offset>`,若有多个 vgroup 则用半角逗号隔开 | 若为空则从 0 开始订阅 |
|
||||||
|
| token | 目标源参数。 认证使用参数。 | 无 |
|
||||||
|
|
||||||
|
示例:
|
||||||
|
```shell
|
||||||
|
taosx run \
|
||||||
|
-f 'tmq://root:taosdata@localhost:6030/db1?group.id=taosx1&client.id=taosx&timeout=never&offset=2:10' \
|
||||||
|
-t 'taos://root:taosdata@another.com:6030/db2'
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#### TDengine 2.6 -> TDengine 3.0
|
||||||
|
|
||||||
|
将 2.6 版本 TDengine 集群中的数据迁移到 3.0 版本 TDengine 集群。
|
||||||
|
|
||||||
|
#### 命令行参数
|
||||||
|
|
||||||
|
| 参数名称 | 说明 | 默认值 |
|
||||||
|
|--------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
|
||||||
|
| libraryPath | 在 option 模式下指定 taos 库路径 | 无 |
|
||||||
|
| configDir | 指定 taos.cfg 配置文件路径 | 无 |
|
||||||
|
| mode | 数据源参数。 history 表示历史数据。 realtime 表示实时同步。 all 表示以上两种。 | history |
|
||||||
|
| restro | 数据源参数。 在同步实时数据前回溯指定时间长度的数据进行同步。 restro=10m 表示回溯最近 10 分钟的数据以后,启动实时同步。 | 无 |
|
||||||
|
| interval | 数据源参数。 轮询间隔 ,mode=realtime&interval=5s 指定轮询间隔为 5s | 无 |
|
||||||
|
| excursion | 数据源参数。 允许一段时间的乱序数据 | 500ms |
|
||||||
|
| stables | 数据源参数。 仅同步指定超级表的数据,多个超级表名用英文逗号 ,分隔 | 无 |
|
||||||
|
| tables | 数据源参数。 仅同步指定子表的数据,表名格式为 {stable}.{table} 或 {table},多个表名用英文逗号 , 分隔,支持 @filepath 的方式输入一个文件,每行视为一个表名,如 tables=@./tables.txt 表示从 ./tables.txt 中按行读取每个表名,空行将被忽略。 | 无 |
|
||||||
|
| select-from-stable | 数据源参数。 从超级表获取 select {columns} from stable where tbname in ({tbnames}) ,这种情况 tables 使用 {stable}.{table} 数据格式,如 meters.d0 表示 meters 超级表下面的 d0 子表。 | 默认使用 select \* from table 获取数据 |
|
||||||
|
| assert | 目标源参数。 taos:///db1?assert 将检测数据库是否存在,如不存在,将自动创建目标数据库。 | 默认不自动创建库。 |
|
||||||
|
| force-stmt | 目标源参数。 当 TDengine 版本大于 3.0 时,仍然使用 STMT 方式写入。 | 默认为 raw block 写入方式 |
|
||||||
|
| batch-size | 目标源参数。 设置 STMT 写入模式下的最大批次插入条数。 | |
|
||||||
|
| interval | 目标源参数。 每批次写入后的休眠时间。 | 无 |
|
||||||
|
| max-sql-length | 目标源参数。 用于建表的 SQL 最大长度,单位为 bytes。 | 默认 800_000 字节。 |
|
||||||
|
| failes-to | 目标源参数。 添加此参数,值为文件路径,将写入错误的表及其错误原因写入该文件,正常执行其他表的同步任务。 | 默认写入错误立即退出。 |
|
||||||
|
| timeout-per-table | 目标源参数。 为子表或普通表同步任务添加超时。 | 无 |
|
||||||
|
| update-tags | 目标源参数。 检查子表存在与否,不存在时正常建表,存在时检查标签值是否一致,不一致则更新。 | 无 |
|
||||||
|
|
||||||
|
#### 示例
|
||||||
|
|
||||||
|
1.使用原生连接同步数据
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run \
|
||||||
|
-f 'taos://td1:6030/db1?libraryPath=./libtaos.so.2.6.0.30&mode=all' \
|
||||||
|
  -t 'taos://td2:6030/db2?libraryPath=./libtaos.so.3.0.1.8&assert' \
|
||||||
|
-v
|
||||||
|
```
|
||||||
|
|
||||||
|
2.使用 WebSocket 同步数据超级表 stable1 和 stable2 的数据
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run \
|
||||||
|
-f 'taos+ws://<username>:<password>@td1:6041/db1?stables=stable1,stable2' \
|
||||||
|
  -t 'taos+wss://td2:6041/db2?assert&token=<token>' \
|
||||||
|
-v
|
||||||
|
```
|
||||||
|
|
||||||
|
### 从 TDengine 备份数据文件到本地
|
||||||
|
|
||||||
|
示例:
|
||||||
|
```shell
|
||||||
|
taosx run -f 'tmq://root:taosdata@td1:6030/db1' -t 'local:/path_directory/'
|
||||||
|
|
||||||
|
```
|
||||||
|
以上示例执行的结果及参数说明:
|
||||||
|
|
||||||
|
将集群 td1 中的数据库 db1 的所有数据,备份到 taosx 所在设备的 /path_directory 路径下。
|
||||||
|
|
||||||
|
数据源(-f 参数的 DSN)的 object 支持配置为 数据库级(dbname)、超级表级(dbname.stablename)、子表/普通表级(dbname.tablename),对应备份数据的级别数据库级、超级表级、子表/普通表级
|
||||||
|
|
||||||
|
|
||||||
|
### 从本地数据文件恢复到 TDengine
|
||||||
|
|
||||||
|
#### 示例
|
||||||
|
```shell
|
||||||
|
taosx run -f 'local:/path_directory/' -t 'taos://root:taosdata@td2:6030/db1?assert'
|
||||||
|
```
|
||||||
|
|
||||||
|
以上示例执行的结果:
|
||||||
|
|
||||||
|
将 taosx 所在设备 /path_directory 路径下已备份的数据文件,恢复到集群 td2 的数据库 db1 中,如果 db1 不存在,则自动建库。
|
||||||
|
|
||||||
|
目标源(-t 参数的 DSN)中的 object 支持配置为数据库(dbname)、超级表(dbname.stablename)、子表/普通表(dbname.tablename),对应备份数据的级别数据库级、超级表级、子表/普通表级,前提是备份的数据文件也是对应的数据库级、超级表级、子表/普通表级数据。
|
||||||
|
|
||||||
|
|
||||||
|
#### 常见错误排查
|
||||||
|
|
||||||
|
(1) 如果使用原生连接,任务启动失败并报以下错误:
|
||||||
|
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
[0x000B] Unable to establish connection
|
||||||
|
```
|
||||||
|
产生原因是与数据源的端口连接异常,需检查数据源 FQDN 是否联通及端口 6030 是否可正常访问。
|
||||||
|
|
||||||
|
(2) 如果使用 WebSocket 连接,任务启动失败并报以下错误:
|
||||||
|
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
0: WebSocket internal error: IO error: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
1: IO error: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
2: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
```
|
||||||
|
|
||||||
|
使用 WebSocket 连接时可能遇到多种错误类型,错误信息可以在 ”Caused by“ 后查看,以下是几种可能的错误:
|
||||||
|
|
||||||
|
- "Temporary failure in name resolution": DNS 解析错误,检查 IP 或 FQDN 是否能够正常访问。
|
||||||
|
- "IO error: Connection refused (os error 111)": 端口访问失败,检查端口是否配置正确或是否已开启和可访问。
|
||||||
|
- "IO error: received corrupt message": 消息解析失败,可能是使用了 wss 方式启用了 SSL,但源端口不支持。
|
||||||
|
- "HTTP error: *": 可能连接到错误的 taosAdapter 端口或 LSB/Nginx/Proxy 配置错误。
|
||||||
|
- "WebSocket protocol error: Handshake not finished": WebSocket 连接错误,通常是因为配置的端口不正确。
|
||||||
|
|
||||||
|
(3) 如果任务启动失败并报以下错误:
|
||||||
|
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
[0x038C] WAL retention period is zero
|
||||||
|
```
|
||||||
|
|
||||||
|
是由于源端数据库 WAL 配置错误,无法订阅。
|
||||||
|
|
||||||
|
解决方式:
|
||||||
|
修改数据 WAL 配置:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
alter database test wal_retention_period 3600;
|
||||||
|
```
|
||||||
|
|
||||||
|
### 从 OPC-UA 同步数据到 TDengine
|
||||||
|
|
||||||
|
#### 配置参数
|
||||||
|
|
||||||
|
| 参数名称 | 类型 | 描述 |
|
||||||
|
|-----------------|--------|-----------------------------------------------------------------------------|
|
||||||
|
| interval | int | 采集间隔(单位:秒),默认为1秒 |
|
||||||
|
| concurrent | int | 采集器并发数,默认为1 |
|
||||||
|
| batch_size | int | 采集器上报的批次点位数,默认为100 |
|
||||||
|
| batch_timeout | int | 采集器上报的超时时间(单位:秒),默认为20秒 |
|
||||||
|
| connect_timeout | int | 连接的超时时间(单位:秒),默认为10秒 |
|
||||||
|
| request_timeout | int | 请求的超时时间(单位:秒),默认为10秒 |
|
||||||
|
| security_policy | string | OPC-UA连接安全策略(可配置为None/Basic128Rsa15/Basic256/Basic256Sha256) |
|
||||||
|
| security_mode | string | OPC-UA连接模式(可配置为None/Sign/SignAndEncrypt) |
|
||||||
|
| certificate | string | cert.pem的路径。当安全模式或策略不是”无”时生效 |
|
||||||
|
| private_key | string | key.pem的路径。 当安全模式或策略不是”无”时生效 |
|
||||||
|
| csv_config_file | string | 包含 OPC UA 的点位配置和表配置。与 ua.nodes 配置互斥,csv_config_file 优先生效|
|
||||||
|
| ua.nodes | string | OPC-UA 测点的 NodeID。和 opc_table_config 配置结合使用,两者需要同时配置。与配置 csv_config_file 配置互斥,csv_config_file 优先生效。配置格式为 <nodeid\>::<code\>,code 用于建子表。|
|
||||||
|
| opc_table_config | string | OPCUA 单列模式表配置。需要与 ua.nodes 配合使用。|
|
||||||
|
| debug | bool | 启用 OPC 连接器的 debug 日志。默认为 false。|
|
||||||
|
| enable | bool | 原始数据存储。默认为 false|
|
||||||
|
| path | string | 原始数据存储路径。enable 为 true 时必须配置。|
|
||||||
|
| keep | int | 原始数据保存天数。enable 为 true 时必须配置。|
|
||||||
|
|
||||||
|
补充:
|
||||||
|
1. opc_table_config 说明:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"stable_prefix": "meters", // 超级表前缀
|
||||||
|
"column_configs":
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"column_name": "received_time", // 存储接收时间
|
||||||
|
"column_type": "timestamp",
|
||||||
|
"column_alias": "ts", // 接收时间建表列用列名为 ts
|
||||||
|
"is_primary_key": true // 接收时间时间戳作为主键
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"column_name": "original_time",
|
||||||
|
"column_type": "timestamp",
|
||||||
|
"column_alias": "ts_2",
|
||||||
|
"is_primary_key": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"column_name": "value", // 数据列
|
||||||
|
"column_alias": "valueaa", // 数据列别名
|
||||||
|
"is_primary_key": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"column_name": "quality", // 质量位列
|
||||||
|
"column_type": "int",
|
||||||
|
"column_alias": "quality11", // 质量位列别名
|
||||||
|
"is_primary_key": false
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 示例
|
||||||
|
|
||||||
|
1. 使用 ua.nodes 和 opc_table_config 的配置示例:
|
||||||
|
采集 nodeid 为 ns=2;i=2 和 ns=2;i=3 的点位,将其写入到集群 tdengine 的 opc 库中超级表前缀为 meters,如果 ns=2;i=2 的点位类型为 float 则会创建 meters_float 的超级表,超级表使用 opc 接收的数据作为时间戳索引列,并且保留原始时间戳列,原始时间戳列名为 ts_2,数据列存储为 valueaa,同时存储质量数据到 quality11 列。
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run \
|
||||||
|
-f "opcua://uauser:uapass@localhost:4840?ua.nodes=ns=2;i=2::DSF1312,ns=2;i=3::DSF1313&opc_table_config={\"stable_prefix\": \"meters\", \"column_configs\": [{\"column_name\": \"received_time\", \"column_type\": \"timestamp\", \"column_alias\": \"ts\", \"is_primary_key\": true }, {\"column_name\": \"original_time\", \"column_type\": \"timestamp\", \"column_alias\": \"ts_2\", \"is_primary_key\": false }, {\"column_name\": \"value\", \"column_alias\": \"valueaa\", \"is_primary_key\": false }, {\"column_name\": \"quality\", \"column_type\": \"int\", \"column_alias\": \"quality11\", \"is_primary_key\": false } ] }" \
|
||||||
|
-t "taos://tdengine:6030/opc"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 使用 CSV 配置文件
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run -f "opcua://<server-info>?csv_config_file=@<file_path>" -t "taos+ws://tdengine:6041/opc"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### CSV 配置文件模板
|
||||||
|
|
||||||
|
|
||||||
|
### 从 OPC-DA 同步数据到 TDengine (Windows)
|
||||||
|
|
||||||
|
#### 配置参数
|
||||||
|
|
||||||
|
| 参数名称 | 类型 | 描述 |
|
||||||
|
|-----------------|--------|-----------------------------------------------------------------------------|
|
||||||
|
| interval | int | 采集间隔(单位:秒),默认为1秒 |
|
||||||
|
| concurrent | int | 采集器并发数,默认为1 |
|
||||||
|
| batch_size | int | 采集器上报的批次点位数,默认为100 |
|
||||||
|
| batch_timeout | int | 采集器上报的超时时间(单位:秒),默认为20秒 |
|
||||||
|
| connect_timeout | int | 连接的超时时间(单位:秒),默认为10秒 |
|
||||||
|
| request_timeout | int | 请求的超时时间(单位:秒),默认为10秒 |
|
||||||
|
| csv_config_file | string | 包含 OPC UA 的点位配置和表配置。与 ua.nodes 两者之间需要配置一个。CSV 的配置模版参考:OPC 需求汇总及完成现状 |
|
||||||
|
| da.tags | string | OPC-UA 测点的 NodeID。和 opc_table_config 配置结合使用,两者需要同时配置。与配置 csv_config_file 配置互斥,csv_config_file 优先生效。|
|
||||||
|
| opc_table_config | string | OPCUA 单列模式表配置。需要与 da.tags 配合使用|
|
||||||
|
| debug | bool | 启用 OPC 连接器的 debug 日志。默认为 false。|
|
||||||
|
| enable | bool | 原始数据存储。默认为 false|
|
||||||
|
| path | string | 原始数据存储路径。enable 为 true 时必须配置。|
|
||||||
|
| keep | int | 原始数据保存天数。enable 为 true 时必须配置。|
|
||||||
|
|
||||||
|
#### 应用示例
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run \
|
||||||
|
-f "opc+da://Matrikon.OPC.Simulation.1?nodes=localhost&da.tags=Random.Real8::tb3::c1::int"
|
||||||
|
-t "taos://tdengine:6030/opc"
|
||||||
|
```
|
||||||
|
|
||||||
|
以上示例的执行结果:
|
||||||
|
|
||||||
|
采集 Matrikon.OPC.Simulation.1 服务器上 OPC DA 中 da.tags 为 Random.Real8的数据,数据类型为int,对应在 TDengine 中以表名为 tb3 ,列名为c1,列类型为 int 型 schema 来创建表(如果对应表已存在,则直接采集数据并写入)。
|
||||||
|
|
||||||
|
#### 常见错误排查
|
||||||
|
|
||||||
|
(1) 如果使用原生连接,任务启动失败并打印如下错误:
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
0: Error occurred while creating a new object: [0x000B] Unable to establish connection
|
||||||
|
```
|
||||||
|
解决方式:
|
||||||
|
|
||||||
|
检查目标端 TDengine 的 FQDN 是否联通及端口 6030 是否可正常访问。
|
||||||
|
|
||||||
|
(2) 如果使用 WebSocket 连接任务启动失败并打印如下错误::
|
||||||
|
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
0: WebSocket internal error: IO error: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
1: IO error: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
2: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
```
|
||||||
|
|
||||||
|
使用 WebSocket 连接时可能遇到多种错误类型,错误信息可以在 ”Caused by“ 后查看,以下是几种可能的错误:
|
||||||
|
|
||||||
|
- "Temporary failure in name resolution": DNS 解析错误,检查目标端 TDengine的 IP 或 FQDN 是否能够正常访问。
|
||||||
|
- "IO error: Connection refused (os error 111)": 端口访问失败,检查目标端口是否配置正确或是否已开启和可访问(通常为6041端口)。
|
||||||
|
- "HTTP error: *": 可能连接到错误的 taosAdapter 端口或 LSB/Nginx/Proxy 配置错误。
|
||||||
|
- "WebSocket protocol error: Handshake not finished": WebSocket 连接错误,通常是因为配置的端口不正确。
|
||||||
|
|
||||||
|
### 从 PI 同步数据到 TDengine (Windows)
|
||||||
|
|
||||||
|
#### PI DSN 配置
|
||||||
|
|
||||||
|
PI DSN 的完整配置如下:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pi://[<username>:<password>@]PIServerName/AFDatabaseName?[TemplateForPIPoint][&TemplateForAFElement][&PointList][&<PISystemName=pisys>][&<MaxWaitLen>][&UpdateInterval]
|
||||||
|
```
|
||||||
|
|
||||||
|
在 taosX CLI 运行时支持的参数如下,其中 TemplateForPIPoint、TemplateForAFElement、PointList 三个参数至少配置一项:
|
||||||
|
- PISystemName:选填,连接配置 PI 系统服务名,默认值与 PIServerName 一致
|
||||||
|
- MaxWaitLen:选填,数据最大缓冲条数,默认值为 1000 ,有效取值范围为 [1,10000]
|
||||||
|
- UpdateInterval:选填,PI System 取数据频率,默认值为 10000(毫秒:ms),有效取值范围为 [10,600000]
|
||||||
|
- TemplateForPIPoint:选填,使用 PI Point 模式将模板按照 element 的每个 Arrtribution 作为子表导入到 TDengine
|
||||||
|
- TemplateForAFElement:选填,使用 AF Point 模式将模板按照 element 的 Attribution 集合作为一个子表导入到 TDengine
|
||||||
|
- PointList:选填,使用 PointList 模式将指定csv文件中描述的点位信息在 PI 数据库中的数据导入到 TDengine
|
||||||
|
|
||||||
|
|
||||||
|
#### 应用示例
|
||||||
|
|
||||||
|
将位于服务器 WIN-2OA23UM12TN 中的 PI 数据库 Met1,模板 template1、template2配置为 TemplateForPIPoint模式,模板 template3、template4 配置为 TemplateForAFElement 模式,服务器 /home/ 路径下的点位文件 points.csv 配置为 PointList 模式,连接配置 PI 系统服务名为 PI,数据最大缓冲条数为1000,PI System 取数据频率为10000ms,将该库中的数据同步到 服务器 tdengine 的 pi 库中。完整的示例如下:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run \
|
||||||
|
-f "pi://WIN-2OA23UM12TN/Met1?TemplateForPIPoint=template1,template2&TemplateForAFElement=template3,template4" \
|
||||||
|
-t "taos://tdengine:6030/pi"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### 常见错误排查
|
||||||
|
|
||||||
|
(1) 如果使用原生连接,任务启动失败并打印如下错误:
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
0: Error occurred while creating a new object: [0x000B] Unable to establish connection
|
||||||
|
```
|
||||||
|
解决方式:
|
||||||
|
|
||||||
|
检查目标端 TDengine 的 FQDN 是否联通及端口 6030 是否可正常访问。
|
||||||
|
|
||||||
|
(2) 如果使用 WebSocket 连接任务启动失败并打印如下错误::
|
||||||
|
|
||||||
|
```text
|
||||||
|
Error: tmq to td task exec error
|
||||||
|
|
||||||
|
Caused by:
|
||||||
|
0: WebSocket internal error: IO error: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
1: IO error: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
2: failed to lookup address information: Temporary failure in name resolution
|
||||||
|
```
|
||||||
|
|
||||||
|
使用 WebSocket 连接时可能遇到多种错误类型,错误信息可以在 ”Caused by“ 后查看,以下是几种可能的错误:
|
||||||
|
|
||||||
|
- "Temporary failure in name resolution": DNS 解析错误,检查目标端 TDengine的 IP 或 FQDN 是否能够正常访问。
|
||||||
|
- "IO error: Connection refused (os error 111)": 端口访问失败,检查目标端口是否配置正确或是否已开启和可访问(通常为6041端口)。
|
||||||
|
- "HTTP error: *": 可能连接到错误的 taosAdapter 端口或 LSB/Nginx/Proxy 配置错误。
|
||||||
|
- "WebSocket protocol error: Handshake not finished": WebSocket 连接错误,通常是因为配置的端口不正确。
|
||||||
|
|
||||||
|
|
||||||
|
### 从 InfluxDB 同步数据到 TDengine
|
||||||
|
|
||||||
|
#### 命令行参数
|
||||||
|
|
||||||
|
将数据从 InfluxDB 同步至 TDengine 的命令,如下所示:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosx run --from "<InfluxDB-DSN>" --to "<TDengine-DSN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
其中,InfluxDB DSN 符合 DSN 的通用规则,这里仅对其特有的参数进行说明:
|
||||||
|
- version: 必填,InfluxDB 的版本,主要用于区分 1.x 与 2.x 两个版本,二者使用不同的认证参数;
|
||||||
|
- version = 1.x
|
||||||
|
- username: 必填,InfluxDB 用户,该用户至少在该组织中拥有读取权限;
|
||||||
|
- password: 必填,InfluxDB 用户的登录密码;
|
||||||
|
- version = 2.x
|
||||||
|
- orgId: 必填,InfluxDB 中的 Organization ID;
|
||||||
|
- token: 必填,InfluxDB 中生成的 API token, 这个 token 至少要拥有以上 Bucket 的 Read 权限;
|
||||||
|
- bucket: 必填,InfluxDB 中的 Bucket 名称,一次只能同步一个 Bucket;
|
||||||
|
- measurements: 非必填,可以指定需要同步的多个 Measurements(英文逗号分割),未指定则同步全部;
|
||||||
|
- beginTime: 必填,格式为:YYYY-MM-DD'T'HH:MM:SS'Z', 时区采用 UTC 时区,例如:2023-06-01T00:00:00+0800, 即北京时间2023-06-01 00:00:00(东八区时间);
|
||||||
|
- endTime: 非必填,可以不指定该字段或值为空,格式与beginTime相同;如果未指定,提交任务后,将持续进行数据同步;
|
||||||
|
- readWindow: 非必填,可以不指定该字段或值为空,可选项为D、H、M(天、时、分);如果未指定,则默认按 M 拆分读取窗口。
|
||||||
|
|
||||||
|
#### 示例
|
||||||
|
|
||||||
|
将位于 192.168.1.10 的 InfluxDB 中, Bucket 名称为 test_bucket, 从UTC时间2023年06月01日00时00分00秒开始的数据,通过运行在 192.168.1.20 上的 taosAdapter, 同步至 TDengine 的 test_db 数据库中,完整的命令如下所示:
|
||||||
|
```bash
|
||||||
|
# version = 1.x
|
||||||
|
taosx run \
|
||||||
|
--from "influxdb+http://192.168.1.10:8086/?version=1.7&username=test&password=123456&bucket=test_bucket&measurements=&beginTime=2023-06-01T00:00:00+0800&readWindow=M" \
|
||||||
|
--to "taos+http://192.168.1.20:6041/test_db" \
|
||||||
|
-vv
|
||||||
|
|
||||||
|
# version = 2.x
|
||||||
|
taosx run \
|
||||||
|
--from "influxdb+http://192.168.1.10:8086/?version=2.7&orgId=3233855dc7e37d8d&token=OZ2sB6Ie6qcKcYAmcHnL-i3STfLVg_IRPQjPIzjsAQ4aUxCWzYhDesNape1tp8IsX9AH0ld41C-clTgo08CGYA==&bucket=test_bucket&measurements=&beginTime=2023-06-01T00:00:00+0800&readWindow=M" \
|
||||||
|
--to "taos+http://192.168.1.20:6041/test_db" \
|
||||||
|
-vv
|
||||||
|
```
|
||||||
|
|
||||||
|
在这个命令中,未指定endTime, 所以任务会长期运行,持续同步最新的数据。
|
||||||
|
|
||||||
|
|
||||||
|
### 从 OpenTSDB 同步数据到 TDengine
|
||||||
|
|
||||||
|
#### 命令行参数
|
||||||
|
|
||||||
|
将数据从 OpenTSDB 同步至 TDengine 的命令,如下所示:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosx run --from "<OpenTSDB-DSN>" --to "<TDengine-DSN>"
|
||||||
|
```
|
||||||
|
|
||||||
|
其中,OpenTSDB DSN 符合 DSN 的通用规则,这里仅对其特有的参数进行说明:
|
||||||
|
- metrics: 非必填,可以指定需要同步的多个 Metrics(英文逗号分割),未指定则同步全部;
|
||||||
|
- beginTime: 必填,格式为:YYYY-MM-DD'T'HH:MM:SS'Z', 时区采用 UTC 时区,例如:2023-06-01T00:00:00+0800, 即北京时间2023-06-01 00:00:00(东八区时间);
|
||||||
|
- endTime: 非必填,可以不指定该字段或值为空,格式与beginTime相同;如果未指定,提交任务后,将持续进行数据同步;
|
||||||
|
- readWindow: 非必填,可以不指定该字段或值为空,可选项为D、H、M(天、时、分);如果未指定,则默认按分钟拆分读取窗口。
|
||||||
|
|
||||||
|
#### 示例
|
||||||
|
|
||||||
|
将位于 192.168.1.10 的 OpenTSDB 中, Metric 名称为 test_metric1 与 test_metric2 的两个数据源, 从UTC时间2023年06月01日00时00分00秒开始的数据,通过运行在 192.168.1.20 上的 taosAdapter, 同步至 TDengine 的 test_db 数据库中,完整的命令如下所示:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosx run \
|
||||||
|
--from "opentsdb+http://192.168.1.10:4242/?metrics=test_metric1,test_metric2&beginTime=2023-06-01T00:00:00+0800&readWindow=M" \
|
||||||
|
--to "taos+http://192.168.1.20:6041/test_db" \
|
||||||
|
-vv
|
||||||
|
```
|
||||||
|
|
||||||
|
在这个命令中,未指定endTime, 所以任务会长期运行,持续同步最新的数据。
|
||||||
|
|
||||||
|
|
||||||
|
### 从 MQTT 同步数据到 TDengine
|
||||||
|
|
||||||
|
目前,MQTT 连接器仅支持从 MQTT 服务端消费 JSON 格式的消息,并将其同步至 TDengine. 命令如下所示:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosx run --from "<MQTT-DSN>" --to "<TDengine-DSN>" --parser "@<parser-config-file-path>"
|
||||||
|
```
|
||||||
|
|
||||||
|
其中:
|
||||||
|
- `--from` 用于指定 MQTT 数据源的 DSN
|
||||||
|
- `--to` 用于指定 TDengine 的 DSN
|
||||||
|
- `--parser` 用于指定一个 JSON 格式的配置文件,该文件决定了如何解析 JSON 格式的 MQTT 消息,以及写入 TDengine 时的超级表名、子表名、字段名称和类型,以及标签名称和类型等。
|
||||||
|
|
||||||
|
#### MQTT DSN 配置
|
||||||
|
|
||||||
|
MQTT DSN 符合 DSN 的通用规则,这里仅对其特有的参数进行说明:
|
||||||
|
- topics: 必填,用于配置监听的 MQTT 主题名称和连接器支持的最大 QoS, 采用 `<topic>::<max-Qos>` 的形式;支持配置多个主题,使用逗号分隔;配置主题时,还可以使用 MQTT 协议的支持的通配符#和+;
|
||||||
|
- version: 非必填,用于配置 MQTT 协议的版本,支持的版本包括:3.1/3.1.1/5.0, 默认值为3.1;
|
||||||
|
- clean_session: 非必填,用于配置连接器作为 MQTT 客户端连接至 MQTT 服务端时,服务端是否保存该会话信息,其默认值为 true, 即不保存会话信息;
|
||||||
|
- client_id: 必填,用于配置连接器作为 MQTT 客户端连接至 MQTT 服务端时的客户端 id;
|
||||||
|
- keep_alive: 非必填,用于配置连接器作为 MQTT 客户端,向 MQTT 服务端发出 PINGREQ 消息后的等待时间,如果连接器在该时间内,未收到来自 MQTT 服务端的 PINGRESP, 连接器则主动断开连接;该配置的单位为秒,默认值为 60;
|
||||||
|
- ca: 非必填,用于指定连接器与 MQTT 服务端建立 SSL/TLS 连接时,使用的 CA 证书,其值为在证书文件的绝对路径前添加@, 例如:@/home/admin/certs/ca.crt;
|
||||||
|
- cert: 非必填,用于指定连接器与 MQTT 服务端建立 SSL/TLS 连接时,使用的客户端证书,其值为在证书文件的绝对路径前添加@, 例如:@/home/admin/certs/client.crt;
|
||||||
|
- cert_key: 非必填,用于指定连接器与 MQTT 服务端建立 SSL/TLS 连接时,使用的客户端私钥,其值为在私钥文件的绝对路径前添加@, 例如:@/home/admin/certs/client.key;
|
||||||
|
- log_level: 非必填,用于配置连接器的日志级别,连接器支持 error/warn/info/debug/trace 5种日志级别,默认值为 info.
|
||||||
|
|
||||||
|
一个完整的 MQTT DSN 示例如下:
|
||||||
|
```bash
|
||||||
|
mqtt://<username>:<password>@<mqtt-broker-ip>:8883?topics=testtopic/1::2&version=3.1&clean_session=true&log_level=info&client_id=taosdata_1234&keep_alive=60&ca=@/home/admin/certs/ca.crt&cert=@/home/admin/certs/client.crt&cert_key=@/home/admin/certs/client.key
|
||||||
|
```
|
||||||
|
|
||||||
|
#### MQTT 连接器的解释器配置
|
||||||
|
|
||||||
|
连接器的解释器配置文件,即`--parser`配置项的参数,它的值为一个 JSON 文件,其配置可分为`parse`和`model`两部分,模板如下所示:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"parse": {
|
||||||
|
"payload": {
|
||||||
|
"json": [
|
||||||
|
{
|
||||||
|
"name": "ts",
|
||||||
|
"alias": "ts",
|
||||||
|
"cast": "TIMESTAMP"
|
||||||
|
},
|
||||||
|
...
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"model": {
|
||||||
|
"using": "<stable-name>",
|
||||||
|
"name": "<subtable-prefix>{alias}",
|
||||||
|
"columns": [ ... ],
|
||||||
|
"tags": [ ... ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
各字段的说明如下:
|
||||||
|
- parse 部分目前仅支持 json 一种 payload, json 字段的值是一个由 JSON Object 构成的 JSON Array:
|
||||||
|
- 每个 JSON Object 包括 name, alias, cast 三个字段;
|
||||||
|
- name 字段用于指定如何从 MQTT 消息中提取字段,如果 MQTT 消息是一个简单的 JSON Object, 这里可以直接设置其字段名;如果 MQTT 消息是一个复杂的 JSON Object, 这里可以使用 JSON Path 提取字段,例如:`$.data.city`;
|
||||||
|
- alias 字段用于命名 MQTT 消息中的字段同步至 TDengine 后使用的名称;
|
||||||
|
- cast 字段用于指定 MQTT 消息中的字段同步至 TDengine 后使用的类型。
|
||||||
|
- model 部分用于设置 TDengine 超级表、子表、列和标签等信息:
|
||||||
|
- using 字段用于指定超级表名称;
|
||||||
|
- name 字段用于指定子表名称,它的值可以分为前缀和变量两部分,变量为 parse 部分设置的 alias 的值,需要使用{}, 例如:d{id};
|
||||||
|
- columns 字段用于设置 MQTT 消息中的哪些字段作为 TDengine 超级表中的列,取值为 parse 部分设置的 alias 的值;需要注意的是,这里的顺序会决定 TDengine 超级表中列的顺序,因此第一列必须为 TIMESTAMP 类型;
|
||||||
|
- tags 字段用于设置 MQTT 消息中的哪些字段作为 TDengine 超级表中的标签,取值为 parse 部分设置的 alias 的值。
|
||||||
|
|
||||||
|
#### 举例说明
|
||||||
|
|
||||||
|
在 192.168.1.10 的 1883 端口运行着一个 MQTT broker, 用户名、口令分别为admin, 123456; 现欲将其中的消息,通过运行在 192.168.1.20 的 taosadapter 同步至 TDengine 的 test 数据库中。MQTT 消息格式为:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"current": 10.77,
|
||||||
|
"voltage": 222,
|
||||||
|
"phase": 0.77,
|
||||||
|
"groupid": 7,
|
||||||
|
"location": "California.SanDiego"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
MQTT 消息同步至 TDengine 时, 如果采用 meters 作为超级表名,前缀“d”拼接id字段的值作为子表名,ts, id, current, voltage, phase作为超级表的列,groupid, location作为超级表的标签,其解释器的配置如下:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"parse": {
|
||||||
|
"payload": {
|
||||||
|
"json": [
|
||||||
|
{
|
||||||
|
"name": "ts",
|
||||||
|
"alias": "ts",
|
||||||
|
"cast": "TIMESTAMP"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "id",
|
||||||
|
"alias": "id",
|
||||||
|
"cast": "INT"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "voltage",
|
||||||
|
"alias": "voltage",
|
||||||
|
"cast": "INT"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "phase",
|
||||||
|
"alias": "phase",
|
||||||
|
"cast": "FLOAT"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "current",
|
||||||
|
"alias": "current",
|
||||||
|
"cast": "FLOAT"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "groupid",
|
||||||
|
"alias": "groupid",
|
||||||
|
"cast": "INT"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "location",
|
||||||
|
"alias": "location",
|
||||||
|
"cast": "VARCHAR(20)"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"model": {
|
||||||
|
"name": "d{id}",
|
||||||
|
"using": "meters",
|
||||||
|
"columns": [
|
||||||
|
"ts",
|
||||||
|
"id",
|
||||||
|
"current",
|
||||||
|
"voltage",
|
||||||
|
"phase"
|
||||||
|
],
|
||||||
|
"tags": [
|
||||||
|
"groupid",
|
||||||
|
"location"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
如果以上parser配置位于`/home/admin/parser.json`中,那么完整的命令如下所示:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
taosx run \
|
||||||
|
-f "mqtt://admin:123456@192.168.1.10:1883?topics=testtopic/1::2&version=3.1&clean_session=true&log_level=info&client_id=1234&keep_alive=60" \
|
||||||
|
-t "taos+ws://192.168.1.20:6041/test"
|
||||||
|
--parser "@/home/admin/parser.json"
|
||||||
|
--verbose
|
||||||
|
```
|
||||||
|
|
||||||
|
### 从 Kafka 同步数据到 TDengine
|
||||||
|
|
||||||
|
#### 命令行参数
|
||||||
|
|
||||||
|
taosx 支持从 Kafka 消费数据,写入 TDengine。命令如下所示:
|
||||||
|
```shell
|
||||||
|
taosx run -f "<Kafka-DSN>" -t "<TDengine-DSN>"
|
||||||
|
```
|
||||||
|
或
|
||||||
|
```shell
|
||||||
|
taosx run -f "<Kafka-DSN>" -t "<TDengine-DSN>" --parser "@<parser-config-file-path>"
|
||||||
|
```
|
||||||
|
其中:
|
||||||
|
- -f或--from: Kafka 的 DSN
|
||||||
|
- -t或--to :TDengine 的 DSN
|
||||||
|
- --parser :一个 JSON 格式的配置文件,或JSON格式的字符串。
|
||||||
|
|
||||||
|
#### Kafka DSN 配置
|
||||||
|
|
||||||
|
| 参数 | 说明 | 必填? | 缺省值 | 适用于 | 示例 |
|
||||||
|
|-----|---------------|----------|---------|---------|----------|
|
||||||
|
| group| 消费者的group。允许组为空字符串,在这种情况下,生成的消费者将是无组的 | 否 | "" | 源端 | |
|
||||||
|
| topics | 指定要使用的主题。指定主题的所有可用分区都将被使用,除非在指定 topic_partitions 时被覆盖。| 该参数或topic_partitions必须至少指定一个,以便将主题分配给消费者。| None | 源端 | topics=tp1,tp2 |
|
||||||
|
| topic_partitions | 显式指定要使用的主题分区。只使用已标识主题的指定分区。 | 该参数或topics必须至少指定一个,以便将主题分配给消费者。 | None | 源端 | topic_partitions=tp1:0..2,tp2:1 |
|
||||||
|
| fallback_offset | topic偏移量时可能的值:- Earliest:接收最早的可用偏移量; - Latest:接收最近的偏移量; - ByTime(i64):用于请求在某一特定时间(ms)之前的所有消息;Unix时间戳(毫秒) | 否 | Earliest | 源端 | fallback_offset=Earliest |
|
||||||
|
| offset_storage | 定义在获取或提交组偏移量时,要使用的可用存储:- Zookeeper:基于Zookeeper的存储(从kafka 0.8.1开始可用);- Kafka:基于Kafka的存储(从Kafka 0.8.2开始可用)。这是组存储其偏移量的首选方法。 | 否 | Kafka | 源端 | offset_storage=Kafka |
|
||||||
|
| timeout | 从kafka订阅数据时,如果超时后没有获取到有效数据,退出 | 否 | 500 | 源端 | timeout=never |
|
||||||
|
| use_ssl | 是否使用SSL认证 | 否 | | 源端 | |
|
||||||
|
| cert | SSL证书的文件路径 | 否 | | | 源端 | |
|
||||||
|
| cert_key | SSL证书key的文件路径 | 否 | | 源端 ||
|
||||||
|
|
||||||
|
|
||||||
|
#### 示例一
|
||||||
|
|
||||||
|
从192.168.1.92服务器的Kafka实例中消费数据,同步到192.168.1.92上的TDengine,不使用parser。
|
||||||
|
|
||||||
|
1. kafka
|
||||||
|
|
||||||
|
```shell
|
||||||
|
#!/bin/bash
|
||||||
|
KAFKA_HOME=/root/zyyang/kafka_2.13-3.1.0
|
||||||
|
$KAFKA_HOME/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic tp1 --delete
|
||||||
|
$KAFKA_HOME/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic tp2 --delete
|
||||||
|
$KAFKA_HOME/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic tp1 --partitions 5 --replication-factor 1 --create
|
||||||
|
$KAFKA_HOME/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic tp2 --partitions 1 --replication-factor 1 --create
|
||||||
|
$KAFKA_HOME/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic tp1 << EOF
|
||||||
|
{"id": 1, "message": "hello"}
|
||||||
|
{"id": 2, "message": "hello"}
|
||||||
|
{"id": 3, "message": "hello"}
|
||||||
|
{"id": 4, "message": "hello"}
|
||||||
|
{"id": 5, "message": "hello"}
|
||||||
|
EOF
|
||||||
|
$KAFKA_HOME/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic tp2 << EOF
|
||||||
|
{"id": 1, "message": "aaa"}
|
||||||
|
{"id": 2, "message": "aaa"}
|
||||||
|
{"id": 3, "message": "aaa"}
|
||||||
|
{"id": 4, "message": "aaa"}
|
||||||
|
{"id": 5, "message": "aaa"}
|
||||||
|
EOF
|
||||||
|
$KAFKA_HOME/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic tp1 --describe
|
||||||
|
$KAFKA_HOME/bin/kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic tp2 --describe
|
||||||
|
```
|
||||||
|
|
||||||
|
2. TDengine
|
||||||
|
|
||||||
|
```shell
|
||||||
|
drop database if exists kafka_to_taos;
|
||||||
|
create database if not exists kafka_to_taos precision 'ms';
|
||||||
|
use kafka_to_taos;
|
||||||
|
```
|
||||||
|
|
||||||
|
3. taosx
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run -f "kafka://192.168.1.92:9092/?topics=tp1,tp2&timeout=5000" -t "taos://192.168.1.92:6030/kafka_to_taos" --parser "{\"parse\":{\"ts\":{\"as\":\"timestamp(ms)\"},\"topic\":{\"as\":\"varchar\",\"alias\":\"t\"},\"partition\":{\"as\":\"int\",\"alias\":\"p\"},\"offset\":{\"as\":\"bigint\",\"alias\":\"o\"},\"key\":{\"as\":\"binary\",\"alias\":\"k\"},\"value\":{\"as\":\"binary\",\"alias\":\"v\"}},\"model\":[{\"name\":\"t_{t}\",\"using\":\"kafka_data\",\"tags\":[\"t\",\"p\"],\"columns\":[\"ts\",\"o\",\"k\",\"v\"]}]}"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 示例二
|
||||||
|
|
||||||
|
从192.168.1.92服务器的Kafka实例中消费数据,同步到192.168.1.92上的TDengine,使用parser解析value中的JSON数据。
|
||||||
|
|
||||||
|
1. kafka,同“示例1”
|
||||||
|
2. TDengine,同“示例1”
|
||||||
|
3. Taosx
|
||||||
|
|
||||||
|
```shell
|
||||||
|
taosx run -f "kafka://192.168.1.92:9092/?topics=tp1,tp2&timeout=5000" -t "taos://192.168.0.201:6030/kafka_to_taos" --parser "{\"parse\":{\"ts\":{\"as\":\"timestamp(ms)\"},\"topic\":{\"as\":\"varchar\",\"alias\":\"t\"},\"partition\":{\"as\":\"int\",\"alias\":\"p\"},\"offset\":{\"as\":\"bigint\",\"alias\":\"o\"},\"value\":{\"json\":[\"id::int\",\"message::binary\"]}},\"model\":[{\"name\":\"t_{t}\",\"using\":\"kafka_data\",\"tags\":[\"t\",\"p\"],\"columns\":[\"ts\",\"o\",\"id\",\"message\"]}]}"
|
||||||
|
```
|
||||||
|
|
||||||
|
## 服务模式
|
||||||
|
|
||||||
|
在服务模式下,一共需要三个组件协同完成数据迁移。taosX、Agent 以及 taosExplorer 均以服务模式运行,各种操作通过 taosExplorer 的图形界面进行。taosExplorer 组件除了数据迁移之外,还提供了使用 TDengine 的图形化界面。
|
||||||
|
|
||||||
|
### 部署 taosX
|
||||||
|
|
||||||
|
#### 配置
|
||||||
|
|
||||||
|
taosX 仅支持通过命令行参数进行配置。服务模式下,taosX 支持的命令行参数可以通过以下方式查看:
|
||||||
|
|
||||||
|
```
|
||||||
|
taosx serve --help
|
||||||
|
```
|
||||||
|
|
||||||
|
建议通过 Systemd 的方式,启动 taosX 的服务模式,其 Systemd 的配置文件位于:`/etc/systemd/system/taosx.service`. 如需修改 taosX 的启动参数,可以编辑该文件中的以下行:
|
||||||
|
|
||||||
|
```
|
||||||
|
ExecStart=/usr/bin/taosx serve -v
|
||||||
|
```
|
||||||
|
|
||||||
|
修改后,需执行以下命令重启 taosX 服务,使配置生效:
|
||||||
|
|
||||||
|
```
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart taosx
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 启动
|
||||||
|
|
||||||
|
Linux 系统上以 Systemd 的方式启动 taosX 的命令如下:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
systemctl start taosx
|
||||||
|
```
|
||||||
|
|
||||||
|
Windows 系统上,请在 "Services" 系统管理工具中找到 "taosX" 服务,然后点击 "启动这个服务"。
|
||||||
|
|
||||||
|
#### 问题排查
|
||||||
|
|
||||||
|
1. 如何修改 taosX 的日志级别?
|
||||||
|
|
||||||
|
taosX 的日志级别是通过命令行参数指定的,默认的日志级别为 Info, 具体参数如下:
|
||||||
|
- INFO: `taosx serve -v`
|
||||||
|
- DEBUG: `taosx serve -vv`
|
||||||
|
- TRACE: `taosx serve -vvv`
|
||||||
|
|
||||||
|
Systemd 方式启动时,如何修改命令行参数,请参考“配置”章节。
|
||||||
|
|
||||||
|
2. 如何查看 taosX 的日志?
|
||||||
|
|
||||||
|
以 Systemd 方式启动时,可通过 journalctl 命令查看日志。以滚动方式,实时查看最新日志的命令如下:
|
||||||
|
|
||||||
|
```
|
||||||
|
journalctl -u taosx -f
|
||||||
|
```
|
||||||
|
|
||||||
|
### 部署 Agent
|
||||||
|
|
||||||
|
#### 配置
|
||||||
|
|
||||||
|
Agent 默认的配置文件位于`/etc/taos/agent.toml`, 包含以下配置项:
|
||||||
|
- endpoint: 必填,taosX 的 GRPC endpoint
|
||||||
|
- token: 必填,在 taosExplorer 上创建 agent 时,产生的token
|
||||||
|
- debug_level: 非必填,默认为 info, 还支持 debug, trace 等级别
|
||||||
|
|
||||||
|
如下所示:
|
||||||
|
|
||||||
|
```TOML
|
||||||
|
endpoint = "grpc://<taosx-ip>:6055"
|
||||||
|
token = "<token>"
|
||||||
|
log_level = "debug"
|
||||||
|
```
|
||||||
|
|
||||||
|
日志保存时间设置
|
||||||
|
日志保存的天数可以通过环境变量 TAOSX_LOGS_KEEP_DAYS 进行设置,默认为 30 天。
|
||||||
|
|
||||||
|
```shell
|
||||||
|
export TAOSX_LOGS_KEEP_DAYS=7
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 启动
|
||||||
|
|
||||||
|
Linux 系统上 Agent 可以通过 Systemd 命令启动:
|
||||||
|
|
||||||
|
```
|
||||||
|
systemctl start taosx-agent
|
||||||
|
```
|
||||||
|
|
||||||
|
Windows 系统上通过系统管理工具 "Services" 找到 taosx-agent 服务,然后启动它。
|
||||||
|
|
||||||
|
#### 问题排查
|
||||||
|
|
||||||
|
可以通过 journalctl 查看 Agent 的日志
|
||||||
|
|
||||||
|
```
|
||||||
|
journalctl -u taosx-agent -f
|
||||||
|
```
|
||||||
|
|
||||||
|
### 部署 taosExplorer
|
||||||
|
|
||||||
|
|
||||||
|
### 数据同步功能
|
||||||
|
|
||||||
|
请参考 taosExplorer
|
|
@ -0,0 +1,128 @@
|
||||||
|
---
|
||||||
|
title: 基于可视化界面的数据接入和数据迁移
|
||||||
|
---
|
||||||
|
|
||||||
|
本节讲述使用 taos Explorer 的可视化界面进行数据迁移,使用此功能需要依赖 taosd, taosAdapter, taosX, taos-explorer 等几个服务组件。关于 taosd 和 taosAdapter 的部署请参考 [系统部署](../../deployment/deploy),[taosX](../taosX),以及 [部署 taos-explorer](../../operation/web)
|
||||||
|
|
||||||
|
## 功能入口
|
||||||
|
|
||||||
|
点击 explorer 左侧功能列表中的 "数据写入",可以配置不同类型的数据源,包括 TDengine Subscription, PI, OPC-UA, OPC-DA, InfluxDB, MQTT,Kafka, CSV 等,将它们的数据写入到当前正在被管理的 TDengine 集群中。
|
||||||
|
|
||||||
|
## TDengine 订阅
|
||||||
|
|
||||||
|
进入TDengine订阅任务配置页面:
|
||||||
|
1. 在连接协议栏中,配置连接协议,默认为原生连接,可配置为WS、WSS;
|
||||||
|
2. 在服务器栏中配置服务器的 IP 或域名;
|
||||||
|
3. 在端口栏中配置连接的端口号,默认值为6030;
|
||||||
|
4. 在主题栏中,配置可以配置订阅一个或多个数据库,或超级表或普通表,也可以是一个已创建的 Topic;
|
||||||
|
5. 在认证栏,可以配置访问 TDengine 的用户名密码,用户名默认值为 root,密码默认值为 taosdata;如果数据源为云服务实例,则可以选择令牌认证方式并配置实例 token;
|
||||||
|
6. 在订阅初始位置栏,可配置从最早数据(earliest)或最晚(latest)数据开始订阅,默认为 earliest;
|
||||||
|
7. 在超时栏配置超时时间,可配置为 never: 表示无超时时间,持续进行订阅,也可指定超时时间:5s, 1m 等,支持单位 ms(毫秒),s(秒),m(分钟),h(小时),d(天),M(月),y(年)。
|
||||||
|
8. 在目标数据库栏中,选择本地 TDengine 的库作为目标库,点击 submit,即可启动一个 TDengine 订阅任务。
|
||||||
|
|
||||||
|
## Pi
|
||||||
|
|
||||||
|
1. 在 PI 数据接入页面,设置 PI 服务器的名称、AF 数据库名称。
|
||||||
|
2. 在监测点集栏,可以配置选择 Point 模式监测点集合、Point 模式监测的 AF 模板、AF 模式监测的 AF 模板。
|
||||||
|
3. 在 PI 系统设置栏,可以配置 PI 系统名,默认为 PI 服务器名。
|
||||||
|
4. 在 Data Queue 栏,可以配置 PI 连接器运行参数:MaxWaitLen(数据最大缓冲条数),默认值为 1000 ,有效取值范围为 [1,10000];UpdateInterval(PI System 取数据频率),默认值为 10000(毫秒:ms),有效取值范围为 [10,600000];重启补偿时间(Max Backfill Range,单位:天),每次重启服务时向前补偿该天数的数据,默认为1天。
|
||||||
|
5. 在目标数据库栏,选择需要写入的 TDengine 数据库,点击 submit ,即可启动一个 PI 数据接入任务。
|
||||||
|
|
||||||
|
## OPC-UA
|
||||||
|
|
||||||
|
1. 在 OPC-UA页面,配置 OPC-server 的地址,输入格式为 127.0.0.1:6666/OPCUA/ServerPath。
|
||||||
|
2. 在认证栏,选择访问方式。可以选择匿名访问、用户名密码访问、证书访问。使用证书访问时,需配置证书文件信息、私钥文件信息、OPC-UA 安全协议和 OPC-UA 安全策略
|
||||||
|
3. 在 Data Sets 栏,配置点位信息。(可通过“选择”按钮选择正则表达式过滤点位,每次最多能过滤出10条点位);点位配置有两种方式:1.手动输入点位信息 2.上传csv文件配置点位信息
|
||||||
|
4. 在连接配置栏,配置连接超时间隔和采集超时间隔(单位:秒),默认值为10秒。
|
||||||
|
5. 在采集配置栏,配置采集间隔(单位:秒)、点位数量、采集模式。采集模式可选择observe(轮询模式)和subscribe(订阅模式),默认值为observe。
|
||||||
|
6. 在库表配置栏,配置目标 TDengine 中存储数据的超级表、子表结构信息。
|
||||||
|
7. 在其他配置栏,配置并行度、单次采集上报批次(默认值100)、上报超时时间(单位:秒,默认值10)、是否开启debug级别日志。
|
||||||
|
8. 在目标数据库栏,选择需要写入的 TDengine 数据库,点击 submit,即可启动一个 OPC-UA 数据接入任务。
|
||||||
|
|
||||||
|
## OPC-DA
|
||||||
|
|
||||||
|
1. 在 OPC-DA页面,配置 OPC-server 的地址,输入格式为 127.0.0.1<,localhost>/Matrikon.OPC.Simulation.1。
|
||||||
|
2. 在数据点栏,配置 OPC-DA 采集点信息。(可通过“选择”按钮选择正则表达式过滤点位,每次最多能过滤出10条点位)。点位配置有两种方式:1.手动输入点位信息 2.上传csv文件配置点位信息
|
||||||
|
3. 在连接栏,配置连接超时时间(单位:秒,默认值为10秒)、采集超时时间(单位:秒,默认值为10秒)。
|
||||||
|
4. 在库表配置栏,配置目标 TDengine 中存储数据的超级表、子表结构信息。
|
||||||
|
5. 在其他配置栏,配置并行度、单次采集上报批次(默认值100)、上报超时时间(单位:秒,默认值10)、是否开启debug级别日志。
|
||||||
|
6. 在目标数据库栏,选择需要写入的 TDengine 数据库,点击 submit,即可启动一个 OPC-DA 数据接入任务。
|
||||||
|
|
||||||
|
## InfluxDB
|
||||||
|
|
||||||
|
进入 InfluxDB 数据源同步任务的编辑页面后:
|
||||||
|
1. 在服务器地址输入框, 输入 InfluxDB 服务器的地址,可以输入 IP 地址或域名,此项为必填字段;
|
||||||
|
2. 在端口输入框, 输入 InfluxDB 服务器端口,默认情况下,InfluxDB 监听8086端口的 HTTP 请求和8088端口的 HTTPS 请求,此项为必填字段;
|
||||||
|
3. 在组织 ID 输入框,输入将要同步的组织 ID,此项为必填字段;
|
||||||
|
4. 在令牌 Token 输入框,输入一个至少拥有读取这个组织 ID 下的指定 Bucket 权限的 Token, 此项为必填字段;
|
||||||
|
5. 在同步设置的起始时间项下,通过点选选择一个同步数据的起始时间,起始时间使用 UTC 时间, 此项为必填字段;
|
||||||
|
6. 在同步设置的结束时间项下,当不指定结束时间时,将持续进行最新数据的同步;当指定结束时间时,将只同步到这个结束时间为止; 结束时间使用 UTC 时间,此项为可选字段;
|
||||||
|
7. 在桶 Bucket 输入框,输入一个需要同步的 Bucket,目前只支持同步一个 Bucket 至 TDengine 数据库,此项为必填字段;
|
||||||
|
8. 在目标数据库下拉列表,选择一个将要写入的 TDengine 目标数据库 (注意:目前只支持同步到精度为纳秒的 TDengine 目标数据库),此项为必填字段;
|
||||||
|
9. 填写完成以上信息后,点击提交按钮,即可直接启动从 InfluxDB 到 TDengine 的数据同步。
|
||||||
|
|
||||||
|
## MQTT
|
||||||
|
|
||||||
|
进入 MQTT 数据源同步任务的编辑页面后:
|
||||||
|
1. 在 MQTT 地址卡片,输入 MQTT 地址,必填字段,包括 IP 和 端口号,例如:192.168.1.10:1883;
|
||||||
|
2. 在认证卡片,输入 MQTT 连接器访问 MQTT 服务器时的用户名和密码,这两个字段为选填字段,如果未输入,即采用匿名认证的方式;
|
||||||
|
3. 在 SSL 证书卡片,可以选择是否打开 SSL/TLS 开关,如果打开此开关,MQTT 连接器和 MQTT 服务器之间的通信将采用 SSL/TLS 的方式进行加密;打开这个开关后,会出现 CA, 客户端证书和客户端私钥三个必填配置项,可以在这里输入证书和私钥文件的内容;
|
||||||
|
4. 在连接卡片,可以配置以下信息:
|
||||||
|
- MQTT 协议:支持3.1/3.1.1/5.0三个版本;
|
||||||
|
- Client ID: MQTT 连接器连接 MQTT 服务器时所使用的客户端 ID, 用于标识客户端的身份;
|
||||||
|
- Keep Alive: 用于配置 MQTT 连接器与 MQTT 服务器之间的Keep Alive时间,默认值为60秒;
|
||||||
|
- Clean Session: 用于配置 MQTT 连接器是否以Clean Session的方式连接至 MQTT 服务器,默认值为True;
|
||||||
|
- 订阅主题及 QoS 配置:这里用来配置监听的 MQTT 主题,以及该主题支持的最大QoS, 主题和 QoS 的配置之间用::分隔,多个主题之间用,分隔,主题的配置可以支持 MQTT 协议的通配符#和+;
|
||||||
|
5. 在其他卡片,可以配置 MQTT 连接器的日志级别,支持 error, warn, info, debug, trace 5个级别,默认值为 info;
|
||||||
|
6. MQTT Payload 解析卡片,用于配置如何解析 MQTT 消息:
|
||||||
|
- 配置表的第一行为 ts 字段,该字段为 TIMESTAMP 类型,它的值为 MQTT 连接器收到 MQTT 消息的时间;
|
||||||
|
- 配置表的第二行为 topic 字段,为该消息的主题名称,可以选择将该字段作为列或者标签同步至 TDengine;
|
||||||
|
- 配置表的第三行为 qos 字段,为该消息的 QoS 属性,可以选择将该字段作为列或者标签同步至 TDengine;
|
||||||
|
- 剩余的配置项皆为自定义字段,每个字段都需要配置:字段(来源),列(目标),列类型(目标)。字段(来源)是指该 MQTT 消息中的字段名称,当前仅支持 JSON 类型的 MQTT 消息同步,可以使用 JSON Path 语法从 MQTT 消息中提取字段,例如:$.data.id; 列(目标)是指同步至 TDengine 后的字段名称;列类型(目标)是指同步至 TDengine 后的字段类型,可以从下拉列表中选择;当且仅当以上3个配置都填写后,才能新增下一个字段;
|
||||||
|
- 如果 MQTT 消息中包含时间戳,可以选择新增一个自定义字段,将其作为同步至 TDengine 时的主键;需要注意的是,MQTT 消息中时间戳的仅支持 Unix Timestamp格式,且该字段的列类型(目标)的选择,需要与创建 TDengine 数据库时的配置一致;
|
||||||
|
- 子表命名规则:用于配置子表名称,采用“前缀+{列(目标)}”的格式,例如:d{id};
|
||||||
|
- 超级表名:用于配置同步至 TDengine 时,采用的超级表名;
|
||||||
|
7. 在目标数据库卡片,可以选择同步至 TDengine 的数据库名称,支持直接从下拉列表中选择。
|
||||||
|
8. 填写完成以上信息后,点击提交按钮,即可直接启动从 MQTT 到 TDengine 的数据同步。
|
||||||
|
|
||||||
|
## Kafka
|
||||||
|
|
||||||
|
1. 在Kafka页面,配置Kafka选项,必填字段,包括:bootstrap_server,例如192.168.1.92:9092;
|
||||||
|
2. 如果使用SSL认证,在SSL认证卡中,选择cert和cert_key的文件路径;
|
||||||
|
3. 配置其他参数,topics、topic_partitions这2个参数至少填写一个,其他参数有默认值;
|
||||||
|
4. 如果消费的Kafka数据是JSON格式,可以配置parser卡片,对数据进行解析转换;
|
||||||
|
5. 在目标数据库卡片中,选择同步到TDengine的数据库名称,支持从下拉列表中选择;
|
||||||
|
6. 填写完以上信息后,点击提交按钮,即可启动从Kafka到TDengine的数据同步。
|
||||||
|
|
||||||
|
## CSV
|
||||||
|
|
||||||
|
1. 在CSV页面,配置CSV选项,可设置忽略前N行,可输入具体的数字
|
||||||
|
2. CSV的写入配置,设置批次写入量,默认是1000
|
||||||
|
3. CSV文件解析,用于获取CSV对应的列信息:
|
||||||
|
- 上传CSV文件或者输入CSV文件的地址
|
||||||
|
- 选择是否包含 Header
|
||||||
|
- 包含Header情况下直接执行下一步,查询出对应CSV的列信息,获取CSV的配置信息
|
||||||
|
- 不包含Header情况,需要输入自定列信息,并以逗号分隔,然后下一步,获取CSV的配置信息
|
||||||
|
- CSV的配置项,每个字段都需要配置:CSV列,DB列,列类型(目标),主键(整个配置只能有一个主键,且主键必须是TIMESTAMP类型),作为列,作为Tag。CSV列是指该 CSV文件中的列或者自定义的列;DB列是对应的数据表的列
|
||||||
|
- 子表命名规则:用于配置子表名称,采用“前缀+{列(目标)}”的格式,例如:d{id};
|
||||||
|
- 超级表名:用于配置同步至 TDengine 时,采用的超级表名;
|
||||||
|
4. 在目标数据库卡片,可以选择同步至 TDengine 的数据库名称,支持直接从下拉列表中选择。
|
||||||
|
5. 填写完成以上信息后,点击提交按钮,即可直接启动从 CSV到 TDengine 的数据同步。
|
||||||
|
|
||||||
|
|
||||||
|
## 备份和恢复
|
||||||
|
|
||||||
|
您可以将当前连接的 TDengine 集群中的数据备份至一个或多个本地文件中,稍后可以通过这些文件进行数据恢复。本章节将介绍数据备份和恢复的具体步骤。
|
||||||
|
|
||||||
|
### 备份数据到本地文件
|
||||||
|
|
||||||
|
1. 进入系统管理页面,点击【备份】进入数据备份页面,点击右上角【新增备份】。
|
||||||
|
2. 在数据备份配置页面中可以配置三个参数:
|
||||||
|
- 备份周期:必填项,配置每次执行数据备份的时间间隔,可通过下拉框选择每天、每 7 天、每 30 天执行一次数据备份,配置后,会在对应的备份周期的0:00时启动一次数据备份任务;
|
||||||
|
- 数据库:必填项,配置需要备份的数据库名(数据库的 wal_retention_period 参数需大于0);
|
||||||
|
- 目录:必填项,配置将数据备份到 taosX 所在运行环境中指定的路径下,如 /root/data_backup;
|
||||||
|
3. 点击【确定】,可创建数据备份任务。
|
||||||
|
|
||||||
|
### 从本地文件恢复
|
||||||
|
|
||||||
|
1. 完成数据备份任务创建后,在页面中对应的数据备份任务右侧点击【数据恢复】,可将已经备份到指定路径下的数据恢复到当前 TDengine 中。
|
|
@ -0,0 +1,3 @@
|
||||||
|
---
|
||||||
|
title: 数据集成
|
||||||
|
---
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.1.0.3
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.1.0.3" />
|
||||||
|
|
||||||
## 3.1.0.2
|
## 3.1.0.2
|
||||||
|
|
||||||
<Release type="tdengine" version="3.1.0.2" />
|
<Release type="tdengine" version="3.1.0.2" />
|
||||||
|
|
|
@ -133,6 +133,7 @@
|
||||||
<configuration>
|
<configuration>
|
||||||
<source>8</source>
|
<source>8</source>
|
||||||
<target>8</target>
|
<target>8</target>
|
||||||
|
<encoding>UTF-8</encoding>
|
||||||
</configuration>
|
</configuration>
|
||||||
</plugin>
|
</plugin>
|
||||||
|
|
||||||
|
|
|
@ -8,4 +8,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
|
||||||
```
|
```
|
||||||
|
|
||||||
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/local/lib 来指定寻找共享库的路径。
|
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
|
||||||
|
|
|
@ -102,6 +102,11 @@ extern uint16_t tsMonitorPort;
|
||||||
extern int32_t tsMonitorMaxLogs;
|
extern int32_t tsMonitorMaxLogs;
|
||||||
extern bool tsMonitorComp;
|
extern bool tsMonitorComp;
|
||||||
|
|
||||||
|
// audit
|
||||||
|
extern bool tsEnableAudit;
|
||||||
|
extern char tsAuditFqdn[];
|
||||||
|
extern uint16_t tsAuditPort;
|
||||||
|
|
||||||
// telem
|
// telem
|
||||||
extern bool tsEnableTelem;
|
extern bool tsEnableTelem;
|
||||||
extern int32_t tsTelemInterval;
|
extern int32_t tsTelemInterval;
|
||||||
|
@ -130,6 +135,7 @@ extern bool tsKeepColumnName;
|
||||||
extern bool tsEnableQueryHb;
|
extern bool tsEnableQueryHb;
|
||||||
extern bool tsEnableScience;
|
extern bool tsEnableScience;
|
||||||
extern bool tsTtlChangeOnWrite;
|
extern bool tsTtlChangeOnWrite;
|
||||||
|
extern int32_t tsTtlFlushThreshold;
|
||||||
extern int32_t tsRedirectPeriod;
|
extern int32_t tsRedirectPeriod;
|
||||||
extern int32_t tsRedirectFactor;
|
extern int32_t tsRedirectFactor;
|
||||||
extern int32_t tsRedirectMaxPeriod;
|
extern int32_t tsRedirectMaxPeriod;
|
||||||
|
@ -161,6 +167,7 @@ extern char tsCompressor[];
|
||||||
// tfs
|
// tfs
|
||||||
extern int32_t tsDiskCfgNum;
|
extern int32_t tsDiskCfgNum;
|
||||||
extern SDiskCfg tsDiskCfg[];
|
extern SDiskCfg tsDiskCfg[];
|
||||||
|
extern int64_t tsMinDiskFreeSize;
|
||||||
|
|
||||||
// udf
|
// udf
|
||||||
extern bool tsStartUdfd;
|
extern bool tsStartUdfd;
|
||||||
|
@ -185,7 +192,9 @@ extern int32_t tsTransPullupInterval;
|
||||||
extern int32_t tsMqRebalanceInterval;
|
extern int32_t tsMqRebalanceInterval;
|
||||||
extern int32_t tsStreamCheckpointTickInterval;
|
extern int32_t tsStreamCheckpointTickInterval;
|
||||||
extern int32_t tsTtlUnit;
|
extern int32_t tsTtlUnit;
|
||||||
extern int32_t tsTtlPushInterval;
|
extern int32_t tsTtlPushIntervalSec;
|
||||||
|
extern int32_t tsTtlBatchDropNum;
|
||||||
|
extern int32_t tsTrimVDbIntervalSec;
|
||||||
extern int32_t tsGrantHBInterval;
|
extern int32_t tsGrantHBInterval;
|
||||||
extern int32_t tsUptimeInterval;
|
extern int32_t tsUptimeInterval;
|
||||||
|
|
||||||
|
|
|
@ -443,7 +443,6 @@ typedef struct SField {
|
||||||
uint8_t type;
|
uint8_t type;
|
||||||
int8_t flags;
|
int8_t flags;
|
||||||
int32_t bytes;
|
int32_t bytes;
|
||||||
char comment[TSDB_COL_COMMENT_LEN];
|
|
||||||
} SField;
|
} SField;
|
||||||
|
|
||||||
typedef struct SRetention {
|
typedef struct SRetention {
|
||||||
|
@ -522,7 +521,6 @@ struct SSchema {
|
||||||
col_id_t colId;
|
col_id_t colId;
|
||||||
int32_t bytes;
|
int32_t bytes;
|
||||||
char name[TSDB_COL_NAME_LEN];
|
char name[TSDB_COL_NAME_LEN];
|
||||||
char comment[TSDB_COL_COMMENT_LEN];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct SSchema2 {
|
struct SSchema2 {
|
||||||
|
@ -771,6 +769,8 @@ typedef struct {
|
||||||
char* pAst2;
|
char* pAst2;
|
||||||
int64_t deleteMark1;
|
int64_t deleteMark1;
|
||||||
int64_t deleteMark2;
|
int64_t deleteMark2;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SMCreateStbReq;
|
} SMCreateStbReq;
|
||||||
|
|
||||||
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
|
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
|
||||||
|
@ -791,6 +791,8 @@ typedef struct {
|
||||||
int8_t source; // 1-taosX or 0-taosClient
|
int8_t source; // 1-taosX or 0-taosClient
|
||||||
int8_t reserved[6];
|
int8_t reserved[6];
|
||||||
tb_uid_t suid;
|
tb_uid_t suid;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SMDropStbReq;
|
} SMDropStbReq;
|
||||||
|
|
||||||
int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
|
int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
|
||||||
|
@ -804,6 +806,8 @@ typedef struct {
|
||||||
int32_t ttl;
|
int32_t ttl;
|
||||||
int32_t commentLen;
|
int32_t commentLen;
|
||||||
char* comment;
|
char* comment;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SMAlterStbReq;
|
} SMAlterStbReq;
|
||||||
|
|
||||||
int32_t tSerializeSMAlterStbReq(void* buf, int32_t bufLen, SMAlterStbReq* pReq);
|
int32_t tSerializeSMAlterStbReq(void* buf, int32_t bufLen, SMAlterStbReq* pReq);
|
||||||
|
@ -873,6 +877,8 @@ int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pR
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char user[TSDB_USER_LEN];
|
char user[TSDB_USER_LEN];
|
||||||
|
int32_t sqlLen;
|
||||||
|
char *sql;
|
||||||
} SDropUserReq, SDropAcctReq;
|
} SDropUserReq, SDropAcctReq;
|
||||||
|
|
||||||
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
|
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
|
||||||
|
@ -885,6 +891,8 @@ typedef struct {
|
||||||
int8_t enable;
|
int8_t enable;
|
||||||
char user[TSDB_USER_LEN];
|
char user[TSDB_USER_LEN];
|
||||||
char pass[TSDB_USET_PASSWORD_LEN];
|
char pass[TSDB_USET_PASSWORD_LEN];
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SCreateUserReq;
|
} SCreateUserReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
|
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
|
||||||
|
@ -901,6 +909,8 @@ typedef struct {
|
||||||
char tabName[TSDB_TABLE_NAME_LEN];
|
char tabName[TSDB_TABLE_NAME_LEN];
|
||||||
char* tagCond;
|
char* tagCond;
|
||||||
int32_t tagCondLen;
|
int32_t tagCondLen;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SAlterUserReq;
|
} SAlterUserReq;
|
||||||
|
|
||||||
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||||
|
@ -1063,6 +1073,8 @@ typedef struct {
|
||||||
int16_t hashPrefix;
|
int16_t hashPrefix;
|
||||||
int16_t hashSuffix;
|
int16_t hashSuffix;
|
||||||
int32_t tsdbPageSize;
|
int32_t tsdbPageSize;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SCreateDbReq;
|
} SCreateDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
||||||
|
@ -1088,6 +1100,8 @@ typedef struct {
|
||||||
int32_t minRows;
|
int32_t minRows;
|
||||||
int32_t walRetentionPeriod;
|
int32_t walRetentionPeriod;
|
||||||
int32_t walRetentionSize;
|
int32_t walRetentionSize;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SAlterDbReq;
|
} SAlterDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
||||||
|
@ -1096,6 +1110,8 @@ int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char db[TSDB_DB_FNAME_LEN];
|
char db[TSDB_DB_FNAME_LEN];
|
||||||
int8_t ignoreNotExists;
|
int8_t ignoreNotExists;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SDropDbReq;
|
} SDropDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
|
int32_t tSerializeSDropDbReq(void* buf, int32_t bufLen, SDropDbReq* pReq);
|
||||||
|
@ -1163,6 +1179,9 @@ int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t timestampSec;
|
int32_t timestampSec;
|
||||||
|
int32_t ttlDropMaxCount;
|
||||||
|
int32_t nUids;
|
||||||
|
SArray* pTbUids;
|
||||||
} SVDropTtlTableReq;
|
} SVDropTtlTableReq;
|
||||||
|
|
||||||
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
|
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
|
||||||
|
@ -1290,6 +1309,8 @@ void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char db[TSDB_DB_FNAME_LEN];
|
char db[TSDB_DB_FNAME_LEN];
|
||||||
STimeWindow timeRange;
|
STimeWindow timeRange;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SCompactDbReq;
|
} SCompactDbReq;
|
||||||
|
|
||||||
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
|
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
|
||||||
|
@ -1419,6 +1440,7 @@ typedef struct {
|
||||||
int64_t numOfProcessedCQuery;
|
int64_t numOfProcessedCQuery;
|
||||||
int64_t numOfProcessedFetch;
|
int64_t numOfProcessedFetch;
|
||||||
int64_t numOfProcessedDrop;
|
int64_t numOfProcessedDrop;
|
||||||
|
int64_t numOfProcessedNotify;
|
||||||
int64_t numOfProcessedHb;
|
int64_t numOfProcessedHb;
|
||||||
int64_t numOfProcessedDelete;
|
int64_t numOfProcessedDelete;
|
||||||
int64_t cacheDataSize;
|
int64_t cacheDataSize;
|
||||||
|
@ -1852,6 +1874,8 @@ void tFreeSExplainRsp(SExplainRsp* pRsp);
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
|
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
|
||||||
int32_t port;
|
int32_t port;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SCreateDnodeReq;
|
} SCreateDnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
|
int32_t tSerializeSCreateDnodeReq(void* buf, int32_t bufLen, SCreateDnodeReq* pReq);
|
||||||
|
@ -1863,6 +1887,8 @@ typedef struct {
|
||||||
int32_t port;
|
int32_t port;
|
||||||
int8_t force;
|
int8_t force;
|
||||||
int8_t unsafe;
|
int8_t unsafe;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SDropDnodeReq;
|
} SDropDnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
||||||
|
@ -1878,6 +1904,8 @@ enum {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
int8_t restoreType;
|
int8_t restoreType;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SRestoreDnodeReq;
|
} SRestoreDnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
|
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
|
||||||
|
@ -1887,6 +1915,8 @@ typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
char config[TSDB_DNODE_CONFIG_LEN];
|
char config[TSDB_DNODE_CONFIG_LEN];
|
||||||
char value[TSDB_DNODE_VALUE_LEN];
|
char value[TSDB_DNODE_VALUE_LEN];
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SMCfgDnodeReq;
|
} SMCfgDnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
|
int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
|
||||||
|
@ -1902,6 +1932,8 @@ int32_t tDeserializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t dnodeId;
|
int32_t dnodeId;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char *sql;
|
||||||
} SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq,
|
} SMCreateMnodeReq, SMDropMnodeReq, SDDropMnodeReq, SMCreateQnodeReq, SMDropQnodeReq, SDCreateQnodeReq, SDDropQnodeReq,
|
||||||
SMCreateSnodeReq, SMDropSnodeReq, SDCreateSnodeReq, SDDropSnodeReq;
|
SMCreateSnodeReq, SMDropSnodeReq, SDCreateSnodeReq, SDDropSnodeReq;
|
||||||
|
|
||||||
|
@ -1942,6 +1974,8 @@ int32_t tDeserializeSKillTransReq(void* buf, int32_t bufLen, SKillTransReq* pReq
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t useless; // useless
|
int32_t useless; // useless
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SBalanceVgroupReq;
|
} SBalanceVgroupReq;
|
||||||
|
|
||||||
int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
|
int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
|
||||||
|
@ -1960,6 +1994,8 @@ typedef struct {
|
||||||
int32_t dnodeId1;
|
int32_t dnodeId1;
|
||||||
int32_t dnodeId2;
|
int32_t dnodeId2;
|
||||||
int32_t dnodeId3;
|
int32_t dnodeId3;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SRedistributeVgroupReq;
|
} SRedistributeVgroupReq;
|
||||||
|
|
||||||
int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
|
int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
|
||||||
|
@ -1967,6 +2003,8 @@ int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistrib
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t useless;
|
int32_t useless;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SBalanceVgroupLeaderReq;
|
} SBalanceVgroupLeaderReq;
|
||||||
|
|
||||||
int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
|
int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
|
||||||
|
@ -2160,8 +2198,24 @@ typedef struct {
|
||||||
|
|
||||||
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||||
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||||
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
|
||||||
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
|
||||||
|
typedef enum {
|
||||||
|
TASK_NOTIFY_FINISHED = 1,
|
||||||
|
} ETaskNotifyType;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
SMsgHead header;
|
||||||
|
uint64_t sId;
|
||||||
|
uint64_t queryId;
|
||||||
|
uint64_t taskId;
|
||||||
|
int64_t refId;
|
||||||
|
int32_t execId;
|
||||||
|
ETaskNotifyType type;
|
||||||
|
} STaskNotifyReq;
|
||||||
|
|
||||||
|
int32_t tSerializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
|
||||||
|
int32_t tDeserializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
|
||||||
|
|
||||||
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
|
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
|
||||||
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
|
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
|
||||||
|
@ -2210,6 +2264,7 @@ typedef struct {
|
||||||
int64_t deleteMark;
|
int64_t deleteMark;
|
||||||
int8_t igUpdate;
|
int8_t igUpdate;
|
||||||
int64_t lastTs;
|
int64_t lastTs;
|
||||||
|
int32_t sqlLen;
|
||||||
} SCMCreateStreamReq;
|
} SCMCreateStreamReq;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -2246,6 +2301,7 @@ typedef struct {
|
||||||
char subDbName[TSDB_DB_FNAME_LEN];
|
char subDbName[TSDB_DB_FNAME_LEN];
|
||||||
char* ast;
|
char* ast;
|
||||||
char subStbName[TSDB_TABLE_FNAME_LEN];
|
char subStbName[TSDB_TABLE_FNAME_LEN];
|
||||||
|
int32_t sqlLen;
|
||||||
} SCMCreateTopicReq;
|
} SCMCreateTopicReq;
|
||||||
|
|
||||||
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
|
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
|
||||||
|
@ -2430,6 +2486,8 @@ typedef struct {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char name[TSDB_TOPIC_FNAME_LEN];
|
char name[TSDB_TOPIC_FNAME_LEN];
|
||||||
int8_t igNotExists;
|
int8_t igNotExists;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SMDropTopicReq;
|
} SMDropTopicReq;
|
||||||
|
|
||||||
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
|
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
|
||||||
|
@ -2529,6 +2587,8 @@ typedef struct SVCreateTbReq {
|
||||||
SSchemaWrapper schemaRow;
|
SSchemaWrapper schemaRow;
|
||||||
} ntb;
|
} ntb;
|
||||||
};
|
};
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SVCreateTbReq;
|
} SVCreateTbReq;
|
||||||
|
|
||||||
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
|
int tEncodeSVCreateTbReq(SEncoder* pCoder, const SVCreateTbReq* pReq);
|
||||||
|
@ -2632,9 +2692,6 @@ typedef struct {
|
||||||
int8_t type;
|
int8_t type;
|
||||||
int8_t flags;
|
int8_t flags;
|
||||||
int32_t bytes;
|
int32_t bytes;
|
||||||
bool hasColComment;
|
|
||||||
char* colComment;
|
|
||||||
int32_t colCommentLen;
|
|
||||||
// TSDB_ALTER_TABLE_DROP_COLUMN
|
// TSDB_ALTER_TABLE_DROP_COLUMN
|
||||||
// TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
|
// TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
|
||||||
int8_t colModType;
|
int8_t colModType;
|
||||||
|
@ -3006,6 +3063,8 @@ typedef struct {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char name[TSDB_STREAM_FNAME_LEN];
|
char name[TSDB_STREAM_FNAME_LEN];
|
||||||
int8_t igNotExists;
|
int8_t igNotExists;
|
||||||
|
int32_t sqlLen;
|
||||||
|
char* sql;
|
||||||
} SMDropStreamReq;
|
} SMDropStreamReq;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
|
|
@ -65,7 +65,7 @@ enum {
|
||||||
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
|
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
|
||||||
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
|
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
|
||||||
|
|
||||||
enum {
|
enum { // WARN: new msg should be appended to segment tail
|
||||||
#endif
|
#endif
|
||||||
TD_NEW_MSG_SEG(TDMT_DND_MSG)
|
TD_NEW_MSG_SEG(TDMT_DND_MSG)
|
||||||
TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
|
||||||
|
@ -89,15 +89,15 @@ enum {
|
||||||
|
|
||||||
TD_NEW_MSG_SEG(TDMT_MND_MSG)
|
TD_NEW_MSG_SEG(TDMT_MND_MSG)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "drop-acct", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "drop-acct", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "alter-user", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "alter-user", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "create-dnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "create-dnode", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "drop-dnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "drop-dnode", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "create-mnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "create-mnode", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "alter-mnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "alter-mnode", NULL, NULL)
|
||||||
|
@ -182,6 +182,7 @@ enum {
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
|
||||||
|
|
||||||
TD_NEW_MSG_SEG(TDMT_VND_MSG)
|
TD_NEW_MSG_SEG(TDMT_VND_MSG)
|
||||||
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
|
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
|
||||||
|
@ -243,6 +244,7 @@ enum {
|
||||||
TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
|
||||||
|
TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
|
||||||
|
|
||||||
|
|
||||||
|
@ -296,7 +298,7 @@ enum {
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
|
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
|
||||||
|
|
||||||
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
|
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
|
||||||
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
|
||||||
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
|
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
|
||||||
|
|
|
@ -16,7 +16,6 @@
|
||||||
#ifndef _TD_COMMON_TOKEN_H_
|
#ifndef _TD_COMMON_TOKEN_H_
|
||||||
#define _TD_COMMON_TOKEN_H_
|
#define _TD_COMMON_TOKEN_H_
|
||||||
|
|
||||||
|
|
||||||
#define TK_OR 1
|
#define TK_OR 1
|
||||||
#define TK_AND 2
|
#define TK_AND 2
|
||||||
#define TK_UNION 3
|
#define TK_UNION 3
|
||||||
|
@ -116,7 +115,7 @@
|
||||||
#define TK_TABLE_PREFIX 97
|
#define TK_TABLE_PREFIX 97
|
||||||
#define TK_TABLE_SUFFIX 98
|
#define TK_TABLE_SUFFIX 98
|
||||||
#define TK_NK_COLON 99
|
#define TK_NK_COLON 99
|
||||||
#define TK_MAX_SPEED 100
|
#define TK_BWLIMIT 100
|
||||||
#define TK_START 101
|
#define TK_START 101
|
||||||
#define TK_TIMESTAMP 102
|
#define TK_TIMESTAMP 102
|
||||||
#define TK_END 103
|
#define TK_END 103
|
||||||
|
@ -132,25 +131,25 @@
|
||||||
#define TK_NK_EQ 113
|
#define TK_NK_EQ 113
|
||||||
#define TK_USING 114
|
#define TK_USING 114
|
||||||
#define TK_TAGS 115
|
#define TK_TAGS 115
|
||||||
#define TK_COMMENT 116
|
#define TK_BOOL 116
|
||||||
#define TK_BOOL 117
|
#define TK_TINYINT 117
|
||||||
#define TK_TINYINT 118
|
#define TK_SMALLINT 118
|
||||||
#define TK_SMALLINT 119
|
#define TK_INT 119
|
||||||
#define TK_INT 120
|
#define TK_INTEGER 120
|
||||||
#define TK_INTEGER 121
|
#define TK_BIGINT 121
|
||||||
#define TK_BIGINT 122
|
#define TK_FLOAT 122
|
||||||
#define TK_FLOAT 123
|
#define TK_DOUBLE 123
|
||||||
#define TK_DOUBLE 124
|
#define TK_BINARY 124
|
||||||
#define TK_BINARY 125
|
#define TK_NCHAR 125
|
||||||
#define TK_NCHAR 126
|
#define TK_UNSIGNED 126
|
||||||
#define TK_UNSIGNED 127
|
#define TK_JSON 127
|
||||||
#define TK_JSON 128
|
#define TK_VARCHAR 128
|
||||||
#define TK_VARCHAR 129
|
#define TK_MEDIUMBLOB 129
|
||||||
#define TK_MEDIUMBLOB 130
|
#define TK_BLOB 130
|
||||||
#define TK_BLOB 131
|
#define TK_VARBINARY 131
|
||||||
#define TK_VARBINARY 132
|
#define TK_GEOMETRY 132
|
||||||
#define TK_GEOMETRY 133
|
#define TK_DECIMAL 133
|
||||||
#define TK_DECIMAL 134
|
#define TK_COMMENT 134
|
||||||
#define TK_MAX_DELAY 135
|
#define TK_MAX_DELAY 135
|
||||||
#define TK_WATERMARK 136
|
#define TK_WATERMARK 136
|
||||||
#define TK_ROLLUP 137
|
#define TK_ROLLUP 137
|
||||||
|
@ -357,7 +356,6 @@
|
||||||
#define TK_VIEW 338
|
#define TK_VIEW 338
|
||||||
#define TK_WAL 339
|
#define TK_WAL 339
|
||||||
|
|
||||||
|
|
||||||
#define TK_NK_SPACE 600
|
#define TK_NK_SPACE 600
|
||||||
#define TK_NK_COMMENT 601
|
#define TK_NK_COMMENT 601
|
||||||
#define TK_NK_ILLEGAL 602
|
#define TK_NK_ILLEGAL 602
|
||||||
|
|
|
@ -269,8 +269,8 @@ typedef struct {
|
||||||
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
|
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
|
||||||
|
|
||||||
#define IS_VAR_DATA_TYPE(t) \
|
#define IS_VAR_DATA_TYPE(t) \
|
||||||
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
||||||
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
||||||
|
|
||||||
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
|
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
|
||||||
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
|
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
|
||||||
|
|
|
@ -0,0 +1,46 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _TD_AUDIT_H_
|
||||||
|
#define _TD_AUDIT_H_
|
||||||
|
|
||||||
|
#include "tarray.h"
|
||||||
|
#include "tdef.h"
|
||||||
|
#include "tlog.h"
|
||||||
|
#include "tmsg.h"
|
||||||
|
#include "tjson.h"
|
||||||
|
#include "tmsgcb.h"
|
||||||
|
#include "trpc.h"
|
||||||
|
#include "mnode.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
const char *server;
|
||||||
|
uint16_t port;
|
||||||
|
bool comp;
|
||||||
|
} SAuditCfg;
|
||||||
|
|
||||||
|
int32_t auditInit(const SAuditCfg *pCfg);
|
||||||
|
void auditSend(SJson *pJson);
|
||||||
|
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /*_TD_MONITOR_H_*/
|
|
@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg {
|
||||||
uint32_t maxDataBlockNumPerQuery;
|
uint32_t maxDataBlockNumPerQuery;
|
||||||
} SDataSinkMgtCfg;
|
} SDataSinkMgtCfg;
|
||||||
|
|
||||||
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI);
|
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI, void** ppSinkManager);
|
||||||
|
|
||||||
typedef struct SInputData {
|
typedef struct SInputData {
|
||||||
const struct SSDataBlock* pData;
|
const struct SSDataBlock* pData;
|
||||||
|
@ -83,7 +83,7 @@ typedef struct SOutputData {
|
||||||
* @param pHandle output
|
* @param pHandle output
|
||||||
* @return error code
|
* @return error code
|
||||||
*/
|
*/
|
||||||
int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
|
int32_t dsCreateDataSinker(void* pSinkManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
|
||||||
|
|
||||||
int32_t dsDataSinkGetCacheSize(SDataSinkStat* pStat);
|
int32_t dsDataSinkGetCacheSize(SDataSinkStat* pStat);
|
||||||
|
|
||||||
|
|
|
@ -109,8 +109,8 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 65535
|
||||||
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
|
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
|
||||||
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
|
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
|
||||||
#define IS_VAR_DATA_TYPE(t) \
|
#define IS_VAR_DATA_TYPE(t) \
|
||||||
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
||||||
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
||||||
|
|
||||||
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
|
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
|
||||||
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
|
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {
|
||||||
|
|
|
@ -23,11 +23,10 @@ extern "C" {
|
||||||
#include "query.h"
|
#include "query.h"
|
||||||
#include "querynodes.h"
|
#include "querynodes.h"
|
||||||
|
|
||||||
#define DESCRIBE_RESULT_COLS 5
|
#define DESCRIBE_RESULT_COLS 4
|
||||||
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
|
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
|
||||||
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
|
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
|
||||||
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
|
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
|
||||||
#define DESCRIBE_RESULT_COL_COMMENT_LEN (TSDB_COL_COMMENT_LEN)
|
|
||||||
|
|
||||||
#define SHOW_CREATE_DB_RESULT_COLS 2
|
#define SHOW_CREATE_DB_RESULT_COLS 2
|
||||||
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)
|
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)
|
||||||
|
@ -156,7 +155,7 @@ typedef struct SColumnDefNode {
|
||||||
ENodeType type;
|
ENodeType type;
|
||||||
char colName[TSDB_COL_NAME_LEN];
|
char colName[TSDB_COL_NAME_LEN];
|
||||||
SDataType dataType;
|
SDataType dataType;
|
||||||
char comments[TSDB_COL_COMMENT_LEN];
|
char comments[TSDB_TB_COMMENT_LEN];
|
||||||
bool sma;
|
bool sma;
|
||||||
} SColumnDefNode;
|
} SColumnDefNode;
|
||||||
|
|
||||||
|
@ -215,7 +214,6 @@ typedef struct SAlterTableStmt {
|
||||||
char newColName[TSDB_COL_NAME_LEN];
|
char newColName[TSDB_COL_NAME_LEN];
|
||||||
STableOptions* pOptions;
|
STableOptions* pOptions;
|
||||||
SDataType dataType;
|
SDataType dataType;
|
||||||
char colComment[TSDB_COL_COMMENT_LEN];
|
|
||||||
SValueNode* pVal;
|
SValueNode* pVal;
|
||||||
} SAlterTableStmt;
|
} SAlterTableStmt;
|
||||||
|
|
||||||
|
|
|
@ -114,6 +114,7 @@ int32_t smlBuildRow(STableDataCxt* pTableCxt);
|
||||||
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
|
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
|
||||||
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
|
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
|
||||||
|
|
||||||
|
void clearColValArraySml(SArray* pCols);
|
||||||
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
|
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
|
||||||
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
|
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
|
||||||
char* msgBuf, int32_t msgBufLen);
|
char* msgBuf, int32_t msgBufLen);
|
||||||
|
|
|
@ -45,6 +45,7 @@ typedef struct {
|
||||||
uint64_t cqueryProcessed;
|
uint64_t cqueryProcessed;
|
||||||
uint64_t fetchProcessed;
|
uint64_t fetchProcessed;
|
||||||
uint64_t dropProcessed;
|
uint64_t dropProcessed;
|
||||||
|
uint64_t notifyProcessed;
|
||||||
uint64_t hbProcessed;
|
uint64_t hbProcessed;
|
||||||
uint64_t deleteProcessed;
|
uint64_t deleteProcessed;
|
||||||
|
|
||||||
|
@ -90,6 +91,8 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
|
||||||
|
|
||||||
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
|
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
|
||||||
|
|
||||||
|
int32_t qWorkerProcessNotifyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
|
||||||
|
|
||||||
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
|
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
|
||||||
|
|
||||||
int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);
|
int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);
|
||||||
|
|
|
@ -189,7 +189,7 @@ int32_t streamInit();
|
||||||
void streamCleanUp();
|
void streamCleanUp();
|
||||||
|
|
||||||
SStreamQueue* streamQueueOpen(int64_t cap);
|
SStreamQueue* streamQueueOpen(int64_t cap);
|
||||||
void streamQueueClose(SStreamQueue* queue);
|
void streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
|
||||||
|
|
||||||
static FORCE_INLINE void streamQueueProcessSuccess(SStreamQueue* queue) {
|
static FORCE_INLINE void streamQueueProcessSuccess(SStreamQueue* queue) {
|
||||||
ASSERT(atomic_load_8(&queue->status) == STREAM_QUEUE__PROCESSING);
|
ASSERT(atomic_load_8(&queue->status) == STREAM_QUEUE__PROCESSING);
|
||||||
|
|
|
@ -90,6 +90,11 @@ int8_t taosStr2Int8(const char *str, char **pEnd, int32_t radix);
|
||||||
uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix);
|
uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix);
|
||||||
double taosStr2Double(const char *str, char **pEnd);
|
double taosStr2Double(const char *str, char **pEnd);
|
||||||
float taosStr2Float(const char *str, char **pEnd);
|
float taosStr2Float(const char *str, char **pEnd);
|
||||||
|
int32_t taosHex2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
|
||||||
|
int32_t taosAscii2Hex(const char *z, uint32_t n, void** data, uint32_t* size);
|
||||||
|
//int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
|
||||||
|
bool isHex(const char* z, uint32_t n);
|
||||||
|
bool isValidateHex(const char* z, uint32_t n);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -709,6 +709,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
|
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
|
||||||
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
|
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
|
||||||
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
|
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
|
||||||
|
#define TSDB_CODE_PAR_INVALID_VARBINARY TAOS_DEF_ERROR_CODE(0, 0x266A)
|
||||||
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
||||||
|
|
||||||
//planner
|
//planner
|
||||||
|
|
|
@ -102,6 +102,7 @@ int32_t compareUint64ValDesc(const void *pLeft, const void *pRight);
|
||||||
|
|
||||||
int32_t compareLenPrefixedStrDesc(const void *pLeft, const void *pRight);
|
int32_t compareLenPrefixedStrDesc(const void *pLeft, const void *pRight);
|
||||||
int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight);
|
int32_t compareLenPrefixedWStrDesc(const void *pLeft, const void *pRight);
|
||||||
|
int32_t compareLenBinaryValDesc(const void *pLeft, const void *pRight);
|
||||||
|
|
||||||
int32_t comparestrPatternMatch(const void *pLeft, const void *pRight);
|
int32_t comparestrPatternMatch(const void *pLeft, const void *pRight);
|
||||||
int32_t comparestrPatternNMatch(const void *pLeft, const void *pRight);
|
int32_t comparestrPatternNMatch(const void *pLeft, const void *pRight);
|
||||||
|
@ -202,7 +203,6 @@ int32_t compareUint64Uint32(const void *pLeft, const void *pRight);
|
||||||
|
|
||||||
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
|
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
|
||||||
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
|
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
|
||||||
int32_t doCompare(const char *a, const char *b, int32_t type, size_t size);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -200,7 +200,7 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
|
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
|
||||||
#define TSDB_DB_NAME_LEN 65
|
#define TSDB_DB_NAME_LEN 65
|
||||||
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||||
#define TSDB_PRIVILEDGE_CONDITION_LEN 48*1024
|
#define TSDB_PRIVILEDGE_CONDITION_LEN 48 * 1024
|
||||||
|
|
||||||
#define TSDB_FUNC_NAME_LEN 65
|
#define TSDB_FUNC_NAME_LEN 65
|
||||||
#define TSDB_FUNC_COMMENT_LEN 1024 * 1024
|
#define TSDB_FUNC_COMMENT_LEN 1024 * 1024
|
||||||
|
@ -230,7 +230,6 @@ typedef enum ELogicConditionType {
|
||||||
|
|
||||||
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
|
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
|
||||||
#define TSDB_TB_COMMENT_LEN 1025
|
#define TSDB_TB_COMMENT_LEN 1025
|
||||||
#define TSDB_COL_COMMENT_LEN 1025
|
|
||||||
|
|
||||||
#define TSDB_QUERY_ID_LEN 26
|
#define TSDB_QUERY_ID_LEN 26
|
||||||
#define TSDB_TRANS_OPER_LEN 16
|
#define TSDB_TRANS_OPER_LEN 16
|
||||||
|
@ -377,12 +376,12 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_MAX_STT_TRIGGER 1
|
#define TSDB_MAX_STT_TRIGGER 1
|
||||||
#define TSDB_DEFAULT_SST_TRIGGER 1
|
#define TSDB_DEFAULT_SST_TRIGGER 1
|
||||||
#endif
|
#endif
|
||||||
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
|
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
|
||||||
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
|
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
|
||||||
#define TSDB_DEFAULT_HASH_PREFIX 0
|
#define TSDB_DEFAULT_HASH_PREFIX 0
|
||||||
#define TSDB_MIN_HASH_SUFFIX (2 - TSDB_TABLE_NAME_LEN)
|
#define TSDB_MIN_HASH_SUFFIX (2 - TSDB_TABLE_NAME_LEN)
|
||||||
#define TSDB_MAX_HASH_SUFFIX (TSDB_TABLE_NAME_LEN - 2)
|
#define TSDB_MAX_HASH_SUFFIX (TSDB_TABLE_NAME_LEN - 2)
|
||||||
#define TSDB_DEFAULT_HASH_SUFFIX 0
|
#define TSDB_DEFAULT_HASH_SUFFIX 0
|
||||||
|
|
||||||
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
|
#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
|
||||||
#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600
|
#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 3600
|
||||||
|
@ -417,9 +416,10 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
|
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
|
||||||
|
|
||||||
#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519
|
#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519
|
||||||
#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
#define TSDB_MAX_GEOMETRY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
#define TSDB_MAX_GEOMETRY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
|
#define TSDB_MAX_VARBINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
|
|
||||||
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
|
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
|
||||||
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
|
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
|
||||||
|
|
|
@ -114,7 +114,7 @@ pipeline {
|
||||||
sync_source("${BRANCH_NAME}")
|
sync_source("${BRANCH_NAME}")
|
||||||
sh '''
|
sh '''
|
||||||
if [ "${verMode}" = "all" ];then
|
if [ "${verMode}" = "all" ];then
|
||||||
verMode="community enterprise"
|
verMode="enterprise"
|
||||||
fi
|
fi
|
||||||
verModeList=${verMode}
|
verModeList=${verMode}
|
||||||
for verModeSin in ${verModeList}
|
for verModeSin in ${verModeList}
|
||||||
|
@ -123,18 +123,6 @@ pipeline {
|
||||||
bash testpackage.sh -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
|
bash testpackage.sh -m ${verModeSin} -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
|
||||||
python3 checkPackageRuning.py
|
python3 checkPackageRuning.py
|
||||||
done
|
done
|
||||||
'''
|
|
||||||
|
|
||||||
sh '''
|
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
|
||||||
bash testpackage.sh -m community -f server -l true -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t tar
|
|
||||||
python3 checkPackageRuning.py
|
|
||||||
'''
|
|
||||||
|
|
||||||
sh '''
|
|
||||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
|
||||||
bash testpackage.sh -m community -f server -l false -c x64 -v ${version} -o ${baseVersion} -s ${sourcePath} -t deb
|
|
||||||
python3 checkPackageRuning.py
|
|
||||||
'''
|
'''
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -37,6 +37,7 @@ else
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||||
|
${csudo}rm -f ${inc_link_dir}/tdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
||||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||||
|
|
|
@ -98,6 +98,7 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
|
||||||
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
|
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
|
||||||
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
|
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
|
||||||
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
|
cp ${compile_dir}/../include/util/taoserror.h ${pkg_dir}${install_home_path}/include
|
||||||
|
cp ${compile_dir}/../include/util/tdef.h ${pkg_dir}${install_home_path}/include
|
||||||
cp ${compile_dir}/../include/libs/function/taosudf.h ${pkg_dir}${install_home_path}/include
|
cp ${compile_dir}/../include/libs/function/taosudf.h ${pkg_dir}${install_home_path}/include
|
||||||
[ -f ${compile_dir}/build/include/taosws.h ] && cp ${compile_dir}/build/include/taosws.h ${pkg_dir}${install_home_path}/include ||:
|
[ -f ${compile_dir}/build/include/taosws.h ] && cp ${compile_dir}/build/include/taosws.h ${pkg_dir}${install_home_path}/include ||:
|
||||||
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
|
cp -r ${top_dir}/examples/* ${pkg_dir}${install_home_path}/examples
|
||||||
|
|
|
@ -95,6 +95,7 @@ cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driv
|
||||||
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
|
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
|
||||||
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
|
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
|
||||||
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
|
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
|
||||||
|
cp %{_compiledir}/../include/util/tdef.h %{buildroot}%{homepath}/include
|
||||||
cp %{_compiledir}/../include/libs/function/taosudf.h %{buildroot}%{homepath}/include
|
cp %{_compiledir}/../include/libs/function/taosudf.h %{buildroot}%{homepath}/include
|
||||||
[ -f %{_compiledir}/build/include/taosws.h ] && cp %{_compiledir}/build/include/taosws.h %{buildroot}%{homepath}/include ||:
|
[ -f %{_compiledir}/build/include/taosws.h ] && cp %{_compiledir}/build/include/taosws.h %{buildroot}%{homepath}/include ||:
|
||||||
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
|
#cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
|
||||||
|
@ -217,6 +218,7 @@ if [ $1 -eq 0 ];then
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||||
|
${csudo}rm -f ${inc_link_dir}/tdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||||
|
|
||||||
|
|
|
@ -345,7 +345,7 @@ function install_jemalloc() {
|
||||||
}
|
}
|
||||||
|
|
||||||
function install_header() {
|
function install_header() {
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/tdef.h ${inc_link_dir}/taosudf.h || :
|
||||||
|
|
||||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
||||||
|
|
||||||
|
@ -353,6 +353,7 @@ function install_header() {
|
||||||
${csudo}ln -sf ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
${csudo}ln -sf ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||||
${csudo}ln -sf ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
${csudo}ln -sf ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||||
${csudo}ln -sf ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
${csudo}ln -sf ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||||
|
${csudo}ln -sf ${install_main_dir}/include/tdef.h ${inc_link_dir}/tdef.h
|
||||||
${csudo}ln -sf ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
|
${csudo}ln -sf ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
|
||||||
|
|
||||||
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -sf ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
|
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -sf ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
|
||||||
|
@ -935,7 +936,7 @@ function updateProduct() {
|
||||||
fi
|
fi
|
||||||
echo
|
echo
|
||||||
echo -e "\033[44;32;1m${productName2} is updated successfully!${NC}"
|
echo -e "\033[44;32;1m${productName2} is updated successfully!${NC}"
|
||||||
echo -e "\033[44;32;1mTo manage ${productName2} instance, view documentation and explorer features, you need to install ${clientName2}Explorer ${NC}"
|
echo -e "\033[44;32;1mTo manage ${productName2} instance, view documentation or explorer features, please install ${clientName2}Explorer ${NC}"
|
||||||
else
|
else
|
||||||
install_bin
|
install_bin
|
||||||
install_config
|
install_config
|
||||||
|
@ -1028,7 +1029,7 @@ function installProduct() {
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "\033[44;32;1m${productName2} is installed successfully!${NC}"
|
echo -e "\033[44;32;1m${productName2} is installed successfully!${NC}"
|
||||||
echo -e "\033[44;32;1mTo manage ${productName2} instance, view documentation and explorer features, you need to install ${clientName2}Explorer ${NC}"
|
echo -e "\033[44;32;1mTo manage ${productName2} instance, view documentation or explorer features, please install ${clientName2}Explorer ${NC}"
|
||||||
echo
|
echo
|
||||||
else # Only install client
|
else # Only install client
|
||||||
install_bin
|
install_bin
|
||||||
|
|
|
@ -180,10 +180,11 @@ function install_lib() {
|
||||||
}
|
}
|
||||||
|
|
||||||
function install_header() {
|
function install_header() {
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/tdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
||||||
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
${csudo}cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h
|
||||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h
|
||||||
|
${csudo}ln -s ${install_main_dir}/include/tdef.h ${inc_link_dir}/tdef.h
|
||||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
|
||||||
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
|
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
|
||||||
|
|
||||||
|
|
|
@ -158,7 +158,6 @@ function install_bin() {
|
||||||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||||
${csudo}rm -f ${bin_link_dir}/taosx || :
|
|
||||||
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
|
||||||
|
|
||||||
if [ "$osType" != "Darwin" ]; then
|
if [ "$osType" != "Darwin" ]; then
|
||||||
|
@ -348,9 +347,9 @@ function install_lib() {
|
||||||
|
|
||||||
function install_header() {
|
function install_header() {
|
||||||
${csudo}mkdir -p ${inc_link_dir}
|
${csudo}mkdir -p ${inc_link_dir}
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/tdef.h ${inc_link_dir}/taosudf.h || :
|
||||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
|
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
|
||||||
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
|
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/util/tdef.h ${source_dir}/include/libs/function/taosudf.h \
|
||||||
${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
${install_main_dir}/include && ${csudo}chmod 644 ${install_main_dir}/include/*
|
||||||
|
|
||||||
if [ -f ${binary_dir}/build/include/taosws.h ]; then
|
if [ -f ${binary_dir}/build/include/taosws.h ]; then
|
||||||
|
@ -361,6 +360,7 @@ function install_header() {
|
||||||
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h > /dev/null 2>&1
|
${csudo}ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h > /dev/null 2>&1
|
||||||
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h > /dev/null 2>&1
|
${csudo}ln -s ${install_main_dir}/include/taosdef.h ${inc_link_dir}/taosdef.h > /dev/null 2>&1
|
||||||
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h > /dev/null 2>&1
|
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h > /dev/null 2>&1
|
||||||
|
${csudo}ln -s ${install_main_dir}/include/tdef.h ${inc_link_dir}/tdef.h > /dev/null 2>&1
|
||||||
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h > /dev/null 2>&1
|
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h > /dev/null 2>&1
|
||||||
|
|
||||||
${csudo}chmod 644 ${install_main_dir}/include/*
|
${csudo}chmod 644 ${install_main_dir}/include/*
|
||||||
|
|
|
@ -83,7 +83,7 @@ else
|
||||||
wslib_files="${build_dir}/lib/libtaosws.dylib"
|
wslib_files="${build_dir}/lib/libtaosws.dylib"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
|
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/util/tdef.h ${code_dir}/include/libs/function/taosudf.h"
|
||||||
wsheader_files="${build_dir}/include/taosws.h"
|
wsheader_files="${build_dir}/include/taosws.h"
|
||||||
|
|
||||||
if [ "$dbName" != "taos" ]; then
|
if [ "$dbName" != "taos" ]; then
|
||||||
|
|
|
@ -115,7 +115,7 @@ else
|
||||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||||
wslib_files="${build_dir}/lib/libtaosws.so"
|
wslib_files="${build_dir}/lib/libtaosws.so"
|
||||||
fi
|
fi
|
||||||
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
|
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/util/tdef.h ${code_dir}/include/libs/function/taosudf.h"
|
||||||
|
|
||||||
wsheader_files="${build_dir}/include/taosws.h"
|
wsheader_files="${build_dir}/include/taosws.h"
|
||||||
|
|
||||||
|
|
|
@ -133,12 +133,13 @@ function kill_taosd() {
|
||||||
function install_include() {
|
function install_include() {
|
||||||
log_print "start install include from ${inc_dir} to ${inc_link_dir}"
|
log_print "start install include from ${inc_dir} to ${inc_link_dir}"
|
||||||
${csudo}mkdir -p ${inc_link_dir}
|
${csudo}mkdir -p ${inc_link_dir}
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taosdef.h ${inc_link_dir}/taoserror.h ${inc_link_dir}/tdef.h ${inc_link_dir}/taosudf.h || :
|
||||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
|
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h ||:
|
||||||
|
|
||||||
${csudo}ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h
|
${csudo}ln -s ${inc_dir}/taos.h ${inc_link_dir}/taos.h
|
||||||
${csudo}ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h
|
${csudo}ln -s ${inc_dir}/taosdef.h ${inc_link_dir}/taosdef.h
|
||||||
${csudo}ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
|
${csudo}ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
|
||||||
|
${csudo}ln -s ${inc_dir}/tdef.h ${inc_link_dir}/tdef.h
|
||||||
${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h
|
${csudo}ln -s ${inc_dir}/taosudf.h ${inc_link_dir}/taosudf.h
|
||||||
|
|
||||||
[ -f ${inc_dir}/taosws.h ] && ${csudo}ln -sf ${inc_dir}/taosws.h ${inc_link_dir}/taosws.h ||:
|
[ -f ${inc_dir}/taosws.h ] && ${csudo}ln -sf ${inc_dir}/taosws.h ${inc_link_dir}/taosws.h ||:
|
||||||
|
|
|
@ -143,6 +143,7 @@ ${csudo}rm -f ${cfg_link_dir}/*.new || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||||
|
${csudo}rm -f ${inc_link_dir}/tdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||||
|
|
|
@ -155,6 +155,7 @@ function clean_header() {
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||||
|
${csudo}rm -f ${inc_link_dir}/tdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||||
|
|
||||||
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
|
||||||
|
|
|
@ -73,6 +73,7 @@ function clean_header() {
|
||||||
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
${csudo}rm -f ${inc_link_dir}/taos.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
|
||||||
|
${csudo}rm -f ${inc_link_dir}/tdef.h || :
|
||||||
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -251,7 +251,6 @@ int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32
|
||||||
int32_t smlClearForRerun(SSmlHandle *info);
|
int32_t smlClearForRerun(SSmlHandle *info);
|
||||||
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
|
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
|
||||||
uint8_t smlGetTimestampLen(int64_t num);
|
uint8_t smlGetTimestampLen(int64_t num);
|
||||||
void clearColValArray(SArray* pCols);
|
|
||||||
void smlDestroyTableInfo(void *para);
|
void smlDestroyTableInfo(void *para);
|
||||||
|
|
||||||
void freeSSmlKv(void* data);
|
void freeSSmlKv(void* data);
|
||||||
|
|
|
@ -92,6 +92,10 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqSubscriptionIm
|
||||||
*/
|
*/
|
||||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitSync(JNIEnv *, jobject, jlong, jlong);
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitSync(JNIEnv *, jobject, jlong, jlong);
|
||||||
|
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAllSync(JNIEnv *, jobject, jlong);
|
||||||
|
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitOffsetSyncImp(JNIEnv *, jobject, jlong, jstring,
|
||||||
|
jint, jlong);
|
||||||
/*
|
/*
|
||||||
* Class: com_taosdata_jdbc_tmq_TMQConnector
|
* Class: com_taosdata_jdbc_tmq_TMQConnector
|
||||||
* Method: tmqCommitAsync
|
* Method: tmqCommitAsync
|
||||||
|
@ -102,6 +106,12 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAsync(JN
|
||||||
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAsync(JNIEnv *, jobject, jlong, jlong,
|
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAsync(JNIEnv *, jobject, jlong, jlong,
|
||||||
jobject);
|
jobject);
|
||||||
|
|
||||||
|
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAllAsync(JNIEnv *, jobject, jlong,
|
||||||
|
jobject);
|
||||||
|
|
||||||
|
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitOffsetAsync(JNIEnv *, jobject, jlong,
|
||||||
|
jstring, jint, jlong, jobject);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Class: com_taosdata_jdbc_tmq_TMQConnector
|
* Class: com_taosdata_jdbc_tmq_TMQConnector
|
||||||
* Method: tmqUnsubscribeImp
|
* Method: tmqUnsubscribeImp
|
||||||
|
@ -179,6 +189,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqSeekImp(JNIEnv
|
||||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTopicAssignmentImp(JNIEnv *, jobject, jlong,
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTopicAssignmentImp(JNIEnv *, jobject, jlong,
|
||||||
jstring, jobject);
|
jstring, jobject);
|
||||||
|
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommittedImp(JNIEnv *, jobject, jlong, jstring,
|
||||||
|
jint);
|
||||||
|
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqPositionImp(JNIEnv *, jobject, jlong, jstring, jint);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -133,7 +133,8 @@ void closeTransporter(SAppInstInfo *pAppInfo) {
|
||||||
static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
|
static bool clientRpcRfp(int32_t code, tmsg_t msgType) {
|
||||||
if (NEED_REDIRECT_ERROR(code)) {
|
if (NEED_REDIRECT_ERROR(code)) {
|
||||||
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
|
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
|
||||||
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_QUERY_HEARTBEAT || msgType == TDMT_SCH_DROP_TASK) {
|
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_QUERY_HEARTBEAT || msgType == TDMT_SCH_DROP_TASK ||
|
||||||
|
msgType == TDMT_SCH_TASK_NOTIFY) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -503,7 +503,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
|
||||||
pResInfo->userFields[i].bytes = pSchema[i].bytes;
|
pResInfo->userFields[i].bytes = pSchema[i].bytes;
|
||||||
pResInfo->userFields[i].type = pSchema[i].type;
|
pResInfo->userFields[i].type = pSchema[i].type;
|
||||||
|
|
||||||
if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_VARBINARY || pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
|
pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
|
||||||
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
|
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
|
||||||
pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
|
|
|
@ -580,6 +580,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
|
||||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
|
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
|
||||||
} break;
|
} break;
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
memcpy(tmp, row[i], length[i]); // handle the case that terminated does not exist
|
memcpy(tmp, row[i], length[i]); // handle the case that terminated does not exist
|
||||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));
|
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));
|
||||||
|
|
|
@ -388,10 +388,11 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_VARBINARY || fields[i].type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
if (ASSERT(charLen <= fields[i].bytes && charLen >= 0)) {
|
if (ASSERT(charLen <= fields[i].bytes && charLen >= 0)) {
|
||||||
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
||||||
}
|
}
|
||||||
|
|
|
@ -56,7 +56,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
|
||||||
cJSON_AddItemToObject(column, "name", cname);
|
cJSON_AddItemToObject(column, "name", cname);
|
||||||
cJSON* ctype = cJSON_CreateNumber(s->type);
|
cJSON* ctype = cJSON_CreateNumber(s->type);
|
||||||
cJSON_AddItemToObject(column, "type", ctype);
|
cJSON_AddItemToObject(column, "type", ctype);
|
||||||
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY|| s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(column, "length", cbytes);
|
cJSON_AddItemToObject(column, "length", cbytes);
|
||||||
|
@ -77,7 +77,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
|
||||||
cJSON_AddItemToObject(tag, "name", tname);
|
cJSON_AddItemToObject(tag, "name", tname);
|
||||||
cJSON* ttype = cJSON_CreateNumber(s->type);
|
cJSON* ttype = cJSON_CreateNumber(s->type);
|
||||||
cJSON_AddItemToObject(tag, "type", ttype);
|
cJSON_AddItemToObject(tag, "type", ttype);
|
||||||
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(tag, "length", cbytes);
|
cJSON_AddItemToObject(tag, "length", cbytes);
|
||||||
|
@ -130,7 +130,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
|
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -155,7 +155,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
||||||
cJSON_AddItemToObject(json, "colName", colName);
|
cJSON_AddItemToObject(json, "colName", colName);
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -292,7 +292,13 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
||||||
|
|
||||||
cJSON* tvalue = NULL;
|
cJSON* tvalue = NULL;
|
||||||
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
|
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
|
||||||
char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
|
char* buf = NULL;
|
||||||
|
if(pTagVal->type == TSDB_DATA_TYPE_VARBINARY){
|
||||||
|
buf = taosMemoryCalloc(pTagVal->nData*2 + 2 + 3, 1);
|
||||||
|
}else{
|
||||||
|
buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
|
||||||
|
}
|
||||||
|
|
||||||
if (!buf) goto end;
|
if (!buf) goto end;
|
||||||
dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
|
dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
|
||||||
tvalue = cJSON_CreateString(buf);
|
tvalue = cJSON_CreateString(buf);
|
||||||
|
@ -457,7 +463,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
|
|
||||||
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -478,7 +484,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
||||||
cJSON_AddItemToObject(json, "colName", colName);
|
cJSON_AddItemToObject(json, "colName", colName);
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -515,7 +521,11 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
||||||
}
|
}
|
||||||
buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
|
buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
|
||||||
} else {
|
} else {
|
||||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
|
if(vAlterTbReq.tagType == TSDB_DATA_TYPE_VARBINARY){
|
||||||
|
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
|
||||||
|
}else{
|
||||||
|
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
|
||||||
|
}
|
||||||
dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
|
dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -687,14 +697,14 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
||||||
pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
|
pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
|
||||||
for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
|
for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
|
||||||
SSchema* pSchema = req.schemaRow.pSchema + i;
|
SSchema* pSchema = req.schemaRow.pSchema + i;
|
||||||
SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
|
SField field = {.type = pSchema->type, .flags = pSchema->flags, .bytes = pSchema->bytes};
|
||||||
strcpy(field.name, pSchema->name);
|
strcpy(field.name, pSchema->name);
|
||||||
taosArrayPush(pReq.pColumns, &field);
|
taosArrayPush(pReq.pColumns, &field);
|
||||||
}
|
}
|
||||||
pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
|
pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
|
||||||
for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
|
for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
|
||||||
SSchema* pSchema = req.schemaTag.pSchema + i;
|
SSchema* pSchema = req.schemaTag.pSchema + i;
|
||||||
SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
|
SField field = {.type = pSchema->type, .flags = pSchema->flags, .bytes = pSchema->bytes};
|
||||||
strcpy(field.name, pSchema->name);
|
strcpy(field.name, pSchema->name);
|
||||||
taosArrayPush(pReq.pTags, &field);
|
taosArrayPush(pReq.pTags, &field);
|
||||||
}
|
}
|
||||||
|
|
|
@ -218,7 +218,16 @@ int32_t smlSetCTableName(SSmlTableInfo *oneTable) {
|
||||||
|
|
||||||
if (strlen(oneTable->childTableName) == 0) {
|
if (strlen(oneTable->childTableName) == 0) {
|
||||||
SArray *dst = taosArrayDup(oneTable->tags, NULL);
|
SArray *dst = taosArrayDup(oneTable->tags, NULL);
|
||||||
RandTableName rName = {dst, oneTable->sTableName, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName};
|
ASSERT(oneTable->sTableNameLen < TSDB_TABLE_NAME_LEN);
|
||||||
|
char superName[TSDB_TABLE_NAME_LEN] = {0};
|
||||||
|
RandTableName rName = {dst, NULL, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName};
|
||||||
|
if(tsSmlDot2Underline){
|
||||||
|
memcpy(superName, oneTable->sTableName, oneTable->sTableNameLen);
|
||||||
|
smlStrReplace(superName, oneTable->sTableNameLen);
|
||||||
|
rName.stbFullName = superName;
|
||||||
|
}else{
|
||||||
|
rName.stbFullName = oneTable->sTableName;
|
||||||
|
}
|
||||||
|
|
||||||
buildChildTableName(&rName);
|
buildChildTableName(&rName);
|
||||||
taosArrayDestroy(dst);
|
taosArrayDestroy(dst);
|
||||||
|
@ -230,6 +239,9 @@ void getTableUid(SSmlHandle *info, SSmlLineInfo *currElement, SSmlTableInfo *tin
|
||||||
char key[TSDB_TABLE_NAME_LEN * 2 + 1] = {0};
|
char key[TSDB_TABLE_NAME_LEN * 2 + 1] = {0};
|
||||||
size_t nLen = strlen(tinfo->childTableName);
|
size_t nLen = strlen(tinfo->childTableName);
|
||||||
memcpy(key, currElement->measure, currElement->measureLen);
|
memcpy(key, currElement->measure, currElement->measureLen);
|
||||||
|
if(tsSmlDot2Underline){
|
||||||
|
smlStrReplace(key, currElement->measureLen);
|
||||||
|
}
|
||||||
memcpy(key + currElement->measureLen + 1, tinfo->childTableName, nLen);
|
memcpy(key + currElement->measureLen + 1, tinfo->childTableName, nLen);
|
||||||
void *uid =
|
void *uid =
|
||||||
taosHashGet(info->tableUids, key,
|
taosHashGet(info->tableUids, key,
|
||||||
|
@ -596,7 +608,7 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
|
||||||
return TSDB_CODE_SML_INVALID_DATA;
|
return TSDB_CODE_SML_INVALID_DATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (((colField[*index].type == TSDB_DATA_TYPE_VARCHAR || colField[*index].type == TSDB_DATA_TYPE_GEOMETRY) &&
|
if (((colField[*index].type == TSDB_DATA_TYPE_VARCHAR || colField[*index].type == TSDB_DATA_TYPE_VARBINARY || colField[*index].type == TSDB_DATA_TYPE_GEOMETRY) &&
|
||||||
(colField[*index].bytes - VARSTR_HEADER_SIZE) < kv->length) ||
|
(colField[*index].bytes - VARSTR_HEADER_SIZE) < kv->length) ||
|
||||||
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
|
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
|
||||||
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
|
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
|
||||||
|
@ -627,7 +639,7 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_GEOMETRY) && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
if ((type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY || type == TSDB_DATA_TYPE_GEOMETRY) && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
||||||
result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
|
result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
|
||||||
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
|
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
|
||||||
result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
|
@ -635,7 +647,7 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
|
||||||
|
|
||||||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
result = result * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
|
result = result * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
|
||||||
} else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_GEOMETRY) {
|
} else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY || type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
result = result + VARSTR_HEADER_SIZE;
|
result = result + VARSTR_HEADER_SIZE;
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
|
@ -679,7 +691,7 @@ static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t getBytes(uint8_t type, int32_t length) {
|
static int32_t getBytes(uint8_t type, int32_t length) {
|
||||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY || type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
return smlFindNearestPowerOf2(length, type);
|
return smlFindNearestPowerOf2(length, type);
|
||||||
} else {
|
} else {
|
||||||
return tDataTypes[type].bytes;
|
return tDataTypes[type].bytes;
|
||||||
|
@ -1178,21 +1190,12 @@ void smlDestroyTableInfo(void *para) {
|
||||||
taosMemoryFree(tag);
|
taosMemoryFree(tag);
|
||||||
}
|
}
|
||||||
|
|
||||||
void clearColValArray(SArray *pCols) {
|
|
||||||
int32_t num = taosArrayGetSize(pCols);
|
|
||||||
for (int32_t i = 0; i < num; ++i) {
|
|
||||||
SColVal *pCol = taosArrayGet(pCols, i);
|
|
||||||
if (TSDB_DATA_TYPE_NCHAR == pCol->type) {
|
|
||||||
taosMemoryFreeClear(pCol->value.pData);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void freeSSmlKv(void *data) {
|
void freeSSmlKv(void *data) {
|
||||||
SSmlKv *kv = (SSmlKv *)data;
|
SSmlKv *kv = (SSmlKv *)data;
|
||||||
if (kv->keyEscaped) taosMemoryFree((void *)(kv->key));
|
if (kv->keyEscaped) taosMemoryFreeClear(kv->key);
|
||||||
if (kv->valueEscaped) taosMemoryFree((void *)(kv->value));
|
if (kv->valueEscaped) taosMemoryFreeClear(kv->value);
|
||||||
if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value));
|
if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value));
|
||||||
|
if (kv->type == TSDB_DATA_TYPE_VARBINARY) taosMemoryFreeClear(kv->value);
|
||||||
}
|
}
|
||||||
|
|
||||||
void smlDestroyInfo(SSmlHandle *info) {
|
void smlDestroyInfo(SSmlHandle *info) {
|
||||||
|
|
|
@ -569,6 +569,8 @@ static int32_t smlConvertJSONNumber(SSmlKv *pVal, char *typeStr, cJSON *value) {
|
||||||
static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
|
static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
|
||||||
if (strcasecmp(typeStr, "binary") == 0) {
|
if (strcasecmp(typeStr, "binary") == 0) {
|
||||||
pVal->type = TSDB_DATA_TYPE_BINARY;
|
pVal->type = TSDB_DATA_TYPE_BINARY;
|
||||||
|
} else if (strcasecmp(typeStr, "varbinary") == 0) {
|
||||||
|
pVal->type = TSDB_DATA_TYPE_VARBINARY;
|
||||||
} else if (strcasecmp(typeStr, "nchar") == 0) {
|
} else if (strcasecmp(typeStr, "nchar") == 0) {
|
||||||
pVal->type = TSDB_DATA_TYPE_NCHAR;
|
pVal->type = TSDB_DATA_TYPE_NCHAR;
|
||||||
} else {
|
} else {
|
||||||
|
@ -577,7 +579,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
|
||||||
}
|
}
|
||||||
pVal->length = strlen(value->valuestring);
|
pVal->length = strlen(value->valuestring);
|
||||||
|
|
||||||
if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
if ((pVal->type == TSDB_DATA_TYPE_BINARY || pVal->type == TSDB_DATA_TYPE_VARBINARY) && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
||||||
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||||
}
|
}
|
||||||
if (pVal->type == TSDB_DATA_TYPE_NCHAR &&
|
if (pVal->type == TSDB_DATA_TYPE_NCHAR &&
|
||||||
|
@ -1010,7 +1012,7 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
|
||||||
if (ret == TSDB_CODE_SUCCESS) {
|
if (ret == TSDB_CODE_SUCCESS) {
|
||||||
ret = smlBuildRow(info->currTableDataCtx);
|
ret = smlBuildRow(info->currTableDataCtx);
|
||||||
}
|
}
|
||||||
clearColValArray(info->currTableDataCtx->pValues);
|
clearColValArraySml(info->currTableDataCtx->pValues);
|
||||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -1214,7 +1216,7 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *
|
||||||
if (ret == TSDB_CODE_SUCCESS) {
|
if (ret == TSDB_CODE_SUCCESS) {
|
||||||
ret = smlBuildRow(info->currTableDataCtx);
|
ret = smlBuildRow(info->currTableDataCtx);
|
||||||
}
|
}
|
||||||
clearColValArray(info->currTableDataCtx->pValues);
|
clearColValArraySml(info->currTableDataCtx->pValues);
|
||||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -109,7 +109,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
char* tmp = taosMemoryCalloc(pVal->length, 1);
|
char* tmp = taosMemoryCalloc(pVal->length, 1);
|
||||||
memcpy(tmp, pVal->value + 2, pVal->length - 3);
|
memcpy(tmp, pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN);
|
||||||
code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length);
|
code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length);
|
||||||
taosMemoryFree(tmp);
|
taosMemoryFree(tmp);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
@ -126,6 +126,44 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
|
||||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (pVal->value[0] == 'b' || pVal->value[0] == 'B') { // varbinary
|
||||||
|
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= 3) {
|
||||||
|
pVal->type = TSDB_DATA_TYPE_VARBINARY;
|
||||||
|
if(isHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){
|
||||||
|
if(!isValidateHex(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN)){
|
||||||
|
return TSDB_CODE_PAR_INVALID_VARBINARY;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* data = NULL;
|
||||||
|
uint32_t size = 0;
|
||||||
|
if(taosHex2Ascii(pVal->value + NCHAR_ADD_LEN - 1, pVal->length - NCHAR_ADD_LEN, &data, &size) < 0){
|
||||||
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (size + VARSTR_HEADER_SIZE > TSDB_MAX_VARBINARY_LEN) {
|
||||||
|
taosMemoryFree(data);
|
||||||
|
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||||
|
}
|
||||||
|
pVal->value = data;
|
||||||
|
pVal->length = size;
|
||||||
|
}else{
|
||||||
|
pVal->length -= NCHAR_ADD_LEN;
|
||||||
|
if (pVal->length > TSDB_MAX_VARBINARY_LEN - VARSTR_HEADER_SIZE) {
|
||||||
|
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
|
||||||
|
}
|
||||||
|
void *data = taosMemoryMalloc(pVal->length);
|
||||||
|
if(data == NULL){
|
||||||
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
}
|
||||||
|
memcpy(data, pVal->value + (NCHAR_ADD_LEN - 1), pVal->length);
|
||||||
|
pVal->value = data;
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
|
}
|
||||||
|
|
||||||
if (pVal->value[0] == 't' || pVal->value[0] == 'T') {
|
if (pVal->value[0] == 't' || pVal->value[0] == 'T') {
|
||||||
if (pVal->length == 1 ||
|
if (pVal->length == 1 ||
|
||||||
(pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') &&
|
(pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') &&
|
||||||
|
@ -414,7 +452,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
|
||||||
SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type};
|
SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type};
|
||||||
if (tag->type == TSDB_DATA_TYPE_NCHAR) {
|
if (tag->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
} else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY) {
|
} else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY || tag->type == TSDB_DATA_TYPE_VARBINARY) {
|
||||||
kv.length = tag->bytes - VARSTR_HEADER_SIZE;
|
kv.length = tag->bytes - VARSTR_HEADER_SIZE;
|
||||||
}
|
}
|
||||||
taosArrayPush((*tmp)->cols, &kv);
|
taosArrayPush((*tmp)->cols, &kv);
|
||||||
|
@ -515,6 +553,10 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
|
||||||
char *tmp = (char *)taosMemoryMalloc(kv.length);
|
char *tmp = (char *)taosMemoryMalloc(kv.length);
|
||||||
memcpy(tmp, kv.value, kv.length);
|
memcpy(tmp, kv.value, kv.length);
|
||||||
PROCESS_SLASH_IN_FIELD_VALUE(tmp, kv.length);
|
PROCESS_SLASH_IN_FIELD_VALUE(tmp, kv.length);
|
||||||
|
ASSERT(kv.type != TSDB_DATA_TYPE_GEOMETRY);
|
||||||
|
if(kv.type == TSDB_DATA_TYPE_VARBINARY){
|
||||||
|
taosMemoryFree((void*)kv.value);
|
||||||
|
}
|
||||||
kv.value = tmp;
|
kv.value = tmp;
|
||||||
kv.valueEscaped = valueEscaped;
|
kv.valueEscaped = valueEscaped;
|
||||||
}
|
}
|
||||||
|
@ -691,7 +733,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
||||||
ret = smlBuildRow(info->currTableDataCtx);
|
ret = smlBuildRow(info->currTableDataCtx);
|
||||||
}
|
}
|
||||||
|
|
||||||
clearColValArray(info->currTableDataCtx->pValues);
|
clearColValArraySml(info->currTableDataCtx->pValues);
|
||||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -307,7 +307,7 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
||||||
if (ret == TSDB_CODE_SUCCESS) {
|
if (ret == TSDB_CODE_SUCCESS) {
|
||||||
ret = smlBuildRow(info->currTableDataCtx);
|
ret = smlBuildRow(info->currTableDataCtx);
|
||||||
}
|
}
|
||||||
clearColValArray(info->currTableDataCtx->pValues);
|
clearColValArraySml(info->currTableDataCtx->pValues);
|
||||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -985,6 +985,10 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
|
||||||
|
|
||||||
int32_t tmq_unsubscribe(tmq_t* tmq) {
|
int32_t tmq_unsubscribe(tmq_t* tmq) {
|
||||||
if(tmq == NULL) return TSDB_CODE_INVALID_PARA;
|
if(tmq == NULL) return TSDB_CODE_INVALID_PARA;
|
||||||
|
if (tmq->status != TMQ_CONSUMER_STATUS__READY) {
|
||||||
|
tscInfo("consumer:0x%" PRIx64 " not in ready state, unsubscribe it directly", tmq->consumerId);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
if (tmq->autoCommit) {
|
if (tmq->autoCommit) {
|
||||||
int32_t rsp = tmq_commit_sync(tmq, NULL);
|
int32_t rsp = tmq_commit_sync(tmq, NULL);
|
||||||
if (rsp != 0) {
|
if (rsp != 0) {
|
||||||
|
|
|
@ -291,6 +291,39 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitSync(JNI
|
||||||
TAOS_RES *res = (TAOS_RES *)jres;
|
TAOS_RES *res = (TAOS_RES *)jres;
|
||||||
return tmq_commit_sync(tmq, res);
|
return tmq_commit_sync(tmq, res);
|
||||||
}
|
}
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAllSync(JNIEnv *env, jobject jobj, jlong jtmq) {
|
||||||
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
if (tmq == NULL) {
|
||||||
|
jniError("jobj:%p, tmq is closed", jobj);
|
||||||
|
return TMQ_CONSUMER_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmq_commit_sync(tmq, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitOffsetSyncImp(JNIEnv *env, jobject jobj,
|
||||||
|
jlong jtmq, jstring jtopic,
|
||||||
|
jint vgId, jlong offset) {
|
||||||
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
if (tmq == NULL) {
|
||||||
|
jniDebug("jobj:%p, tmq is closed", jobj);
|
||||||
|
return TMQ_CONSUMER_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (jtopic == NULL) {
|
||||||
|
jniDebug("jobj:%p, topic is null", jobj);
|
||||||
|
return TMQ_TOPIC_NULL;
|
||||||
|
}
|
||||||
|
const char *topicName = (*env)->GetStringUTFChars(env, jtopic, NULL);
|
||||||
|
|
||||||
|
int code = tmq_commit_offset_sync(tmq, topicName, vgId, offset);
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
jniError("jobj:%p, tmq commit offset error, code:%d, msg:%s", jobj, code, tmq_err2str(code));
|
||||||
|
}
|
||||||
|
|
||||||
|
(*env)->ReleaseStringUTFChars(env, jtopic, topicName);
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
// deprecated
|
// deprecated
|
||||||
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAsync(JNIEnv *env, jobject jobj, jlong jtmq,
|
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAsync(JNIEnv *env, jobject jobj, jlong jtmq,
|
||||||
|
@ -319,6 +352,27 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAsy
|
||||||
tmq_commit_async(tmq, res, consumer_callback, offset);
|
tmq_commit_async(tmq, res, consumer_callback, offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAllAsync(JNIEnv *env, jobject jobj,
|
||||||
|
jlong jtmq, jobject offset) {
|
||||||
|
tmqGlobalMethod(env);
|
||||||
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
|
||||||
|
offset = (*env)->NewGlobalRef(env, offset);
|
||||||
|
tmq_commit_async(tmq, NULL, consumer_callback, offset);
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitOffsetAsync(JNIEnv *env, jobject jobj,
|
||||||
|
jlong jtmq, jstring jtopic,
|
||||||
|
jint vgId, jlong offset,
|
||||||
|
jobject callback) {
|
||||||
|
tmqGlobalMethod(env);
|
||||||
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
const char *topicName = (*env)->GetStringUTFChars(env, jtopic, NULL);
|
||||||
|
|
||||||
|
callback = (*env)->NewGlobalRef(env, callback);
|
||||||
|
tmq_commit_offset_async(tmq, topicName, vgId, offset, consumer_callback, callback);
|
||||||
|
}
|
||||||
|
|
||||||
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqUnsubscribeImp(JNIEnv *env, jobject jobj,
|
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqUnsubscribeImp(JNIEnv *env, jobject jobj,
|
||||||
jlong jtmq) {
|
jlong jtmq) {
|
||||||
tmq_t *tmq = (tmq_t *)jtmq;
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
@ -497,9 +551,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTopicAssign
|
||||||
int32_t res = tmq_get_topic_assignment(tmq, topicName, &pAssign, &numOfAssignment);
|
int32_t res = tmq_get_topic_assignment(tmq, topicName, &pAssign, &numOfAssignment);
|
||||||
|
|
||||||
if (res != TSDB_CODE_SUCCESS) {
|
if (res != TSDB_CODE_SUCCESS) {
|
||||||
(*env)->ReleaseStringUTFChars(env, jtopic, topicName);
|
|
||||||
jniError("jobj:%p, tmq get topic assignment error, topic:%s, code:%d, msg:%s", jobj, topicName, res,
|
jniError("jobj:%p, tmq get topic assignment error, topic:%s, code:%d, msg:%s", jobj, topicName, res,
|
||||||
tmq_err2str(res));
|
tmq_err2str(res));
|
||||||
|
(*env)->ReleaseStringUTFChars(env, jtopic, topicName);
|
||||||
tmq_free_assignment(pAssign);
|
tmq_free_assignment(pAssign);
|
||||||
return (jint)res;
|
return (jint)res;
|
||||||
}
|
}
|
||||||
|
@ -518,3 +572,55 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTopicAssign
|
||||||
tmq_free_assignment(pAssign);
|
tmq_free_assignment(pAssign);
|
||||||
return JNI_SUCCESS;
|
return JNI_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommittedImp(JNIEnv *env, jobject jobj, jlong jtmq,
|
||||||
|
jstring jtopic, jint vgId) {
|
||||||
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
if (tmq == NULL) {
|
||||||
|
jniDebug("jobj:%p, tmq is closed", jobj);
|
||||||
|
return TMQ_CONSUMER_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (jtopic == NULL) {
|
||||||
|
jniDebug("jobj:%p, topic is null", jobj);
|
||||||
|
return TMQ_TOPIC_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *topicName = (*env)->GetStringUTFChars(env, jtopic, NULL);
|
||||||
|
|
||||||
|
int64_t offset = tmq_committed(tmq, topicName, vgId);
|
||||||
|
|
||||||
|
if (offset < JNI_SUCCESS && offset != -2147467247) {
|
||||||
|
jniError("jobj:%p, tmq get committed offset error, topic:%s, vgId:%d, code:0x%" PRIx64 ", msg:%s", jobj, topicName,
|
||||||
|
vgId, offset, tmq_err2str(offset));
|
||||||
|
}
|
||||||
|
|
||||||
|
(*env)->ReleaseStringUTFChars(env, jtopic, topicName);
|
||||||
|
return (jlong)offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqPositionImp(JNIEnv *env, jobject jobj, jlong jtmq,
|
||||||
|
jstring jtopic, jint vgId) {
|
||||||
|
tmq_t *tmq = (tmq_t *)jtmq;
|
||||||
|
if (tmq == NULL) {
|
||||||
|
jniDebug("jobj:%p, tmq is closed", jobj);
|
||||||
|
return TMQ_CONSUMER_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (jtopic == NULL) {
|
||||||
|
jniDebug("jobj:%p, topic is null", jobj);
|
||||||
|
return TMQ_TOPIC_NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *topicName = (*env)->GetStringUTFChars(env, jtopic, NULL);
|
||||||
|
|
||||||
|
int64_t offset = tmq_position(tmq, topicName, vgId);
|
||||||
|
|
||||||
|
if (offset < JNI_SUCCESS) {
|
||||||
|
jniError("jobj:%p, tmq get position error, topic:%s, vgId:%d, code:0x%" PRIx64 ", msg:%s", jobj, topicName, vgId,
|
||||||
|
offset, tmq_err2str(offset));
|
||||||
|
}
|
||||||
|
|
||||||
|
(*env)->ReleaseStringUTFChars(env, jtopic, topicName);
|
||||||
|
return (jlong)offset;
|
||||||
|
}
|
|
@ -1407,8 +1407,9 @@ SSDataBlock* blockCopyOneRow(const SSDataBlock* pDataBlock, int32_t rowIdx) {
|
||||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||||
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
|
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
|
||||||
SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i);
|
SColumnInfoData* pSrc = taosArrayGet(pDataBlock->pDataBlock, i);
|
||||||
void* pData = colDataGetData(pSrc, rowIdx);
|
|
||||||
bool isNull = colDataIsNull(pSrc, pDataBlock->info.rows, rowIdx, NULL);
|
bool isNull = colDataIsNull(pSrc, pDataBlock->info.rows, rowIdx, NULL);
|
||||||
|
void* pData = NULL;
|
||||||
|
if (!isNull) pData = colDataGetData(pSrc, rowIdx);
|
||||||
colDataSetVal(pDst, 0, pData, isNull);
|
colDataSetVal(pDst, 0, pData, isNull);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1919,6 +1920,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf,
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_VARCHAR:
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY: {
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
memset(pBuf, 0, sizeof(pBuf));
|
memset(pBuf, 0, sizeof(pBuf));
|
||||||
char* pData = colDataGetVarData(pColInfoData, j);
|
char* pData = colDataGetVarData(pColInfoData, j);
|
||||||
|
@ -2018,6 +2020,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** ppReq, const SSDataBlock* pDat
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
|
case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
|
||||||
ASSERT(pColInfoData->info.type == pCol->type);
|
ASSERT(pColInfoData->info.type == pCol->type);
|
||||||
if (colDataIsNull_s(pColInfoData, j)) {
|
if (colDataIsNull_s(pColInfoData, j)) {
|
||||||
|
@ -2031,7 +2034,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** ppReq, const SSDataBlock* pDat
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_DATA_TYPE_VARBINARY:
|
|
||||||
case TSDB_DATA_TYPE_DECIMAL:
|
case TSDB_DATA_TYPE_DECIMAL:
|
||||||
case TSDB_DATA_TYPE_BLOB:
|
case TSDB_DATA_TYPE_BLOB:
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
|
|
|
@ -1146,6 +1146,7 @@ static int tTagValJsonCmprFn(const void *p1, const void *p2) {
|
||||||
|
|
||||||
static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const char *tag, int32_t ln) {
|
static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const char *tag, int32_t ln) {
|
||||||
switch (type) {
|
switch (type) {
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
case TSDB_DATA_TYPE_VARCHAR:
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
|
|
@ -95,6 +95,11 @@ uint16_t tsMonitorPort = 6043;
|
||||||
int32_t tsMonitorMaxLogs = 100;
|
int32_t tsMonitorMaxLogs = 100;
|
||||||
bool tsMonitorComp = false;
|
bool tsMonitorComp = false;
|
||||||
|
|
||||||
|
// audit
|
||||||
|
bool tsEnableAudit = false;
|
||||||
|
char tsAuditFqdn[TSDB_FQDN_LEN] = {0};
|
||||||
|
uint16_t tsAuditPort = 6043;
|
||||||
|
|
||||||
// telem
|
// telem
|
||||||
bool tsEnableTelem = true;
|
bool tsEnableTelem = true;
|
||||||
int32_t tsTelemInterval = 43200;
|
int32_t tsTelemInterval = 43200;
|
||||||
|
@ -124,7 +129,6 @@ int32_t tsQueryRspPolicy = 0;
|
||||||
int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT
|
int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT
|
||||||
bool tsEnableQueryHb = true;
|
bool tsEnableQueryHb = true;
|
||||||
bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true
|
bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true
|
||||||
bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true
|
|
||||||
int32_t tsQuerySmaOptimize = 0;
|
int32_t tsQuerySmaOptimize = 0;
|
||||||
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
|
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
|
||||||
bool tsQueryPlannerTrace = false;
|
bool tsQueryPlannerTrace = false;
|
||||||
|
@ -186,6 +190,7 @@ int32_t tsCacheLazyLoadThreshold = 500;
|
||||||
|
|
||||||
int32_t tsDiskCfgNum = 0;
|
int32_t tsDiskCfgNum = 0;
|
||||||
SDiskCfg tsDiskCfg[TFS_MAX_DISKS] = {0};
|
SDiskCfg tsDiskCfg[TFS_MAX_DISKS] = {0};
|
||||||
|
int64_t tsMinDiskFreeSize = TFS_MIN_DISK_FREE_SIZE;
|
||||||
|
|
||||||
// stream scheduler
|
// stream scheduler
|
||||||
bool tsDeployOnSnode = true;
|
bool tsDeployOnSnode = true;
|
||||||
|
@ -225,12 +230,20 @@ bool tsStartUdfd = true;
|
||||||
// wal
|
// wal
|
||||||
int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L);
|
int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L);
|
||||||
|
|
||||||
|
// ttl
|
||||||
|
bool tsTtlChangeOnWrite = false; // if true, ttl delete time changes on last write
|
||||||
|
int32_t tsTtlFlushThreshold = 100; /* maximum number of dirty items in memory.
|
||||||
|
* if -1, flush will not be triggered by write-ops
|
||||||
|
*/
|
||||||
|
int32_t tsTtlBatchDropNum = 10000; // number of tables dropped per batch
|
||||||
|
|
||||||
// internal
|
// internal
|
||||||
int32_t tsTransPullupInterval = 2;
|
int32_t tsTransPullupInterval = 2;
|
||||||
int32_t tsMqRebalanceInterval = 2;
|
int32_t tsMqRebalanceInterval = 2;
|
||||||
int32_t tsStreamCheckpointTickInterval = 1;
|
int32_t tsStreamCheckpointTickInterval = 1;
|
||||||
int32_t tsTtlUnit = 86400;
|
int32_t tsTtlUnit = 86400;
|
||||||
int32_t tsTtlPushInterval = 3600;
|
int32_t tsTtlPushIntervalSec = 10;
|
||||||
|
int32_t tsTrimVDbIntervalSec = 60 * 60; // interval of trimming db in all vgroups
|
||||||
int32_t tsGrantHBInterval = 60;
|
int32_t tsGrantHBInterval = 60;
|
||||||
int32_t tsUptimeInterval = 300; // seconds
|
int32_t tsUptimeInterval = 300; // seconds
|
||||||
char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits
|
char tsUdfdResFuncs[512] = ""; // udfd resident funcs that teardown when udfd exits
|
||||||
|
@ -239,8 +252,8 @@ bool tsDisableStream = false;
|
||||||
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
|
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
|
||||||
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
|
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
|
||||||
bool tsFilterScalarMode = false;
|
bool tsFilterScalarMode = false;
|
||||||
int32_t tsKeepTimeOffset = 0; // latency of data migration
|
int32_t tsKeepTimeOffset = 0; // latency of data migration
|
||||||
int tsResolveFQDNRetryTime = 100; //seconds
|
int tsResolveFQDNRetryTime = 100; // seconds
|
||||||
|
|
||||||
char tsS3Endpoint[TSDB_FQDN_LEN] = "<endpoint>";
|
char tsS3Endpoint[TSDB_FQDN_LEN] = "<endpoint>";
|
||||||
char tsS3AccessKey[TSDB_FQDN_LEN] = "<accesskey>";
|
char tsS3AccessKey[TSDB_FQDN_LEN] = "<accesskey>";
|
||||||
|
@ -305,9 +318,7 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct SConfig *taosGetCfg() {
|
struct SConfig *taosGetCfg() { return tsCfg; }
|
||||||
return tsCfg;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
|
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
|
||||||
char *apolloUrl) {
|
char *apolloUrl) {
|
||||||
|
@ -594,6 +605,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
if (cfgAddInt32(pCfg, "monitorMaxLogs", tsMonitorMaxLogs, 1, 1000000, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "monitorMaxLogs", tsMonitorMaxLogs, 1, 1000000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddBool(pCfg, "monitorComp", tsMonitorComp, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddBool(pCfg, "monitorComp", tsMonitorComp, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
|
||||||
|
if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
if (cfgAddString(pCfg, "auditFqdn", tsAuditFqdn, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
if (cfgAddInt32(pCfg, "auditPort", tsAuditPort, 1, 65056, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
|
||||||
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH) != 0) return -1;
|
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH) != 0) return -1;
|
||||||
if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH) != 0) return -1;
|
if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, CFG_SCOPE_BOTH) != 0) return -1;
|
if (cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, CFG_SCOPE_BOTH) != 0) return -1;
|
||||||
|
@ -605,8 +620,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushIntervalSec, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
if (cfgAddInt32(pCfg, "ttlBatchDropNum", tsTtlBatchDropNum, 0, INT32_MAX, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddBool(pCfg, "ttlChangeOnWrite", tsTtlChangeOnWrite, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddBool(pCfg, "ttlChangeOnWrite", tsTtlChangeOnWrite, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
if (cfgAddInt32(pCfg, "ttlFlushThreshold", tsTtlFlushThreshold, -1, 1000000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
if (cfgAddInt32(pCfg, "trimVDbIntervalSec", tsTrimVDbIntervalSec, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
|
||||||
|
@ -635,6 +653,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
||||||
if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1;
|
if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1;
|
||||||
|
|
||||||
|
// min free disk space used to check if the disk is full [50MB, 1GB]
|
||||||
|
if (cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, 1024 * 1024 * 1024,
|
||||||
|
CFG_SCOPE_SERVER) != 0)
|
||||||
|
return -1;
|
||||||
|
|
||||||
GRANT_CFG_ADD;
|
GRANT_CFG_ADD;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -987,9 +1010,14 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsMonitorComp = cfgGetItem(pCfg, "monitorComp")->bval;
|
tsMonitorComp = cfgGetItem(pCfg, "monitorComp")->bval;
|
||||||
tsQueryRspPolicy = cfgGetItem(pCfg, "queryRspPolicy")->i32;
|
tsQueryRspPolicy = cfgGetItem(pCfg, "queryRspPolicy")->i32;
|
||||||
|
|
||||||
|
tsEnableAudit = cfgGetItem(pCfg, "audit")->bval;
|
||||||
|
tstrncpy(tsAuditFqdn, cfgGetItem(pCfg, "auditFqdn")->str, TSDB_FQDN_LEN);
|
||||||
|
tsAuditPort = (uint16_t)cfgGetItem(pCfg, "auditPort")->i32;
|
||||||
|
|
||||||
tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
|
tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
|
||||||
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
|
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
|
||||||
tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval;
|
tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval;
|
||||||
|
tsTtlFlushThreshold = cfgGetItem(pCfg, "ttlFlushThreshold")->i32;
|
||||||
tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32;
|
tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32;
|
||||||
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
|
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
|
||||||
tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;
|
tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;
|
||||||
|
@ -999,7 +1027,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
|
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
|
||||||
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
|
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
|
||||||
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
|
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
|
||||||
tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32;
|
tsTtlPushIntervalSec = cfgGetItem(pCfg, "ttlPushInterval")->i32;
|
||||||
|
tsTtlBatchDropNum = cfgGetItem(pCfg, "ttlBatchDropNum")->i32;
|
||||||
|
tsTrimVDbIntervalSec = cfgGetItem(pCfg, "trimVDbIntervalSec")->i32;
|
||||||
tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32;
|
tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32;
|
||||||
tsQueryRsmaTolerance = cfgGetItem(pCfg, "queryRsmaTolerance")->i32;
|
tsQueryRsmaTolerance = cfgGetItem(pCfg, "queryRsmaTolerance")->i32;
|
||||||
|
|
||||||
|
@ -1034,6 +1064,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
||||||
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
|
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
|
||||||
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
|
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
|
||||||
tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32;
|
tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32;
|
||||||
|
tsMinDiskFreeSize = cfgGetItem(pCfg, "minDiskFreeSize")->i64;
|
||||||
|
|
||||||
GRANT_CFG_GET;
|
GRANT_CFG_GET;
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1400,13 +1431,19 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
|
||||||
} else if (strcasecmp("ttlUnit", name) == 0) {
|
} else if (strcasecmp("ttlUnit", name) == 0) {
|
||||||
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
|
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
|
||||||
} else if (strcasecmp("ttlPushInterval", name) == 0) {
|
} else if (strcasecmp("ttlPushInterval", name) == 0) {
|
||||||
tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32;
|
tsTtlPushIntervalSec = cfgGetItem(pCfg, "ttlPushInterval")->i32;
|
||||||
|
} else if (strcasecmp("ttlBatchDropNum", name) == 0) {
|
||||||
|
tsTtlBatchDropNum = cfgGetItem(pCfg, "ttlBatchDropNum")->i32;
|
||||||
|
} else if (strcasecmp("trimVDbIntervalSec", name) == 0) {
|
||||||
|
tsTrimVDbIntervalSec = cfgGetItem(pCfg, "trimVDbIntervalSec")->i32;
|
||||||
} else if (strcasecmp("tmrDebugFlag", name) == 0) {
|
} else if (strcasecmp("tmrDebugFlag", name) == 0) {
|
||||||
tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32;
|
tmrDebugFlag = cfgGetItem(pCfg, "tmrDebugFlag")->i32;
|
||||||
} else if (strcasecmp("tsdbDebugFlag", name) == 0) {
|
} else if (strcasecmp("tsdbDebugFlag", name) == 0) {
|
||||||
tsdbDebugFlag = cfgGetItem(pCfg, "tsdbDebugFlag")->i32;
|
tsdbDebugFlag = cfgGetItem(pCfg, "tsdbDebugFlag")->i32;
|
||||||
} else if (strcasecmp("tqDebugFlag", name) == 0) {
|
} else if (strcasecmp("tqDebugFlag", name) == 0) {
|
||||||
tqDebugFlag = cfgGetItem(pCfg, "tqDebugFlag")->i32;
|
tqDebugFlag = cfgGetItem(pCfg, "tqDebugFlag")->i32;
|
||||||
|
} else if (strcasecmp("ttlFlushThreshold", name) == 0) {
|
||||||
|
tsTtlFlushThreshold = cfgGetItem(pCfg, "ttlFlushThreshold")->i32;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1608,6 +1645,27 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (strcasecmp(option, "ttlPushInterval") == 0) {
|
||||||
|
int32_t newTtlPushInterval = atoi(value);
|
||||||
|
uInfo("ttlPushInterval set from %d to %d", tsTtlPushIntervalSec, newTtlPushInterval);
|
||||||
|
tsTtlPushIntervalSec = newTtlPushInterval;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (strcasecmp(option, "ttlBatchDropNum") == 0) {
|
||||||
|
int32_t newTtlBatchDropNum = atoi(value);
|
||||||
|
uInfo("ttlBatchDropNum set from %d to %d", tsTtlBatchDropNum, newTtlBatchDropNum);
|
||||||
|
tsTtlBatchDropNum = newTtlBatchDropNum;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (strcasecmp(option, "supportVnodes") == 0) {
|
||||||
|
int32_t newSupportVnodes = atoi(value);
|
||||||
|
uInfo("supportVnodes set from %d to %d", tsNumOfSupportVnodes, newSupportVnodes);
|
||||||
|
tsNumOfSupportVnodes = newSupportVnodes;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
const char *options[] = {
|
const char *options[] = {
|
||||||
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
|
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
|
||||||
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
|
"fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
|
||||||
|
|
|
@ -534,7 +534,6 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
|
||||||
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
|
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pField->comment) < 0) return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < pReq->numOfTags; ++i) {
|
for (int32_t i = 0; i < pReq->numOfTags; ++i) {
|
||||||
|
@ -543,7 +542,6 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
|
||||||
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
|
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
||||||
if (tEncodeCStr(&encoder, pField->comment) < 0) return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < pReq->numOfFuncs; ++i) {
|
for (int32_t i = 0; i < pReq->numOfFuncs; ++i) {
|
||||||
|
@ -610,7 +608,6 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
|
||||||
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
|
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
|
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, field.comment) < 0) return -1;
|
|
||||||
if (taosArrayPush(pReq->pColumns, &field) == NULL) {
|
if (taosArrayPush(pReq->pColumns, &field) == NULL) {
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -623,7 +620,6 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
|
||||||
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
|
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
|
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
|
||||||
if (tDecodeCStrTo(&decoder, field.comment) < 0) return -1;
|
|
||||||
if (taosArrayPush(pReq->pTags, &field) == NULL) {
|
if (taosArrayPush(pReq->pTags, &field) == NULL) {
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -1096,6 +1092,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedCQuery) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedCQuery) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedFetch) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedFetch) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDrop) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDrop) < 0) return -1;
|
||||||
|
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedNotify) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedHb) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedHb) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDelete) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->qload.numOfProcessedDelete) < 0) return -1;
|
||||||
if (tEncodeI64(&encoder, pReq->qload.cacheDataSize) < 0) return -1;
|
if (tEncodeI64(&encoder, pReq->qload.cacheDataSize) < 0) return -1;
|
||||||
|
@ -1193,6 +1190,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
|
||||||
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedCQuery) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedCQuery) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedFetch) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedFetch) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDrop) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDrop) < 0) return -1;
|
||||||
|
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedNotify) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedHb) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedHb) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDelete) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->qload.numOfProcessedDelete) < 0) return -1;
|
||||||
if (tDecodeI64(&decoder, &pReq->qload.cacheDataSize) < 0) return -1;
|
if (tDecodeI64(&decoder, &pReq->qload.cacheDataSize) < 0) return -1;
|
||||||
|
@ -2327,7 +2325,7 @@ int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp)
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
|
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
|
||||||
pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
|
pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
|
||||||
if (pRsp->pSchemas == NULL) return -1;
|
if (pRsp->pSchemas == NULL) return -1;
|
||||||
|
|
||||||
for (int32_t i = 0; i < totalCols; ++i) {
|
for (int32_t i = 0; i < totalCols; ++i) {
|
||||||
|
@ -3179,6 +3177,12 @@ int32_t tSerializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableReq
|
||||||
|
|
||||||
if (tStartEncode(&encoder) < 0) return -1;
|
if (tStartEncode(&encoder) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pReq->timestampSec) < 0) return -1;
|
if (tEncodeI32(&encoder, pReq->timestampSec) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->ttlDropMaxCount) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->nUids) < 0) return -1;
|
||||||
|
for (int32_t i = 0; i < pReq->nUids; ++i) {
|
||||||
|
tb_uid_t *pTbUid = taosArrayGet(pReq->pTbUids, i);
|
||||||
|
if (tEncodeI64(&encoder, *pTbUid) < 0) return -1;
|
||||||
|
}
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -3192,6 +3196,30 @@ int32_t tDeserializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableR
|
||||||
|
|
||||||
if (tStartDecode(&decoder) < 0) return -1;
|
if (tStartDecode(&decoder) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pReq->timestampSec) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->timestampSec) < 0) return -1;
|
||||||
|
pReq->ttlDropMaxCount = INT32_MAX;
|
||||||
|
pReq->nUids = 0;
|
||||||
|
pReq->pTbUids = NULL;
|
||||||
|
if (!tDecodeIsEnd(&decoder)) {
|
||||||
|
if (tDecodeI32(&decoder, &pReq->ttlDropMaxCount) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pReq->nUids) < 0) return -1;
|
||||||
|
|
||||||
|
if (pReq->nUids > 0) {
|
||||||
|
pReq->pTbUids = taosArrayInit(pReq->nUids, sizeof(tb_uid_t));
|
||||||
|
if (pReq->pTbUids == NULL) {
|
||||||
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tb_uid_t tbUid = 0;
|
||||||
|
for (int32_t i = 0; i < pReq->nUids; ++i) {
|
||||||
|
if (tDecodeI64(&decoder, &tbUid) < 0) return -1;
|
||||||
|
if (taosArrayPush(pReq->pTbUids, &tbUid) == NULL) {
|
||||||
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -3710,7 +3738,7 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) {
|
||||||
|
|
||||||
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
|
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
|
||||||
if (totalCols > 0) {
|
if (totalCols > 0) {
|
||||||
pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
|
pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
|
||||||
if (pRsp->pSchemas == NULL) return -1;
|
if (pRsp->pSchemas == NULL) return -1;
|
||||||
|
|
||||||
for (int32_t i = 0; i < totalCols; ++i) {
|
for (int32_t i = 0; i < totalCols; ++i) {
|
||||||
|
@ -5848,6 +5876,64 @@ int32_t tDeserializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t tSerializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pReq) {
|
||||||
|
int32_t headLen = sizeof(SMsgHead);
|
||||||
|
if (buf != NULL) {
|
||||||
|
buf = (char *)buf + headLen;
|
||||||
|
bufLen -= headLen;
|
||||||
|
}
|
||||||
|
|
||||||
|
SEncoder encoder = {0};
|
||||||
|
tEncoderInit(&encoder, buf, bufLen);
|
||||||
|
if (tStartEncode(&encoder) < 0) return -1;
|
||||||
|
|
||||||
|
if (tEncodeU64(&encoder, pReq->sId) < 0) return -1;
|
||||||
|
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
|
||||||
|
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
|
||||||
|
if (tEncodeI64(&encoder, pReq->refId) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->execId) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pReq->type) < 0) return -1;
|
||||||
|
|
||||||
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
|
int32_t tlen = encoder.pos;
|
||||||
|
tEncoderClear(&encoder);
|
||||||
|
|
||||||
|
if (buf != NULL) {
|
||||||
|
SMsgHead *pHead = (SMsgHead *)((char *)buf - headLen);
|
||||||
|
pHead->vgId = htonl(pReq->header.vgId);
|
||||||
|
pHead->contLen = htonl(tlen + headLen);
|
||||||
|
}
|
||||||
|
|
||||||
|
return tlen + headLen;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t tDeserializeSTaskNotifyReq(void *buf, int32_t bufLen, STaskNotifyReq *pReq) {
|
||||||
|
int32_t headLen = sizeof(SMsgHead);
|
||||||
|
|
||||||
|
SMsgHead *pHead = buf;
|
||||||
|
pHead->vgId = pReq->header.vgId;
|
||||||
|
pHead->contLen = pReq->header.contLen;
|
||||||
|
|
||||||
|
SDecoder decoder = {0};
|
||||||
|
tDecoderInit(&decoder, (char *)buf + headLen, bufLen - headLen);
|
||||||
|
|
||||||
|
if (tStartDecode(&decoder) < 0) return -1;
|
||||||
|
|
||||||
|
if (tDecodeU64(&decoder, &pReq->sId) < 0) return -1;
|
||||||
|
if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1;
|
||||||
|
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
|
||||||
|
if (tDecodeI64(&decoder, &pReq->refId) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pReq->execId) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, (int32_t*)&pReq->type) < 0) return -1;
|
||||||
|
|
||||||
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
|
tDecoderClear(&decoder);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
|
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
|
||||||
SEncoder encoder = {0};
|
SEncoder encoder = {0};
|
||||||
tEncoderInit(&encoder, buf, bufLen);
|
tEncoderInit(&encoder, buf, bufLen);
|
||||||
|
|
|
@ -356,7 +356,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
varDataLen += sizeof(VarDataLenT);
|
varDataLen += sizeof(VarDataLenT);
|
||||||
if (pTColumn->type == TSDB_DATA_TYPE_VARCHAR || pTColumn->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (pTColumn->type == TSDB_DATA_TYPE_VARCHAR || pTColumn->type == TSDB_DATA_TYPE_VARBINARY || pTColumn->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
varDataLen += CHAR_BYTES;
|
varDataLen += CHAR_BYTES;
|
||||||
if (maxVarDataLen < CHAR_BYTES + sizeof(VarDataLenT)) {
|
if (maxVarDataLen < CHAR_BYTES + sizeof(VarDataLenT)) {
|
||||||
maxVarDataLen = CHAR_BYTES + sizeof(VarDataLenT);
|
maxVarDataLen = CHAR_BYTES + sizeof(VarDataLenT);
|
||||||
|
|
|
@ -296,7 +296,8 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
|
||||||
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nType, sizeof(pBlock->tag.nType));
|
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nType, sizeof(pBlock->tag.nType));
|
||||||
|
|
||||||
int32_t trueLen = pBlock->tag.nLen;
|
int32_t trueLen = pBlock->tag.nLen;
|
||||||
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || pBlock->tag.nType == TSDB_DATA_TYPE_GEOMETRY) {
|
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
|
pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || pBlock->tag.nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nLen, sizeof(pBlock->tag.nLen));
|
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nLen, sizeof(pBlock->tag.nLen));
|
||||||
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, pBlock->tag.pz, (size_t)pBlock->tag.nLen);
|
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, pBlock->tag.pz, (size_t)pBlock->tag.nLen);
|
||||||
} else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) {
|
} else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) {
|
||||||
|
@ -378,7 +379,8 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) {
|
||||||
|
|
||||||
// NOTE: mix types tags are not supported
|
// NOTE: mix types tags are not supported
|
||||||
size_t sz = 0;
|
size_t sz = 0;
|
||||||
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || pBlock->tag.nType == TSDB_DATA_TYPE_GEOMETRY) {
|
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
|
pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || pBlock->tag.nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
char* tp = taosMemoryRealloc(pBlock->tag.pz, pBlock->tag.nLen + 1);
|
char* tp = taosMemoryRealloc(pBlock->tag.pz, pBlock->tag.nLen + 1);
|
||||||
ASSERT(tp != NULL);
|
ASSERT(tp != NULL);
|
||||||
|
|
||||||
|
|
|
@ -61,7 +61,7 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
|
||||||
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
|
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
|
||||||
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
|
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
|
||||||
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
|
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
|
||||||
{TSDB_DATA_TYPE_VARBINARY, 9, 1, "VARBINARY", 0, 0, NULL, NULL}, // placeholder, not implemented
|
{TSDB_DATA_TYPE_VARBINARY, 9, 1, "VARBINARY", 0, 0, tsCompressString, tsDecompressString}, // placeholder, not implemented
|
||||||
{TSDB_DATA_TYPE_DECIMAL, 7, 1, "DECIMAL", 0, 0, NULL, NULL}, // placeholder, not implemented
|
{TSDB_DATA_TYPE_DECIMAL, 7, 1, "DECIMAL", 0, 0, NULL, NULL}, // placeholder, not implemented
|
||||||
{TSDB_DATA_TYPE_BLOB, 4, 1, "BLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
|
{TSDB_DATA_TYPE_BLOB, 4, 1, "BLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
|
||||||
{TSDB_DATA_TYPE_MEDIUMBLOB, 10, 1, "MEDIUMBLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
|
{TSDB_DATA_TYPE_MEDIUMBLOB, 10, 1, "MEDIUMBLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
|
||||||
|
@ -135,6 +135,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
|
||||||
*((int64_t *)val) = GET_INT64_VAL(src);
|
*((int64_t *)val) = GET_INT64_VAL(src);
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY:
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
varDataCopy(val, src);
|
varDataCopy(val, src);
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -122,6 +122,7 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY: { // todo refactor, extract a method
|
case TSDB_DATA_TYPE_GEOMETRY: { // todo refactor, extract a method
|
||||||
pVar->pz = taosMemoryCalloc(len + 1, sizeof(char));
|
pVar->pz = taosMemoryCalloc(len + 1, sizeof(char));
|
||||||
memcpy(pVar->pz, pz, len);
|
memcpy(pVar->pz, pz, len);
|
||||||
|
@ -141,7 +142,8 @@ void taosVariantDestroy(SVariant *pVar) {
|
||||||
if (pVar == NULL) return;
|
if (pVar == NULL) return;
|
||||||
|
|
||||||
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
|
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
|
||||||
pVar->nType == TSDB_DATA_TYPE_JSON || pVar->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
pVar->nType == TSDB_DATA_TYPE_JSON || pVar->nType == TSDB_DATA_TYPE_GEOMETRY ||
|
||||||
|
pVar->nType == TSDB_DATA_TYPE_VARBINARY) {
|
||||||
taosMemoryFreeClear(pVar->pz);
|
taosMemoryFreeClear(pVar->pz);
|
||||||
pVar->nLen = 0;
|
pVar->nLen = 0;
|
||||||
}
|
}
|
||||||
|
@ -152,8 +154,9 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
|
||||||
if (pSrc == NULL || pDst == NULL) return;
|
if (pSrc == NULL || pDst == NULL) return;
|
||||||
|
|
||||||
pDst->nType = pSrc->nType;
|
pDst->nType = pSrc->nType;
|
||||||
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
|
if (pSrc->nType == TSDB_DATA_TYPE_BINARY ||pSrc->nType == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
pSrc->nType == TSDB_DATA_TYPE_JSON || pSrc->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
pSrc->nType == TSDB_DATA_TYPE_NCHAR || pSrc->nType == TSDB_DATA_TYPE_JSON ||
|
||||||
|
pSrc->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
|
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
|
||||||
char *p = taosMemoryRealloc(pDst->pz, len);
|
char *p = taosMemoryRealloc(pDst->pz, len);
|
||||||
ASSERT(p);
|
ASSERT(p);
|
||||||
|
@ -185,7 +188,8 @@ int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_NCHAR || p1->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
|
p1->nType == TSDB_DATA_TYPE_NCHAR || p1->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
if (p1->nLen == p2->nLen) {
|
if (p1->nLen == p2->nLen) {
|
||||||
return memcmp(p1->pz, p2->pz, p1->nLen);
|
return memcmp(p1->pz, p2->pz, p1->nLen);
|
||||||
} else {
|
} else {
|
||||||
|
@ -237,6 +241,7 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
|
||||||
case TSDB_DATA_TYPE_FLOAT:
|
case TSDB_DATA_TYPE_FLOAT:
|
||||||
return (char *)&pVar->f;
|
return (char *)&pVar->f;
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
case TSDB_DATA_TYPE_GEOMETRY:
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
return (char *)pVar->pz;
|
return (char *)pVar->pz;
|
||||||
|
|
|
@ -33,10 +33,10 @@ int32_t mmProcessCreateReq(const SMgmtInputOpt *pInput, SRpcMsg *pMsg) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
SMnodeOpt option = {.deploy = true, .numOfReplicas = createReq.replica,
|
SMnodeOpt option = {.deploy = true, .numOfReplicas = createReq.replica,
|
||||||
.numOfTotalReplicas = createReq.replica + createReq.learnerReplica,
|
.numOfTotalReplicas = createReq.replica + createReq.learnerReplica,
|
||||||
.selfIndex = -1, .lastIndex = createReq.lastIndex};
|
.selfIndex = -1, .lastIndex = createReq.lastIndex};
|
||||||
|
|
||||||
memcpy(option.replicas, createReq.replicas, sizeof(createReq.replicas));
|
memcpy(option.replicas, createReq.replicas, sizeof(createReq.replicas));
|
||||||
for (int32_t i = 0; i < createReq.replica; ++i) {
|
for (int32_t i = 0; i < createReq.replica; ++i) {
|
||||||
if (createReq.replicas[i].id == pInput->pData->dnodeId) {
|
if (createReq.replicas[i].id == pInput->pData->dnodeId) {
|
||||||
|
@ -187,10 +187,12 @@ SArray *mmGetMsgHandles() {
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_HEARTBEAT, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_HEARTBEAT, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_FETCH, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_FETCH, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_FETCH, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_FETCH, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_TASK_NOTIFY, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
|
if (dmSetMgmtHandle(pArray, TDMT_VND_TRIM_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_SUBSCRIBE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_SUBSCRIBE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
|
|
|
@ -89,6 +89,7 @@ SArray *qmGetMsgHandles() {
|
||||||
|
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_TASK_NOTIFY, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
|
||||||
|
|
||||||
code = 0;
|
code = 0;
|
||||||
_OVER:
|
_OVER:
|
||||||
|
|
|
@ -755,6 +755,7 @@ SArray *vmGetMsgHandles() {
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
||||||
|
if (dmSetMgmtHandle(pArray, TDMT_SCH_TASK_NOTIFY, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||||
|
|
|
@ -15,6 +15,7 @@
|
||||||
|
|
||||||
#define _DEFAULT_SOURCE
|
#define _DEFAULT_SOURCE
|
||||||
#include "dmMgmt.h"
|
#include "dmMgmt.h"
|
||||||
|
#include "audit.h"
|
||||||
|
|
||||||
#define STR_CASE_CMP(s, d) (0 == strcasecmp((s), (d)))
|
#define STR_CASE_CMP(s, d) (0 == strcasecmp((s), (d)))
|
||||||
#define STR_STR_CMP(s, d) (strstr((s), (d)))
|
#define STR_STR_CMP(s, d) (strstr((s), (d)))
|
||||||
|
@ -34,6 +35,16 @@
|
||||||
} \
|
} \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
|
||||||
|
#define DM_INIT_AUDIT() \
|
||||||
|
do { \
|
||||||
|
auditCfg.port = tsMonitorPort; \
|
||||||
|
auditCfg.server = tsMonitorFqdn; \
|
||||||
|
auditCfg.comp = tsMonitorComp; \
|
||||||
|
if (auditInit(&auditCfg) != 0) { \
|
||||||
|
return -1; \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
#define DM_ERR_RTN(c) \
|
#define DM_ERR_RTN(c) \
|
||||||
do { \
|
do { \
|
||||||
code = (c); \
|
code = (c); \
|
||||||
|
@ -96,6 +107,14 @@ _exit:
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int32_t dmInitAudit() {
|
||||||
|
SAuditCfg auditCfg = {0};
|
||||||
|
|
||||||
|
DM_INIT_AUDIT();
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static bool dmDataSpaceAvailable() {
|
static bool dmDataSpaceAvailable() {
|
||||||
SDnode *pDnode = dmInstance();
|
SDnode *pDnode = dmInstance();
|
||||||
if (pDnode->pTfs) {
|
if (pDnode->pTfs) {
|
||||||
|
@ -176,6 +195,7 @@ int32_t dmInit() {
|
||||||
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
|
if (dmCheckRepeatInit(dmInstance()) != 0) return -1;
|
||||||
if (dmInitSystem() != 0) return -1;
|
if (dmInitSystem() != 0) return -1;
|
||||||
if (dmInitMonitor() != 0) return -1;
|
if (dmInitMonitor() != 0) return -1;
|
||||||
|
if (dmInitAudit() != 0) return -1;
|
||||||
if (dmInitDnode(dmInstance()) != 0) return -1;
|
if (dmInitDnode(dmInstance()) != 0) return -1;
|
||||||
|
|
||||||
dInfo("dnode env is initialized");
|
dInfo("dnode env is initialized");
|
||||||
|
|
|
@ -272,7 +272,7 @@ static bool rpcRfp(int32_t code, tmsg_t msgType) {
|
||||||
code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_VND_STOPPED || code == TSDB_CODE_APP_IS_STARTING ||
|
code == TSDB_CODE_SYN_RESTORING || code == TSDB_CODE_VND_STOPPED || code == TSDB_CODE_APP_IS_STARTING ||
|
||||||
code == TSDB_CODE_APP_IS_STOPPING) {
|
code == TSDB_CODE_APP_IS_STOPPING) {
|
||||||
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
|
if (msgType == TDMT_SCH_QUERY || msgType == TDMT_SCH_MERGE_QUERY || msgType == TDMT_SCH_FETCH ||
|
||||||
msgType == TDMT_SCH_MERGE_FETCH) {
|
msgType == TDMT_SCH_MERGE_FETCH || msgType == TDMT_SCH_TASK_NOTIFY) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -16,7 +16,7 @@ target_include_directories(
|
||||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||||
)
|
)
|
||||||
target_link_libraries(
|
target_link_libraries(
|
||||||
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser
|
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser audit
|
||||||
)
|
)
|
||||||
|
|
||||||
IF (TD_GRANT)
|
IF (TD_GRANT)
|
||||||
|
|
|
@ -29,6 +29,9 @@
|
||||||
#include "mndUser.h"
|
#include "mndUser.h"
|
||||||
#include "mndVgroup.h"
|
#include "mndVgroup.h"
|
||||||
#include "systable.h"
|
#include "systable.h"
|
||||||
|
#include "tjson.h"
|
||||||
|
#include "thttp.h"
|
||||||
|
#include "audit.h"
|
||||||
|
|
||||||
#define DB_VER_NUMBER 1
|
#define DB_VER_NUMBER 1
|
||||||
#define DB_RESERVE_SIZE 46
|
#define DB_RESERVE_SIZE 46
|
||||||
|
@ -733,6 +736,8 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
|
||||||
code = mndCreateDb(pMnode, pReq, &createReq, pUser);
|
code = mndCreateDb(pMnode, pReq, &createReq, pUser);
|
||||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
|
|
||||||
|
auditRecord(pReq, pMnode->clusterId, "createDB", createReq.db, "", "");
|
||||||
|
|
||||||
_OVER:
|
_OVER:
|
||||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||||
mError("db:%s, failed to create since %s", createReq.db, terrstr());
|
mError("db:%s, failed to create since %s", createReq.db, terrstr());
|
||||||
|
@ -975,6 +980,8 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) {
|
||||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auditRecord(pReq, pMnode->clusterId, "alterDB", alterReq.db, "", "");
|
||||||
|
|
||||||
_OVER:
|
_OVER:
|
||||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||||
if (terrno != 0) code = terrno;
|
if (terrno != 0) code = terrno;
|
||||||
|
@ -1264,6 +1271,8 @@ static int32_t mndProcessDropDbReq(SRpcMsg *pReq) {
|
||||||
code = TSDB_CODE_ACTION_IN_PROGRESS;
|
code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auditRecord(pReq, pMnode->clusterId, "dropDB", dropReq.db, "", "");
|
||||||
|
|
||||||
_OVER:
|
_OVER:
|
||||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||||
mError("db:%s, failed to drop since %s", dropReq.db, terrstr());
|
mError("db:%s, failed to drop since %s", dropReq.db, terrstr());
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue