ENodeType

commit e4ab656ed5
@@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1
 BUILD_IN_SOURCE TRUE
 BUILD_ALWAYS 1
 #UPDATE_COMMAND ""
-CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
+CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
 #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
 BUILD_COMMAND make
 INSTALL_COMMAND make install
@@ -9,7 +9,7 @@ ExternalProject_Add(curl
 BUILD_IN_SOURCE TRUE
 BUILD_ALWAYS 1
 #UPDATE_COMMAND ""
-CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli
+CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
 #CONFIGURE_COMMAND ./configure --without-ssl
 BUILD_COMMAND make
 INSTALL_COMMAND make install
@@ -1,7 +1,7 @@
 # cos
 ExternalProject_Add(mxml
 GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
-GIT_TAG release-2.10
+GIT_TAG release-2.12
 SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
 #BINARY_DIR ""
 BUILD_IN_SOURCE TRUE
@@ -35,11 +35,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0

 :::note

-- All the data in `tag_set` will be converted to NCHAR type automatically.
-- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
-- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
-- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+- All the data in `tag_set` will be converted to NCHAR type automatically
+- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
+- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
+- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
+- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3; smlDataFormat is deprecated since 3.0.3.0.)

 :::

 For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
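Since the note above hinges on how a protocol line is submitted and how `smlChildTableName` maps a tag value to the child table name, a minimal C sketch may make it concrete. This is a hedged example, not part of the commit: the connection parameters and the `test` database are assumptions, and the line written is the `st,tname=cpu1,...` sample from the note (with `smlChildTableName=tname` configured, the child table would be named `cpu1`):

```c
#include <stdio.h>
#include "taos.h"

int main() {
  // Assumption: local TDengine, default credentials, existing `test` database.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  // The sample line from the note above, with a nanosecond timestamp.
  char *lines[] = {"st,tname=cpu1,t1=4 c1=3 1626006833639000000"};

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```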
@@ -34,6 +34,7 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
 ```

+- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

 Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.

 ## Examples
@@ -68,11 +69,11 @@ Database changed.
 taos> show stables;
           name              |
 =================================
- meters.current |
- meters.voltage |
+ meters_current |
+ meters_voltage |
 Query OK, 2 row(s) in set (0.002544s)

-taos> select tbname, * from `meters.current`;
+taos> select tbname, * from `meters_current`;
 tbname | _ts | _value | groupid | location |
 ==================================================================================================================================
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
@@ -87,5 +88,5 @@ Query OK, 4 row(s) in set (0.005399s)
 If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:

 ```sql
-SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
+SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
 ```
@@ -49,6 +49,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http

 - In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
+- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is globally unique. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.

 :::

 ## Examples
@@ -83,11 +84,11 @@ Database changed.
 taos> show stables;
           name              |
 =================================
- meters.current |
- meters.voltage |
+ meters_current |
+ meters_voltage |
 Query OK, 2 row(s) in set (0.001954s)

-taos> select * from `meters.current`;
+taos> select * from `meters_current`;
 _ts | _value | groupid | location |
 ===================================================================================================================
 2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
@@ -100,5 +101,5 @@ Query OK, 2 row(s) in set (0.004076s)
 If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:

 ```sql
-SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
+SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
 ```
@@ -113,7 +113,19 @@ Set<String> subscription() throws SQLException;

 ConsumerRecords<V> poll(Duration timeout) throws SQLException;

+Set<TopicPartition> assignment() throws SQLException;
+long position(TopicPartition partition) throws SQLException;
+Map<TopicPartition, Long> position(String topic) throws SQLException;
+Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
+Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
+Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
+void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
+
+void commitSync() throws SQLException;
+void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;

 void close() throws SQLException;
 ```
@@ -887,4 +887,4 @@ The `pycumsum` function finds the cumulative sum for all data in the input column.

 </details>
 ## Manage and Use UDF
-You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
+You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../taos-sql/udf/).
@@ -43,6 +43,8 @@ In TDengine, the data types below can be used when specifying a column or tag.
 | 15 | JSON | | JSON type can only be used on tags. A tag of JSON type cannot be used together with tags of any other type. |
 | 16 | VARCHAR | User-defined | Alias of BINARY |
+| 17 | GEOMETRY | User-defined | Geometry |
+| 18 | VARBINARY | User-defined | Binary data with variable length |

 :::note

 - Each row of the table cannot be longer than 48 KB (64 KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
@@ -57,7 +59,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
 | 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |

 - Numeric values in SQL statements will be determined as integer or float type according to whether there is a decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.

+- VARBINARY is a data type that stores binary data, with a maximum length of 65517 bytes for data columns and 16382 bytes for tag columns. Binary data can be written through SQL or schemaless mode (in which case it needs to be converted to a string starting with \x), or through stmt (which can use binary directly). It is displayed in hexadecimal format starting with \x.
 :::

 ## Constants
@@ -7,9 +7,9 @@ description: This document describes how to query data in TDengine.
 ## Syntax

 ```sql
-SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
+SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

-SELECT [DISTINCT] select_list
+SELECT [hints] [DISTINCT] [TAGS] select_list
     from_clause
     [WHERE condition]
     [partition_by_clause]
@@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
     [LIMIT limit_val [OFFSET offset_val]]
     [>> export_file]

+hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
+
+hint:
+    BATCH_SCAN | NO_BATCH_SCAN
+
 select_list:
     select_expr [, select_expr] ...
@@ -70,6 +75,29 @@ order_expr:
     {expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
 ```

+## Hints
+
+Hints are a means of user control over query optimization for individual statements. Hints are ignored automatically if they are not applicable to the current query statement. The specific rules are as follows:
+
+- Hints syntax starts with `/*+` and ends with `*/`; spaces are allowed before or after.
+- Hints syntax can only follow the SELECT keyword.
+- Each hints clause can contain multiple hints, separated by spaces. When multiple hints conflict or are identical, whichever comes first takes effect.
+- When an error occurs with a hint in a hints clause, the valid hints before the error still take effect, and the current and subsequent hints are ignored.
+- hint_param_list are the arguments of each hint, which vary by hint.
+
+The list of currently supported hints is as follows:
+
+| **Hint**      | **Params** | **Comment**           | **Scope**                 |
+| :-----------: | ---------- | --------------------- | ------------------------- |
+| BATCH_SCAN    | None       | Batch table scan      | JOIN statement for STable |
+| NO_BATCH_SCAN | None       | Sequential table scan | JOIN statement for STable |
+
+For example:
+
+```sql
+SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
+```

 ## Lists

 A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
@@ -167,7 +195,7 @@ The following SQL statement returns the number of subtables within the meters supertable:
 SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
 ```

-In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause. For example:
+In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause.

 **\_QSTART and \_QEND**
@@ -197,6 +225,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function.
 select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
 ```

+### TAGS Query
+
+The TAGS keyword returns only tag columns from all child tables when only tag columns are specified. One row containing tag columns is returned for each child table.
+
+```sql
+SELECT TAGS tag_name [, tag_name ...] FROM stb_name
+```
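As a hedged sketch of how the new TAGS query might be driven from the C client (the `meters` supertable with `location` and `groupid` tags from the docs' running example, plus the server address and credentials, are assumptions):

```c
#include <stdio.h>
#include "taos.h"

// Prints one row of tag values per child table of `meters`.
int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  TAOS_RES *res = taos_query(taos, "SELECT TAGS location, groupid FROM meters");
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row;
    char line[1024];
    int numFields = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, numFields); // one child table per row
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```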
 ## Query Objects

 `FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
@@ -209,8 +245,7 @@ You can perform INNER JOIN statements based on the primary key. The following conditions apply:
 3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition.
 4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable).
 5. You can include subqueries before and after the JOIN keyword.
-6. You cannot include more than ten tables in a JOIN clause.
-7. You cannot include a FILL clause and a JOIN clause in the same statement.
+6. You cannot include a FILL clause and a JOIN clause in the same statement.

 ## GROUP BY
@@ -301,6 +336,12 @@ SELECT TODAY();
 SELECT TIMEZONE();
 ```

+### Obtain Current User
+
+```sql
+SELECT CURRENT_USER();
+```

 ## Regular Expression

 ### Syntax
@@ -355,7 +396,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) FROM meters;

 ## JOIN

-TDengine supports `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of a timestamp-based primary key is met, `INNER JOIN` can be performed between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables.
+TDengine supports `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of a timestamp-based primary key is met, `INNER JOIN` can be performed between normal tables, sub-tables, super tables and sub-queries at will, and there is no limit on the number of tables; the primary key and other conditions must be combined with the `AND` operator.

 For standard tables:
@@ -49,3 +49,5 @@ You can also add filter conditions to limit the results.
 6. You can't create an index on a normal table or a child table.

+7. If a tag column has too few unique values, it is better not to create an index on it; the benefit would be very small.
+
+8. A newly created supertable randomly generates an index name for the first tag column, composed of the name of the tag0 column plus 23 random bytes; this index can be rebuilt or dropped.
@@ -402,7 +402,7 @@ CAST(expr AS type_name)

 **Return value type**: The type specified by parameter `type_name`

-**Applicable data types**: All data types except JSON
+**Applicable data types**: All data types except JSON and VARBINARY. If type_name is VARBINARY, expr can only be VARCHAR.

 **Nested query**: It can be used in both the outer query and inner query in a nested query.
|
|||
|
||||
**Description**: The server status.
|
||||
|
||||
### CURRENT_USER
|
||||
|
||||
```sql
|
||||
SELECT CURRENT_USER();
|
||||
```
|
||||
|
||||
**Description**: get current user.
|
||||
|
||||
|
||||
## Geometry Functions
|
||||
|
||||
|
|
|
@@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream processing.
 - [unique](../function/#unique)
 - [mode](../function/#mode)

+## Pause / Resume stream
+1. Pause stream
+PAUSE STREAM [IF EXISTS] stream_name;
+If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are paused.
+
+2. Resume stream
+RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
+If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are resumed. If "IGNORE UNTREATED" is specified, data written while the stream was paused is ignored when the stream resumes.
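A hedged sketch of issuing the new statements from the C client; the stream name `current_stream` is a hypothetical placeholder:

```c
#include <stdio.h>
#include "taos.h"

// Executes one SQL statement and reports any error.
static void exec_sql(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    printf("\"%s\" failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return 1;
  // `current_stream` is a hypothetical stream name; adjust to your setup.
  exec_sql(taos, "PAUSE STREAM IF EXISTS current_stream");
  // Resume, discarding data written while the stream was paused.
  exec_sql(taos, "RESUME STREAM IF EXISTS IGNORE UNTREATED current_stream");
  taos_close(taos);
  return 0;
}
```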
@@ -178,7 +178,7 @@ The following list shows all reserved keywords:

 - MATCH
 - MAX_DELAY
-- MAX_SPEED
+- BWLIMIT
 - MAXROWS
 - MERGE
 - META
@@ -22,6 +22,14 @@ SHOW CLUSTER;

 Shows information about the current cluster.

+## SHOW CLUSTER ALIVE
+
+```sql
+SHOW CLUSTER ALIVE;
+```
+
+Checks whether the cluster is available. Return values: 0 means unavailable, 1 means available, and 2 means partially available (some dnodes are offline but the other dnodes are available).

 ## SHOW CONNECTIONS

 ```sql
@@ -19,6 +19,9 @@ index_option:
 functions:
     function [, function] ...
 ```
+### Tag Indexing
+
+[Tag Index](../tag-index)

 ### SMA Indexing
@@ -17,7 +17,7 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
 ## Terminate a Query

 ```sql
-KILL QUERY kill_id;
+KILL QUERY 'kill_id';
 ```

 You can use the SHOW QUERIES statement to find the kill_id.
@@ -135,7 +135,7 @@ The following describes the basic API, synchronous API, asynchronous API, subscription API

 The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.

-- `void taos_init()`
+- `int taos_init()`

   Initializes the runtime environment. If the API is not actively called, the driver will automatically call the API when `taos_connect()` is called, so the program generally does not need to call it manually.
@@ -168,6 +168,12 @@ The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.

 :::

+- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
+
+  The function is the same as taos_connect, except that the pass parameter is replaced by auth; the other parameters are the same as those of taos_connect.
+
+  - auth: the 32-character lowercase MD5 hash of the raw password
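A hedged sketch of the call added above; the MD5 string is a hypothetical placeholder that would be precomputed from the real password:

```c
#include <stdio.h>
#include "taos.h"

int main() {
  // Hypothetical placeholder: substitute the 32-character lowercase MD5
  // hash of the actual password, computed offline or with a crypto library.
  const char *auth = "0123456789abcdef0123456789abcdef";

  TAOS *taos = taos_connect_auth("localhost", "root", auth, NULL, 6030);
  if (taos == NULL) {
    printf("connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }
  printf("server version: %s\n", taos_get_server_info(taos));
  taos_close(taos);
  return 0;
}
```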
 - `char *taos_get_server_info(TAOS *taos)`

   Get server-side version information.
@@ -184,6 +190,14 @@ The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.
 - If len is less than the space required to store the db (including the last '\0'), an error is returned. The truncated data assigned in the database ends with '\0'.
 - If len is greater than or equal to the space required to store the db (including the last '\0'), 0 is returned, and the db name ending with '\0' is assigned in the database.

+- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
+
+  Sets the event callback function.
+
+  - fp: event callback function pointer. Declaration: typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type); param is a user-defined parameter, ext is an extended parameter (dependent on the event type; for TAOS_NOTIFY_PASSVER it returns the user password version), and type is the event type
+  - param: user-defined parameter
+  - type: event type. Value range: 1) TAOS_NOTIFY_PASSVER: user password changed
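Given the declaration above, a hedged sketch of registering a password-change callback; treating `ext` as a pointer to a 32-bit version number is an assumption based on the description:

```c
#include <stdio.h>
#include <stdint.h>
#include "taos.h"

// Invoked by the driver on events; for TAOS_NOTIFY_PASSVER the description
// says `ext` carries the user password version (read here as int32_t).
static void on_notify(void *param, void *ext, int type) {
  if (type == TAOS_NOTIFY_PASSVER) {
    printf("password version is now %d (param: %s)\n",
           *(int32_t *)ext, (const char *)param);
  }
}

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return 1;

  static char tag[] = "my-connection";
  int code = taos_set_notify_cb(taos, on_notify, tag, TAOS_NOTIFY_PASSVER);
  if (code != 0) {
    printf("taos_set_notify_cb failed: %d\n", code);
  }
  // ... run the application; the callback fires asynchronously ...
  taos_close(taos);
  return 0;
}
```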
 - `void taos_close(TAOS *taos)`

   Closes the connection, where `taos` is the handle returned by `taos_connect()`.
@@ -307,21 +321,20 @@ The specific functions related to the interface are as follows

 Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically.

-- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
+- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`

   Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT type SQL statements.
   To bind parameters, bind points to an array (representing the row of data to be bound), making sure that the number and order of the elements in this array are the same as the parameters in the SQL statement. TAOS_MULTI_BIND is used similarly to MYSQL_BIND in MySQL, as defined below.

 ```c
-typedef struct TAOS_BIND {
+typedef struct TAOS_MULTI_BIND {
   int       buffer_type;
-  void *    buffer;
-  uintptr_t buffer_length;  // not in use
-  uintptr_t *length;
-  int *     is_null;
-  int       is_unsigned;  // not in use
-  int *     error;  // not in use
-} TAOS_BIND;
+  void     *buffer;
+  uintptr_t buffer_length;
+  uint32_t *length;
+  char     *is_null;
+  int       num;
+} TAOS_MULTI_BIND;
 ```
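Based on the struct layout above, a hedged sketch of filling one TAOS_MULTI_BIND for a single INT parameter (the prepared statement and its schema are assumptions; error handling is omitted for brevity):

```c
#include <string.h>
#include "taos.h"

// Binds one INT value to the next `?` parameter of a prepared statement.
void bind_one_int(TAOS_STMT *stmt, int32_t *value) {
  static char is_null = 0;               // per-row NULL flags
  static uint32_t len = sizeof(int32_t); // per-row data lengths

  TAOS_MULTI_BIND b;
  memset(&b, 0, sizeof(b));
  b.buffer_type   = TSDB_DATA_TYPE_INT;  // type constant from taos.h
  b.buffer        = value;               // the data itself
  b.buffer_length = sizeof(int32_t);     // size of one element
  b.length        = &len;
  b.is_null       = &is_null;
  b.num           = 1;                   // one row for taos_stmt_bind_param

  taos_stmt_bind_param(stmt, &b);
}
```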
 - `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
@@ -329,7 +342,7 @@ The specific functions related to the interface are as follows
 (Available in 2.1.1.0 and later versions, only supported for replacing parameter values in INSERT statements)
 When the table name in the SQL command uses a `?` placeholder, you can use this function to bind a specific table name.

-- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
+- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`

 (Available in 2.1.2.0 and later versions, only supported for replacing parameter values in INSERT statements)
 When the table name and TAGS in the SQL command both use `?` placeholders, you can use this function to bind the specific table name and the specific TAGS values. The most typical usage scenario is an INSERT statement that uses the automatic table creation function (the current version does not support specifying specific TAGS columns). The number of columns in the TAGS parameter needs to be the same as the number of TAGS requested in the SQL command.
@@ -358,6 +371,14 @@ The specific functions related to the interface are as follows

 Execute the prepared statement. Currently, a statement can only be executed once.

+- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
+
+  Gets the number of rows affected by executing bind statements multiple times.
+
+- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
+
+  Gets the number of rows affected by executing a bind statement once.

 - `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`

   Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources.
@@ -454,6 +475,7 @@ In addition to writing data using the SQL method or the parameter binding API
 - zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`

+- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`

 **Function description**
 - get the committed offset
@@ -467,9 +489,9 @@ In addition to writing data using the SQL method or the parameter binding API

 **Function description**

-The commit interface is divided into two types, each with synchronous and asynchronous interfaces:
-- The first type: commit based on a message, committing the progress contained in the message. If the message is NULL, the current progress of all vgroups consumed by the current consumer is committed: tmq_commit_sync/tmq_commit_async
-- The second type: commit based on the offset of one vgroup in one topic: tmq_commit_offset_sync/tmq_commit_offset_async
+- The commit interface is divided into two types, each with synchronous and asynchronous interfaces:
+  - The first type: commit based on a message, committing the progress contained in the message. If the message is NULL, the current progress of all vgroups consumed by the current consumer is committed: tmq_commit_sync/tmq_commit_async
+  - The second type: commit based on the offset of one vgroup in one topic: tmq_commit_offset_sync/tmq_commit_offset_async

 **Parameter description**
 - msg: the consumed message. If the message is NULL, the current progress of all vgroups consumed by the current consumer is committed
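A hedged sketch of the first commit type; the consumer is assumed to have been created and subscribed as in the TMQ examples elsewhere in these docs:

```c
#include <stdio.h>
#include "taos.h"

// Polls one message, processes it, then commits its progress synchronously.
void poll_and_commit(tmq_t *tmq) {
  TAOS_RES *msg = tmq_consumer_poll(tmq, 1000 /* timeout in ms */);
  if (msg != NULL) {
    // ... process the message ...

    // First type: commit the progress carried by this message.
    int32_t code = tmq_commit_sync(tmq, msg);
    if (code != 0) {
      printf("commit failed: %s\n", tmq_err2str(code));
    }
    taos_free_result(msg);
  }

  // Passing NULL instead commits the current progress of all vgroups
  // consumed by this consumer.
  tmq_commit_sync(tmq, NULL);
}
```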
@@ -513,4 +535,4 @@ In addition to writing data using the SQL method or the parameter binding API
 - topics: the list of topics subscribed by the consumer; it needs to be freed with tmq_list_destroy

 **Return value**
-- zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
+- zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
@@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.

 | taos-jdbcdriver version | major changes | TDengine version |
 | :---------------------: | :-----------: | :--------------: |
+| 3.2.5 | Subscription adds the committed() and assignment() methods | 3.1.0.3 or later |
 | 3.2.4 | Subscription adds the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
 | 3.2.3 | Fixed resultSet data parsing failures in some cases | - |
 | 3.2.2 | Subscription adds the seek function | 3.0.5.0 or later |
@@ -1019,14 +1020,19 @@ while(true) {
 #### Assignment subscription Offset

 ```java
 // get topicPartition
 Set<TopicPartition> assignment() throws SQLException;
 // get offset
 long position(TopicPartition partition) throws SQLException;
 Map<TopicPartition, Long> position(String topic) throws SQLException;
 Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
 Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
 Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;

 // Overrides the fetch offsets that the consumer will use on the next poll(timeout).
 void seek(TopicPartition partition, long offset) throws SQLException;
 void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
 void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
 ```

 Example usage is as follows.
@@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
 }
 ```

+#### Commit offset
+
+If `enable.auto.commit` is false, offsets can be committed manually.
+
+```java
+void commitSync() throws SQLException;
+void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
+// async commit only supports the JNI connection
+void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
+void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
+```

 #### Close subscriptions

 ```java
@@ -30,6 +30,10 @@ The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com

 The supported platforms are the same as those supported by the TDengine client driver.

+:::note
+Please note that TDengine no longer supports 32-bit Windows.
+:::

 ## Version support

 Please refer to [version support list](/reference/connector#version-support)
@@ -102,6 +102,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
 -L, --loose-mode Use loose mode if the table name and column name
 use letter and number only. Default is NOT.
 -n, --no-escape No escape char '`'. Default is using it.
+-Q, --dot-replace Replace dot character with underline character in
+the table name.
 -T, --thread-num=THREAD_NUM Number of threads for dump in file. Default is
 8.
 -C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
@@ -13,7 +13,7 @@ After TDengine starts, it automatically writes many metrics in specific intervals
 To deploy TDinsight, we need
 - a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
 - taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
-- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
+- taosKeeper has been installed and running. Please note that the monitor-related items in the taos.cfg file need to be configured. Refer to [taosKeeper](../taosKeeper) for details.

 Please record
 - The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
@@ -80,7 +80,7 @@ chmod +x TDinsight.sh
 ./TDinsight.sh
 ```

-This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc.

 Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
@@ -112,9 +112,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 systems.
 -i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
 -t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
 -e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
-
--E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
-
 ```

 Most command-line options can take effect the same as environment variables.
@@ -132,7 +129,10 @@ Most command-line options can take effect the same as environment variables.
 | -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight `uid` of the dashboard. [default: tdinsight] |
 | -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] |
 | -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] |
 | -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. |
+
+:::note
+The `-E` option is deprecated. We use the Grafana unified alerting function instead.
+:::

 Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
@@ -140,18 +140,6 @@ Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`
 sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
 ```

-We provide a "-E" option to configure TDinsight to use the existing Notification Channel from the command line. Assuming your Grafana user and password is `admin:admin`, use the following command to get the `uid` of an existing notification channel.
-
-```bash
-curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
-```
-
-Use the `uid` value obtained above as `-E` input.
-
-```bash
-./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
-```
-
 If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.

 ```bash
@@ -32,8 +32,10 @@ All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").

 In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:

-- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
-- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
+- If there are English double quotes on both sides, it indicates the VARCHAR type. For example, `"abc"`.
+- If there are double quotes on both sides and an L/l prefix, it means NCHAR type. For example, `L"error message"`.
+- If there are double quotes on both sides and a G/g prefix, it means GEOMETRY type. For example, `G"Point(4.343 89.342)"`.
+- If there are double quotes on both sides and a B/b prefix, it means VARBINARY type. A hexadecimal string starting with \x, or a plain string, can be used inside the double quotes. For example, `B"\x98f46e"` and `B"hello"`.
 - Spaces, equals signs (=), commas (,), double quotes ("), and backslashes (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII characters.) The rules are as follows:

 | **Serial number** | **Element** | **Escape characters** |
@@ -110,7 +112,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for example
 Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
 9. Because SQL table names do not support periods (.), schemaless also processes periods (.): if the table name automatically created by schemaless contains a period (.), it is automatically replaced with an underscore (\_). If you manually specify a sub-table name that contains a period (.), it is likewise converted to an underscore (\_).
 10. taos.cfg adds the smlTsDefaultName configuration (a string value), which only takes effect on the client side. After it is configured, the name of the time column created by schemaless automatic table creation is taken from this configuration. If it is not configured, the name defaults to _ts.
+11. Supertable names and child table names are case sensitive.
 :::tip
 All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
 :::
@@ -74,7 +74,7 @@ grafana-cli plugins install tdengine-datasource
 sudo -u grafana grafana-cli plugins install tdengine-datasource
 ```

-You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
+You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:

 ```bash
 GF_VERSION=3.3.1
@@ -218,11 +218,11 @@ The example to query the average system memory usage for the specified interval

 ### Importing the Dashboard

-You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
+You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x. Please note that TDinsight for 3.x requires taosKeeper to be configured and running correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for the details.

 

-A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
+A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167).

 For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
@@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://t

 import Release from "/components/ReleaseV3";

+## 3.1.0.3
+
+<Release type="tdengine" version="3.1.0.3" />
+
+## 3.1.0.2
+
+<Release type="tdengine" version="3.1.0.2" />

 ## 3.1.0.0

 :::note IMPORTANT
@@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
     } break;

     case TSDB_DATA_TYPE_BINARY:
+    case TSDB_DATA_TYPE_VARBINARY:
     case TSDB_DATA_TYPE_NCHAR:
     case TSDB_DATA_TYPE_GEOMETRY: {
       int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
@@ -51,7 +51,7 @@ void insertData(TAOS *taos) {
   int code = taos_stmt_prepare(stmt, sql, 0);
   checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
   // bind table name and tags
-  TAOS_BIND tags[2];
+  TAOS_MULTI_BIND tags[2];
   char *location = "California.SanFrancisco";
   int groupId = 2;
   tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
@@ -144,4 +144,4 @@ int main() {
 }

 // output:
-// successfully inserted 2 rows
+// successfully inserted 2 rows
@@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
     } break;

     case TSDB_DATA_TYPE_BINARY:
+    case TSDB_DATA_TYPE_VARBINARY:
     case TSDB_DATA_TYPE_NCHAR:
     case TSDB_DATA_TYPE_GEOMETRY: {
       int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
@@ -58,7 +58,7 @@ void insertData(TAOS *taos) {
   int code = taos_stmt_prepare(stmt, sql, 0);
   checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
   // bind table name and tags
-  TAOS_BIND tags[2];
+  TAOS_MULTI_BIND tags[2];
   char* location = "California.SanFrancisco";
   int groupId = 2;
   tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
@@ -82,7 +82,7 @@ void insertData(TAOS *taos) {
       {1648432611749, 12.6, 218, 0.33},
   };

-  TAOS_BIND values[4];
+  TAOS_MULTI_BIND values[4];
   values[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
   values[0].buffer_length = sizeof(int64_t);
   values[0].length = &values[0].buffer_length;
@@ -138,4 +138,4 @@ int main() {

 // output:
-// successfully inserted 2 rows
+// successfully inserted 2 rows
@@ -10,7 +10,7 @@ TDengine makes full use of the characteristics of time-series data and proposes the concept of "one table per data collection point"

 If you are a developer, be sure to read the [Developer Guide](./develop) chapter carefully. It explains database connections, data modeling, ingestion, queries, stream processing, caching, data subscription, user-defined functions and more in detail, with sample code in various programming languages. In most cases you can simply copy and paste the sample code, adapt it slightly to your application, and it will just work.

-We already live in the era of big data, and scale-up can no longer meet ever-growing business needs. Any system must be able to scale out, which makes clustering an indispensable feature of big data and database systems. The TDengine team has not only implemented the cluster feature but also open-sourced this important core capability. For how to deploy, manage and maintain a TDengine cluster, please read the [Deploy a Cluster](./deployment) chapter carefully.
+We already live in the era of big data, and scale-up can no longer meet ever-growing business needs. Any system must be able to scale out, which makes clustering an indispensable feature of big data and database systems. The TDengine team has not only implemented the cluster feature but also open-sourced this important core capability. For how to deploy, manage and maintain a TDengine cluster, please read the [Deploy a Cluster] chapter carefully.

 TDengine uses SQL as its query language, which greatly reduces learning and migration costs, while adding extensions for time-series scenarios to support interpolation, downsampling, time-weighted averages and other operations. The [SQL Manual](./taos-sql) chapter describes the SQL syntax in detail and lists all supported commands and functions.
|
|||
|
||||
如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
|
||||
|
||||
如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
|
||||
|
||||
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
|
||||
|
||||
Together, we make a difference!
|
||||
|
|
|
@@ -6,7 +6,14 @@ toc_max_heading_level: 2

 TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, Industrial IoT, finance, IT operations and other scenarios. The TDengine code, including the cluster feature, is open source under GNU AGPL v3.0. Besides the core time-series database functionality, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream) and other features to reduce system complexity and the cost of development and operations.

-This section introduces TDengine's main features, competitive advantages, typical use cases, comparison tests against other databases and more, to give you an overall picture of TDengine.
+This section introduces TDengine's main products and features, competitive advantages, typical use cases, comparison tests against other databases and more, to give you an overall picture of TDengine.

+## Main Products
+
+TDengine has three main products: TDengine Pro (the enterprise edition of TDengine), TDengine Cloud, and TDengine OSS. For their exact definitions, see
+- [TDengine Pro](https://www.taosdata.com/tdengine-pro)
+- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
+- [TDengine OSS](https://www.taosdata.com/tdengine-oss)

 ## Main Features
@@ -34,11 +34,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0

 :::note

-- All data in tag_set is automatically converted to the NCHAR data type;
-- Each data item in field_set must describe its own data type; for example, 1.2f32 represents a FLOAT value of 1.2, and without the type suffix it is treated as DOUBLE;
-- timestamp supports multiple time precisions. The precision is specified by a parameter when writing data, and six precisions from hours to nanoseconds are supported.
+- All data in tag_set is automatically converted to the NCHAR data type
+- Each data item in field_set must describe its own data type; for example, 1.2f32 represents a FLOAT value of 1.2, and without the type suffix it is treated as DOUBLE
+- timestamp supports multiple time precisions. The precision is specified by a parameter when writing data, and six precisions from hours to nanoseconds are supported
+- To improve write efficiency, it is assumed by default that the order of field_set within one supertable is consistent (the first record contains all fields, and subsequent records follow that order). If the order differs, the smlDataFormat parameter needs to be set to false; otherwise data is written assuming the same order and the data in the database will be wrong. (smlDataFormat defaults to false in versions after 3.0.1.3; since 3.0.3.0 this parameter is deprecated.) [TDengine schemaless writing reference guide](/reference/schemaless/#无模式写入行协议)
+- The generated child table name is, by default, a unique ID value generated by rule. Users can also configure the smlChildTableName parameter in the client-side taos.cfg to use a specific tag value as the child table name; that tag value should be globally unique. For example, given a tag named tname and smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a child table named cpu1. Note that if multiple rows have the same tname but different tag_sets, the tag_set specified when the table was first automatically created is used, and the other rows' tag_sets are ignored. [TDengine schemaless writing reference guide](/reference/schemaless/#无模式写入行协议)

 :::

 For more information, see the [InfluxDB Line protocol official documentation](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and the [TDengine schemaless writing reference guide](/reference/schemaless/#无模式写入行协议)
@@ -67,11 +67,11 @@ Database changed.
 taos> SHOW STABLES;
           name              |
 =================================
- meters.current |
- meters.voltage |
+ meters_current |
+ meters_voltage |
 Query OK, 2 row(s) in set (0.002544s)

-taos> SELECT TBNAME, * FROM `meters.current`;
+taos> SELECT TBNAME, * FROM `meters_current`;
 tbname | _ts | _value | groupid | location |
 ==================================================================================================================================
 t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
|
|||
|
||||
## SQL 查询示例
|
||||
|
||||
`meters.current` 是插入数据的超级表名。
|
||||
`meters_current` 是插入数据的超级表名。
|
||||
|
||||
可以通过超级表的 TAG 来过滤数据,比如查询 `location=California.LosAngeles groupid=3` 可以通过如下 SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||
```
|
||||
|
|
|
@@ -48,6 +48,7 @@ The OpenTSDB JSON format protocol uses a JSON string to represent one or more rows of data

 - For the JSON protocol, TDengine does not automatically convert all tags to NCHAR: strings are converted to NCHAR, and numeric values are converted to DOUBLE.
+- The generated child table name is, by default, a unique ID value generated by rule. Users can also configure the smlChildTableName parameter in the client-side taos.cfg to use a specific tag value as the child table name; that tag value should be globally unique. For example, given a tag named tname and smlChildTableName=tname configured, inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates a child table named cpu1. Note that if multiple rows have the same tname but different tag_sets, the tag_set specified when the table was first automatically created is used, and the other rows' tag_sets are ignored.

 :::

 ## Sample Code
@@ -82,8 +83,8 @@ Database changed.
 taos> SHOW STABLES;
           name              |
 =================================
- meters.current |
- meters.voltage |
+ meters_current |
+ meters_voltage |
 Query OK, 2 row(s) in set (0.001954s)

 taos> SELECT * FROM `meters.current`;
|
|||
|
||||
## SQL 查询示例
|
||||
|
||||
`meters.voltage` 是插入数据的超级表名。
|
||||
`meters_voltage` 是插入数据的超级表名。
|
||||
|
||||
可以通过超级表的 TAG 来过滤数据,比如查询 `location=California.LosAngeles groupid=1` 可以通过如下 SQL:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
|
||||
```
|
||||
|
|
|
@@ -115,7 +115,19 @@ Set<String> subscription() throws SQLException;

 ConsumerRecords<V> poll(Duration timeout) throws SQLException;

+Set<TopicPartition> assignment() throws SQLException;
+long position(TopicPartition partition) throws SQLException;
+Map<TopicPartition, Long> position(String topic) throws SQLException;
+Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
+Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
+Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
+
+void seek(TopicPartition partition, long offset) throws SQLException;
+void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
+void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
+
+void commitSync() throws SQLException;
+void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;

 void close() throws SQLException;
 ```
@@ -296,7 +296,7 @@ ldconfig
 3. If the Python UDF program references other packages at run time via PYTHONPATH, you can set the UdfdLdLibPath variable in taos.cfg to the value of PYTHONPATH.

 4. Start the `taosd` service
-   For details, see [快速开始](../../get-started)
+   For details, see [立即开始](../../get-started)

 ### Interface Definition
@@ -883,5 +883,5 @@ pycumsum uses numpy to compute the cumulative sum of all data in the input column.

 </details>
 ## Managing and Using UDFs
-A UDF must be added to the TDengine system before it can be used. For how to manage and use UDFs, see [Managing and Using UDFs](../12-taos-sql/26-udf.md)
+A UDF must be added to the TDengine system before it can be used. For how to manage and use UDFs, see [Managing and Using UDFs](../../taos-sql/udf)
@@ -223,7 +223,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)

 The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.

-- `void taos_init()`
+- `int taos_init()`

   Initializes the runtime environment. If this API is not called explicitly, the driver calls it automatically when `taos_connect()` is invoked, so a program generally does not need to call it manually.
@@ -256,6 +256,12 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)

 :::

+- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
+
+  Same as taos_connect, except that the pass parameter is replaced by auth; the other parameters are the same as those of taos_connect.
+
+  - auth: the 32-character lowercase MD5 hash of the raw password

 - `char *taos_get_server_info(TAOS *taos)`

   Gets server-side version information.
@@ -272,6 +278,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
 - If len is less than the space required to store the db name (including the terminating '\0'), an error is returned, and the value stored in database is truncated and ends with '\0'.
 - If len is greater than or equal to the space required to store the db name (including the terminating '\0'), 0 is returned, and the db name, ending with '\0', is stored in database.

+- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
+
+  Sets the event callback function.
+
+  - fp: event callback function pointer. Declaration: typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type); param is a user-defined parameter, ext is an extended parameter (dependent on the event type; for TAOS_NOTIFY_PASSVER it returns the user password version), and type is the event type
+  - param: user-defined parameter
+  - type: event type. Value range: 1) TAOS_NOTIFY_PASSVER: the user password has changed
+
 - `void taos_close(TAOS *taos)`

   Closes the connection, where `taos` is the handle returned by `taos_connect()`.
@@ -396,21 +410,20 @@ All of TDengine's asynchronous APIs use a non-blocking call pattern. Applications can use multiple threads

 Parses a SQL statement and binds the parsed result and parameter information to stmt. If the parameter length is greater than 0, this parameter is used as the length of the SQL statement; if it is 0, the length of the SQL statement is determined automatically.

-- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
+- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`

   Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT SQL statements.
-  Binds parameters. bind points to an array (representing one row of data to bind); the number and order of the elements in this array must exactly match the parameters in the SQL statement. TAOS_BIND is used in the same way as MYSQL_BIND in MySQL and is defined as follows:
+  Binds parameters. bind points to an array (representing one row of data to bind); the number and order of the elements in this array must exactly match the parameters in the SQL statement. TAOS_MULTI_BIND is used in the same way as MYSQL_BIND in MySQL and is defined as follows:

 ```c
-typedef struct TAOS_BIND {
+typedef struct TAOS_MULTI_BIND {
   int       buffer_type;
-  void *    buffer;
-  uintptr_t buffer_length;  // not in use
-  uintptr_t *length;
-  int *     is_null;
-  int       is_unsigned;  // not in use
-  int *     error;  // not in use
-} TAOS_BIND;
+  void     *buffer;
+  uintptr_t buffer_length;
+  uint32_t *length;
+  char     *is_null;
+  int       num;  // the number of columns
+} TAOS_MULTI_BIND;
 ```

 - `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
@@ -418,7 +431,7 @@ All of TDengine's asynchronous APIs use a non-blocking call pattern.
 (Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
 When the table name in the SQL statement uses a `?` placeholder, this function can be used to bind a specific table name.

-- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
+- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`

 (Added in version 2.1.2.0; only supported for replacing parameter values in INSERT statements)
 When both the table name and the TAGS in the SQL statement use `?` placeholders, this function can be used to bind the specific table name and the specific TAGS values. The most typical use case is an INSERT statement that relies on automatic table creation (the current version does not support specifying individual TAGS columns). The number of columns in the TAGS parameter must exactly match the number of TAGS required by the SQL statement.
@@ -428,17 +441,6 @@ All of TDengine's asynchronous APIs use a non-blocking call pattern.
 (Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
 Passes the data to bind in a multi-column manner. The order and number of the data columns passed here must match the VALUES parameters in the SQL statement exactly. TAOS_MULTI_BIND is defined as follows:
-
-```c
-typedef struct TAOS_MULTI_BIND {
-  int       buffer_type;
-  void *    buffer;
-  uintptr_t buffer_length;
-  uintptr_t *length;
-  char *    is_null;
-  int       num;  // the number of columns
-} TAOS_MULTI_BIND;
-```

 - `int taos_stmt_add_batch(TAOS_STMT *stmt)`

   Adds the currently bound parameters to the batch. After calling this function, `taos_stmt_bind_param()` or `taos_stmt_bind_param_batch()` can be called again to bind new parameters. Note that this function only supports INSERT/IMPORT statements; if it is a SELECT or other SQL statement, an error will be returned.
@@ -447,6 +449,14 @@ All of TDengine's asynchronous APIs use a non-blocking call pattern.

 Executes the prepared statement. Currently, a statement can only be executed once.

+- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
+
+  Gets the number of rows affected by executing the bound statement multiple times.
+
+- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
+
+  Gets the number of rows affected by executing the bound statement once.

 - `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`

   Gets the result set of the statement. The result set is used in the same way as in a non-parameterized call; when finished, call `taos_free_result()` on the result set to free the resources.
@@ -542,6 +552,7 @@ All of TDengine's asynchronous APIs use a non-blocking call pattern.
 - Error code: 0 on success, non-zero on failure; the error message can be obtained through the `char *tmq_err2str(int32_t code)` function.

+- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`

 **Function description**
 - Gets the commit position of the current consumer on a given topic and vgroup.
@@ -555,9 +566,9 @@ All of TDengine's asynchronous APIs use a non-blocking call pattern.

 **Function description**

-The commit interface is divided into two types, each with synchronous and asynchronous versions:
-- The first type: commit based on a message, committing the progress contained in the message; if the message is NULL, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
-- The second type: commit based on the offset of one vgroup in one topic: tmq_commit_offset_sync/tmq_commit_offset_async
+- The commit interface is divided into two types, each with synchronous and asynchronous versions:
+  - The first type: commit based on a message, committing the progress contained in the message; if the message is NULL, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
+  - The second type: commit based on the offset of one vgroup in one topic: tmq_commit_offset_sync/tmq_commit_offset_async

 **Parameter description**
 - msg: the consumed message structure; if NULL, the current progress of all vgroups consumed by this consumer is committed
@ -584,8 +595,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`

  **Description**

  Gets the starting offset of the data consumed by poll
  - Gets the starting offset of the data consumed by poll

  **Parameters**
  - msg: the consumed message structure
|
||||
|
@ -596,10 +606,10 @@ All of TDengine's asynchronous APIs use a non-blocking calling model, so an application can use multiple

- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`

  **Description**

  Gets the list of topics the consumer is subscribed to
  - Gets the list of topics the consumer is subscribed to

  **Parameters**
  - topics: the retrieved topic list is stored in this structure; memory is allocated inside the interface and must be released with tmq_list_destroy (see the sketch below)

  **Return value**
  - Error code: 0 on success, non-zero on failure; the error message can be obtained with `char *tmq_err2str(int32_t code)`
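A small usage sketch, assuming an already-created consumer `tmq`; the tmq_list helpers used here are the standard ones from taos.h:

```c
#include <stdio.h>
#include "taos.h"

// Print the topics the consumer is currently subscribed to,
// then free the list allocated inside tmq_subscription().
void print_subscription(tmq_t *tmq) {
  tmq_list_t *topics = NULL;
  int32_t code = tmq_subscription(tmq, &topics);
  if (code != 0) {
    fprintf(stderr, "tmq_subscription failed: %s\n", tmq_err2str(code));
    return;
  }
  int32_t n   = tmq_list_get_size(topics);
  char  **arr = tmq_list_to_c_array(topics);
  for (int32_t i = 0; i < n; i++) {
    printf("subscribed topic: %s\n", arr[i]);
  }
  tmq_list_destroy(topics);  // memory was allocated inside tmq_subscription
}
```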
|
||||
|
|
|
@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
|
|||
|
||||
| taos-jdbcdriver version | Major changes                                                                                                         | TDengine version  |
| :---------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---------------: |
| 3.2.5                   | Added committed() and assignment() methods for data subscription                                                       | 3.1.0.3 and later |
| 3.2.4                   | Added the enable.auto.commit parameter and the unsubscribe() method for data subscription over WebSocket connections.  | -                 |
| 3.2.3                   | Fixed ResultSet data-parsing failures in some cases                                                                    | -                 |
| 3.2.2                   | New feature: data subscription supports seek.                                                                          | 3.0.5.0 and later |
|
||||
|
@ -1022,14 +1023,19 @@ while(true) {
|
|||
#### Specifying the Subscription Offset

```java
// obtain the subscribed TopicPartition set
Set<TopicPartition> assignment() throws SQLException;
// obtain offsets
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;

// specify the offset to be used in the next poll
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```

Example code:
|
||||
|
@ -1055,6 +1061,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
|
|||
}
|
||||
```
|
||||
|
||||
#### Committing Offsets

When `enable.auto.commit` is false, offsets can be committed manually.

```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// asynchronous commit is only available over native connections
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
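A brief usage sketch under stated assumptions: `consumer` is a `TaosConsumer` created with `enable.auto.commit` set to false, `topic_meters` is a hypothetical topic, and `process` is an application-defined handler:

```java
try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
    consumer.subscribe(Collections.singletonList("topic_meters")); // hypothetical topic
    while (running) {
        ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
        for (ConsumerRecord<ResultBean> record : records) {
            process(record.value()); // application-defined processing
        }
        // commit only after the records have been processed, so a crash
        // before this line causes redelivery instead of data loss
        consumer.commitSync();
    }
}
```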
|
||||
|
||||
#### Closing the Subscription
|
||||
|
||||
```java
|
||||
|
|
|
@ -29,6 +29,10 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
|
|||
|
||||
The supported platforms are the same as those supported by the TDengine client driver.

:::note
Note that TDengine no longer supports 32-bit Windows.
:::

## Version Support

See the [version support list](../#版本支持)
|
||||
|
|
|
@ -143,6 +143,7 @@ phpize && ./configure --enable-swoole && make -j && make install
|
|||
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
|
||||
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
|
||||
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
|
||||
| `TDengine\TSDB_DATA_TYPE_VARBINARY` | varbinary |
|
||||
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
|
||||
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
|
||||
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
|
||||
|
|
|
@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={1} sys="Linux" />
|
||||
|
||||
[All downloads](../../releases/tdengine)

2. Extract the package

   Place the package in any directory that the current user can read and write, then run: `tar -xzvf TDengine-client-VERSION.tar.gz`
|
||||
|
|
|
@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
|
||||
<PkgListV3 type={8} sys="macOS" />
|
||||
|
||||
[All downloads](../../releases/tdengine)

2. Run the installer and follow the prompts, accepting the defaults, to complete the installation. If the installation is blocked, right-click or Ctrl-click the package and choose `Open`.
3. Configure taos.cfg
|
||||
|
||||
|
|
|
@ -3,9 +3,7 @@ import PkgListV3 from "/components/PkgListV3";
|
|||
1. Download the client installation package

<PkgListV3 type={4} sys="Windows" />

[All downloads](../../releases/tdengine)

2. Run the installer and follow the prompts, accepting the defaults, to complete the installation
3. Installation path
|
||||
|
||||
|
|
|
@ -42,11 +42,12 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 14 | NCHAR | user-defined | Records strings that may contain multi-byte characters, such as Chinese characters. Each NCHAR character occupies 4 bytes of storage. Strings are quoted with single quotes, and single quotes inside a string are escaped as `\'`. A size must be specified when using NCHAR; a column of type NCHAR(10) stores at most 10 NCHAR characters. An error is reported if a user string exceeds the declared length. |
| 15 | JSON | | JSON data type; only tags can be in JSON format |
| 16 | VARCHAR | user-defined | Alias for the BINARY type |
| 17 | GEOMETRY | user-defined | Geometry type |
| 18 | VARBINARY | user-defined | Variable-length binary data |

:::note

- The length of each table row cannot exceed 48 KB (64 KB since version 3.0.5.0). (Note: each BINARY/NCHAR/GEOMETRY column takes an additional 2 bytes of storage.)
- The length of each table row cannot exceed 48 KB (64 KB since version 3.0.5.0). (Note: each BINARY/NCHAR/GEOMETRY/VARBINARY column takes an additional 2 bytes of storage.)
- Although the BINARY type supports byte-level binary characters in the underlying storage, different programming languages do not guarantee consistent handling of binary data. It is therefore recommended to store only visible ASCII characters in BINARY columns and avoid invisible characters. Multi-byte data such as Chinese characters should be stored in NCHAR columns. If Chinese characters are forced into BINARY columns they may sometimes read and write correctly, but they carry no character-set information, which can easily lead to garbled or even corrupted data.
- The BINARY type can theoretically hold up to 16,374 bytes (since version 3.0.5.0: 65,517 bytes for data columns and 16,382 bytes for tag columns). BINARY only supports string input, and the string must be quoted with single quotes. A size must be specified: BINARY(20) defines a string of at most 20 single-byte characters, each occupying 1 byte of storage, for a fixed total of 20 bytes; an error is reported if the user string exceeds 20 bytes. Single quotes inside the string are escaped with a backslash followed by a single quote, i.e. `\'`.
- GEOMETRY columns have a maximum length of 65,517 bytes for data columns and 16,382 bytes for tag columns. 2D POINT, LINESTRING, and POLYGON subtypes are supported. Lengths are computed as shown in the following table:
|
||||
|
@ -58,6 +59,7 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |

- Numeric literals in SQL statements are treated as integer or floating point depending on whether they contain a decimal point or use scientific notation, so beware of out-of-range values for the corresponding type. For example, 9999999999999999999 is considered to overflow the upper bound of the long integer type, while 9999999999999999999.0 is treated as a valid floating-point number.
- VARBINARY is a data type for storing binary data, with a maximum length of 65,517 bytes for data columns and 16,382 bytes for tag columns. Binary data can be written via SQL or schemaless (converted to a string starting with \x), or via stmt (binary can be used directly). It is displayed as hexadecimal prefixed with \x. A usage sketch follows this note.
|
||||
|
||||
:::
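A minimal sketch of the VARBINARY behavior described in the note above; the table and column names are illustrative:

```sql
-- hypothetical table with a VARBINARY data column
CREATE TABLE vb_demo (ts TIMESTAMP, payload VARBINARY(16));
-- write binary data as a \x-prefixed hexadecimal string
INSERT INTO vb_demo VALUES (NOW, '\x98f46e');
-- the value is displayed as \x-prefixed hexadecimal
SELECT payload FROM vb_demo;
```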
|
||||
|
||||
|
|
|
@ -7,9 +7,9 @@ description: detailed syntax for querying data

## Query Syntax
|
||||
|
||||
```sql
|
||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
|
||||
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
|
||||
|
||||
SELECT [DISTINCT] select_list
|
||||
SELECT [hints] [DISTINCT] [TAGS] select_list
|
||||
from_clause
|
||||
[WHERE condition]
|
||||
[partition_by_clause]
|
||||
|
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
|
|||
[LIMIT limit_val [OFFSET offset_val]]
|
||||
[>> export_file]
|
||||
|
||||
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
|
||||
|
||||
hint:
|
||||
BATCH_SCAN | NO_BATCH_SCAN
|
||||
|
||||
select_list:
|
||||
select_expr [, select_expr] ...
|
||||
|
||||
|
@ -70,6 +75,29 @@ order_expr:
|
|||
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
|
||||
```
|
||||
|
||||
## Hints
|
||||
|
||||
Hints are a means for users to control query optimization for an individual statement. A hint is ignored automatically when it does not apply to the current statement. The specifics are as follows:

- The hints syntax starts with `/*+` and ends with `*/`; whitespace may appear before and after.
- The hints syntax may only follow the SELECT keyword.
- Each hints clause may contain multiple hints separated by spaces; when hints conflict or are identical, the first one wins.
- When one hint in a hints clause is invalid, the valid hints before the error still take effect, and the invalid hint and those after it are ignored.
- hint_param_list are the parameters of each hint and vary by hint.

The currently supported hints are listed below:

| **Hint**      | **Parameters** | **Description**            | **Scope**                  |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN    | none           | use batched table scans    | supertable JOIN statements |
| NO_BATCH_SCAN | none           | use sequential table scans | supertable JOIN statements |

Example:
|
||||
|
||||
```sql
|
||||
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
|
||||
```
|
||||
|
||||
## Select List

A query can return some or all columns as its result. Both data columns and tag columns may appear in the select list.
|
||||
|
@ -132,6 +160,16 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
|
|||
|
||||
:::
|
||||
|
||||
### Tag Queries

When only tag columns are selected, the `TAGS` keyword returns the tag columns of all child tables; each child table contributes exactly one row of tag values.

Return the tag columns of all child tables:

```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
|
||||
|
||||
### Result Set Column Names

In the `SELECT` clause, if the column names of the result set are not specified, they default to the expression names in the `SELECT` clause. Users can also use `AS` to rename the columns in the result set. For example:
|
||||
|
@ -167,7 +205,7 @@ SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tag
|
|||
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
||||
```
|
||||
|
||||
Both of the above queries support only tag (TAGS) filter conditions in the WHERE clause. For example:
Both of the above queries support only tag (TAGS) filter conditions in the WHERE clause.
|
||||
|
||||
**\_QSTART/\_QEND**
|
||||
|
||||
|
@ -209,8 +247,7 @@ TDengine 支持基于时间戳主键的 INNER JOIN,规则如下:
|
|||
3. For supertables, in addition to the equality condition on the timestamp primary key, the ON clause also requires one-to-one matching equality conditions on tag columns; OR conditions are not supported.
4. Tables participating in a JOIN must all be of the same kind: all supertables, all child tables, or all regular tables.
5. Subqueries are supported on both sides of a JOIN.
6. Up to 10 tables may participate in a JOIN.
7. Mixing JOIN with the FILL clause is not supported.
6. Mixing JOIN with the FILL clause is not supported.
|
||||
|
||||
## GROUP BY
|
||||
|
||||
|
@ -301,6 +338,12 @@ SELECT TODAY();
|
|||
SELECT TIMEZONE();
|
||||
```
|
||||
|
||||
### Getting the Current User
|
||||
|
||||
```sql
|
||||
SELECT CURRENT_USER();
|
||||
```
|
||||
|
||||
## Regular Expression Filtering

### Syntax
|
||||
|
@ -354,7 +397,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN Clause

TDengine supports inner joins based on the timestamp primary key; that is, the JOIN condition must include the timestamp primary key. As long as this requirement is met, regular tables, child tables, supertables, and subqueries can be freely inner-joined with each other, with no limit on the number of tables.
TDengine supports inner joins based on the timestamp primary key; that is, the JOIN condition must include the timestamp primary key. As long as this requirement is met, regular tables, child tables, supertables, and subqueries can be freely inner-joined with each other, with no limit on the number of tables; all other join conditions must be combined with the primary-key condition using AND.

JOIN between regular tables:
|
||||
|
||||
|
|
|
@ -48,4 +48,6 @@ SELECT * FROM information_schema.INS_INDEXES
|
|||
|
||||
6. Indexes cannot be created on regular tables or child tables.

7. If a tag column has few unique values, creating an index on it is not recommended; the benefit in that case is minimal.

8. For a newly created supertable, an index with a randomly generated name (indexNewName) is created on the first tag column; the naming rule is the name of tag0 plus 23 bytes. It can be looked up in the system tables and dropped as needed, and it behaves like an index on any other tag column.
|
||||
|
|
|
@ -402,7 +402,7 @@ CAST(expr AS type_name)
|
|||
|
||||
**Return type**: the type specified in CAST (type_name).

**Applicable data types**: the input parameter expression can be of any type except JSON.
**Applicable data types**: the input parameter expr can be of any type except JSON and VARBINARY. If type_name is VARBINARY, expr can only be of VARCHAR type.
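A small sketch of the VARBINARY restriction described above; the table and column names are illustrative:

```sql
-- hypothetical table with a VARCHAR column
CREATE TABLE cast_demo (ts TIMESTAMP, v VARCHAR(32));
-- allowed: VARCHAR can be cast to VARBINARY
SELECT CAST(v AS VARBINARY(32)) FROM cast_demo;
-- not allowed: non-VARCHAR types cannot be cast to VARBINARY
-- SELECT CAST(ts AS VARBINARY(32)) FROM cast_demo;
```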
|
||||
|
||||
**Nested subquery support**: applies to both inner and outer queries.
|
||||
|
||||
|
@ -1266,6 +1266,14 @@ SELECT SERVER_STATUS();
|
|||
|
||||
**Description**: checks whether all dnodes on the server side are online; if so, it returns success, otherwise it returns an error indicating that a connection cannot be established.

### CURRENT_USER

```sql
SELECT CURRENT_USER();
```

**Description**: gets the current user.
|
||||
|
||||
|
||||
## Geometry Functions
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ select max(current) from meters partition by location interval(10m)
|
|||
|
||||
## Windowed Queries

TDengine supports aggregation queries over time-window partitions. For example, a temperature sensor may sample once per second while the 10-minute average temperature is needed; in this scenario a window clause can be used to obtain the desired result. The window clause partitions the queried data set into query subsets by window and aggregates over them. There are four kinds of windows: time windows, status windows, session windows, and condition windows (event windows). Time windows are further divided into sliding and tumbling time windows.
TDengine supports aggregation queries over time-window partitions. For example, a temperature sensor may sample once per second while the 10-minute average temperature is needed; in this scenario a window clause can be used to obtain the desired result. The window clause partitions the queried data set into query subsets by window and aggregates over them. There are four kinds of windows: time windows, status windows, session windows, and event windows. Time windows are further divided into sliding and tumbling time windows.

The window clause syntax is as follows:
|
||||
|
||||
|
|
|
@ -201,7 +201,6 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项
|
|||
For an existing supertable, the column schema is checked as follows:
1. Check whether the column schema matches; if not, type conversion is performed automatically. Currently an error is reported only when the data length exceeds 4096 bytes; all other cases can be converted.
2. Check whether the number of columns is the same. If it differs, the mapping between the supertable columns and the subquery columns must be specified explicitly, otherwise an error is reported. If it is the same, the mapping may be specified or omitted; if omitted, columns are mapped by position.
3. At least one tag must be customized, otherwise an error is reported. See Custom TAG for details.

## Custom TAG
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@ description: TDengine 保留关键字的详细列表
|
|||
|
||||
- MATCH
|
||||
- MAX_DELAY
|
||||
- MAX_SPEED
|
||||
- BWLIMIT
|
||||
- MAXROWS
|
||||
- MERGE
|
||||
- META
|
||||
|
|
|
@ -22,6 +22,14 @@ SHOW CLUSTER;
|
|||
|
||||
Shows information about the current cluster

## SHOW CLUSTER ALIVE

```sql
SHOW CLUSTER ALIVE;
```

Queries whether the current cluster is available. Return values: 0: unavailable; 1: fully available; 2: partially available (some nodes in the cluster are offline, but the remaining nodes can still be used normally)
|
||||
|
||||
## SHOW CONNECTIONS
|
||||
|
||||
```sql
|
||||
|
|
|
@ -1,138 +0,0 @@
|
|||
---
|
||||
sidebar_label: Access Control
title: Access Control
description: access control features available only in the enterprise edition
---

This section describes how to perform access-control operations in TDengine. Access control is a feature exclusive to TDengine Enterprise; this section lists only a few basic operations as examples. For richer access-control capabilities, contact the TDengine sales or marketing team.
|
||||
|
||||
## Creating Users

```sql
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
```

Creates a user.

user_name may be at most 23 bytes long.

password may be at most 31 bytes long. Valid characters are "a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/"; single and double quotes, apostrophes, backslashes, and spaces are not allowed, and the password may not be empty.

SYSINFO indicates whether the user may view system information: 1 means the user may, 0 means the user may not. System information includes server configuration, information about the various server nodes (such as DNODE and QNODE), storage-related information, and so on. The default is that system information may be viewed.

For example, create a user test with password 123456 who may view system information:
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
## Viewing Users

```sql
SHOW USERS;
```

Shows user information.
|
||||
|
||||
```sql
|
||||
taos> show users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001657s)
|
||||
```
|
||||
|
||||
User information can also be viewed by querying the INFORMATION_SCHEMA.INS_USERS system table, for example:
|
||||
|
||||
```sql
|
||||
taos> select * from information_schema.ins_users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001953s)
|
||||
```
|
||||
|
||||
## Dropping Users
|
||||
|
||||
```sql
|
||||
DROP USER user_name;
|
||||
```
|
||||
|
||||
## Modifying Users
|
||||
|
||||
```sql
|
||||
ALTER USER user_name alter_user_clause
|
||||
|
||||
alter_user_clause: {
|
||||
PASS 'literal'
|
||||
| ENABLE value
|
||||
| SYSINFO value
|
||||
}
|
||||
```
|
||||
|
||||
- PASS: changes the user's password.
- ENABLE: enables or disables the user; 1 enables the user, 0 disables the user.
- SYSINFO: controls whether the user may view system information; 1 means the user may, 0 means the user may not.

For example, disable the user test:
|
||||
|
||||
```sql
|
||||
taos> alter user test enable 0;
|
||||
Query OK, 0 of 0 rows affected (0.001160s)
|
||||
```
|
||||
|
||||
## Granting Privileges
|
||||
|
||||
```sql
|
||||
GRANT privileges ON priv_level TO user_name
|
||||
|
||||
privileges : {
|
||||
ALL
|
||||
| priv_type [, priv_type] ...
|
||||
}
|
||||
|
||||
priv_type : {
|
||||
READ
|
||||
| WRITE
|
||||
}
|
||||
|
||||
priv_level : {
|
||||
dbname.*
|
||||
| *.*
|
||||
}
|
||||
```
|
||||
|
||||
Grants privileges to a user. The grant feature is available only in the enterprise edition.

Privileges can be granted down to the DATABASE level; the available privileges are READ and WRITE.

TDengine has two classes of users: super users and regular users. The super user is created as root by default and has all privileges. Users created by the super user are regular users. Without any grants, a regular user can create databases and has full privileges on the databases it creates, including dropping and altering them and querying and writing time-series data. A super user can grant a regular user read and write privileges on other databases so that the user can read and write data in those databases, but the user cannot drop or alter those databases.

For non-DATABASE objects such as USER, DNODE, UDF, and QNODE, regular users have read-only access (generally SHOW commands) and cannot create or modify them. A grant example follows.
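A minimal sketch using the grammar above; the database name db1 is illustrative and test is the user created earlier:

```sql
-- grant read-only access on db1 to user test
GRANT READ ON db1.* TO test;
-- grant both read and write on all databases
GRANT READ, WRITE ON *.* TO test;
```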
|
||||
|
||||
## Revoking Privileges
|
||||
|
||||
```sql
|
||||
REVOKE privileges ON priv_level FROM user_name
|
||||
|
||||
privileges : {
|
||||
ALL
|
||||
| priv_type [, priv_type] ...
|
||||
}
|
||||
|
||||
priv_type : {
|
||||
READ
|
||||
| WRITE
|
||||
}
|
||||
|
||||
priv_level : {
|
||||
dbname.*
|
||||
| *.*
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Revokes privileges from a user. The grant feature is available only in the enterprise edition.
|
|
@ -20,6 +20,9 @@ index_option:
|
|||
functions:
|
||||
function [, function] ...
|
||||
```
|
||||
### Tag Index

[Tag index](../tag-index)
|
||||
|
||||
### SMA 索引
|
||||
|
|
@ -17,7 +17,7 @@ conn_id can be obtained via `SHOW CONNECTIONS`.

## Terminating Queries

```sql
KILL QUERY kill_id;
KILL QUERY 'kill_id';
```

kill_id can be obtained via `SHOW QUERIES`; a short sketch follows.
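A brief usage sketch; the id shown is an illustrative value of the kind returned by `SHOW QUERIES`, not a real one:

```sql
SHOW QUERIES;
-- suppose the listing shows a query with kill_id '12:0.3' (illustrative)
KILL QUERY '12:0.3';
```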
|
||||
|
|
|
@ -180,7 +180,7 @@ AllowWebSockets
|
|||
node_exporter is an exporter of machine metrics. See [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
- Supports Prometheus remote_read and remote_write
  remote_read and remote_write are Prometheus's cluster solution for separating data reads and writes. See [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
- Gets the VGroup ID of the virtual node group (VGroup) a table belongs to. For more information about virtual node groups (VGroups), see the [architecture document](/tdinternal/arch/#主要逻辑单元).
- Gets the VGroup ID of the virtual node group (VGroup) a table belongs to.
|
||||
|
||||
## 接口
|
||||
|
||||
|
@ -245,7 +245,7 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
|
|||
|
||||
### Getting a Table's VGroup ID

The VGroup ID of a table can be obtained from the HTTP endpoint `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>`. For more information about virtual node groups (VGroups), see the [architecture document](/tdinternal/arch/#主要逻辑单元).
The VGroup ID of a table can be obtained from the HTTP endpoint `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>`.
|
||||
|
||||
## 内存使用优化方法
|
||||
|
||||
|
|
|
@ -11,11 +11,7 @@ taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能
|
|||
|
||||
## Installation

There are two ways to install taosBenchmark:

- taosBenchmark is installed automatically with the official TDengine installation package; see [TDengine installation](/operation/pkg-install) for details.

- Build and install taos-tools separately; see the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
- taosBenchmark is installed automatically with the official TDengine installation package

## Running
|
||||
|
||||
|
|
|
@ -105,6 +105,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
|
|||
-L, --loose-mode Using loose mode if the table name and column name
|
||||
use letter and number only. Default is NOT.
|
||||
-n, --no-escape No escape char '`'. Default is using it.
|
||||
-Q, --dot-replace Replace dot character with underline character in
|
||||
the table name.
|
||||
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
|
||||
8.
|
||||
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
|
||||
|
|
|
@ -15,7 +15,7 @@ TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬
|
|||
|
||||
- A single-node TDengine server or a multi-node [TDengine] cluster, plus a [Grafana] server. This dashboard requires TDengine 3.0.0.0 or later with the monitoring service enabled; for configuration see [TDengine monitoring configuration](../config/#监控相关).
- taosAdapter installed and running; see the [taosAdapter user manual](../taosadapter) for details.
- taosKeeper installed and running; see the [taosKeeper user manual](../taosKeeper) for details.
- taosKeeper installed and running. Note that the monitor-related options must be enabled in taos.cfg; see the [taosKeeper user manual](../taosKeeper) for details.

Record the following information:
|
||||
|
||||
|
@ -120,7 +120,7 @@ chmod +x TDinsight.sh
|
|||
./TDinsight.sh
|
||||
```
|
||||
|
||||
This script automatically downloads the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and the [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json), and converts the configurable command-line options into a [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file for automated deployment and updates. Using the alerting options the script provides, you also get built-in support for Alibaba Cloud SMS alert notifications.
This script automatically downloads the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and the [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json), and converts the configurable command-line options into a [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file for automated deployment and updates.

Assume TDengine and Grafana run as default services on the same host. Run `./TDinsight.sh` and open Grafana in a browser to see the TDinsight dashboard.
|
||||
|
||||
|
@ -152,9 +152,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
|
||||
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
|
||||
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
|
||||
|
||||
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
|
||||
|
||||
```
|
||||
|
||||
Most command-line options can equally be set through environment variables.
|
||||
|
@ -172,7 +169,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight dashboard `uid`. [default: tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [default: TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | Whether the provisioned dashboard is editable. [default: false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply an external notifier uid to the TDinsight dashboard. |

:::note
The new version of the plugin uses the Grafana unified alerting feature; the `-E` option is no longer supported.
:::

Assume you run a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Run the script:
|
||||
|
||||
|
@ -180,18 +180,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
|||
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||
```
|
||||
|
||||
The "-E" option lets TDinsight be configured from the command line to use an existing notification channel. Assuming your Grafana user and password are `admin:admin`, use the following command to get the `uid` of an existing notification channel:

```bash
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
```

Use the `uid` value obtained above as the `-E` input.

```bash
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```

To monitor multiple TDengine clusters, set up multiple TDinsight dashboards. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options must be changed to non-default names, and if the built-in SMS alerting feature is used, `-N` and `-L` should also change.
|
||||
|
||||
```bash
|
||||
|
|
|
@ -33,8 +33,10 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
|
|||
|
||||
In the schemaless line protocol, each data item in the field_set must describe its own data type. Specifically:

- Double quotes on both sides indicate the BINARY(32) type, e.g. `"abc"`.
- Double quotes with an L prefix indicate the NCHAR(32) type, e.g. `L"报错信息"`.
- Double quotes on both sides indicate the VARCHAR type, e.g. `"abc"`.
- Double quotes with an L or l prefix indicate the NCHAR type, e.g. `L"报错信息"`.
- Double quotes with a G or g prefix indicate the GEOMETRY type, e.g. `G"Point(4.343 89.342)"`.
- Double quotes with a B or b prefix indicate the VARBINARY type; the quoted content may be hexadecimal starting with \x or a plain string, e.g. `B"\x98f46e"` `B"hello"`.
- Spaces, equals signs (=), commas (,), double quotes ("), and backslashes (\) must be escaped with a preceding backslash (\). (All of these refer to ASCII half-width characters.) The escaping rules are:

| **No.** | **Field** | **Characters to escape** |
|
||||
|
@ -106,6 +108,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
8. To improve write efficiency, the field_set order within one supertable is assumed to be the same by default (the first record contains all fields, and later records follow the same order). If the order differs, the smlDataFormat parameter must be set to false; otherwise data is written assuming the same order and the data in the database will be wrong. Starting with 3.0.3.0 the order is detected automatically and this option is deprecated.
9. Because table names created via SQL do not support dots (.), schemaless also handles dots: if a table name auto-created by schemaless contains a dot (.), it is automatically replaced with an underscore (\_). If the child table name is specified manually and contains a dot (.), it is likewise converted to an underscore (\_).
10. taos.cfg adds the smlTsDefaultName option (a string), which takes effect only on the client side; when set, the timestamp column name of schemaless auto-created tables follows this option. If unset, the default is _ts.
11. Supertable and child table names written via schemaless are case-sensitive.

:::tip
All schemaless processing logic still follows TDengine's underlying limits on data structures; for example, the total length of each row of data cannot exceed
|
||||
|
|
|
@ -13,12 +13,7 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的
|
|||
|
||||
## Installation

There are two ways to install taosKeeper:
Ways to install taosKeeper:

- taosKeeper is installed automatically with the official TDengine installation package; see [TDengine installation](/operation/pkg-install) for details.

- Build and install taosKeeper separately; see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
- taosKeeper is installed automatically with the official TDengine installation package

## Configuration and Running
|
||||
|
||||
|
|
|
@ -1,207 +0,0 @@
|
|||
---
|
||||
title: Install and Uninstall
description: install, uninstall, start, stop, and upgrade
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

This section goes deeper into installation and uninstallation and covers upgrade considerations.

## Installation

For installation, see [Get Started with the Installation Package](../../get-started/package)

## Installation Directory Layout

After TDengine is installed successfully, the main installation directory is /usr/local/taos, with the following contents:
|
||||
|
||||
```
$ cd /usr/local/taos
$ ll
total 28
drwxr-xr-x  7 root root 4096 Feb 22 09:34 ./
drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
drwxr-xr-x  2 root root 4096 Feb 22 09:34 bin/
drwxr-xr-x  2 root root 4096 Feb 22 09:34 cfg/
lrwxrwxrwx  1 root root   13 Feb 22 09:34 data -> /var/lib/taos/
drwxr-xr-x  2 root root 4096 Feb 22 09:34 driver/
drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
drwxr-xr-x  2 root root 4096 Feb 22 09:34 include/
lrwxrwxrwx  1 root root   13 Feb 22 09:34 log -> /var/log/taos/
```
|
||||
|
||||
- The configuration directory, data directory, and log directory are generated automatically.
- Default configuration file: /etc/taos/taos.cfg, symlinked to /usr/local/taos/cfg/taos.cfg;
- Default data directory: /var/lib/taos, symlinked to /usr/local/taos/data;
- Default log directory: /var/log/taos, symlinked to /usr/local/taos/log;
- Executables under /usr/local/taos/bin are symlinked into /usr/bin;
- Dynamic libraries under /usr/local/taos/driver are symlinked into /usr/lib;
- Header files under /usr/local/taos/include are symlinked into /usr/include;
|
||||
|
||||
## Uninstallation

<Tabs>
<TabItem label="Uninstall with apt-get" value="aptremove">

To uninstall TDengine:

```
$ sudo apt-get remove tdengine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
  tdengine
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n] y
(Reading database ... 135625 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!

```

To uninstall taosTools:

```
$ sudo apt remove taostools
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
  taostools
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n]
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```

</TabItem>
<TabItem label="Uninstall Deb package" value="debuninst">

To uninstall TDengine:

```
$ sudo dpkg -r tdengine
(Reading database ... 120119 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!

```

To uninstall taosTools:

```
$ sudo dpkg -r taostools
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```

</TabItem>

<TabItem label="Uninstall RPM package" value="rpmuninst">

To uninstall TDengine:

```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```

To uninstall taosTools:

```
sudo rpm -e taostools
taosToole is removed successfully!
```

</TabItem>

<TabItem label="Uninstall tar.gz package" value="taruninst">

To uninstall TDengine:

```
$ rmtaos
TDengine is removed successfully!
```

To uninstall taosTools:

```
$ rmtaostools
Start to uninstall taos tools ...

taos tools is uninstalled successfully!
```

</TabItem>

<TabItem label="Uninstall on Windows" value="windows">
In the C:\TDengine directory, run the uninstaller unins000.exe to uninstall TDengine.
</TabItem>

<TabItem label="Uninstall on Mac" value="mac">

To uninstall TDengine:

```
$ rmtaos
TDengine is removed successfully!
```

</TabItem>
</Tabs>
|
||||
|
||||
:::info

- TDengine ships several kinds of installation packages, but it is best not to mix the tar.gz package with deb or rpm packages on the same system; otherwise they interfere with each other and cause problems.

- After installing a deb package, if part of the installation directory is deleted by hand, uninstalling or reinstalling may fail. In that case, clear the installation information of the TDengine package by running:

```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```

Then reinstall.

- After installing an rpm package, if part of the installation directory is deleted by hand, uninstalling or reinstalling may fail. In that case, clear the installation information of the TDengine package by running:

```
$ sudo rpm -e --noscripts tdengine
```

Then reinstall.

:::
|
||||
|
||||
## Files Kept on Uninstall and Update

When the package is uninstalled, the configuration file, database files, and log files are kept: /etc/taos/taos.cfg, /var/lib/taos, and /var/log/taos. If you are sure they are no longer needed, you can delete them manually, but be careful: once deleted, the data is permanently lost and cannot be recovered!

For an update install, if the default configuration file (/etc/taos/taos.cfg) exists, the existing configuration file is kept, and the configuration file shipped in the package is renamed to taos.cfg.orig and saved under /usr/local/taos/cfg/ as a reference sample for setting parameters; if no configuration file exists, the one shipped in the package is used.

## Upgrading
Upgrading happens at two levels: upgrading the installation package and upgrading a running instance.

To upgrade the installation package, follow the installation and uninstallation steps above: uninstall the old version first, then install the new one.

Upgrading a running instance is much more involved. First note the version number: TDengine version numbers currently have four parts, e.g. 2.4.0.14 and 2.4.0.16; a running instance can only be upgraded when the first three parts match (only the fourth part differs). The upgrade steps are:
- Stop data ingestion
- Make sure all data is flushed to disk, i.e. written to the time-series database
- Stop the TDengine cluster
- Uninstall the old version and install the new version
- Restart the TDengine cluster
- Run simple queries to confirm no old data is lost
- Run simple writes to confirm the TDengine cluster is usable
- Resume writing business data

:::warning
TDengine does not guarantee that lower versions are compatible with data from higher versions, so downgrading is never recommended
|
||||
|
||||
:::
|
|
@ -0,0 +1,80 @@
|
|||
---
|
||||
title: Cluster Maintenance
description: TDengine offers several maintenance tools to keep a cluster healthier and more efficient
---

To keep a cluster running healthier and more efficiently, TDengine Enterprise provides maintenance tools that help system administrators operate the cluster better.
|
||||
|
||||
## Data Compaction

TDengine serves many different write workloads, and under some of them TDengine's storage can amplify the stored data or leave holes in the data files. This hurts storage efficiency on the one hand and query efficiency on the other. To address this, TDengine Enterprise provides a data-compaction feature, DATA COMPACT, which reorganizes stored data files, removes file holes and stale data, and improves data organization, thereby improving storage and query efficiency.

**Syntax**

```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
```

**Effects**

- Scans and compacts all data files of all VNODEs in all VGROUPs of the specified DB
- COMPACT removes deleted data and the data of dropped tables
- COMPACT merges multiple STT files
- The start time of the data to COMPACT can be specified with the start with keyword
- The end time of the data to COMPACT can be specified with the end with keyword

**Additional notes** (see the example below)

- COMPACT is asynchronous: the COMPACT command returns without waiting for the compaction to finish. If a new COMPACT task is issued while a previous COMPACT has not finished, the command waits for the previous task to finish before returning.
- COMPACT may block writes, but it does not block queries
- The progress of a COMPACT cannot be observed
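A minimal sketch using the syntax above; the database name and timestamps are illustrative values:

```sql
-- compact the whole database
COMPACT DATABASE power;
-- compact only data within a time range (illustrative timestamps)
COMPACT DATABASE power start with '2023-01-01 00:00:00' end with '2023-03-31 23:59:59';
```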
|
||||
|
||||
## Cluster Load Rebalancing

When one or more nodes of a multi-replica cluster restart after an upgrade or for other reasons, the dnodes in the cluster may end up with an unbalanced load; in the extreme case all vgroup leaders sit on the same dnode. To fix this, use the following command

```sql
balance vgroup leader;
```

**Function**

Distributes the leaders of all vgroups evenly across their replica nodes. The command forces the vgroups to re-elect; by changing vgroup leaders during re-election, the leaders end up evenly distributed.

**Caveats**

Raft elections are inherently random, so the even distribution produced by re-election is also probabilistic and not perfectly even. **The side effect of this command is an impact on queries and writes**: while a vgroup is re-electing, from the start of the election until the new leader is elected, that vgroup can neither write nor be queried. The election usually completes within seconds. All vgroups re-elect one by one.
|
||||
|
||||
## Restoring a Data Node

In a multi-node cluster with three replicas, if a dnode's disk is damaged, the dnode exits automatically, while the other dnodes in the cluster continue to serve writes and queries.

After the damaged disk has been replaced, the `restore dnode` command can bring a dnode that previously exited back into the cluster by restoring some or all of the logical nodes on it. This feature relies on data replication from the other replicas, so it only works when the cluster has at least 3 dnodes and the replica count is 3.

```sql
restore dnode <dnode_id>;# restore the mnode, all vnodes and the qnode on the dnode
restore mnode on dnode <dnode_id>;# restore the mnode on the dnode
restore vnode on dnode <dnode_id> ;# restore all vnodes on the dnode
restore qnode on dnode <dnode_id>;# restore the qnode on the dnode
```

**Limitations**
- This feature restores through the existing replication mechanism; it is not disaster recovery or backup restore. For the mnode or vnode to be restored, the prerequisite is that the other two replicas of that mnode or vnode still work normally.
- The command cannot repair individual damaged or missing files in the data directory. If individual files or data inside an mnode or vnode are damaged, the damaged file or block cannot be restored on its own; you can instead clear all data of that mnode/vnode and then restore it.
|
||||
|
||||
|
||||
## Vgroup Splitting (Scale Out)

When a vgroup's CPU or disk usage is overloaded because it has too many child tables, you can, after adding dnodes, use the `split vgroup` command to split the vgroup into two. After the split, the two new vgroups take over the read and write service previously provided by one vgroup. This is the scale-out capability TDengine provides for enterprise users.

```sql
split vgroup <vgroup_id>
```

**Caveats**
- For vgroups of single-replica databases, the total disk usage of historical time-series data may double after the split. Before performing the operation, add dnodes to make sure the cluster has enough CPU and disk resources, to avoid resource shortages.
- The command is a DB-level transaction; while it runs, other management transactions on the current DB are rejected. Other DBs in the cluster are not affected.
- While the split task runs, reads and writes continue to be served, although short, noticeable interruptions of read/write traffic may occur.
- Streams and subscriptions are not supported during the split. After the split finishes, the historical WAL is cleared.
- Node crash/restart fault tolerance is supported during the split; node disk-failure fault tolerance is not.
|
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
title: Multi-Level Storage
---

## Multi-Level Storage

Note: multi-level storage is supported only in the enterprise edition.

By default, TDengine stores all data under /var/lib/taos, with each vnode's data files in separate directories under it. To expand storage space, minimize file-read bottlenecks, and increase data throughput, TDengine lets multiple mounted disks be used simultaneously via the dataDir system parameter.

In addition, TDengine offers tiered storage: data of different time ranges is stored in directories on different mounted media, so data of different "temperatures" lives on different storage media, making full use of storage and saving cost. For example, the most recently collected data is accessed frequently and demands high read performance from the disks, so it can be placed on SSDs; data older than a certain age is queried less often and can be stored on cheaper HDDs.

Multi-level storage supports 3 levels, with up to 16 mount points per level.

TDengine multi-level storage is configured as follows (in the configuration file /etc/taos/taos.cfg):
|
||||
|
||||
```
|
||||
dataDir [path] <level> <primary>
|
||||
```
|
||||
|
||||
- path: the directory path of the mount point
- level: the storage tier of the medium, 0, 1, or 2.
  Level 0 stores the newest data, level 1 the next newest, and level 2 the oldest; 0 is the default if omitted.
  Data flows between tiers as: level 0 -> level 1 -> level 2.
  Multiple disks may be mounted at the same tier, and data files of a tier are distributed across all disks of that tier.
  Note that movement of data across storage tiers is done automatically by the system; no user intervention is required.
- primary: whether this is the primary mount point, 0 (no) or 1 (yes); 1 is the default if omitted.

Only one primary mount point is allowed in the configuration (level=0, primary=1), for example:
|
||||
|
||||
```
|
||||
dataDir /mnt/data1 0 1
|
||||
dataDir /mnt/data2 0 0
|
||||
dataDir /mnt/data3 1 0
|
||||
dataDir /mnt/data4 1 0
|
||||
dataDir /mnt/data5 2 0
|
||||
dataDir /mnt/data6 2 0
|
||||
```
|
||||
|
||||
:::note

1. Multi-level storage does not allow gaps across levels; valid configurations are: level 0 only, level 0 + level 1, and level 0 + level 1 + level 2. Configuring level=0 and level=2 without level=1 is not allowed.
2. Never remove a mounted disk that is in use by hand; mounted disks currently do not support non-local network disks.
3. Multi-level storage currently does not support removing a disk that has already been mounted.

:::

## Level-0 Load Balancing

In multi-level storage there is exactly one primary mount point, which holds the system's most important metadata storage, and the home directories of all vnodes on the dnode also live on the primary mount point, so the dnode's write performance is limited by the I/O throughput of a single disk.

Starting with TDengine 3.1.0.0, if a dnode is configured with multiple level-0 mount points, the home directories of all vnodes on that dnode are distributed evenly across all level-0 mount points, which then share the write load. When network I/O and other processing resources are not the bottleneck, test results with a tuned cluster configuration show that the system's overall write capacity scales linearly with the number of level-0 mount points; that is, as the number of level-0 mount points grows, the system's write capacity grows proportionally.
|
||||
|
||||
## Mount-Point Selection Within a Tier

Normally, when TDengine needs to pick a mount point within a tier to create a new data file, it uses a round-robin policy. In practice, however, disks may differ in capacity, or have equal capacity but different amounts of data written, so the free space across disks becomes unbalanced, and a disk with very little space left may end up being picked. To solve this, starting with 3.1.1.0 a new option `minDiskFreeSize` was introduced: when the free space of a disk is less than or equal to this threshold, that disk is no longer selected for new data files. The unit of this option is bytes, and its value must be greater than 2 GB, i.e. mount points with less than 2 GB free are skipped. A configuration sketch follows.
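A sketch of the corresponding taos.cfg entry; the value (8 GB, expressed in bytes) is an illustrative threshold, not a recommendation:

```
minDiskFreeSize 8589934592
```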
|
|
@ -218,11 +218,11 @@ docker run -d \
|
|||
|
||||
### Importing a Dashboard

On the data source configuration page, you can import the TDinsight panel for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x` when importing.
On the data source configuration page, you can import the TDinsight panel for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x` when importing. Note that TDinsight for 3.x requires taoskeeper to be running and configured; see the [TDinsight user manual](/reference/tdinsight/).

![TDengine Database Grafana plugin import dashboard](./import_dashboard.webp)

The dashboard for TDengine 2.* has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). For other installation methods and usage instructions, see the [TDinsight user manual](/reference/tdinsight/).
The dashboard for TDengine 2.* has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167).

Other panels that use TDengine as a data source can be [searched here](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). An incomplete list follows:
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
|
|||
1. Linux operating system
2. Java 8 and Maven installed
3. Git, curl, and vi installed
4. TDengine installed and running. If not yet installed, see [Install and Uninstall](/operation/pkg-install)
4. TDengine installed and running.

## Installing Kafka
|
||||
|
||||
|
|
|
@ -0,0 +1,437 @@
|
|||
---
|
||||
sidebar_label: Seeq
title: Seeq
description: how to analyze time-series data with Seeq and TDengine
---

# Analyzing Time-Series Data with Seeq and TDengine

## Overview

Seeq is advanced analytics software for manufacturing and the Industrial Internet of Things (IIoT). Seeq supports innovative machine-learning features in process-manufacturing organizations. These features let organizations deploy their own or third-party machine-learning algorithms into the advanced analytics applications used by frontline process engineers and subject-matter experts, extending the effort of a single data scientist to many frontline workers.

Through the TDengine Java connector, Seeq can easily query the time-series data served by TDengine and provide visualization, analysis, prediction, and more.

### Installing Seeq

Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq website](https://www.seeq.com/customer-download).

### Installing and Starting Seeq Server
|
||||
|
||||
### Seeq Server 安装和启动
|
||||
|
||||
```
|
||||
tar xvzf seeq-server-xxx.tar.gz
|
||||
cd seeq-server-installer
|
||||
sudo ./install
|
||||
|
||||
sudo seeq service enable
|
||||
sudo seeq start
|
||||
```
|
||||
|
||||
### Installing and Starting Seeq Data Lab Server

Seeq Data Lab must be installed on a different server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, see the [Seeq documentation](https://support.seeq.com/space/KB/1034059842).
|
||||
|
||||
```
|
||||
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
|
||||
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
|
||||
sudo seeq config set Network/DataLab/Hostname localhost
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
|
||||
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
|
||||
|
||||
# If the main Seeq server is configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
|
||||
|
||||
# If the main Seeq server is NOT configured to listen over HTTPS
|
||||
sudo seeq config set Network/Webserver/Port <value>
|
||||
|
||||
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
|
||||
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
|
||||
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231
|
||||
```
|
||||
|
||||
## Installing a Local TDengine Instance

See the [official documentation](https://docs.taosdata.com/get-started/package/).

## Accessing TDengine Cloud

To connect Seeq to TDengine Cloud, register at https://cloud.taosdata.com and log in to see how to access TDengine Cloud.

## Configuring Seeq to Access TDengine

1. Check the data storage location
|
||||
|
||||
```
|
||||
sudo seeq config get Folders/Data
|
||||
```
|
||||
|
||||
2. Download the TDengine Java connector package from maven.org (the latest version is currently [3.2.4](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar)) and copy it into plugins\lib under the data storage location.

3. Restart the seeq server

```
sudo seeq restart
```

4. Enter the license

Visit ip:34216 in a browser and enter the license as instructed.
|
||||
|
||||
## Analyzing Time-Series Data in TDengine with Seeq

This section demonstrates how to use Seeq together with TDengine for time-series data analysis.

### Scenario

The example scenario is a power system: users collect electricity consumption data from power-station meters every day and store it in a TDengine cluster. Users now want to predict how power consumption will develop and buy more equipment to support it. Power consumption varies with monthly orders and also with the seasons; since this city is in the northern hemisphere, more electricity is used in summer. We simulate data to reflect these assumptions.

### Data Schema
|
||||
|
||||
```
|
||||
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
|
||||
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
|
||||
```
|
||||
|
||||
![Seeq demo schema](./seeq/seeq-demo-schema.webp)

### Generating the Data
|
||||
|
||||
```
|
||||
python mockdata.py
|
||||
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
|
||||
```
|
||||
The source code is hosted in a [GitHub repository](https://github.com/sangshuduo/td-forecasting).

### Analyzing Data with Seeq

#### Configuring the Data Source

Log in with a Seeq administrator account and create a new data source.

- Power
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerNum",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, num FROM meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Num",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- Goods
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerGoods",
|
||||
"Type": "CONDITION",
|
||||
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Goods",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Duration",
|
||||
"Value": "10days",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": [
|
||||
{
|
||||
"Name": "goods",
|
||||
"Value": "${columnResult}",
|
||||
"Column": "goods",
|
||||
"Uom": "string"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- Temperature
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "PowerNum",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, temperature FROM meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Temperature",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Using Seeq Workbench

Log in to the Seeq service page and create a new Seeq Workbench. By selecting data-source search results and choosing tools as needed, you can visualize the data or make predictions. See the [official knowledge base](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for detailed usage.

![Seeq Workbench](./seeq/seeq-demo-workbench.webp)

#### Further Analysis with Seeq Data Lab Server

Log in to the Seeq service page and create a new Seeq Data Lab to use Python programming or other machine-learning tools for more sophisticated data mining.
|
||||
|
||||
```Python
|
||||
from seeq import spy
|
||||
spy.options.compatibility = 189
|
||||
import pandas as pd
|
||||
import matplotlib
|
||||
import matplotlib.pyplot as plt
|
||||
import mlforecast
|
||||
import lightgbm as lgb
|
||||
from mlforecast.target_transforms import Differences
|
||||
from sklearn.linear_model import LinearRegression
|
||||
|
||||
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
|
||||
print(ds)
|
||||
|
||||
sig = ds.loc[ds['Name'].isin(['Num'])]
|
||||
print(sig)
|
||||
|
||||
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
|
||||
print("data.info()")
|
||||
data.info()
|
||||
print(data)
|
||||
#data.plot()
|
||||
|
||||
print("data[Num].info()")
|
||||
data['Num'].info()
|
||||
da = data['Num'].index.tolist()
|
||||
#print(da)
|
||||
|
||||
li = data['Num'].tolist()
|
||||
#print(li)
|
||||
|
||||
data2 = pd.DataFrame()
|
||||
data2['ds'] = da
|
||||
print('1st data2 ds info()')
|
||||
data2['ds'].info()
|
||||
|
||||
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
|
||||
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
|
||||
data2['y'] = li
|
||||
print('2nd data2 ds info()')
|
||||
data2['ds'].info()
|
||||
print(data2)
|
||||
|
||||
data2.insert(0, column = "unique_id", value="unique_id")
|
||||
|
||||
print("Forecasting ...")
|
||||
|
||||
forecast = mlforecast.MLForecast(
|
||||
models = lgb.LGBMRegressor(),
|
||||
freq = 1,
|
||||
lags=[365],
|
||||
target_transforms=[Differences([365])],
|
||||
)
|
||||
|
||||
forecast.fit(data2)
|
||||
predicts = forecast.predict(365)
|
||||
|
||||
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
|
||||
plt.show()
|
||||
```
|
||||
|
||||
Program output:

![Seeq forecast result](./seeq/seeq-forecast-result.webp)

### Configuring a Seeq Data Source to Connect to TDengine Cloud

Configuring a Seeq data source for TDengine Cloud is essentially no different from connecting to a local TDengine instance: log in to TDengine Cloud, choose "Programming - Java", and copy the JDBC URL containing the token string into the DatabaseJdbcUrl value of the Seeq Data Source.
Note that when using TDengine Cloud, the database name must be specified in SQL commands.

#### Example configuration with TDengine Cloud as the data source:
|
||||
|
||||
```
|
||||
{
|
||||
"QueryDefinitions": [
|
||||
{
|
||||
"Name": "CloudVoltage",
|
||||
"Type": "SIGNAL",
|
||||
"Sql": "SELECT ts, voltage FROM test.meters",
|
||||
"Enabled": true,
|
||||
"TestMode": false,
|
||||
"TestQueriesDuringSync": true,
|
||||
"InProgressCapsulesEnabled": false,
|
||||
"Variables": null,
|
||||
"Properties": [
|
||||
{
|
||||
"Name": "Name",
|
||||
"Value": "Voltage",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Interpolation Method",
|
||||
"Value": "linear",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
},
|
||||
{
|
||||
"Name": "Maximum Interpolation",
|
||||
"Value": "2day",
|
||||
"Sql": null,
|
||||
"Uom": "string"
|
||||
}
|
||||
],
|
||||
"CapsuleProperties": null
|
||||
}
|
||||
],
|
||||
"Type": "GENERIC",
|
||||
"Hostname": null,
|
||||
"Port": 0,
|
||||
"DatabaseName": null,
|
||||
"Username": "root",
|
||||
"Password": "taosdata",
|
||||
"InitialSql": null,
|
||||
"TimeZone": null,
|
||||
"PrintRows": false,
|
||||
"UseWindowsAuth": false,
|
||||
"SqlFetchBatchSize": 100000,
|
||||
"UseSSL": false,
|
||||
"JdbcProperties": null,
|
||||
"GenericDatabaseConfig": {
|
||||
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
|
||||
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
|
||||
"ResolutionInNanoseconds": 1000,
|
||||
"ZonedColumnTypes": []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Example Seeq Workbench with TDengine Cloud as the data source

![Seeq workbench with TDengine cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)

## Summary

By integrating Seeq and TDengine, you can fully exploit TDengine's efficient storage and query performance while benefiting from the powerful data visualization and analysis capabilities that Seeq offers.

This integration lets users take advantage of TDengine's high-performance time-series storage and retrieval to handle large volumes of data efficiently. At the same time, Seeq provides advanced analytics such as visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.

Taken together, Seeq and TDengine provide a comprehensive solution for time-series analysis across industries such as manufacturing, industrial IoT, and power systems. The combination of efficient storage and advanced analytics empowers users to unlock the full potential of their time-series data, drive operational improvements, and support predictive and planning analytics.
|
|
@ -10,6 +10,14 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.1.0.3
|
||||
|
||||
<Release type="tdengine" version="3.1.0.3" />
|
||||
|
||||
## 3.1.0.2
|
||||
|
||||
<Release type="tdengine" version="3.1.0.2" />
|
||||
|
||||
## 3.1.0.0
|
||||
|
||||
<Release type="tdengine" version="3.1.0.0" />
|
||||
|
|
|
@ -133,6 +133,7 @@
|
|||
<configuration>
|
||||
<source>8</source>
|
||||
<target>8</target>
|
||||
<encoding>UTF-8</encoding>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
|
|
|
@ -8,4 +8,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
|
|||
```
|
||||
|
||||
If the error Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path occurs,
check whether the TDengine client package is installed or TDengine was built and installed. If it is definitely installed and the error still occurs, add -Djava.library.path=/usr/local/lib after java on the command line to specify the path for locating shared libraries.
check whether the TDengine client package is installed or TDengine was built and installed. If it is definitely installed and the error still occurs, add -Djava.library.path=/usr/lib after java on the command line to specify the path for locating shared libraries.
|
||||
|
|
|
@ -32,12 +32,12 @@ static void queryDB(TAOS *taos, char *command) {
|
|||
taos_free_result(pSql);
|
||||
pSql = NULL;
|
||||
}
|
||||
|
||||
|
||||
pSql = taos_query(taos, command);
|
||||
code = taos_errno(pSql);
|
||||
if (0 == code) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (code != 0) {
|
||||
|
@ -63,7 +63,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
|
||||
if (taos == NULL) {
|
||||
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
|
||||
printf("failed to connect to server, reason:%s\n", taos_errstr(NULL));
|
||||
exit(1);
|
||||
}
|
||||
for (int i = 0; i < 100; i++) {
|
||||
|
@ -86,14 +86,14 @@ void Test(TAOS *taos, char *qstr, int index) {
|
|||
for (i = 0; i < 10; ++i) {
|
||||
sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", (uint64_t)(1546300800000 + i * 1000), i, i, i, i*10000000, i*1.0, i*2.0, "hello");
|
||||
printf("qstr: %s\n", qstr);
|
||||
|
||||
|
||||
// note: how do you wanna do if taos_query returns non-NULL
|
||||
// if (taos_query(taos, qstr)) {
|
||||
// printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
|
||||
// }
|
||||
TAOS_RES *result1 = taos_query(taos, qstr);
|
||||
if (result1 == NULL || taos_errno(result1) != 0) {
|
||||
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
|
||||
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
|
||||
taos_free_result(result1);
|
||||
exit(1);
|
||||
} else {
|
||||
|
@ -107,7 +107,7 @@ void Test(TAOS *taos, char *qstr, int index) {
|
|||
sprintf(qstr, "SELECT * FROM m1");
|
||||
result = taos_query(taos, qstr);
|
||||
if (result == NULL || taos_errno(result) != 0) {
|
||||
printf("failed to select, reason:%s\n", taos_errstr(result));
|
||||
printf("failed to select, reason:%s\n", taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
exit(1);
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ int main(int argc, char *argv[])
|
|||
taos_free_result(result);
|
||||
|
||||
// create table
|
||||
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
|
||||
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10), varbin varbinary(16))";
|
||||
result = taos_query(taos, sql);
|
||||
code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
|
@ -68,6 +68,7 @@ int main(int argc, char *argv[])
|
|||
double f8;
|
||||
char bin[40];
|
||||
char blob[80];
|
||||
int8_t varbin[16];
|
||||
} v = {0};
|
||||
|
||||
int32_t boolLen = sizeof(int8_t);
|
||||
|
@ -80,7 +81,7 @@ int main(int argc, char *argv[])
|
|||
int32_t ncharLen = 30;
|
||||
|
||||
stmt = taos_stmt_init(taos);
|
||||
TAOS_MULTI_BIND params[10];
|
||||
TAOS_MULTI_BIND params[11];
|
||||
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
params[0].buffer_length = sizeof(v.ts);
|
||||
params[0].buffer = &v.ts;
|
||||
|
@ -152,9 +153,19 @@ int main(int argc, char *argv[])
|
|||
params[9].is_null = NULL;
|
||||
params[9].num = 1;
|
||||
|
||||
int8_t tmp[16] = {'a', 0, 1, 13, '1'};
|
||||
int32_t vbinLen = 5;
|
||||
memcpy(v.varbin, tmp, sizeof(v.varbin));
|
||||
params[10].buffer_type = TSDB_DATA_TYPE_VARBINARY;
|
||||
params[10].buffer_length = sizeof(v.varbin);
|
||||
params[10].buffer = v.varbin;
|
||||
params[10].length = &vbinLen;
|
||||
params[10].is_null = NULL;
|
||||
params[10].num = 1;
|
||||
|
||||
char is_null = 1;
|
||||
|
||||
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
|
||||
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?,?)";
|
||||
code = taos_stmt_prepare(stmt, sql, 0);
|
||||
if (code != 0){
|
||||
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
|
||||
|
@ -162,7 +173,7 @@ int main(int argc, char *argv[])
|
|||
v.ts = 1591060628000;
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
v.ts += 1;
|
||||
for (int j = 1; j < 10; ++j) {
|
||||
for (int j = 1; j < 11; ++j) {
|
||||
params[j].is_null = ((i == j) ? &is_null : 0);
|
||||
}
|
||||
v.b = (int8_t)i % 2;
|
||||
|
@ -216,7 +227,7 @@ int main(int argc, char *argv[])
|
|||
printf("expect two rows, but %d rows are fetched\n", rows);
|
||||
}
|
||||
|
||||
taos_free_result(result);
|
||||
// taos_free_result(result);
|
||||
taos_stmt_close(stmt);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -280,7 +280,7 @@ void consume_repeatly(tmq_t* tmq) {
|
|||
|
||||
code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to seek to %ld, reason:%s", p->begin, tmq_err2str(code));
|
||||
fprintf(stderr, "failed to seek to %d, reason:%s", (int)p->begin, tmq_err2str(code));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -179,6 +179,8 @@ int32_t getJsonValueLen(const char* data);
|
|||
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
|
||||
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
|
||||
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
|
||||
int32_t colDataCopyNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
|
||||
uint32_t numOfRows, bool isNull);
|
||||
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
|
||||
const SColumnInfoData* pSource, int32_t numOfRow2);
|
||||
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
|
||||
|
|
|
@ -102,6 +102,11 @@ extern uint16_t tsMonitorPort;
|
|||
extern int32_t tsMonitorMaxLogs;
|
||||
extern bool tsMonitorComp;
|
||||
|
||||
// audit
|
||||
extern bool tsEnableAudit;
|
||||
extern char tsAuditFqdn[];
|
||||
extern uint16_t tsAuditPort;
|
||||
|
||||
// telem
|
||||
extern bool tsEnableTelem;
|
||||
extern int32_t tsTelemInterval;
|
||||
|
@ -130,6 +135,7 @@ extern bool tsKeepColumnName;
|
|||
extern bool tsEnableQueryHb;
|
||||
extern bool tsEnableScience;
|
||||
extern bool tsTtlChangeOnWrite;
|
||||
extern int32_t tsTtlFlushThreshold;
|
||||
extern int32_t tsRedirectPeriod;
|
||||
extern int32_t tsRedirectFactor;
|
||||
extern int32_t tsRedirectMaxPeriod;
|
||||
|
@ -161,6 +167,7 @@ extern char tsCompressor[];
|
|||
// tfs
|
||||
extern int32_t tsDiskCfgNum;
|
||||
extern SDiskCfg tsDiskCfg[];
|
||||
extern int64_t tsMinDiskFreeSize;
|
||||
|
||||
// udf
|
||||
extern bool tsStartUdfd;
|
||||
|
@ -184,8 +191,11 @@ extern int64_t tsWalFsyncDataSizeLimit;
|
|||
extern int32_t tsTransPullupInterval;
|
||||
extern int32_t tsMqRebalanceInterval;
|
||||
extern int32_t tsStreamCheckpointTickInterval;
|
||||
extern int32_t tsStreamNodeCheckInterval;
|
||||
extern int32_t tsTtlUnit;
|
||||
extern int32_t tsTtlPushInterval;
|
||||
extern int32_t tsTtlPushIntervalSec;
|
||||
extern int32_t tsTtlBatchDropNum;
|
||||
extern int32_t tsTrimVDbIntervalSec;
|
||||
extern int32_t tsGrantHBInterval;
|
||||
extern int32_t tsUptimeInterval;
|
||||
|
||||
|
@ -194,7 +204,6 @@ extern int32_t tsRpcRetryInterval;
|
|||
|
||||
extern bool tsDisableStream;
|
||||
extern int64_t tsStreamBufferSize;
|
||||
extern int64_t tsCheckpointInterval;
|
||||
extern bool tsFilterScalarMode;
|
||||
extern int32_t tsKeepTimeOffset;
|
||||
extern int32_t tsMaxStreamBackendCache;
|
||||
|
|
|
@@ -28,6 +28,22 @@ typedef struct SCorEpSet {
} SCorEpSet;

#define GET_ACTIVE_EP(_eps) (&((_eps)->eps[(_eps)->inUse]))

#define EPSET_TO_STR(_eps, tbuf) \
  do { \
    int len = snprintf((tbuf), sizeof(tbuf), "epset:{"); \
    for (int _i = 0; _i < (_eps)->numOfEps; _i++) { \
      if (_i == (_eps)->numOfEps - 1) { \
        len += \
            snprintf((tbuf) + len, sizeof(tbuf) - len, "%d. %s:%d", _i, (_eps)->eps[_i].fqdn, (_eps)->eps[_i].port); \
      } else { \
        len += \
            snprintf((tbuf) + len, sizeof(tbuf) - len, "%d. %s:%d, ", _i, (_eps)->eps[_i].fqdn, (_eps)->eps[_i].port); \
      } \
    } \
    len += snprintf((tbuf) + len, sizeof(tbuf) - len, "}, inUse:%d", (_eps)->inUse); \
  } while (0);

int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp);
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port);

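A minimal sketch of using the new macro, built only on the declarations above; because the macro expands sizeof(tbuf), the destination must be a genuine char array in scope, not a char* parameter:

    char buf[256] = {0};
    SEpSet epset = {0};
    addEpIntoEpSet(&epset, "localhost", 6030);   // illustrative endpoint
    EPSET_TO_STR(&epset, buf);                   // e.g. "epset:{0. localhost:6030}, inUse:0"
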
@@ -77,7 +77,8 @@ static inline bool tmsgIsValid(tmsg_t type) {
}
static inline bool vnodeIsMsgBlock(tmsg_t type) {
  return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
         (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT);
         (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT) ||
         (type == TDMT_SYNC_CONFIG_CHANGE);
}

static inline bool syncUtilUserCommit(tmsg_t msgType) {

@@ -212,6 +213,215 @@ typedef enum _mgmt_table {
#define TD_REQ_FROM_APP 0
#define TD_REQ_FROM_TAOX 1

typedef enum ENodeType {
  // Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
  // VALUE, OPERATOR, FUNCTION and so on.
  QUERY_NODE_COLUMN = 1,
  QUERY_NODE_VALUE,
  QUERY_NODE_OPERATOR,
  QUERY_NODE_LOGIC_CONDITION,
  QUERY_NODE_FUNCTION,
  QUERY_NODE_REAL_TABLE,
  QUERY_NODE_TEMP_TABLE,
  QUERY_NODE_JOIN_TABLE,
  QUERY_NODE_GROUPING_SET,
  QUERY_NODE_ORDER_BY_EXPR,
  QUERY_NODE_LIMIT,
  QUERY_NODE_STATE_WINDOW,
  QUERY_NODE_SESSION_WINDOW,
  QUERY_NODE_INTERVAL_WINDOW,
  QUERY_NODE_NODE_LIST,
  QUERY_NODE_FILL,
  QUERY_NODE_RAW_EXPR, // Only be used in parser module.
  QUERY_NODE_TARGET,
  QUERY_NODE_DATABLOCK_DESC,
  QUERY_NODE_SLOT_DESC,
  QUERY_NODE_COLUMN_DEF,
  QUERY_NODE_DOWNSTREAM_SOURCE,
  QUERY_NODE_DATABASE_OPTIONS,
  QUERY_NODE_TABLE_OPTIONS,
  QUERY_NODE_INDEX_OPTIONS,
  QUERY_NODE_EXPLAIN_OPTIONS,
  QUERY_NODE_STREAM_OPTIONS,
  QUERY_NODE_LEFT_VALUE,
  QUERY_NODE_COLUMN_REF,
  QUERY_NODE_WHEN_THEN,
  QUERY_NODE_CASE_WHEN,
  QUERY_NODE_EVENT_WINDOW,
  QUERY_NODE_HINT,

  // Statement nodes are used in parser and planner module.
  QUERY_NODE_SET_OPERATOR = 100,
  QUERY_NODE_SELECT_STMT,
  QUERY_NODE_VNODE_MODIFY_STMT,
  QUERY_NODE_CREATE_DATABASE_STMT,
  QUERY_NODE_DROP_DATABASE_STMT,
  QUERY_NODE_ALTER_DATABASE_STMT,
  QUERY_NODE_FLUSH_DATABASE_STMT,
  QUERY_NODE_TRIM_DATABASE_STMT,
  QUERY_NODE_CREATE_TABLE_STMT,
  QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
  QUERY_NODE_CREATE_MULTI_TABLES_STMT,
  QUERY_NODE_DROP_TABLE_CLAUSE,
  QUERY_NODE_DROP_TABLE_STMT,
  QUERY_NODE_DROP_SUPER_TABLE_STMT,
  QUERY_NODE_ALTER_TABLE_STMT,
  QUERY_NODE_ALTER_SUPER_TABLE_STMT,
  QUERY_NODE_CREATE_USER_STMT,
  QUERY_NODE_ALTER_USER_STMT,
  QUERY_NODE_DROP_USER_STMT,
  QUERY_NODE_USE_DATABASE_STMT,
  QUERY_NODE_CREATE_DNODE_STMT,
  QUERY_NODE_DROP_DNODE_STMT,
  QUERY_NODE_ALTER_DNODE_STMT,
  QUERY_NODE_CREATE_INDEX_STMT,
  QUERY_NODE_DROP_INDEX_STMT,
  QUERY_NODE_CREATE_QNODE_STMT,
  QUERY_NODE_DROP_QNODE_STMT,
  QUERY_NODE_CREATE_BNODE_STMT,
  QUERY_NODE_DROP_BNODE_STMT,
  QUERY_NODE_CREATE_SNODE_STMT,
  QUERY_NODE_DROP_SNODE_STMT,
  QUERY_NODE_CREATE_MNODE_STMT,
  QUERY_NODE_DROP_MNODE_STMT,
  QUERY_NODE_CREATE_TOPIC_STMT,
  QUERY_NODE_DROP_TOPIC_STMT,
  QUERY_NODE_DROP_CGROUP_STMT,
  QUERY_NODE_ALTER_LOCAL_STMT,
  QUERY_NODE_EXPLAIN_STMT,
  QUERY_NODE_DESCRIBE_STMT,
  QUERY_NODE_RESET_QUERY_CACHE_STMT,
  QUERY_NODE_COMPACT_DATABASE_STMT,
  QUERY_NODE_CREATE_FUNCTION_STMT,
  QUERY_NODE_DROP_FUNCTION_STMT,
  QUERY_NODE_CREATE_STREAM_STMT,
  QUERY_NODE_DROP_STREAM_STMT,
  QUERY_NODE_BALANCE_VGROUP_STMT,
  QUERY_NODE_MERGE_VGROUP_STMT,
  QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
  QUERY_NODE_SPLIT_VGROUP_STMT,
  QUERY_NODE_SYNCDB_STMT,
  QUERY_NODE_GRANT_STMT,
  QUERY_NODE_REVOKE_STMT,
  QUERY_NODE_SHOW_DNODES_STMT,
  QUERY_NODE_SHOW_MNODES_STMT,
  QUERY_NODE_SHOW_MODULES_STMT,
  QUERY_NODE_SHOW_QNODES_STMT,
  QUERY_NODE_SHOW_SNODES_STMT,
  QUERY_NODE_SHOW_BNODES_STMT,
  QUERY_NODE_SHOW_CLUSTER_STMT,
  QUERY_NODE_SHOW_DATABASES_STMT,
  QUERY_NODE_SHOW_FUNCTIONS_STMT,
  QUERY_NODE_SHOW_INDEXES_STMT,
  QUERY_NODE_SHOW_STABLES_STMT,
  QUERY_NODE_SHOW_STREAMS_STMT,
  QUERY_NODE_SHOW_TABLES_STMT,
  QUERY_NODE_SHOW_TAGS_STMT,
  QUERY_NODE_SHOW_USERS_STMT,
  QUERY_NODE_SHOW_LICENCES_STMT,
  QUERY_NODE_SHOW_VGROUPS_STMT,
  QUERY_NODE_SHOW_TOPICS_STMT,
  QUERY_NODE_SHOW_CONSUMERS_STMT,
  QUERY_NODE_SHOW_CONNECTIONS_STMT,
  QUERY_NODE_SHOW_QUERIES_STMT,
  QUERY_NODE_SHOW_APPS_STMT,
  QUERY_NODE_SHOW_VARIABLES_STMT,
  QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
  QUERY_NODE_SHOW_TRANSACTIONS_STMT,
  QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
  QUERY_NODE_SHOW_VNODES_STMT,
  QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
  QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
  QUERY_NODE_SHOW_CREATE_TABLE_STMT,
  QUERY_NODE_SHOW_CREATE_STABLE_STMT,
  QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
  QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
  QUERY_NODE_SHOW_SCORES_STMT,
  QUERY_NODE_SHOW_TABLE_TAGS_STMT,
  QUERY_NODE_KILL_CONNECTION_STMT,
  QUERY_NODE_KILL_QUERY_STMT,
  QUERY_NODE_KILL_TRANSACTION_STMT,
  QUERY_NODE_DELETE_STMT,
  QUERY_NODE_INSERT_STMT,
  QUERY_NODE_QUERY,
  QUERY_NODE_SHOW_DB_ALIVE_STMT,
  QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
  QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
  QUERY_NODE_RESTORE_DNODE_STMT,
  QUERY_NODE_RESTORE_QNODE_STMT,
  QUERY_NODE_RESTORE_MNODE_STMT,
  QUERY_NODE_RESTORE_VNODE_STMT,
  QUERY_NODE_PAUSE_STREAM_STMT,
  QUERY_NODE_RESUME_STREAM_STMT,

  // logic plan node
  QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
  QUERY_NODE_LOGIC_PLAN_JOIN,
  QUERY_NODE_LOGIC_PLAN_AGG,
  QUERY_NODE_LOGIC_PLAN_PROJECT,
  QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
  QUERY_NODE_LOGIC_PLAN_EXCHANGE,
  QUERY_NODE_LOGIC_PLAN_MERGE,
  QUERY_NODE_LOGIC_PLAN_WINDOW,
  QUERY_NODE_LOGIC_PLAN_FILL,
  QUERY_NODE_LOGIC_PLAN_SORT,
  QUERY_NODE_LOGIC_PLAN_PARTITION,
  QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
  QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
  QUERY_NODE_LOGIC_SUBPLAN,
  QUERY_NODE_LOGIC_PLAN,
  QUERY_NODE_LOGIC_PLAN_GROUP_CACHE,
  QUERY_NODE_LOGIC_PLAN_DYN_QUERY_CTRL,

  // physical plan node
  QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_PROJECT,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
  QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
  QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
  QUERY_NODE_PHYSICAL_PLAN_MERGE,
  QUERY_NODE_PHYSICAL_PLAN_SORT,
  QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
  QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_FILL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
  QUERY_NODE_PHYSICAL_PLAN_PARTITION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
  QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
  QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
  QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
  QUERY_NODE_PHYSICAL_PLAN_INSERT,
  QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
  QUERY_NODE_PHYSICAL_PLAN_DELETE,
  QUERY_NODE_PHYSICAL_SUBPLAN,
  QUERY_NODE_PHYSICAL_PLAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
  QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
  QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
  QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
} ENodeType;


typedef struct {
  int32_t vgId;
  char* dbFName;

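The enum leaves deliberate numeric gaps between its groups (syntax nodes from 1, statement nodes from 100, logic plan nodes from 1000, physical plan nodes from 1100), so a node's broad category can be recovered from its id alone. A sketch of such range checks (the helper names are hypothetical, not part of the header):

    static inline bool nodeIsStmt(ENodeType t) {
      return t >= QUERY_NODE_SET_OPERATOR && t < QUERY_NODE_LOGIC_PLAN_SCAN;
    }
    static inline bool nodeIsLogicPlan(ENodeType t) {
      return t >= QUERY_NODE_LOGIC_PLAN_SCAN && t < QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
    }
    static inline bool nodeIsPhysiPlan(ENodeType t) {
      return t >= QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
    }
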
@@ -231,7 +441,6 @@ typedef struct SField {
  uint8_t type;
  int8_t flags;
  int32_t bytes;
  char comment[TSDB_COL_COMMENT_LEN];
} SField;

typedef struct SRetention {

@@ -310,7 +519,6 @@ struct SSchema {
  col_id_t colId;
  int32_t bytes;
  char name[TSDB_COL_NAME_LEN];
  char comment[TSDB_COL_COMMENT_LEN];
};

struct SSchema2 {

@@ -743,6 +951,10 @@ typedef struct STimeWindow {
  TSKEY ekey;
} STimeWindow;

typedef struct SQueryHint {
  bool batchScan;
} SQueryHint;

typedef struct {
  int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
  int32_t tsLen;    // total length of ts comp block

@@ -761,12 +973,18 @@ typedef struct {
  int64_t offset;
} SInterval;

typedef struct {
  int32_t code;

typedef struct STbVerInfo {
  char tbFName[TSDB_TABLE_FNAME_LEN];
  int32_t sversion;
  int32_t tversion;
} STbVerInfo;


typedef struct {
  int32_t code;
  int64_t affectedRows;
  SArray* tbVerInfo; // STbVerInfo
} SQueryTableRsp;

int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);

@@ -941,6 +1159,9 @@ int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);

typedef struct {
  int32_t timestampSec;
  int32_t ttlDropMaxCount;
  int32_t nUids;
  SArray* pTbUids;
} SVDropTtlTableReq;

int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);

@@ -1165,6 +1386,9 @@ typedef struct {
  int32_t vgId;
  int8_t syncState;
  int8_t syncRestore;
  int64_t syncTerm;
  int64_t roleTimeMs;
  int64_t startTimeMs;
  int8_t syncCanRead;
  int64_t cacheUsage;
  int64_t numOfTables;

@@ -1178,12 +1402,13 @@ typedef struct {
  int64_t numOfBatchInsertReqs;
  int64_t numOfBatchInsertSuccessReqs;
  int32_t numOfCachedTables;
  int32_t learnerProgress; // use one reserved
} SVnodeLoad;

typedef struct {
  int8_t syncState;
  int8_t syncRestore;
  int8_t syncState;
  int64_t syncTerm;
  int8_t syncRestore;
  int64_t roleTimeMs;
} SMnodeLoad;

@@ -1193,6 +1418,7 @@ typedef struct {
  int64_t numOfProcessedCQuery;
  int64_t numOfProcessedFetch;
  int64_t numOfProcessedDrop;
  int64_t numOfProcessedNotify;
  int64_t numOfProcessedHb;
  int64_t numOfProcessedDelete;
  int64_t cacheDataSize;

@@ -1319,6 +1545,7 @@ typedef struct {
  int8_t learnerReplica;
  int8_t learnerSelfIndex;
  SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
  int32_t changeVersion;
} SCreateVnodeReq;

int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);

@@ -1393,7 +1620,8 @@ typedef struct {
  int8_t learnerSelfIndex;
  int8_t learnerReplica;
  SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq;
  int32_t changeVersion;
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq, SCheckLearnCatchupReq;

int32_t tSerializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
int32_t tDeserializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);

@@ -1411,7 +1639,8 @@ typedef struct {
  int32_t dstVgId;
  uint32_t hashBegin;
  uint32_t hashEnd;
  int64_t reserved;
  int32_t changeVersion;
  int32_t reserved;
} SAlterVnodeHashRangeReq;

int32_t tSerializeSAlterVnodeHashRangeReq(void* buf, int32_t bufLen, SAlterVnodeHashRangeReq* pReq);

@@ -1833,12 +2062,26 @@ typedef struct {
  int32_t tversion;
} SResReadyRsp;


typedef struct SOperatorParam {
  int32_t opType;
  int32_t downstreamIdx;
  void* value;
  SArray* pChildren; //SArray<SOperatorParam*>
} SOperatorParam;

typedef struct STableScanOperatorParam {
  bool tableSeq;
  SArray* pUidList;
} STableScanOperatorParam;

typedef struct {
  SMsgHead header;
  uint64_t sId;
  uint64_t queryId;
  uint64_t taskId;
  int32_t execId;
  SMsgHead header;
  uint64_t sId;
  uint64_t queryId;
  uint64_t taskId;
  int32_t execId;
  SOperatorParam* pOpParam;
} SResFetchReq;

int32_t tSerializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);

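A sketch of filling the new pOpParam field for a table-sequential scan, assuming taosMemoryCalloc and the SArray helpers taosArrayInit/taosArrayPush from the TDengine utility headers; the uid and the choice of opType value are illustrative assumptions:

    STableScanOperatorParam* pScan = taosMemoryCalloc(1, sizeof(*pScan));
    pScan->tableSeq = true;
    pScan->pUidList = taosArrayInit(1, sizeof(uint64_t));
    uint64_t uid = 123;                   // illustrative table uid
    taosArrayPush(pScan->pUidList, &uid);

    SOperatorParam* pParam = taosMemoryCalloc(1, sizeof(*pParam));
    pParam->opType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;  // assumed to key off the physical node type
    pParam->downstreamIdx = 0;
    pParam->value = pScan;
    pParam->pChildren = NULL;             // SArray<SOperatorParam*> for child operators
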
@@ -1917,8 +2160,24 @@ typedef struct {

int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);


typedef enum {
  TASK_NOTIFY_FINISHED = 1,
} ETaskNotifyType;

typedef struct {
  SMsgHead header;
  uint64_t sId;
  uint64_t queryId;
  uint64_t taskId;
  int64_t refId;
  int32_t execId;
  ETaskNotifyType type;
} STaskNotifyReq;

int32_t tSerializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
int32_t tDeserializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);

int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);

@@ -2389,9 +2648,6 @@ typedef struct {
  int8_t type;
  int8_t flags;
  int32_t bytes;
  bool hasColComment;
  char* colComment;
  int32_t colCommentLen;
  // TSDB_ALTER_TABLE_DROP_COLUMN
  // TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
  int8_t colModType;

@@ -65,7 +65,7 @@ enum {
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,

enum {
enum { // WARN: new msg should be appended to segment tail
#endif
  TD_NEW_MSG_SEG(TDMT_DND_MSG)
  TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)

@@ -85,18 +85,19 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_MND_MSG)
  TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "drop-acct", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "alter-user", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "create-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "drop-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "create-mnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "alter-mnode", NULL, NULL)

@@ -156,6 +157,7 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_TTL_TIMER, "ttl-tmr", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_GRANT_HB_TIMER, "grant-hb-tmr", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_NODECHECK_TIMER, "node-check-tmr", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL)

@@ -174,13 +176,17 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, "lost-consumer-clear", NULL, NULL)
  // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
  // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_HEARTBEAT, "stream-heartbeat", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)

  TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_VND_MSG)
  TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)

@@ -242,6 +248,7 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)

@@ -252,15 +259,13 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_UNUSED1, "stream-unused1", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT_READY, "stream-checkpoint-ready", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_MON_MSG)
  TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)

@@ -295,11 +300,14 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
  TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)


  TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
  // TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_UPDATE, "vnode-stream-update", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)

  TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG)

@@ -16,105 +16,105 @@
#ifndef _TD_COMMON_TOKEN_H_
#define _TD_COMMON_TOKEN_H_

#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_WITH 48
#define TK_DNODE 49
#define TK_PORT 50
#define TK_DNODES 51
#define TK_RESTORE 52
#define TK_NK_IPTOKEN 53
#define TK_FORCE 54
#define TK_UNSAFE 55
#define TK_LOCAL 56
#define TK_QNODE 57
#define TK_BNODE 58
#define TK_SNODE 59
#define TK_MNODE 60
#define TK_VNODE 61
#define TK_DATABASE 62
#define TK_USE 63
#define TK_FLUSH 64
#define TK_TRIM 65
#define TK_COMPACT 66
#define TK_IF 67
#define TK_NOT 68
#define TK_EXISTS 69
#define TK_BUFFER 70
#define TK_CACHEMODEL 71
#define TK_CACHESIZE 72
#define TK_COMP 73
#define TK_DURATION 74
#define TK_NK_VARIABLE 75
#define TK_MAXROWS 76
#define TK_MINROWS 77
#define TK_KEEP 78
#define TK_PAGES 79
#define TK_PAGESIZE 80
#define TK_TSDB_PAGESIZE 81
#define TK_PRECISION 82
#define TK_REPLICA 83
#define TK_VGROUPS 84
#define TK_SINGLE_STABLE 85
#define TK_RETENTIONS 86
#define TK_SCHEMALESS 87
#define TK_WAL_LEVEL 88
#define TK_WAL_FSYNC_PERIOD 89
#define TK_WAL_RETENTION_PERIOD 90
#define TK_WAL_RETENTION_SIZE 91
#define TK_WAL_ROLL_PERIOD 92
#define TK_WAL_SEGMENT_SIZE 93
#define TK_STT_TRIGGER 94
#define TK_TABLE_PREFIX 95
#define TK_TABLE_SUFFIX 96
#define TK_NK_COLON 97
#define TK_MAX_SPEED 98
#define TK_START 99
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_WITH 48
#define TK_DNODE 49
#define TK_PORT 50
#define TK_DNODES 51
#define TK_RESTORE 52
#define TK_NK_IPTOKEN 53
#define TK_FORCE 54
#define TK_UNSAFE 55
#define TK_LOCAL 56
#define TK_QNODE 57
#define TK_BNODE 58
#define TK_SNODE 59
#define TK_MNODE 60
#define TK_VNODE 61
#define TK_DATABASE 62
#define TK_USE 63
#define TK_FLUSH 64
#define TK_TRIM 65
#define TK_COMPACT 66
#define TK_IF 67
#define TK_NOT 68
#define TK_EXISTS 69
#define TK_BUFFER 70
#define TK_CACHEMODEL 71
#define TK_CACHESIZE 72
#define TK_COMP 73
#define TK_DURATION 74
#define TK_NK_VARIABLE 75
#define TK_MAXROWS 76
#define TK_MINROWS 77
#define TK_KEEP 78
#define TK_PAGES 79
#define TK_PAGESIZE 80
#define TK_TSDB_PAGESIZE 81
#define TK_PRECISION 82
#define TK_REPLICA 83
#define TK_VGROUPS 84
#define TK_SINGLE_STABLE 85
#define TK_RETENTIONS 86
#define TK_SCHEMALESS 87
#define TK_WAL_LEVEL 88
#define TK_WAL_FSYNC_PERIOD 89
#define TK_WAL_RETENTION_PERIOD 90
#define TK_WAL_RETENTION_SIZE 91
#define TK_WAL_ROLL_PERIOD 92
#define TK_WAL_SEGMENT_SIZE 93
#define TK_STT_TRIGGER 94
#define TK_TABLE_PREFIX 95
#define TK_TABLE_SUFFIX 96
#define TK_NK_COLON 97
#define TK_BWLIMIT 98
#define TK_START 99
#define TK_TIMESTAMP 100
#define TK_END 101
#define TK_TABLE 102

@@ -130,25 +130,25 @@
#define TK_NK_EQ 112
#define TK_USING 113
#define TK_TAGS 114
#define TK_COMMENT 115
#define TK_BOOL 116
#define TK_TINYINT 117
#define TK_SMALLINT 118
#define TK_INT 119
#define TK_INTEGER 120
#define TK_BIGINT 121
#define TK_FLOAT 122
#define TK_DOUBLE 123
#define TK_BINARY 124
#define TK_NCHAR 125
#define TK_UNSIGNED 126
#define TK_JSON 127
#define TK_VARCHAR 128
#define TK_MEDIUMBLOB 129
#define TK_BLOB 130
#define TK_VARBINARY 131
#define TK_GEOMETRY 132
#define TK_DECIMAL 133
#define TK_BOOL 115
#define TK_TINYINT 116
#define TK_SMALLINT 117
#define TK_INT 118
#define TK_INTEGER 119
#define TK_BIGINT 120
#define TK_FLOAT 121
#define TK_DOUBLE 122
#define TK_BINARY 123
#define TK_NCHAR 124
#define TK_UNSIGNED 125
#define TK_JSON 126
#define TK_VARCHAR 127
#define TK_MEDIUMBLOB 128
#define TK_BLOB 129
#define TK_VARBINARY 130
#define TK_GEOMETRY 131
#define TK_DECIMAL 132
#define TK_COMMENT 133
#define TK_MAX_DELAY 134
#define TK_WATERMARK 135
#define TK_ROLLUP 136

@@ -276,91 +276,96 @@
#define TK_JOIN 258
#define TK_INNER 259
#define TK_SELECT 260
#define TK_DISTINCT 261
#define TK_WHERE 262
#define TK_PARTITION 263
#define TK_BY 264
#define TK_SESSION 265
#define TK_STATE_WINDOW 266
#define TK_EVENT_WINDOW 267
#define TK_SLIDING 268
#define TK_FILL 269
#define TK_VALUE 270
#define TK_VALUE_F 271
#define TK_NONE 272
#define TK_PREV 273
#define TK_NULL_F 274
#define TK_LINEAR 275
#define TK_NEXT 276
#define TK_HAVING 277
#define TK_RANGE 278
#define TK_EVERY 279
#define TK_ORDER 280
#define TK_SLIMIT 281
#define TK_SOFFSET 282
#define TK_LIMIT 283
#define TK_OFFSET 284
#define TK_ASC 285
#define TK_NULLS 286
#define TK_ABORT 287
#define TK_AFTER 288
#define TK_ATTACH 289
#define TK_BEFORE 290
#define TK_BEGIN 291
#define TK_BITAND 292
#define TK_BITNOT 293
#define TK_BITOR 294
#define TK_BLOCKS 295
#define TK_CHANGE 296
#define TK_COMMA 297
#define TK_CONCAT 298
#define TK_CONFLICT 299
#define TK_COPY 300
#define TK_DEFERRED 301
#define TK_DELIMITERS 302
#define TK_DETACH 303
#define TK_DIVIDE 304
#define TK_DOT 305
#define TK_EACH 306
#define TK_FAIL 307
#define TK_FILE 308
#define TK_FOR 309
#define TK_GLOB 310
#define TK_ID 311
#define TK_IMMEDIATE 312
#define TK_IMPORT 313
#define TK_INITIALLY 314
#define TK_INSTEAD 315
#define TK_ISNULL 316
#define TK_KEY 317
#define TK_MODULES 318
#define TK_NK_BITNOT 319
#define TK_NK_SEMI 320
#define TK_NOTNULL 321
#define TK_OF 322
#define TK_PLUS 323
#define TK_PRIVILEGE 324
#define TK_RAISE 325
#define TK_RESTRICT 326
#define TK_ROW 327
#define TK_SEMI 328
#define TK_STAR 329
#define TK_STATEMENT 330
#define TK_STRICT 331
#define TK_STRING 332
#define TK_TIMES 333
#define TK_VALUES 334
#define TK_VARIABLE 335
#define TK_VIEW 336
#define TK_WAL 337
#define TK_NK_HINT 261
#define TK_DISTINCT 262
#define TK_WHERE 263
#define TK_PARTITION 264
#define TK_BY 265
#define TK_SESSION 266
#define TK_STATE_WINDOW 267
#define TK_EVENT_WINDOW 268
#define TK_SLIDING 269
#define TK_FILL 270
#define TK_VALUE 271
#define TK_VALUE_F 272
#define TK_NONE 273
#define TK_PREV 274
#define TK_NULL_F 275
#define TK_LINEAR 276
#define TK_NEXT 277
#define TK_HAVING 278
#define TK_RANGE 279
#define TK_EVERY 280
#define TK_ORDER 281
#define TK_SLIMIT 282
#define TK_SOFFSET 283
#define TK_LIMIT 284
#define TK_OFFSET 285
#define TK_ASC 286
#define TK_NULLS 287
#define TK_ABORT 288
#define TK_AFTER 289
#define TK_ATTACH 290
#define TK_BEFORE 291
#define TK_BEGIN 292
#define TK_BITAND 293
#define TK_BITNOT 294
#define TK_BITOR 295
#define TK_BLOCKS 296
#define TK_CHANGE 297
#define TK_COMMA 298
#define TK_CONCAT 299
#define TK_CONFLICT 300
#define TK_COPY 301
#define TK_DEFERRED 302
#define TK_DELIMITERS 303
#define TK_DETACH 304
#define TK_DIVIDE 305
#define TK_DOT 306
#define TK_EACH 307
#define TK_FAIL 308
#define TK_FILE 309
#define TK_FOR 310
#define TK_GLOB 311
#define TK_ID 312
#define TK_IMMEDIATE 313
#define TK_IMPORT 314
#define TK_INITIALLY 315
#define TK_INSTEAD 316
#define TK_ISNULL 317
#define TK_KEY 318
#define TK_MODULES 319
#define TK_NK_BITNOT 320
#define TK_NK_SEMI 321
#define TK_NOTNULL 322
#define TK_OF 323
#define TK_PLUS 324
#define TK_PRIVILEGE 325
#define TK_RAISE 326
#define TK_RESTRICT 327
#define TK_ROW 328
#define TK_SEMI 329
#define TK_STAR 330
#define TK_STATEMENT 331
#define TK_STRICT 332
#define TK_STRING 333
#define TK_TIMES 334
#define TK_VALUES 335
#define TK_VARIABLE 336
#define TK_VIEW 337
#define TK_WAL 338


#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
#define TK_NK_HEX 603 // hex number 0x123
#define TK_NK_OCT 604 // oct number
#define TK_NK_BIN 605 // bin format data 0b111

#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
#define TK_NK_HEX 603 // hex number 0x123
#define TK_NK_OCT 604 // oct number
#define TK_NK_BIN 605 // bin format data 0b111
#define TK_BATCH_SCAN 606
#define TK_NO_BATCH_SCAN 607


#define TK_NK_NIL 65535

@@ -269,8 +269,8 @@ typedef struct {
  (IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))

#define IS_VAR_DATA_TYPE(t) \
  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))

#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)

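With this change VARBINARY is routed through the variable-length (length-prefixed) code paths and is also treated as a string type. A quick check, assuming the TSDB_DATA_TYPE_* constants and <assert.h>:

    assert(IS_VAR_DATA_TYPE(TSDB_DATA_TYPE_VARBINARY));
    assert(IS_STR_DATA_TYPE(TSDB_DATA_TYPE_VARBINARY));
    assert(!IS_STR_DATA_TYPE(TSDB_DATA_TYPE_JSON));   // JSON stays var-data but is not a string type
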
@@ -0,0 +1,48 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_AUDIT_H_
#define _TD_AUDIT_H_

#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"
#include "tjson.h"
#include "tmsgcb.h"
#include "trpc.h"
#include "mnode.h"

#ifdef __cplusplus
extern "C" {
#endif

#define AUDIT_DETAIL_MAX 16000

typedef struct {
  const char *server;
  uint16_t port;
  bool comp;
} SAuditCfg;

int32_t auditInit(const SAuditCfg *pCfg);
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);

#ifdef __cplusplus
}
#endif

#endif /*_TD_AUDIT_H_*/

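A minimal sketch of wiring the new audit module together, using only the declarations above; the server endpoint is illustrative, and pReq/clusterId are assumed to be in scope in a request handler:

    SAuditCfg cfg = {.server = "audit.example.com", .port = 6043, .comp = false};
    if (auditInit(&cfg) != 0) {
      // handle initialization failure
    }
    // later, while handling a request, emit one audit record:
    auditRecord(pReq, clusterId, "createTable", "db1", "tb1", "create table tb1 ...");
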
@@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg {
  uint32_t maxDataBlockNumPerQuery;
} SDataSinkMgtCfg;

int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI);
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI, void** ppSinkManager);

typedef struct SInputData {
  const struct SSDataBlock* pData;

@@ -83,7 +83,7 @@ typedef struct SOutputData {
 * @param pHandle output
 * @return error code
 */
int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
int32_t dsCreateDataSinker(void* pSinkManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);

int32_t dsDataSinkGetCacheSize(SDataSinkStat* pStat);

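A sketch of the call order implied by the two signature changes above: the sink manager produced at init time is now threaded into every sinker. cfg, pAPI and pDataSink are assumed to be prepared by the caller:

    void* pSinkManager = NULL;
    int32_t code = dsDataSinkMgtInit(&cfg, &pAPI, &pSinkManager);
    if (code == 0) {
      DataSinkHandle handle = NULL;
      code = dsCreateDataSinker(pSinkManager, pDataSink, &handle, NULL, "task-0x1");
    }
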
@@ -97,6 +97,8 @@ int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pC

void dsEndPut(DataSinkHandle handle, uint64_t useconds);

void dsReset(DataSinkHandle handle);

/**
 * Get the length of the data returned by the next call to dsGetDataBlock.
 * @param handle

@@ -41,23 +41,21 @@ typedef struct {
} SLocalFetch;

typedef struct {
  void* tqReader;
  void* config;
  void* vnode;
  void* mnd;
  SMsgCb* pMsgCb;
  int64_t version;
  bool initMetaReader;
  bool initTableReader;
  bool initTqReader;
  int32_t numOfVgroups;
  void* sContext; // SSnapContext*
  void* tqReader; // todo remove it
  void* vnode;
  void* mnd;
  SMsgCb* pMsgCb;
  int64_t version;
  uint64_t checkpointId;
  bool initTableReader;
  bool initTqReader;
  int32_t numOfVgroups;
  void* sContext; // SSnapContext*
  void* pStateBackend;
  int8_t fillHistory;
  STimeWindow winRange;

  void* pStateBackend;
  struct SStorageAPI api;

  int8_t fillHistory;
  STimeWindow winRange;
} SReadHandle;

// in queue mode, data streams are separated by msg

@@ -97,9 +95,6 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);

int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);

// todo refactor
void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId);

/**
 * Set multiple input data blocks for the stream scan.
 * @param tinfo

@@ -130,6 +125,10 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
 */
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);

bool qIsDynamicExecTask(qTaskInfo_t tinfo);

void qUpdateOperatorParam(qTaskInfo_t tinfo, void* pParam);

/**
 * Create the exec task object according to task json
 * @param readHandle

@@ -150,7 +149,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
 * @return
 */
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
                                    int32_t* tversion);
                                    int32_t* tversion, int32_t idx);

/**
 * The main task execution function, including query on both table and multiple tables,

@@ -106,6 +106,8 @@ typedef struct SMCtbCursor {
  void *pVal;
  int kLen;
  int vLen;
  int8_t paused;
  int lock;
} SMCtbCursor;

typedef struct SRowBuffPos {

@@ -295,7 +297,9 @@ int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool
int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
 */
  SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
  void (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock);
  int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
  void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);
  void (*closeCtbCursor)(SMCtbCursor *pCtbCur);
  tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur);
} SStoreMeta;

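A sketch of the child-table cursor lifecycle with the new pause/resume hooks, assuming pMeta is a populated SStoreMeta*, pVnode/suid are valid, and that ctbCursorNext returns 0 when the cursor is exhausted (an assumption, not stated in the header):

    SMCtbCursor* pCur = pMeta->openCtbCursor(pVnode, suid, 1 /* lock */);
    tb_uid_t uid;
    while ((uid = pMeta->ctbCursorNext(pCur)) != 0) {
      // ... process one child table ...
      pMeta->pauseCtbCursor(pCur);        // drop the meta lock between batches
      pMeta->resumeCtbCursor(pCur, 0);    // 0: continue from the paused position
    }
    pMeta->closeCtbCursor(pCur);          // note: the lock argument is gone
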
@@ -375,7 +379,7 @@ typedef struct SStateStore {
                                                state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
  int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);

  SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
  SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
  TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
  bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
  bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);

@@ -383,7 +387,7 @@ typedef struct SStateStore {
  void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count);
  void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count);

  SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
  SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark, bool igUp);
  void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
  void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
  int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);

@@ -394,7 +398,8 @@ typedef struct SStateStore {
  SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);

  struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
                                                  uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark, const char*id);
                                                  uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark,
                                                  const char* id, int64_t ckId);

  void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
  void (*streamFileStateClear)(struct SStreamFileState* pFileState);

@@ -122,6 +122,8 @@ typedef enum EFunctionType {
  FUNCTION_TYPE_IROWTS,
  FUNCTION_TYPE_ISFILLED,
  FUNCTION_TYPE_TAGS,
  FUNCTION_TYPE_TBUID,
  FUNCTION_TYPE_VGID,

  // internal function
  FUNCTION_TYPE_SELECT_VALUE = 3750,

@@ -233,6 +235,7 @@ bool fmIsCumulativeFunc(int32_t funcId);
bool fmIsInterpPseudoColumnFunc(int32_t funcId);
bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
bool fmIsConstantResFunc(SFunctionNode* pFunc);

void getLastCacheDataType(SDataType* pType);
SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);

@@ -109,8 +109,8 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 65535
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) \
  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
  (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))

static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
  if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {

@@ -23,11 +23,10 @@ extern "C" {
#include "query.h"
#include "querynodes.h"

#define DESCRIBE_RESULT_COLS 5
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_COL_COMMENT_LEN (TSDB_COL_COMMENT_LEN)
#define DESCRIBE_RESULT_COLS 4
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)

#define SHOW_CREATE_DB_RESULT_COLS 2
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)

@@ -156,7 +155,7 @@ typedef struct SColumnDefNode {
  ENodeType type;
  char colName[TSDB_COL_NAME_LEN];
  SDataType dataType;
  char comments[TSDB_COL_COMMENT_LEN];
  char comments[TSDB_TB_COMMENT_LEN];
  bool sma;
} SColumnDefNode;

@@ -215,7 +214,6 @@ typedef struct SAlterTableStmt {
  char newColName[TSDB_COL_NAME_LEN];
  STableOptions* pOptions;
  SDataType dataType;
  char colComment[TSDB_COL_COMMENT_LEN];
  SValueNode* pVal;
} SAlterTableStmt;

@@ -21,6 +21,7 @@ extern "C" {
#endif

#include "tdef.h"
#include "tmsg.h"

#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr, nodetype) (((SNode*)(nodeptr))->type = (nodetype))

@@ -78,209 +79,6 @@ extern "C" {
    (list) = NULL; \
  } while (0)

typedef enum ENodeType {
  // Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
  // VALUE, OPERATOR, FUNCTION and so on.
  QUERY_NODE_COLUMN = 1,
  QUERY_NODE_VALUE,
  QUERY_NODE_OPERATOR,
  QUERY_NODE_LOGIC_CONDITION,
  QUERY_NODE_FUNCTION,
  QUERY_NODE_REAL_TABLE,
  QUERY_NODE_TEMP_TABLE,
  QUERY_NODE_JOIN_TABLE,
  QUERY_NODE_GROUPING_SET,
  QUERY_NODE_ORDER_BY_EXPR,
  QUERY_NODE_LIMIT,
  QUERY_NODE_STATE_WINDOW,
  QUERY_NODE_SESSION_WINDOW,
  QUERY_NODE_INTERVAL_WINDOW,
  QUERY_NODE_NODE_LIST,
  QUERY_NODE_FILL,
  QUERY_NODE_RAW_EXPR, // Only be used in parser module.
  QUERY_NODE_TARGET,
  QUERY_NODE_DATABLOCK_DESC,
  QUERY_NODE_SLOT_DESC,
  QUERY_NODE_COLUMN_DEF,
  QUERY_NODE_DOWNSTREAM_SOURCE,
  QUERY_NODE_DATABASE_OPTIONS,
  QUERY_NODE_TABLE_OPTIONS,
  QUERY_NODE_INDEX_OPTIONS,
  QUERY_NODE_EXPLAIN_OPTIONS,
  QUERY_NODE_STREAM_OPTIONS,
  QUERY_NODE_LEFT_VALUE,
  QUERY_NODE_COLUMN_REF,
  QUERY_NODE_WHEN_THEN,
  QUERY_NODE_CASE_WHEN,
  QUERY_NODE_EVENT_WINDOW,

  // Statement nodes are used in parser and planner module.
  QUERY_NODE_SET_OPERATOR = 100,
  QUERY_NODE_SELECT_STMT,
  QUERY_NODE_VNODE_MODIFY_STMT,
  QUERY_NODE_CREATE_DATABASE_STMT,
  QUERY_NODE_DROP_DATABASE_STMT,
  QUERY_NODE_ALTER_DATABASE_STMT,
  QUERY_NODE_FLUSH_DATABASE_STMT,
  QUERY_NODE_TRIM_DATABASE_STMT,
  QUERY_NODE_CREATE_TABLE_STMT,
  QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
  QUERY_NODE_CREATE_MULTI_TABLES_STMT,
  QUERY_NODE_DROP_TABLE_CLAUSE,
  QUERY_NODE_DROP_TABLE_STMT,
  QUERY_NODE_DROP_SUPER_TABLE_STMT,
  QUERY_NODE_ALTER_TABLE_STMT,
  QUERY_NODE_ALTER_SUPER_TABLE_STMT,
  QUERY_NODE_CREATE_USER_STMT,
  QUERY_NODE_ALTER_USER_STMT,
  QUERY_NODE_DROP_USER_STMT,
  QUERY_NODE_USE_DATABASE_STMT,
  QUERY_NODE_CREATE_DNODE_STMT,
  QUERY_NODE_DROP_DNODE_STMT,
  QUERY_NODE_ALTER_DNODE_STMT,
  QUERY_NODE_CREATE_INDEX_STMT,
  QUERY_NODE_DROP_INDEX_STMT,
  QUERY_NODE_CREATE_QNODE_STMT,
  QUERY_NODE_DROP_QNODE_STMT,
  QUERY_NODE_CREATE_BNODE_STMT,
  QUERY_NODE_DROP_BNODE_STMT,
  QUERY_NODE_CREATE_SNODE_STMT,
  QUERY_NODE_DROP_SNODE_STMT,
  QUERY_NODE_CREATE_MNODE_STMT,
  QUERY_NODE_DROP_MNODE_STMT,
  QUERY_NODE_CREATE_TOPIC_STMT,
  QUERY_NODE_DROP_TOPIC_STMT,
  QUERY_NODE_DROP_CGROUP_STMT,
  QUERY_NODE_ALTER_LOCAL_STMT,
  QUERY_NODE_EXPLAIN_STMT,
  QUERY_NODE_DESCRIBE_STMT,
  QUERY_NODE_RESET_QUERY_CACHE_STMT,
  QUERY_NODE_COMPACT_DATABASE_STMT,
  QUERY_NODE_CREATE_FUNCTION_STMT,
  QUERY_NODE_DROP_FUNCTION_STMT,
  QUERY_NODE_CREATE_STREAM_STMT,
  QUERY_NODE_DROP_STREAM_STMT,
  QUERY_NODE_BALANCE_VGROUP_STMT,
  QUERY_NODE_MERGE_VGROUP_STMT,
  QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
  QUERY_NODE_SPLIT_VGROUP_STMT,
  QUERY_NODE_SYNCDB_STMT,
  QUERY_NODE_GRANT_STMT,
  QUERY_NODE_REVOKE_STMT,
  QUERY_NODE_SHOW_DNODES_STMT,
  QUERY_NODE_SHOW_MNODES_STMT,
  // QUERY_NODE_SHOW_MODULES_STMT,
  QUERY_NODE_SHOW_QNODES_STMT,
  QUERY_NODE_SHOW_SNODES_STMT,
  QUERY_NODE_SHOW_BNODES_STMT,
  QUERY_NODE_SHOW_CLUSTER_STMT,
  QUERY_NODE_SHOW_DATABASES_STMT,
  QUERY_NODE_SHOW_FUNCTIONS_STMT,
  QUERY_NODE_SHOW_INDEXES_STMT,
  QUERY_NODE_SHOW_STABLES_STMT,
  QUERY_NODE_SHOW_STREAMS_STMT,
  QUERY_NODE_SHOW_TABLES_STMT,
  QUERY_NODE_SHOW_TAGS_STMT,
  QUERY_NODE_SHOW_USERS_STMT,
  QUERY_NODE_SHOW_LICENCES_STMT,
  QUERY_NODE_SHOW_VGROUPS_STMT,
  QUERY_NODE_SHOW_TOPICS_STMT,
  QUERY_NODE_SHOW_CONSUMERS_STMT,
  QUERY_NODE_SHOW_CONNECTIONS_STMT,
  QUERY_NODE_SHOW_QUERIES_STMT,
  QUERY_NODE_SHOW_APPS_STMT,
  QUERY_NODE_SHOW_VARIABLES_STMT,
  QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
  QUERY_NODE_SHOW_TRANSACTIONS_STMT,
  QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
  QUERY_NODE_SHOW_VNODES_STMT,
  QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
  QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
  QUERY_NODE_SHOW_CREATE_TABLE_STMT,
  QUERY_NODE_SHOW_CREATE_STABLE_STMT,
  QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
  QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
  QUERY_NODE_SHOW_SCORES_STMT,
  QUERY_NODE_SHOW_TABLE_TAGS_STMT,
  QUERY_NODE_KILL_CONNECTION_STMT,
  QUERY_NODE_KILL_QUERY_STMT,
  QUERY_NODE_KILL_TRANSACTION_STMT,
  QUERY_NODE_DELETE_STMT,
  QUERY_NODE_INSERT_STMT,
  QUERY_NODE_QUERY,
  QUERY_NODE_SHOW_DB_ALIVE_STMT,
  QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
  QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
  QUERY_NODE_RESTORE_DNODE_STMT,
  QUERY_NODE_RESTORE_QNODE_STMT,
  QUERY_NODE_RESTORE_MNODE_STMT,
  QUERY_NODE_RESTORE_VNODE_STMT,
  QUERY_NODE_PAUSE_STREAM_STMT,
  QUERY_NODE_RESUME_STREAM_STMT,

  // logic plan node
  QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
  QUERY_NODE_LOGIC_PLAN_JOIN,
  QUERY_NODE_LOGIC_PLAN_AGG,
  QUERY_NODE_LOGIC_PLAN_PROJECT,
  QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
  QUERY_NODE_LOGIC_PLAN_EXCHANGE,
  QUERY_NODE_LOGIC_PLAN_MERGE,
  QUERY_NODE_LOGIC_PLAN_WINDOW,
  QUERY_NODE_LOGIC_PLAN_FILL,
  QUERY_NODE_LOGIC_PLAN_SORT,
  QUERY_NODE_LOGIC_PLAN_PARTITION,
  QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
  QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
  QUERY_NODE_LOGIC_SUBPLAN,
  QUERY_NODE_LOGIC_PLAN,

  // physical plan node
  QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_PROJECT,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
  QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
  QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
  QUERY_NODE_PHYSICAL_PLAN_MERGE,
  QUERY_NODE_PHYSICAL_PLAN_SORT,
  QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
  QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL,
  QUERY_NODE_PHYSICAL_PLAN_FILL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
  QUERY_NODE_PHYSICAL_PLAN_PARTITION,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
  QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
  QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
  QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
  QUERY_NODE_PHYSICAL_PLAN_INSERT,
  QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
  QUERY_NODE_PHYSICAL_PLAN_DELETE,
  QUERY_NODE_PHYSICAL_SUBPLAN,
  QUERY_NODE_PHYSICAL_PLAN,
  QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
  QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType;

/**
 * The first field of a node of any type is guaranteed to be the ENodeType.
 * Hence the type of any node can be gotten by casting it to SNode.

@@ -42,10 +42,13 @@ typedef enum EGroupAction {

typedef struct SLogicNode {
  ENodeType type;
  bool dynamicOp;
  bool stmtRoot;
  SNodeList* pTargets; // SColumnNode
  SNode* pConditions;
  SNodeList* pChildren;
  struct SLogicNode* pParent;
  SNodeList* pHint;
  int32_t optimizedFlag;
  uint8_t precision;
  SNode* pLimit;

@@ -111,12 +114,17 @@ typedef struct SScanLogicNode {
} SScanLogicNode;

typedef struct SJoinLogicNode {
  SLogicNode node;
  EJoinType joinType;
  SNode* pMergeCondition;
  SNode* pOnConditions;
  bool isSingleTableJoin;
  SNode* pColEqualOnConditions;
  SLogicNode node;
  EJoinType joinType;
  EJoinAlgorithm joinAlgo;
  SNode* pPrimKeyEqCond;
  SNode* pColEqCond;
  SNode* pTagEqCond;
  SNode* pTagOnCond;
  SNode* pOtherOnCond;
  bool isSingleTableJoin;
  bool hasSubQuery;
  bool isLowLevelJoin;
} SJoinLogicNode;

typedef struct SAggLogicNode {

@@ -155,6 +163,28 @@ typedef struct SInterpFuncLogicNode {
  SNode* pTimeSeries; // SColumnNode
} SInterpFuncLogicNode;

typedef struct SGroupCacheLogicNode {
  SLogicNode node;
  bool grpColsMayBeNull;
  bool grpByUid;
  bool globalGrp;
  bool batchFetch;
  SNodeList* pGroupCols;
} SGroupCacheLogicNode;

typedef struct SDynQueryCtrlStbJoin {
  bool batchFetch;
  SNodeList* pVgList;
  SNodeList* pUidList;
  bool srcScan[2];
} SDynQueryCtrlStbJoin;

typedef struct SDynQueryCtrlLogicNode {
  SLogicNode node;
  EDynQueryType qType;
  SDynQueryCtrlStbJoin stbJoin;
} SDynQueryCtrlLogicNode;

typedef enum EModifyTableType { MODIFY_TABLE_TYPE_INSERT = 1, MODIFY_TABLE_TYPE_DELETE } EModifyTableType;

typedef struct SVnodeModifyLogicNode {

@@ -313,6 +343,7 @@ typedef struct SDataBlockDescNode {

typedef struct SPhysiNode {
  ENodeType           type;
  bool                dynamicOp;
  EOrder              inputTsOrder;
  EOrder              outputTsOrder;
  SDataBlockDescNode* pOutputDataBlockDesc;
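SPhysiNode records both the timestamp order it consumes (inputTsOrder) and the one it produces (outputTsOrder). A small hedged helper grounded only in those two fields, as an illustration of why both are tracked:

static bool preservesTsOrder(const SPhysiNode* pNode) {
  // True when the operator emits rows in the same timestamp order it reads them.
  return pNode->inputTsOrder == pNode->outputTsOrder;
}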
@@ -414,12 +445,50 @@ typedef struct SInterpFuncPhysiNode {
typedef struct SSortMergeJoinPhysiNode {
  SPhysiNode node;
  EJoinType  joinType;
  SNode*     pMergeCondition;
  SNode*     pOnConditions;
  SNode*     pPrimKeyCond;
  SNode*     pColEqCond;
  SNode*     pOtherOnCond;
  SNodeList* pTargets;
  SNode*     pColEqualOnConditions;
} SSortMergeJoinPhysiNode;

typedef struct SHashJoinPhysiNode {
  SPhysiNode node;
  EJoinType  joinType;
  SNodeList* pOnLeft;
  SNodeList* pOnRight;
  SNode*     pFilterConditions;
  SNodeList* pTargets;
  SQueryStat inputStat[2];

  SNode*     pPrimKeyCond;
  SNode*     pColEqCond;
  SNode*     pTagEqCond;
} SHashJoinPhysiNode;
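SHashJoinPhysiNode carries the join keys as parallel lists (pOnLeft/pOnRight) plus per-child input statistics, which is exactly the shape a build/probe hash join needs; inputStat[2] could tell the executor which side is cheaper to build on. A self-contained, hedged sketch of that pattern with plain integer keys standing in for the real key expressions:

#include <stdint.h>
#include <stdio.h>

#define SLOTS 1024
typedef struct Bucket { int64_t key; int32_t row; struct Bucket* next; } Bucket;
static Bucket* table[SLOTS];
static Bucket  pool[4096];
static int32_t used;

static void buildInsert(int64_t key, int32_t row) {  // build phase: one input
  Bucket* b = &pool[used++];
  b->key = key; b->row = row;
  b->next = table[(uint64_t)key % SLOTS];
  table[(uint64_t)key % SLOTS] = b;
}

static void probe(int64_t key, int32_t rightRow) {   // probe phase: the other input
  for (Bucket* b = table[(uint64_t)key % SLOTS]; b != NULL; b = b->next) {
    if (b->key == key) printf("match: left %d, right %d\n", b->row, rightRow);
  }
}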
typedef struct SGroupCachePhysiNode {
  SPhysiNode node;
  bool       grpColsMayBeNull;
  bool       grpByUid;
  bool       globalGrp;
  bool       batchFetch;
  SNodeList* pGroupCols;
} SGroupCachePhysiNode;

typedef struct SStbJoinDynCtrlBasic {
  bool    batchFetch;
  int32_t vgSlot[2];
  int32_t uidSlot[2];
  bool    srcScan[2];
} SStbJoinDynCtrlBasic;

typedef struct SDynQueryCtrlPhysiNode {
  SPhysiNode    node;
  EDynQueryType qType;
  union {
    SStbJoinDynCtrlBasic stbJoin;
  };
} SDynQueryCtrlPhysiNode;
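SDynQueryCtrlPhysiNode is a tagged union: qType identifies the live member of the anonymous union, and DYN_QTYPE_STB_HASH (the only EDynQueryType value in this diff) selects stbJoin. A hedged dispatch sketch; the reading of vgSlot/uidSlot in the comment is an assumption:

static void dispatchDynQueryCtrl(const SDynQueryCtrlPhysiNode* pNode) {
  switch (pNode->qType) {
    case DYN_QTYPE_STB_HASH:
      // Only this arm may read the union: stbJoin is the live member here.
      // vgSlot/uidSlot presumably give, per child, the slots of the vgroup
      // id and uid columns that drive the dynamic sub-queries.
      break;
    default:
      break;  // unknown qType: do not touch the union
  }
}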

typedef struct SAggPhysiNode {
  SPhysiNode node;
  SNodeList* pExprs;  // these are expression list of group_by_clause and parameter expression of aggregate function
@@ -116,6 +116,17 @@ typedef struct SLeftValueNode {
  ENodeType type;
} SLeftValueNode;

typedef enum EHintOption {
  HINT_NO_BATCH_SCAN = 1,
  HINT_BATCH_SCAN,
} EHintOption;

typedef struct SHintNode {
  ENodeType   type;
  EHintOption option;
  void*       value;
} SHintNode;
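SHintNode is the parsed form of an optimizer hint, and EHintOption currently covers enabling or disabling batch scan. A hedged construction sketch; QUERY_NODE_HINT is an assumed ENodeType tag that this diff does not show:

static SHintNode makeBatchScanHint(bool enable) {
  SHintNode hint;
  hint.type   = QUERY_NODE_HINT;  // assumed tag, not shown in this diff
  hint.option = enable ? HINT_BATCH_SCAN : HINT_NO_BATCH_SCAN;
  hint.value  = NULL;             // batch-scan hints carry no parameter
  return hint;
}

Downstream, the pHint list added to SSelectStmt later in this diff would presumably hold such nodes.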

typedef struct SOperatorNode {
  SExprNode     node;  // QUERY_NODE_OPERATOR
  EOperatorType opType;
@@ -169,11 +180,27 @@ typedef struct STempTableNode {
  SNode* pSubquery;
} STempTableNode;

typedef enum EJoinType { JOIN_TYPE_INNER = 1 } EJoinType;
typedef enum EJoinType {
  JOIN_TYPE_INNER = 1,
  JOIN_TYPE_LEFT,
  JOIN_TYPE_RIGHT,
} EJoinType;

typedef enum EJoinAlgorithm {
  JOIN_ALGO_UNKNOWN = 0,
  JOIN_ALGO_MERGE,
  JOIN_ALGO_HASH,
} EJoinAlgorithm;

typedef enum EDynQueryType {
  DYN_QTYPE_STB_HASH = 1,
} EDynQueryType;

typedef struct SJoinTableNode {
  STableNode table;  // QUERY_NODE_JOIN_TABLE
  EJoinType  joinType;
  bool       hasSubQuery;
  bool       isLowLevelJoin;
  SNode*     pLeft;
  SNode*     pRight;
  SNode*     pOnCond;
@@ -289,6 +316,7 @@ typedef struct SSelectStmt {
  SLimitNode* pLimit;
  SLimitNode* pSlimit;
  STimeWindow timeRange;
  SNodeList*  pHint;
  char        stmtName[TSDB_TABLE_NAME_LEN];
  uint8_t     precision;
  int32_t     selectFuncNum;
@@ -470,7 +498,7 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols);

typedef bool (*FFuncClassifier)(int32_t funcId);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs);

int32_t nodesCollectSpecialNodes(SSelectStmt* pSelect, ESqlClause clause, ENodeType type, SNodeList** pNodes);
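nodesCollectFuncs gains a tableAlias argument, which by analogy with nodesCollectColumns presumably restricts collection to functions referencing that table. A hedged call sketch; the clause constant and classifier are assumptions, as is NULL meaning no alias filter:

SNodeList* pFuncs = NULL;
int32_t    code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, NULL /* no alias filter */,
                                    isAggFunc /* any FFuncClassifier */, &pFuncs);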
@@ -114,6 +114,7 @@ int32_t smlBuildRow(STableDataCxt* pTableCxt);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);

void clearColValArraySml(SArray* pCols);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
                    STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
                    char* msgBuf, int32_t msgBufLen);
@@ -90,11 +90,6 @@ typedef struct SExecResult {
  void* res;
} SExecResult;

typedef struct STbVerInfo {
  char    tbFName[TSDB_TABLE_FNAME_LEN];
  int32_t sversion;
  int32_t tversion;
} STbVerInfo;

#pragma pack(push, 1)
typedef struct SCTableMeta {
@@ -212,6 +207,11 @@ typedef struct SQueryNodeStat {
  int32_t tableNum;  // vg table number, unit is TSDB_TABLE_NUM_UNIT
} SQueryNodeStat;

typedef struct SQueryStat {
  int64_t inputRowNum;
  int32_t inputRowSize;
} SQueryStat;

int32_t initTaskQueue();
int32_t cleanupTaskQueue();
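SQueryStat is the per-input statistics record that SHashJoinPhysiNode.inputStat[2] embeds earlier in this diff. A hedged sketch of the bookkeeping the two fields suggest; the update site is illustrative:

static void updateInputStat(SQueryStat* pStat, int64_t rows, int32_t rowSize) {
  pStat->inputRowNum += rows;     // cumulative rows consumed from this child
  pStat->inputRowSize = rowSize;  // observed row width in bytes
}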
@@ -45,6 +45,7 @@ typedef struct {
  uint64_t cqueryProcessed;
  uint64_t fetchProcessed;
  uint64_t dropProcessed;
  uint64_t notifyProcessed;
  uint64_t hbProcessed;
  uint64_t deleteProcessed;
@@ -90,6 +91,8 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in

int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);

int32_t qWorkerProcessNotifyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);

int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);

int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);
@@ -95,6 +95,8 @@ int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

/* Aggregation functions */
int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
Some files were not shown because too many files have changed in this diff.